#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes; using 64 is overkill
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64

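/*
 * Editor's sketch, not part of the original header: one plausible shape of
 * the rule above in code. __intel_ring_space(), declared further down, is
 * expected to do effectively this - always keep I915_RING_FREE_SPACE bytes
 * between tail and head so the two pointers never land in the same
 * cacheline. The function name is made up for illustration.
 */
static inline int example_ring_space(int head, int tail, int size)
{
        /* Free bytes between tail and head, minus the mandatory gap. */
        int space = head - (tail + I915_RING_FREE_SPACE);

        if (space < 0)
                space += size;
        return space;
}
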
struct intel_hw_status_page {
        u32 *page_addr;
        unsigned int gfx_addr;
        struct drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW
 * to do the writes, and that must have qw aligned offsets, simply pretend
 * it's 8 bytes.
 */
#define i915_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
        (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
        ((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
        (i915_semaphore_seqno_size * (to)))

#define GEN8_WAIT_OFFSET(__ring, from) \
        (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
        ((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
        (i915_semaphore_seqno_size * (__ring)->id))

#define GEN8_RING_SEMAPHORE_INIT do { \
        if (!dev_priv->semaphore_obj) { \
                break; \
        } \
        ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
        ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
        ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
        ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
        ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
        ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
        } while (0)

enum intel_ring_hangcheck_action {
        HANGCHECK_IDLE = 0,
        HANGCHECK_WAIT,
        HANGCHECK_ACTIVE,
        HANGCHECK_ACTIVE_LOOP,
        HANGCHECK_KICK,
        HANGCHECK_HUNG,
};

#define HANGCHECK_SCORE_RING_HUNG 31

struct intel_ring_hangcheck {
        u64 acthd;
        u64 max_acthd;
        u32 seqno;
        int score;
        enum intel_ring_hangcheck_action action;
        int deadlock;
        u32 instdone[I915_NUM_INSTDONE_REG];
};

struct intel_ringbuffer {
        struct drm_i915_gem_object *obj;
        void __iomem *virtual_start;
        struct i915_vma *vma;

        struct intel_engine_cs *ring;
        struct list_head link;

        u32 head;
        u32 tail;
        int space;
        int size;
        int effective_size;
        int reserved_size;
        int reserved_tail;
        bool reserved_in_use;

        /** We track the position of the requests in the ring buffer, and
         * when each is retired we increment last_retired_head as the GPU
         * must have finished processing the request and so we know we
         * can advance the ringbuffer up to that position.
         *
         * last_retired_head is set to -1 after the value is consumed so
         * we can detect new retirements.
         */
        u32 last_retired_head;
};

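/*
 * Editor's sketch, not part of the original header: how last_retired_head
 * is meant to be consumed (cf. the comment above). intel_ring_update_space(),
 * declared further down, is expected to do effectively this before it
 * recomputes the free space. The function name is made up for illustration.
 */
static inline void example_consume_retired_head(struct intel_ringbuffer *ringbuf)
{
        if (ringbuf->last_retired_head != (u32)-1) {
                /* The GPU is done up to here; reclaim the space. */
                ringbuf->head = ringbuf->last_retired_head;
                ringbuf->last_retired_head = -1;
        }
}
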
struct intel_context;
struct drm_i915_reg_descriptor;

/*
 * We use a single page to load ctx workarounds, so all of these
 * values are referred to in terms of dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position, also helpful in case
 *   we want to have multiple batches at different offsets based on
 *   some criteria. It is not a requirement at the moment but provides
 *   an option for future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
        struct i915_wa_ctx_bb {
                u32 offset;
                u32 size;
        } indirect_ctx, per_ctx;
        struct drm_i915_gem_object *obj;
};

struct intel_engine_cs {
        const char *name;
        enum intel_ring_id {
                RCS = 0,
                BCS,
                VCS,
                VCS2,   /* Keep instances of the same type engine together. */
                VECS
        } id;
#define I915_NUM_RINGS 5
#define _VCS(n) (VCS + (n))
        unsigned int exec_id;
        u32 mmio_base;
        struct drm_device *dev;
        struct intel_ringbuffer *buffer;
        struct list_head buffers;

        /*
         * A pool of objects to use as shadow copies of client batch buffers
         * when the command parser is enabled. Prevents the client from
         * modifying the batch contents after software parsing.
         */
        struct i915_gem_batch_pool batch_pool;

        struct intel_hw_status_page status_page;
        struct i915_ctx_workarounds wa_ctx;

        unsigned irq_refcount; /* protected by dev_priv->irq_lock */
        u32 irq_enable_mask; /* bitmask to enable ring interrupt */
        struct drm_i915_gem_request *trace_irq_req;
        bool __must_check (*irq_get)(struct intel_engine_cs *ring);
        void (*irq_put)(struct intel_engine_cs *ring);

        int (*init_hw)(struct intel_engine_cs *ring);

        int (*init_context)(struct drm_i915_gem_request *req);

        void (*write_tail)(struct intel_engine_cs *ring,
                           u32 value);
        int __must_check (*flush)(struct drm_i915_gem_request *req,
                                  u32 invalidate_domains,
                                  u32 flush_domains);
        int (*add_request)(struct drm_i915_gem_request *req);
        /* Some chipsets are not quite as coherent as advertised and need
         * an expensive kick to force a true read of the up-to-date seqno.
         * However, the up-to-date seqno is not always required and the last
         * seen value is good enough. Note that the seqno will always be
         * monotonic, even if not coherent.
         */
        u32 (*get_seqno)(struct intel_engine_cs *ring,
                         bool lazy_coherency);
        void (*set_seqno)(struct intel_engine_cs *ring,
                          u32 seqno);
        int (*dispatch_execbuffer)(struct drm_i915_gem_request *req,
                                   u64 offset, u32 length,
                                   unsigned dispatch_flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
#define I915_DISPATCH_RS     0x4
        void (*cleanup)(struct intel_engine_cs *ring);

        /* GEN8 signal/wait table - never trust comments!
         * Offsets below follow the enum intel_ring_id ordering above
         * (RCS, BCS, VCS, VCS2, VECS); see the worked example after
         * this struct.
         *
         *        signal to    signal to    signal to    signal to    signal to
         *          RCS           BCS          VCS          VCS2         VECS
         *      --------------------------------------------------------------------
         *  RCS | NOP (0x00) | BCS (0x08) | VCS (0x10) | VCS2 (0x18) | VECS (0x20) |
         *      |-------------------------------------------------------------------
         *  BCS | RCS (0x28) | NOP (0x30) | VCS (0x38) | VCS2 (0x40) | VECS (0x48) |
         *      |-------------------------------------------------------------------
         *  VCS | RCS (0x50) | BCS (0x58) | NOP (0x60) | VCS2 (0x68) | VECS (0x70) |
         *      |-------------------------------------------------------------------
         * VCS2 | RCS (0x78) | BCS (0x80) | VCS (0x88) | NOP (0x90)  | VECS (0x98) |
         *      |-------------------------------------------------------------------
         * VECS | RCS (0xa0) | BCS (0xa8) | VCS (0xb0) | VCS2 (0xb8) | NOP (0xc0)  |
         *      |-------------------------------------------------------------------
         *
         * Generalization:
         *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
         *  i.e. transpose of g(x, y)
         *
         *        sync from    sync from    sync from    sync from    sync from
         *          RCS           BCS          VCS          VCS2         VECS
         *      --------------------------------------------------------------------
         *  RCS | NOP (0x00) | BCS (0x28) | VCS (0x50) | VCS2 (0x78) | VECS (0xa0) |
         *      |-------------------------------------------------------------------
         *  BCS | RCS (0x08) | NOP (0x30) | VCS (0x58) | VCS2 (0x80) | VECS (0xa8) |
         *      |-------------------------------------------------------------------
         *  VCS | RCS (0x10) | BCS (0x38) | NOP (0x60) | VCS2 (0x88) | VECS (0xb0) |
         *      |-------------------------------------------------------------------
         * VCS2 | RCS (0x18) | BCS (0x40) | VCS (0x68) | NOP (0x90)  | VECS (0xb8) |
         *      |-------------------------------------------------------------------
         * VECS | RCS (0x20) | BCS (0x48) | VCS (0x70) | VCS2 (0x98) | NOP (0xc0)  |
         *      |-------------------------------------------------------------------
         *
         * Generalization:
         *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
         *  i.e. transpose of f(x, y)
         */
        struct {
                u32 sync_seqno[I915_NUM_RINGS-1];

                union {
                        struct {
                                /* our mbox written by others */
                                u32 wait[I915_NUM_RINGS];
                                /* mboxes this ring signals to */
                                i915_reg_t signal[I915_NUM_RINGS];
                        } mbox;
                        u64 signal_ggtt[I915_NUM_RINGS];
                };

                /* AKA wait() */
                int (*sync_to)(struct drm_i915_gem_request *to_req,
                               struct intel_engine_cs *from,
                               u32 seqno);
                int (*signal)(struct drm_i915_gem_request *signaller_req,
                              /* num_dwords needed by caller */
                              unsigned int num_dwords);
        } semaphore;

        /* Execlists */
        spinlock_t execlist_lock;
        struct list_head execlist_queue;
        struct list_head execlist_retired_req_list;
        u8 next_context_status_buffer;
        bool disable_lite_restore_wa;
        u32 ctx_desc_template;
        u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
        int (*emit_request)(struct drm_i915_gem_request *request);
        int (*emit_flush)(struct drm_i915_gem_request *request,
                          u32 invalidate_domains,
                          u32 flush_domains);
        int (*emit_bb_start)(struct drm_i915_gem_request *req,
                             u64 offset, unsigned dispatch_flags);

        /**
         * List of objects currently involved in rendering from the
         * ringbuffer.
         *
         * Includes buffers that have merely had the contents of their GPU
         * caches flushed, not only those used to draw primitives.
         * last_read_req represents when the rendering involved will be
         * completed.
         *
         * A reference is held on the buffer while on this list.
         */
        struct list_head active_list;

        /**
         * List of breadcrumbs associated with GPU requests currently
         * outstanding.
         */
        struct list_head request_list;

        /**
         * Seqno of request most recently submitted to request_list.
         * Used exclusively by hang checker to avoid grabbing lock while
         * inspecting request list.
         */
        u32 last_submitted_seqno;

        bool gpu_caches_dirty;

        wait_queue_head_t irq_queue;

        struct intel_context *last_context;

        struct intel_ring_hangcheck hangcheck;

        struct {
                struct drm_i915_gem_object *obj;
                u32 gtt_offset;
                volatile u32 *cpu_page;
        } scratch;

        bool needs_cmd_parser;

        /*
         * Table of commands the command parser needs to know about
         * for this ring.
         */
        DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

        /*
         * Table of registers allowed in commands that read/write registers.
         */
        const struct drm_i915_reg_descriptor *reg_table;
        int reg_count;

        /*
         * Table of registers allowed in commands that read/write registers,
         * but only from the DRM master.
         */
        const struct drm_i915_reg_descriptor *master_reg_table;
        int master_reg_count;

        /*
         * Returns the bitmask for the length field of the specified command.
         * Returns 0 for an unrecognized/invalid command.
         *
         * If the command parser finds an entry for a command in the ring's
         * cmd_tables, it gets the command's length based on the table entry.
         * If not, it calls this function to determine the per-ring length
         * field encoding for the command (i.e. certain opcode ranges use
         * certain bits to encode the command length in the header).
         */
        u32 (*get_cmd_length_mask)(u32 cmd_header);
};
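
/*
 * Editor's sketch, not part of the original header: a worked instance of
 * the f()/g() generalization documented on struct intel_engine_cs above.
 * The function name is made up for illustration; the real offsets are
 * produced by GEN8_SIGNAL_OFFSET()/GEN8_WAIT_OFFSET(). For example,
 * gen8_example_signal_offset(VCS, BCS) == (2 * 5 * 8) + (8 * 1) == 0x58,
 * matching the VCS row / BCS column of the signal table.
 */
static inline u32 gen8_example_signal_offset(enum intel_ring_id x,
                                             enum intel_ring_id y)
{
        /* f(x, y): the slot ring x writes when signalling ring y */
        return (x * I915_NUM_RINGS * i915_semaphore_seqno_size) +
               (i915_semaphore_seqno_size * y);
}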

static inline bool
intel_ring_initialized(struct intel_engine_cs *ring)
{
        return ring->dev != NULL;
}

static inline unsigned
intel_ring_flag(struct intel_engine_cs *ring)
{
        return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_engine_cs *ring,
                      struct intel_engine_cs *other)
{
        int idx;

        /*
         * rcs -> 0 = bcs, 1 = vcs, 2 = vcs2, 3 = vecs;
         * bcs -> 0 = vcs, 1 = vcs2, 2 = vecs, 3 = rcs;
         * vcs -> 0 = vcs2, 1 = vecs, 2 = rcs, 3 = bcs;
         * vcs2 -> 0 = vecs, 1 = rcs, 2 = bcs, 3 = vcs;
         * vecs -> 0 = rcs, 1 = bcs, 2 = vcs, 3 = vcs2;
         */

        idx = (other - ring) - 1;
        if (idx < 0)
                idx += I915_NUM_RINGS;

        return idx;
}

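/*
 * Editor's sketch, not part of the original header: how the index is
 * typically consumed - it picks the semaphore.sync_seqno[] slot that
 * records how far synchronisation between @ring and @other has already
 * progressed, letting callers skip redundant semaphore waits. The
 * function name is made up for illustration.
 */
static inline u32
example_last_synced_seqno(struct intel_engine_cs *ring,
                          struct intel_engine_cs *other)
{
        return ring->semaphore.sync_seqno[intel_ring_sync_index(ring, other)];
}
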
static inline void
intel_flush_status_page(struct intel_engine_cs *ring, int reg)
{
        drm_clflush_virt_range(&ring->status_page.page_addr[reg],
                               sizeof(uint32_t));
}

static inline u32
intel_read_status_page(struct intel_engine_cs *ring,
                       int reg)
{
        /* Ensure that the compiler doesn't optimize away the load. */
        barrier();
        return ring->status_page.page_addr[reg];
}

static inline void
intel_write_status_page(struct intel_engine_cs *ring,
                        int reg, u32 value)
{
        ring->status_page.page_addr[reg] = value;
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX 0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX 0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

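/*
 * Editor's sketch, not part of the original header: the request breadcrumb
 * is written to the I915_GEM_HWS_INDEX dword, so a (lazily coherent) read
 * of the last completed seqno amounts to this. The function name is made
 * up for illustration; the real hook is ring->get_seqno().
 */
static inline u32 example_read_hws_seqno(struct intel_engine_cs *ring)
{
        return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
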
struct intel_ringbuffer *
intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
                                     struct intel_ringbuffer *ringbuf);
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
void intel_ringbuffer_free(struct intel_ringbuffer *ring);

void intel_stop_ring_buffer(struct intel_engine_cs *ring);
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);

int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);

int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);

static inline void intel_ring_emit(struct intel_engine_cs *ring,
                                   u32 data)
{
        struct intel_ringbuffer *ringbuf = ring->buffer;

        iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
        ringbuf->tail += 4;
}

static inline void intel_ring_emit_reg(struct intel_engine_cs *ring,
                                       i915_reg_t reg)
{
        intel_ring_emit(ring, i915_mmio_reg_offset(reg));
}

static inline void intel_ring_advance(struct intel_engine_cs *ring)
{
        struct intel_ringbuffer *ringbuf = ring->buffer;

        ringbuf->tail &= ringbuf->size - 1;
}
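
/*
 * Editor's sketch, not part of the original header: the canonical pattern
 * for the helpers above - reserve ring space, write dwords, then advance.
 * The MI_NOOP payload and the function name are made up for illustration;
 * MI_NOOP and the drm_i915_gem_request definition (with its ring
 * back-pointer) come from i915_reg.h/i915_drv.h, which users of this
 * header already pull in.
 */
static inline int example_emit_noops(struct drm_i915_gem_request *req)
{
        struct intel_engine_cs *ring = req->ring;
        int ret;

        ret = intel_ring_begin(req, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_NOOP);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        return 0;
}
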
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_ring_stopped(struct intel_engine_cs *ring);

int __must_check intel_ring_idle(struct intel_engine_cs *ring);
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);

void intel_fini_pipe_control(struct intel_engine_cs *ring);
int intel_init_pipe_control(struct intel_engine_cs *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u64 intel_ring_get_active_head(struct intel_engine_cs *ring);

int init_workarounds_ring(struct intel_engine_cs *ring);

static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
        return ringbuf->tail;
}

/*
 * Arbitrary size for the largest possible 'add request' sequence. The code
 * paths are complex and variable. Empirical measurement shows that the worst
 * case is ILK at 136 words. Reserving too much is better than reserving too
 * little, as it leaves headroom for corner cases that might have been missed.
 * So the figure has been rounded up to 160 words.
 */
#define MIN_SPACE_FOR_ADD_REQUEST 160

/*
 * Reserve space in the ring to guarantee that the i915_add_request() call
 * will always have sufficient room to do its stuff. The request creation
 * code calls this automatically.
 */
void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
/* Cancel the reservation, e.g. because the request is being discarded. */
void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
/* Use the reserved space - for use by i915_add_request() only. */
void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
/* Finish with the reserved space - for use by i915_add_request() only. */
void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
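
/*
 * Editor's sketch, not part of the original header: the intended pairing of
 * the reservation calls above. Real callers never do this in one function -
 * request creation reserves, i915_add_request() uses and ends - but the
 * ordering is easiest to see side by side. The function name is made up
 * for illustration.
 */
static inline void example_reservation_lifecycle(struct intel_ringbuffer *ringbuf)
{
        intel_ring_reserved_space_reserve(ringbuf, MIN_SPACE_FOR_ADD_REQUEST);

        /* ... request construction emits the workload here ... */

        intel_ring_reserved_space_use(ringbuf); /* i915_add_request() */
        /* ... the request breadcrumb is emitted from the reserve ... */
        intel_ring_reserved_space_end(ringbuf);

        /* or, if the request is abandoned before submission: */
        /* intel_ring_reserved_space_cancel(ringbuf); */
}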

/* Legacy ringbuffer specific portion of reservation code: */
int intel_ring_reserve_space(struct drm_i915_gem_request *request);

#endif /* _INTEL_RINGBUFFER_H_ */