drivers/gpu/drm/i915/intel_ringbuffer.h

#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64

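/*
 * Illustrative sketch only: one way the reserve above can enter the
 * free-space arithmetic. The real helper is __intel_ring_space(),
 * declared near the bottom of this file and defined in
 * intel_ringbuffer.c; example_ring_space() here is hypothetical and
 * merely shows the intent.
 */
static inline int example_ring_space(int head, int tail, int size)
{
	/* Keep I915_RING_FREE_SPACE bytes in reserve so that, per the
	 * BSpec note above, head never catches up to tail within the
	 * same cacheline. */
	int space = head - (tail + I915_RING_FREE_SPACE);

	if (space < 0)
		space += size;
	return space;
}
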
struct intel_hw_status_page {
	u32 *page_addr;
	unsigned int gfx_addr;
	struct drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
#define i915_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
	((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
	(i915_semaphore_seqno_size * (to)))

#define GEN8_WAIT_OFFSET(__ring, from) \
	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
	((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
	(i915_semaphore_seqno_size * (__ring)->id))

#define GEN8_RING_SEMAPHORE_INIT do { \
	if (!dev_priv->semaphore_obj) { \
		break; \
	} \
	ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
	ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
	ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
	ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
	ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
	ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
	} while (0)

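/*
 * Worked example, for illustration only; gen8_signal_slot() is a
 * hypothetical helper, not part of the driver. With five rings
 * (I915_NUM_RINGS, defined below) and 8-byte slots, the mailbox that
 * VCS (id 1) writes in order to signal RCS (id 0) sits at
 * 1 * 5 * 8 + 8 * 0 = 0x28 bytes into the semaphore page, matching
 * GEN8_SIGNAL_OFFSET() above and the signal/wait tables documented
 * inside struct intel_engine_cs below. GEN8_WAIT_OFFSET() is the
 * transpose: RCS waiting on VCS polls that same 0x28 slot.
 */
static inline u32 gen8_signal_slot(u32 signaller_id, u32 target_id)
{
	return signaller_id * 5 /* I915_NUM_RINGS */ * i915_semaphore_seqno_size +
	       i915_semaphore_seqno_size * target_id;
}
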
enum intel_ring_hangcheck_action {
	HANGCHECK_IDLE = 0,
	HANGCHECK_WAIT,
	HANGCHECK_ACTIVE,
	HANGCHECK_ACTIVE_LOOP,
	HANGCHECK_KICK,
	HANGCHECK_HUNG,
};

#define HANGCHECK_SCORE_RING_HUNG 31

struct intel_ring_hangcheck {
	u64 acthd;
	u64 max_acthd;
	u32 seqno;
	int score;
	enum intel_ring_hangcheck_action action;
	int deadlock;
};

struct intel_ringbuffer {
	struct drm_i915_gem_object *obj;
	void __iomem *virtual_start;

	struct intel_engine_cs *ring;

	u32 head;
	u32 tail;
	int space;
	int size;
	int effective_size;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32 last_retired_head;
};

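/*
 * Illustrative sketch of how the last_retired_head sentinel is meant
 * to be consumed; the real logic lives in intel_ring_update_space()
 * (declared below), and example_consume_retired_head() here is
 * hypothetical.
 */
static inline void example_consume_retired_head(struct intel_ringbuffer *ringbuf)
{
	if (ringbuf->last_retired_head != (u32)-1) {
		/* The GPU has finished everything up to this point, so
		 * the software copy of head may jump forward to it. */
		ringbuf->head = ringbuf->last_retired_head;
		/* Re-arm the sentinel so new retirements are visible. */
		ringbuf->last_retired_head = (u32)-1;
	}
}
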
struct intel_context;

struct intel_engine_cs {
	const char *name;
	enum intel_ring_id {
		RCS = 0x0,
		VCS,
		BCS,
		VECS,
		VCS2
	} id;
#define I915_NUM_RINGS 5
#define LAST_USER_RING (VECS + 1)
	u32 mmio_base;
	struct drm_device *dev;
	struct intel_ringbuffer *buffer;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;

	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	struct drm_i915_gem_request *trace_irq_req;
	bool __must_check (*irq_get)(struct intel_engine_cs *ring);
	void (*irq_put)(struct intel_engine_cs *ring);

	int (*init_hw)(struct intel_engine_cs *ring);

	int (*init_context)(struct intel_engine_cs *ring,
			    struct intel_context *ctx);

	void (*write_tail)(struct intel_engine_cs *ring,
			   u32 value);
	int __must_check (*flush)(struct intel_engine_cs *ring,
				  u32 invalidate_domains,
				  u32 flush_domains);
	int (*add_request)(struct intel_engine_cs *ring);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	u32 (*get_seqno)(struct intel_engine_cs *ring,
			 bool lazy_coherency);
	void (*set_seqno)(struct intel_engine_cs *ring,
			  u32 seqno);
	int (*dispatch_execbuffer)(struct intel_engine_cs *ring,
				   u64 offset, u32 length,
				   unsigned dispatch_flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
	void (*cleanup)(struct intel_engine_cs *ring);

	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to	signal to	signal to	signal to
	 *	    RCS		   VCS		   BCS		   VECS		   VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP (0x90)  | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP (0xc0)  |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *	 sync from	sync from	sync from	sync from	sync from
	 *	    RCS		   VCS		   BCS		   VECS		   VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP (0x90)  | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP (0xc0)  |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
	struct {
		u32 sync_seqno[I915_NUM_RINGS-1];

		union {
			struct {
				/* our mbox written by others */
				u32 wait[I915_NUM_RINGS];
				/* mboxes this ring signals to */
				u32 signal[I915_NUM_RINGS];
			} mbox;
			u64 signal_ggtt[I915_NUM_RINGS];
		};

		/* AKA wait() */
		int (*sync_to)(struct intel_engine_cs *ring,
			       struct intel_engine_cs *to,
			       u32 seqno);
		int (*signal)(struct intel_engine_cs *signaller,
			      /* num_dwords needed by caller */
			      unsigned int num_dwords);
	} semaphore;

	/* Execlists */
	spinlock_t execlist_lock;
	struct list_head execlist_queue;
	struct list_head execlist_retired_req_list;
	u8 next_context_status_buffer;
	u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
	int (*emit_request)(struct intel_ringbuffer *ringbuf,
			    struct drm_i915_gem_request *request);
	int (*emit_flush)(struct intel_ringbuffer *ringbuf,
			  struct intel_context *ctx,
			  u32 invalidate_domains,
			  u32 flush_domains);
	int (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
			     struct intel_context *ctx,
			     u64 offset, unsigned dispatch_flags);

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_read_req
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	struct drm_i915_gem_request *outstanding_lazy_request;
	bool gpu_caches_dirty;

	wait_queue_head_t irq_queue;

	struct intel_context *default_context;
	struct intel_context *last_context;

	struct intel_ring_hangcheck hangcheck;

	struct {
		struct drm_i915_gem_object *obj;
		u32 gtt_offset;
		volatile u32 *cpu_page;
	} scratch;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this ring.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const u32 *reg_table;
	int reg_count;

	/*
	 * Table of registers allowed in commands that read/write registers, but
	 * only from the DRM master.
	 */
	const u32 *master_reg_table;
	int master_reg_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the ring's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-ring length field
	 * encoding for the command (i.e. certain opcode ranges use certain bits
	 * to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};

bool intel_ring_initialized(struct intel_engine_cs *ring);

static inline unsigned
intel_ring_flag(struct intel_engine_cs *ring)
{
	return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_engine_cs *ring,
		      struct intel_engine_cs *other)
{
	int idx;

	/*
	 * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
	 * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
	 * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
	 * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
	 * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}
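
/*
 * Worked example (illustrative): for ring == VCS (id 1) and
 * other == RCS (id 0), idx = (0 - 1) - 1 = -2, which wraps to
 * -2 + I915_NUM_RINGS = 3, matching the "vcs -> ... 3 = rcs" line in
 * the comment above. The pointer subtraction yields the id difference
 * because the engines live in a single array indexed by ring id.
 */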

static inline u32
intel_read_status_page(struct intel_engine_cs *ring,
		       int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	barrier();
	return ring->status_page.page_addr[reg];
}

static inline void
intel_write_status_page(struct intel_engine_cs *ring,
			int reg, u32 value)
{
	ring->status_page.page_addr[reg] = value;
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

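/*
 * Illustrative sketch, not the driver's actual backend:
 * example_get_seqno() is hypothetical. Because an add_request()
 * implementation conventionally stores the request's seqno at
 * I915_GEM_HWS_INDEX in the status page, a simple get_seqno() can be
 * nothing more than a status-page read.
 */
static inline u32 example_get_seqno(struct intel_engine_cs *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
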
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
				     struct intel_ringbuffer *ringbuf);
void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
int intel_alloc_ringbuffer_obj(struct drm_device *dev,
			       struct intel_ringbuffer *ringbuf);

void intel_stop_ring_buffer(struct intel_engine_cs *ring);
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);

int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);

int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
static inline void intel_ring_emit(struct intel_engine_cs *ring,
				   u32 data)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
	ringbuf->tail += 4;
}
static inline void intel_ring_advance(struct intel_engine_cs *ring)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	ringbuf->tail &= ringbuf->size - 1;
}
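
/*
 * Typical emission sequence, as an illustrative sketch:
 * example_emit_noops() is hypothetical, and MI_NOOP comes from
 * i915_reg.h, which real users of this header pick up via i915_drv.h.
 * The pattern is: reserve space with intel_ring_begin(), write exactly
 * that many dwords with intel_ring_emit(), then wrap the tail with
 * intel_ring_advance().
 */
static inline int example_emit_noops(struct intel_engine_cs *ring)
{
	int ret;

	/* Make sure two dwords of ring space are available; this may
	 * have to wait for older requests to retire. */
	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}
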
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_ring_stopped(struct intel_engine_cs *ring);
void __intel_ring_advance(struct intel_engine_cs *ring);

int __must_check intel_ring_idle(struct intel_engine_cs *ring);
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);

void intel_fini_pipe_control(struct intel_engine_cs *ring);
int intel_init_pipe_control(struct intel_engine_cs *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u64 intel_ring_get_active_head(struct intel_engine_cs *ring);

int init_workarounds_ring(struct intel_engine_cs *ring);

static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
	return ringbuf->tail;
}

static inline struct drm_i915_gem_request *
intel_ring_get_request(struct intel_engine_cs *ring)
{
	BUG_ON(ring->outstanding_lazy_request == NULL);
	return ring->outstanding_lazy_request;
}

#endif /* _INTEL_RINGBUFFER_H_ */