#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"

#define I915_CMD_HASH_ORDER 9
/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some inclination as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64
struct intel_hw_status_page {
	u32		*page_addr;
	unsigned int	gfx_addr;
	struct		drm_i915_gem_object *obj;
};
#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qword aligned offsets, simply pretend it's 8b.
 */
#define i915_semaphore_seqno_size sizeof(uint64_t)

#define GEN8_SIGNAL_OFFSET(__ring, to) \
	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
	 ((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
	 (i915_semaphore_seqno_size * (to)))

#define GEN8_WAIT_OFFSET(__ring, from) \
	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
	 ((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
	 (i915_semaphore_seqno_size * (__ring)->id))
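
/*
 * Worked example (illustrative, not from the original header): with
 * I915_NUM_RINGS == 5 and i915_semaphore_seqno_size == 8, the slot the
 * VCS ring (id 1) uses to signal RCS (id 0) is
 * GEN8_SIGNAL_OFFSET(vcs, RCS) = base + (1 * 5 * 8) + (8 * 0) = base + 0x28,
 * and GEN8_WAIT_OFFSET(rcs, VCS) resolves to the same base + 0x28, so the
 * waiter polls exactly the qword the signaller writes. The full layout is
 * spelled out in the signal/wait tables inside struct intel_engine_cs below.
 */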
#define GEN8_RING_SEMAPHORE_INIT do { \
	if (!dev_priv->semaphore_obj) { \
		break; \
	} \
	ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
	ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
	ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
	ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
	ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
	ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
	} while(0)
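
/*
 * Note (an assumption about usage, not stated in this header): the macro
 * above is meant to be expanded inside the gen8 engine-init code, where
 * local variables named "dev_priv" and "ring" are in scope; the early
 * "break" makes it a no-op until the shared semaphore_obj is allocated.
 */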
enum intel_ring_hangcheck_action {
	HANGCHECK_IDLE = 0,
	HANGCHECK_WAIT,
	HANGCHECK_ACTIVE,
	HANGCHECK_ACTIVE_LOOP,
	HANGCHECK_KICK,
	HANGCHECK_HUNG,
};

#define HANGCHECK_SCORE_RING_HUNG 31
struct intel_ring_hangcheck {
	u64 acthd;
	u64 max_acthd;
	u32 seqno;
	int score;
	enum intel_ring_hangcheck_action action;
	int deadlock;
};
struct intel_ringbuffer {
	struct drm_i915_gem_object *obj;
	void __iomem *virtual_start;

	struct intel_engine_cs *ring;

	u32 head;
	u32 tail;
	int space;
	int size;
	int effective_size;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32 last_retired_head;
};

struct intel_context;
struct drm_i915_reg_descriptor;
struct intel_engine_cs {
	const char	*name;
	enum intel_ring_id {
		RCS = 0x0,
		VCS,
		BCS,
		VECS,
		VCS2
	} id;
#define I915_NUM_RINGS 5
#define LAST_USER_RING (VECS + 1)
	u32		mmio_base;
	struct drm_device *dev;
	struct intel_ringbuffer *buffer;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;

	unsigned irq_refcount;	/* protected by dev_priv->irq_lock */
	u32 irq_enable_mask;	/* bitmask to enable ring interrupt */
	struct drm_i915_gem_request *trace_irq_req;
	bool __must_check (*irq_get)(struct intel_engine_cs *ring);
	void (*irq_put)(struct intel_engine_cs *ring);

	int (*init_hw)(struct intel_engine_cs *ring);

	int (*init_context)(struct intel_engine_cs *ring,
			    struct intel_context *ctx);

	void (*write_tail)(struct intel_engine_cs *ring,
			   u32 value);
	int __must_check (*flush)(struct intel_engine_cs *ring,
				  u32 invalidate_domains,
				  u32 flush_domains);
	int (*add_request)(struct intel_engine_cs *ring);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	u32 (*get_seqno)(struct intel_engine_cs *ring,
			 bool lazy_coherency);
	void (*set_seqno)(struct intel_engine_cs *ring,
			  u32 seqno);
	int (*dispatch_execbuffer)(struct intel_engine_cs *ring,
				   u64 offset, u32 length,
				   unsigned dispatch_flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
	void (*cleanup)(struct intel_engine_cs *ring);

	/* GEN8 signal/wait table - never trust comments!
	 *   signal to   signal to   signal to   signal to   signal to
	 *     RCS          VCS         BCS         VECS        VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *      |-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *      |-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *      |-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP  (0x90) | VCS2 (0x98) |
	 *      |-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP  (0xc0) |
	 *      |-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *   sync from   sync from   sync from   sync from   sync from
	 *     RCS          VCS         BCS         VECS        VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *      |-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *      |-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *      |-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP  (0x90) | VCS2 (0xb8) |
	 *      |-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP  (0xc0) |
	 *      |-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
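	/*
	 * Cross-check (illustrative, added for clarity): f(BCS, VECS) =
	 * (2 * 5 * 8) + (8 * 3) = 0x68, the "signal to VECS" slot in the BCS
	 * row above, and g(VECS, BCS) = (2 * 5 * 8) + (8 * 3) = 0x68 as well,
	 * the "sync from BCS" slot in the VECS row: f and g index the same
	 * qword from the two ends, which is what "transpose" means here.
	 */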
	struct {
		u32	sync_seqno[I915_NUM_RINGS-1];

		union {
			struct {
				/* our mbox written by others */
				u32	wait[I915_NUM_RINGS];
				/* mboxes this ring signals to */
				u32	signal[I915_NUM_RINGS];
			} mbox;
			u64	signal_ggtt[I915_NUM_RINGS];
		};

		/* AKA wait() */
		int	(*sync_to)(struct intel_engine_cs *ring,
				   struct intel_engine_cs *to,
				   u32 seqno);
		int	(*signal)(struct intel_engine_cs *signaller,
				  /* num_dwords needed by caller */
				  unsigned int num_dwords);
	} semaphore;

	/* Execlists */
	spinlock_t execlist_lock;
	struct list_head execlist_queue;
	struct list_head execlist_retired_req_list;
	u8 next_context_status_buffer;
	u32 irq_keep_mask;	/* bitmask for interrupts that should not be masked */
	int (*emit_request)(struct intel_ringbuffer *ringbuf,
			    struct drm_i915_gem_request *request);
	int (*emit_flush)(struct intel_ringbuffer *ringbuf,
			  struct intel_context *ctx,
			  u32 invalidate_domains,
			  u32 flush_domains);
	int (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
			     struct intel_context *ctx,
			     u64 offset, unsigned dispatch_flags);

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_read_req
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	struct drm_i915_gem_request *outstanding_lazy_request;
	bool gpu_caches_dirty;

	wait_queue_head_t irq_queue;

	struct intel_context *default_context;
	struct intel_context *last_context;

	struct intel_ring_hangcheck hangcheck;

	struct {
		struct drm_i915_gem_object *obj;
		u32 gtt_offset;
		volatile u32 *cpu_page;
	} scratch;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this ring.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_descriptor *reg_table;
	int reg_count;

	/*
	 * Table of registers allowed in commands that read/write registers, but
	 * only from the DRM master.
	 */
	const struct drm_i915_reg_descriptor *master_reg_table;
	int master_reg_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the ring's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-ring length field
	 * encoding for the command (i.e. certain opcode ranges use certain bits
	 * to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};
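
/*
 * Illustrative note on get_cmd_length_mask() (values hypothetical): if a
 * ring encodes the length of some opcode range in bits 5:0 of the command
 * header, the callback returns 0x3f for headers in that range and the
 * parser sizes the command from (cmd_header & mask); returning 0 makes the
 * parser reject the command as unrecognized.
 */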

bool intel_ring_initialized(struct intel_engine_cs *ring);

static inline unsigned
intel_ring_flag(struct intel_engine_cs *ring)
{
	return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_engine_cs *ring,
		      struct intel_engine_cs *other)
{
	int idx;

	/*
	 * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
	 * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
	 * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
	 * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
	 * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}
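
/*
 * Worked example (illustrative; assumes the engines live in one contiguous
 * array, so "other - ring" is element-wise pointer arithmetic): for
 * ring = vcs (id 1) and other = rcs (id 0), idx = (0 - 1) - 1 = -2, which
 * wraps to -2 + I915_NUM_RINGS = 3, matching "vcs -> 3 = rcs" above.
 */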

static inline u32
intel_read_status_page(struct intel_engine_cs *ring,
		       int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	barrier();
	return ring->status_page.page_addr[reg];
}

static inline void
intel_write_status_page(struct intel_engine_cs *ring,
			int reg, u32 value)
{
	ring->status_page.page_addr[reg] = value;
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
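
/*
 * Illustrative helper (a sketch, not part of the original header): the
 * driver's breadcrumb seqno lives at I915_GEM_HWS_INDEX, so reading the
 * last completed seqno back from the status page looks like this.
 */
static inline u32 intel_ring_hws_seqno_example(struct intel_engine_cs *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}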

void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
				     struct intel_ringbuffer *ringbuf);
void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
int intel_alloc_ringbuffer_obj(struct drm_device *dev,
			       struct intel_ringbuffer *ringbuf);

void intel_stop_ring_buffer(struct intel_engine_cs *ring);
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);

int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);

int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
static inline void intel_ring_emit(struct intel_engine_cs *ring,
				   u32 data)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
	ringbuf->tail += 4;
}
static inline void intel_ring_advance(struct intel_engine_cs *ring)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	ringbuf->tail &= ringbuf->size - 1;
}
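
/*
 * Usage sketch (hypothetical helper, not part of the original header):
 * commands are emitted as dwords between intel_ring_begin(), which
 * reserves space and handles wrap-around, and intel_ring_advance().
 * MI_NOOP is just a filler payload to show the pattern.
 */
static inline int intel_ring_emit_noops_example(struct intel_engine_cs *ring)
{
	int ret;

	ret = intel_ring_begin(ring, 2);	/* reserve two dwords */
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}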

int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_ring_stopped(struct intel_engine_cs *ring);
void __intel_ring_advance(struct intel_engine_cs *ring);
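
/*
 * Sketch of the circular-buffer arithmetic behind __intel_ring_space()
 * (an assumption about the out-of-line definition, which lives in
 * intel_ringbuffer.c): free space is the gap from tail forward to head,
 * wrapped modulo size, minus the I915_RING_FREE_SPACE guard the hardware
 * requires between head and tail.
 */
static inline int __intel_ring_space_sketch(int head, int tail, int size)
{
	int space = head - tail;

	if (space <= 0)
		space += size;	/* tail is at or past head: wrap around */

	return space - I915_RING_FREE_SPACE;
}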

int __must_check intel_ring_idle(struct intel_engine_cs *ring);
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);

void intel_fini_pipe_control(struct intel_engine_cs *ring);
int intel_init_pipe_control(struct intel_engine_cs *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u64 intel_ring_get_active_head(struct intel_engine_cs *ring);

int init_workarounds_ring(struct intel_engine_cs *ring);

static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
	return ringbuf->tail;
}

static inline struct drm_i915_gem_request *
intel_ring_get_request(struct intel_engine_cs *ring)
{
	BUG_ON(ring->outstanding_lazy_request == NULL);
	return ring->outstanding_lazy_request;
}

#endif /* _INTEL_RINGBUFFER_H_ */