#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64

struct intel_hw_status_page {
	u32		*page_addr;
	unsigned int	gfx_addr;
	struct		drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring)  I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
#define i915_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SIGNAL_OFFSET(__ring, to)			     \
	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
	((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
	(i915_semaphore_seqno_size * (to)))

#define GEN8_WAIT_OFFSET(__ring, from)			     \
	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
	((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
	(i915_semaphore_seqno_size * (__ring)->id))

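/*
 * Worked example of the offset arithmetic above, using the values defined in
 * this file (I915_NUM_RINGS == 5, i915_semaphore_seqno_size == 8): the slot
 * the render ring (id RCS == 0) uses to signal the video ring (id VCS == 1) is
 *
 *	GEN8_SIGNAL_OFFSET(ring, VCS) = base + (0 * 5 * 8) + (8 * 1)
 *				      = base + 0x08
 *
 * which matches the 0x08 entry in the gen8 signal/wait table documented
 * further down in this file.
 */
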
#define GEN8_RING_SEMAPHORE_INIT do { \
	if (!dev_priv->semaphore_obj) { \
		break; \
	} \
	ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
	ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
	ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
	ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
	ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
	ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
	} while (0)

enum intel_ring_hangcheck_action {
	HANGCHECK_IDLE = 0,
	HANGCHECK_WAIT,
	HANGCHECK_ACTIVE,
	HANGCHECK_ACTIVE_LOOP,
	HANGCHECK_KICK,
	HANGCHECK_HUNG,
};

#define HANGCHECK_SCORE_RING_HUNG 31

struct intel_ring_hangcheck {
	u64 acthd;
	u64 max_acthd;
	u32 seqno;
	int score;
	enum intel_ring_hangcheck_action action;
	int deadlock;
	u32 instdone[I915_NUM_INSTDONE_REG];
};

struct intel_ringbuffer {
	struct drm_i915_gem_object *obj;
	void __iomem *virtual_start;
	struct i915_vma *vma;

	struct intel_engine_cs *ring;
	struct list_head link;

	u32 head;
	u32 tail;
	int space;
	int size;
	int effective_size;
	int reserved_size;
	int reserved_tail;
	bool reserved_in_use;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32 last_retired_head;
};

struct intel_context;
struct drm_i915_reg_descriptor;

/*
 * We use a single page to load ctx workarounds, so all of these
 * values are expressed in dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position, also helpful in case
 *    we want to have multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct drm_i915_gem_object *obj;
};

struct intel_engine_cs {
	const char	*name;
	enum intel_ring_id {
		RCS = 0x0,
		VCS,
		BCS,
		VECS,
		VCS2
	} id;
#define I915_NUM_RINGS 5
#define LAST_USER_RING (VECS + 1)
	u32		mmio_base;
	struct		drm_device *dev;
	struct intel_ringbuffer *buffer;
	struct list_head buffers;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;

	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
	struct drm_i915_gem_request *trace_irq_req;
	bool __must_check (*irq_get)(struct intel_engine_cs *ring);
	void		(*irq_put)(struct intel_engine_cs *ring);

	int		(*init_hw)(struct intel_engine_cs *ring);

	int		(*init_context)(struct drm_i915_gem_request *req);

	void		(*write_tail)(struct intel_engine_cs *ring,
				      u32 value);
	int __must_check (*flush)(struct drm_i915_gem_request *req,
				  u32 invalidate_domains,
				  u32 flush_domains);
	int		(*add_request)(struct drm_i915_gem_request *req);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	u32		(*get_seqno)(struct intel_engine_cs *ring,
				     bool lazy_coherency);
	void		(*set_seqno)(struct intel_engine_cs *ring,
				     u32 seqno);
	int		(*dispatch_execbuffer)(struct drm_i915_gem_request *req,
					       u64 offset, u32 length,
					       unsigned dispatch_flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
#define I915_DISPATCH_RS     0x4
	void		(*cleanup)(struct intel_engine_cs *ring);

	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to	signal to	signal to	signal to
	 *	    RCS		   VCS		   BCS		   VECS		   VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) |  NOP (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *	 sync from	sync from	sync from	sync from	sync from
	 *	    RCS		   VCS		   BCS		   VECS		   VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
	struct {
		u32	sync_seqno[I915_NUM_RINGS-1];

		union {
			struct {
				/* our mbox written by others */
				u32		wait[I915_NUM_RINGS];
				/* mboxes this ring signals to */
				i915_reg_t	signal[I915_NUM_RINGS];
			} mbox;
			u64		signal_ggtt[I915_NUM_RINGS];
		};

		/* AKA wait() */
		int	(*sync_to)(struct drm_i915_gem_request *to_req,
				   struct intel_engine_cs *from,
				   u32 seqno);
		int	(*signal)(struct drm_i915_gem_request *signaller_req,
				  /* num_dwords needed by caller */
				  unsigned int num_dwords);
	} semaphore;

	/* Execlists */
	spinlock_t execlist_lock;
	struct list_head execlist_queue;
	struct list_head execlist_retired_req_list;
	u8 next_context_status_buffer;
	bool disable_lite_restore_wa;
	u32 ctx_desc_template;
	u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
	int		(*emit_request)(struct drm_i915_gem_request *request);
	int		(*emit_flush)(struct drm_i915_gem_request *request,
				      u32 invalidate_domains,
				      u32 flush_domains);
	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
					 u64 offset, unsigned dispatch_flags);

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_read_req
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Seqno of request most recently submitted to request_list.
	 * Used exclusively by hang checker to avoid grabbing lock while
	 * inspecting request list.
	 */
	u32 last_submitted_seqno;

	bool gpu_caches_dirty;

	wait_queue_head_t irq_queue;

	struct intel_context *default_context;
	struct intel_context *last_context;

	struct intel_ring_hangcheck hangcheck;

	struct {
		struct drm_i915_gem_object *obj;
		u32 gtt_offset;
		volatile u32 *cpu_page;
	} scratch;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this ring.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_descriptor *reg_table;
	int reg_count;

	/*
	 * Table of registers allowed in commands that read/write registers, but
	 * only from the DRM master.
	 */
	const struct drm_i915_reg_descriptor *master_reg_table;
	int master_reg_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the ring's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-ring length field
	 * encoding for the command (i.e. certain opcode ranges use certain bits
	 * to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};

static inline bool
intel_ring_initialized(struct intel_engine_cs *ring)
{
	return ring->dev != NULL;
}

static inline unsigned
intel_ring_flag(struct intel_engine_cs *ring)
{
	return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_engine_cs *ring,
		      struct intel_engine_cs *other)
{
	int idx;

	/*
	 * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
	 * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
	 * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
	 * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
	 * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}

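/*
 * Worked example of the mapping above (the engines sit in one contiguous
 * array, so the pointer difference equals the id difference): with
 * ring == vcs (id 1) and other == rcs (id 0),
 *
 *	idx = (0 - 1) - 1 = -2, which wraps to -2 + I915_NUM_RINGS = 3,
 *
 * matching the "vcs -> 3 = rcs" line in the table.
 */
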
static inline void
intel_flush_status_page(struct intel_engine_cs *ring, int reg)
{
	drm_clflush_virt_range(&ring->status_page.page_addr[reg],
			       sizeof(uint32_t));
}

static inline u32
intel_read_status_page(struct intel_engine_cs *ring,
		       int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	barrier();
	return ring->status_page.page_addr[reg];
}

static inline void
intel_write_status_page(struct intel_engine_cs *ring,
			int reg, u32 value)
{
	ring->status_page.page_addr[reg] = value;
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

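/*
 * Illustration (assuming MI_STORE_DWORD_INDEX_SHIFT is the dword-to-byte
 * conversion shift of 2, as defined in i915_reg.h): the scratch slot at
 * dword index 0x40 resolves to byte offset 0x40 << 2 = 0x100 within the
 * status page, well inside the driver-usable area noted above.
 */
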
struct intel_ringbuffer *
intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
				     struct intel_ringbuffer *ringbuf);
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
void intel_ringbuffer_free(struct intel_ringbuffer *ring);

void intel_stop_ring_buffer(struct intel_engine_cs *ring);
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);

int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);

int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
static inline void intel_ring_emit(struct intel_engine_cs *ring,
				   u32 data)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
	ringbuf->tail += 4;
}
static inline void intel_ring_emit_reg(struct intel_engine_cs *ring,
				       i915_reg_t reg)
{
	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
}
static inline void intel_ring_advance(struct intel_engine_cs *ring)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	ringbuf->tail &= ringbuf->size - 1;
}
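
/*
 * Typical emission sequence (a sketch of the calling convention, not a
 * complete example): reserve dwords against the request, write them, then
 * advance the tail.
 *
 *	struct intel_engine_cs *ring = req->ring;
 *	int ret = intel_ring_begin(req, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */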
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_ring_stopped(struct intel_engine_cs *ring);

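/*
 * Rough sketch of the space calculation (the real implementation lives in
 * intel_ringbuffer.c): head and tail are byte offsets into the buffer, so
 * free space is (head - tail), with size added when that goes non-positive
 * to handle wraparound, minus I915_RING_FREE_SPACE to honour the head/tail
 * cacheline rule quoted at the top of this file.
 */
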
int __must_check intel_ring_idle(struct intel_engine_cs *ring);
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);

void intel_fini_pipe_control(struct intel_engine_cs *ring);
int intel_init_pipe_control(struct intel_engine_cs *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u64 intel_ring_get_active_head(struct intel_engine_cs *ring);

int init_workarounds_ring(struct intel_engine_cs *ring);

static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
	return ringbuf->tail;
}

/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is ILK at 136 words. Reserving too much is better than reserving too little
 * as that allows for corner cases that might have been missed. So the figure
 * has been rounded up to 160 words.
 */
#define MIN_SPACE_FOR_ADD_REQUEST	160

/*
 * Reserve space in the ring to guarantee that the i915_add_request() call
 * will always have sufficient room to do its stuff. The request creation
 * code calls this automatically.
 */
void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
/* Cancel the reservation, e.g. because the request is being discarded. */
void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
/* Use the reserved space - for use by i915_add_request() only. */
void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
/* Finish with the reserved space - for use by i915_add_request() only. */
void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);

/* Legacy ringbuffer specific portion of reservation code: */
int intel_ring_reserve_space(struct drm_i915_gem_request *request);

#endif /* _INTEL_RINGBUFFER_H_ */