#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to the origin of the magic values used in the
 * various workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64
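
/*
 * An illustrative sketch of how this headroom enters the free-space
 * computation (assumption: this mirrors __intel_ring_space(), declared
 * later in this file, rather than being a verbatim copy):
 *
 *	space = head - tail;
 *	if (space <= 0)
 *		space += size;
 *	space -= I915_RING_FREE_SPACE;
 *
 * Even a completely empty ring (head == tail) thus reports size - 64 bytes
 * free, so the tail can never advance into the head's cacheline.
 */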

struct intel_hw_status_page {
	u32 *page_addr;
	unsigned int gfx_addr;
	struct drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
#define i915_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
	((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
	(i915_semaphore_seqno_size * (to)))

#define GEN8_WAIT_OFFSET(__ring, from) \
	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
	((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
	(i915_semaphore_seqno_size * (__ring)->id))

#define GEN8_RING_SEMAPHORE_INIT do { \
	if (!dev_priv->semaphore_obj) { \
		break; \
	} \
	ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
	ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
	ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
	ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
	ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
	ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
	} while (0)
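
/*
 * Worked example (illustrative): with I915_NUM_RINGS == 5 and
 * i915_semaphore_seqno_size == 8, the slot RCS (id 0) uses to signal
 * VCS2 (id 4) is GEN8_SIGNAL_OFFSET = base + (0 * 5 * 8) + (8 * 4) =
 * base + 0x20, matching the RCS/VCS2 entry of the signal table further
 * below; GEN8_WAIT_OFFSET for VCS2 waiting on RCS picks the same slot.
 */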

enum intel_ring_hangcheck_action {
	HANGCHECK_IDLE = 0,
	HANGCHECK_WAIT,
	HANGCHECK_ACTIVE,
	HANGCHECK_ACTIVE_LOOP,
	HANGCHECK_KICK,
	HANGCHECK_HUNG,
};

#define HANGCHECK_SCORE_RING_HUNG 31

struct intel_ring_hangcheck {
	u64 acthd;
	u64 max_acthd;
	u32 seqno;
	int score;
	enum intel_ring_hangcheck_action action;
	int deadlock;
};

struct intel_ringbuffer {
	struct drm_i915_gem_object *obj;
	void __iomem *virtual_start;

	struct intel_engine_cs *ring;
	struct list_head link;

	u32 head;
	u32 tail;
	int space;
	int size;
	int effective_size;
	int reserved_size;
	int reserved_tail;
	bool reserved_in_use;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32 last_retired_head;
};

struct intel_context;
struct drm_i915_reg_descriptor;

/*
 * We use a single page to load ctx workarounds, so all of these
 * values are expressed in dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position, also helpful in case
 *    we want to have multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in dwords
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct drm_i915_gem_object *obj;
};

struct intel_engine_cs {
	const char *name;
	enum intel_ring_id {
		RCS = 0x0,
		VCS,
		BCS,
		VECS,
		VCS2
	} id;
#define I915_NUM_RINGS 5
#define LAST_USER_RING (VECS + 1)
	u32 mmio_base;
	struct drm_device *dev;
	struct intel_ringbuffer *buffer;
	struct list_head buffers;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;

	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	struct drm_i915_gem_request *trace_irq_req;
	bool __must_check (*irq_get)(struct intel_engine_cs *ring);
	void (*irq_put)(struct intel_engine_cs *ring);

	int (*init_hw)(struct intel_engine_cs *ring);

	int (*init_context)(struct drm_i915_gem_request *req);

	void (*write_tail)(struct intel_engine_cs *ring,
			   u32 value);
	int __must_check (*flush)(struct drm_i915_gem_request *req,
				  u32 invalidate_domains,
				  u32 flush_domains);
	int (*add_request)(struct drm_i915_gem_request *req);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	u32 (*get_seqno)(struct intel_engine_cs *ring,
			 bool lazy_coherency);
	void (*set_seqno)(struct intel_engine_cs *ring,
			  u32 seqno);
	int (*dispatch_execbuffer)(struct drm_i915_gem_request *req,
				   u64 offset, u32 length,
				   unsigned dispatch_flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
#define I915_DISPATCH_RS     0x4
	void (*cleanup)(struct intel_engine_cs *ring);

	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to	signal to	signal to	signal to
	 *	    RCS		   VCS		   BCS		  VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) |  NOP (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *	 sync from	sync from	sync from	sync from	sync from
	 *	    RCS		   VCS		   BCS		  VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
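	/*
	 * Illustrative check of the generalization above: for x = VCS (id 1)
	 * signalling y = BCS (id 2), f(x, y) = (1 * 5 * 8) + (8 * 2) = 0x38,
	 * the VCS/BCS cell of the signal table; for the same pair,
	 * g(x, y) = (2 * 5 * 8) + (8 * 1) = 0x58, the VCS/BCS cell of the
	 * wait table.
	 */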
	struct {
		u32 sync_seqno[I915_NUM_RINGS-1];

		union {
			struct {
				/* our mbox written by others */
				u32 wait[I915_NUM_RINGS];
				/* mboxes this ring signals to */
				u32 signal[I915_NUM_RINGS];
			} mbox;
			u64 signal_ggtt[I915_NUM_RINGS];
		};

		/* AKA wait() */
		int (*sync_to)(struct drm_i915_gem_request *to_req,
			       struct intel_engine_cs *from,
			       u32 seqno);
		int (*signal)(struct drm_i915_gem_request *signaller_req,
			      /* num_dwords needed by caller */
			      unsigned int num_dwords);
	} semaphore;

	/* Execlists */
	spinlock_t execlist_lock;
	struct list_head execlist_queue;
	struct list_head execlist_retired_req_list;
	u8 next_context_status_buffer;
	u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
	int (*emit_request)(struct drm_i915_gem_request *request);
	int (*emit_flush)(struct drm_i915_gem_request *request,
			  u32 invalidate_domains,
			  u32 flush_domains);
	int (*emit_bb_start)(struct drm_i915_gem_request *req,
			     u64 offset, unsigned dispatch_flags);

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_read_req
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Seqno of request most recently submitted to request_list.
	 * Used exclusively by hang checker to avoid grabbing lock while
	 * inspecting request list.
	 */
	u32 last_submitted_seqno;

	bool gpu_caches_dirty;

	wait_queue_head_t irq_queue;

	struct intel_context *default_context;
	struct intel_context *last_context;

	struct intel_ring_hangcheck hangcheck;

	struct {
		struct drm_i915_gem_object *obj;
		u32 gtt_offset;
		volatile u32 *cpu_page;
	} scratch;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this ring.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_descriptor *reg_table;
	int reg_count;

	/*
	 * Table of registers allowed in commands that read/write registers, but
	 * only from the DRM master.
	 */
	const struct drm_i915_reg_descriptor *master_reg_table;
	int master_reg_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Returns 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the ring's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-ring length field
	 * encoding for the command (i.e. certain opcode ranges use certain bits
	 * to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};

bool intel_ring_initialized(struct intel_engine_cs *ring);

static inline unsigned
intel_ring_flag(struct intel_engine_cs *ring)
{
	return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_engine_cs *ring,
		      struct intel_engine_cs *other)
{
	int idx;

	/*
	 * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
	 * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
	 * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
	 * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
	 * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}
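
/*
 * Worked example (illustrative; assumes, as the pointer arithmetic above
 * requires, that the engines live in one contiguous array so the pointer
 * difference equals the id difference): for ring = VCS (id 1) and
 * other = RCS (id 0), idx = (0 - 1) - 1 = -2, which wraps to
 * -2 + I915_NUM_RINGS = 3, matching the "vcs -> 3 = rcs" entry above.
 */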

static inline void
intel_flush_status_page(struct intel_engine_cs *ring, int reg)
{
	drm_clflush_virt_range(&ring->status_page.page_addr[reg],
			       sizeof(uint32_t));
}

static inline u32
intel_read_status_page(struct intel_engine_cs *ring,
		       int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	barrier();
	return ring->status_page.page_addr[reg];
}

static inline void
intel_write_status_page(struct intel_engine_cs *ring,
			int reg, u32 value)
{
	ring->status_page.page_addr[reg] = value;
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
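
/*
 * Illustrative sketch (an assumption about typical usage, not an API of
 * this header): a lazy get_seqno() hook can simply return the value that
 * the add_request sequence wrote into the hardware status page:
 *
 *	static u32 example_get_seqno(struct intel_engine_cs *ring,
 *				     bool lazy_coherency)
 *	{
 *		return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 *	}
 */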

struct intel_ringbuffer *
intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
				     struct intel_ringbuffer *ringbuf);
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
void intel_ringbuffer_free(struct intel_ringbuffer *ring);

void intel_stop_ring_buffer(struct intel_engine_cs *ring);
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);

int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);

int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
static inline void intel_ring_emit(struct intel_engine_cs *ring,
				   u32 data)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
	ringbuf->tail += 4;
}
static inline void intel_ring_emit_reg(struct intel_engine_cs *ring,
				       u32 reg)
{
	intel_ring_emit(ring, reg);
}
static inline void intel_ring_advance(struct intel_engine_cs *ring)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	ringbuf->tail &= ringbuf->size - 1;
}
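
/*
 * Illustrative usage of the emitters above (a sketch only; the register
 * and value are made up for the example and error handling is abbreviated):
 *
 *	ret = intel_ring_begin(req, 3);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
 *	intel_ring_emit_reg(ring, RING_MI_MODE(ring->mmio_base));
 *	intel_ring_emit(ring, 0);
 *	intel_ring_advance(ring);
 */
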
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_ring_stopped(struct intel_engine_cs *ring);

int __must_check intel_ring_idle(struct intel_engine_cs *ring);
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);

void intel_fini_pipe_control(struct intel_engine_cs *ring);
int intel_init_pipe_control(struct intel_engine_cs *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u64 intel_ring_get_active_head(struct intel_engine_cs *ring);

int init_workarounds_ring(struct intel_engine_cs *ring);

static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
	return ringbuf->tail;
}

/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is ILK at 136 words. Reserving too much is better than reserving too little
 * as that allows for corner cases that might have been missed. So the figure
 * has been rounded up to 160 words.
 */
#define MIN_SPACE_FOR_ADD_REQUEST 160

/*
 * Reserve space in the ring to guarantee that the i915_add_request() call
 * will always have sufficient room to do its stuff. The request creation
 * code calls this automatically; see the lifecycle sketch below.
 */
void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
/* Cancel the reservation, e.g. because the request is being discarded. */
void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
/* Use the reserved space - for use by i915_add_request() only. */
void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
/* Finish with the reserved space - for use by i915_add_request() only. */
void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
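
/*
 * Lifecycle sketch for the reservation calls above (illustrative ordering,
 * not a verbatim copy of the request code):
 *
 *	intel_ring_reserved_space_reserve(ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
 *	... caller fills the ring via intel_ring_begin()/intel_ring_emit() ...
 *	intel_ring_reserved_space_use(ringbuf);    <- i915_add_request() starts
 *	... emit the request completion commands ...
 *	intel_ring_reserved_space_end(ringbuf);    <- i915_add_request() done
 *
 * If the request is abandoned before submission,
 * intel_ring_reserved_space_cancel() releases the reservation instead.
 */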

/* Legacy ringbuffer specific portion of reservation code: */
int intel_ring_reserve_space(struct drm_i915_gem_request *request);

#endif /* _INTEL_RINGBUFFER_H_ */