#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64
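
/*
 * Illustrative sketch only (not part of this header): the wrap-around
 * free-space computation that I915_RING_FREE_SPACE pads. Keeping at
 * least this many bytes unused guarantees the head can never catch up
 * to the tail on the same cacheline. The helper name and the HEAD_ADDR
 * mask are assumptions for the example.
 *
 *	static int example_ring_space(struct intel_ring_buffer *ring)
 *	{
 *		int space = (ring->head & HEAD_ADDR)
 *			  - (ring->tail + I915_RING_FREE_SPACE);
 *		if (space < 0)
 *			space += ring->size;
 *		return space;
 *	}
 */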

struct intel_hw_status_page {
	u32		*page_addr;
	unsigned int	gfx_addr;
	struct		drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring)  I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))

enum intel_ring_hangcheck_action { wait, active, kick, hung };

struct intel_ring_hangcheck {
	enum intel_ring_hangcheck_action action;
};

struct intel_ring_buffer {
	const char	*name;
	enum intel_ring_id {
		RCS = 0x0,
		VCS,
		BCS,
		VECS,
	} id;
#define I915_NUM_RINGS 4
	u32		mmio_base;
	void		__iomem *virtual_start;
	struct		drm_device *dev;
	struct		drm_i915_gem_object *obj;

	u32		head;
	u32		tail;
	int		space;
	int		size;
	int		effective_size;
	struct intel_hw_status_page status_page;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements; see the usage sketch after this
	 * struct definition.
	 */
	u32		last_retired_head;

	struct {
		u32	gt; /* protected by dev_priv->irq_lock */
		u32	pm; /* protected by dev_priv->rps.lock (sucks) */
	} irq_refcount;
	u32		irq_enable_mask; /* bitmask to enable ring interrupt */
	u32		trace_irq_seqno;
	u32		sync_seqno[I915_NUM_RINGS-1];
	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
	void		(*irq_put)(struct intel_ring_buffer *ring);

	int		(*init)(struct intel_ring_buffer *ring);

	void		(*write_tail)(struct intel_ring_buffer *ring,
				      u32 value);
	int __must_check (*flush)(struct intel_ring_buffer *ring,
				  u32	invalidate_domains,
				  u32	flush_domains);
	int		(*add_request)(struct intel_ring_buffer *ring);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent. (A usage sketch follows the
	 * I915_GEM_HWS_INDEX definition below.)
	 */
	u32		(*get_seqno)(struct intel_ring_buffer *ring,
				     bool lazy_coherency);
	void		(*set_seqno)(struct intel_ring_buffer *ring,
				     u32 seqno);
	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
					       u32 offset, u32 length,
					       unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
	void		(*cleanup)(struct intel_ring_buffer *ring);
	int		(*sync_to)(struct intel_ring_buffer *ring,
				   struct intel_ring_buffer *to,
				   u32 seqno);

	/* our mbox written by others */
	u32		semaphore_register[I915_NUM_RINGS];
	/* mboxes this ring signals to */
	u32		signal_mbox[I915_NUM_RINGS];

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	u32 outstanding_lazy_request;
	bool gpu_caches_dirty;

	wait_queue_head_t irq_queue;

	/**
	 * Do an explicit TLB flush before MI_SET_CONTEXT
	 */
	bool itlb_before_ctx_switch;
	struct i915_hw_context *default_context;
	struct i915_hw_context *last_context;

	struct intel_ring_hangcheck hangcheck;
};
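
/*
 * Usage sketch (assumed shape and hypothetical name; the real consumer
 * lives in intel_ringbuffer.c): how last_retired_head is consumed when
 * looking for ring space, then reset to -1 so that new retirements can
 * be detected.
 */
static inline void example_consume_retired_head(struct intel_ring_buffer *ring)
{
	if (ring->last_retired_head != (u32)-1) {
		/* The GPU has finished everything up to this offset,
		 * so the software head may advance that far. */
		ring->head = ring->last_retired_head;
		/* Mark the value consumed. */
		ring->last_retired_head = (u32)-1;
	}
}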

static inline bool
intel_ring_initialized(struct intel_ring_buffer *ring)
{
	return ring->obj != NULL;
}

static inline unsigned
intel_ring_flag(struct intel_ring_buffer *ring)
{
	return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_ring_buffer *ring,
		      struct intel_ring_buffer *other)
{
	int idx;

	/*
	 * cs -> 0 = vcs, 1 = bcs
	 * vcs -> 0 = bcs, 1 = cs
	 * bcs -> 0 = cs, 1 = vcs
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}
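
/*
 * Usage sketch (the helper name is hypothetical): sync_seqno[] is
 * indexed by the value intel_ring_sync_index() computes for a
 * (ring, other) pair, recording the most recent seqno of @ring that
 * @other has already been made to wait upon, so redundant semaphore
 * waits can be skipped.
 */
static inline u32 example_last_semaphore_wait(struct intel_ring_buffer *ring,
					      struct intel_ring_buffer *other)
{
	return ring->sync_seqno[intel_ring_sync_index(ring, other)];
}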

static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
		       int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	barrier();

	return ring->status_page.page_addr[reg];
}

static inline void
intel_write_status_page(struct intel_ring_buffer *ring,
			int reg, u32 value)
{
	ring->status_page.page_addr[reg] = value;
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x20
#define I915_GEM_HWS_SCRATCH_INDEX	0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
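
/*
 * Usage sketch (the helper name is hypothetical): a per-ring breadcrumb
 * typically lands at I915_GEM_HWS_INDEX, so a get_seqno() hook is
 * essentially this load. On chipsets that need the "expensive kick"
 * described in struct intel_ring_buffer above, a non-lazy read would
 * first poke a command stream register (e.g. via
 * intel_ring_get_active_head()) to force the write to land before
 * trusting the status page.
 */
static inline u32 example_read_hws_seqno(struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}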

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);

int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
				   u32 data)
{
	/* Write one dword into the ring and advance the software tail;
	 * the hardware only sees the new tail via intel_ring_advance(). */
	iowrite32(data, ring->virtual_start + ring->tail);
	ring->tail += 4;
}
void intel_ring_advance(struct intel_ring_buffer *ring);
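
/*
 * Typical emission pattern (usage sketch with a hypothetical helper
 * name; assumes MI_NOOP from i915_reg.h is visible to the includer):
 * reserve dwords with intel_ring_begin(), write them with
 * intel_ring_emit(), then publish the new tail with
 * intel_ring_advance().
 */
static inline int example_emit_two_noops(struct intel_ring_buffer *ring)
{
	int ret;

	ret = intel_ring_begin(ring, 2);	/* reserve 2 dwords */
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);		/* hw may now read them */

	return 0;
}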

int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);

static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
{
	return ring->tail;
}

static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
{
	BUG_ON(ring->outstanding_lazy_request == 0);
	return ring->outstanding_lazy_request;
}

static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
		ring->trace_irq_seqno = seqno;
}

int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);

#endif /* _INTEL_RINGBUFFER_H_ */