#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64

struct intel_hw_status_page {
	u32		*page_addr;
	unsigned int	gfx_addr;
	struct drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))
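
/*
 * These macros expand to I915_READ()/I915_WRITE() and therefore assume a
 * dev_priv pointer in scope, as is the convention throughout i915. A
 * typical (sketch) use when (re)initialising a ring:
 *
 *	I915_WRITE_HEAD(ring, 0);
 *	head = I915_READ_HEAD(ring) & HEAD_ADDR;
 */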

struct intel_ring_hangcheck {
	u32 seqno;
	u32 acthd;
	int score;
};

struct intel_ring_buffer {
	const char	*name;
	enum intel_ring_id {
		RCS = 0x0,
		VCS,
		BCS,
		VECS,
	} id;
#define I915_NUM_RINGS 4
	u32		mmio_base;
	void		__iomem *virtual_start;
	struct drm_device *dev;
	struct drm_i915_gem_object *obj;

	u32		head;
	u32		tail;
	int		space;
	int		size;
	int		effective_size;
	struct intel_hw_status_page status_page;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32		last_retired_head;

	struct {
		u32	gt; /* protected by dev_priv->irq_lock */
		u32	pm; /* protected by dev_priv->rps.lock (sucks) */
	} irq_refcount;
	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
	u32		trace_irq_seqno;
	u32		sync_seqno[I915_NUM_RINGS-1];
	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
	void		(*irq_put)(struct intel_ring_buffer *ring);

	int		(*init)(struct intel_ring_buffer *ring);

	void		(*write_tail)(struct intel_ring_buffer *ring,
				      u32 value);
	int __must_check (*flush)(struct intel_ring_buffer *ring,
				  u32 invalidate_domains,
				  u32 flush_domains);
	int		(*add_request)(struct intel_ring_buffer *ring);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	u32		(*get_seqno)(struct intel_ring_buffer *ring,
				     bool lazy_coherency);
	void		(*set_seqno)(struct intel_ring_buffer *ring,
				     u32 seqno);
	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
					       u32 offset, u32 length,
					       unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
	void		(*cleanup)(struct intel_ring_buffer *ring);
	int		(*sync_to)(struct intel_ring_buffer *ring,
				   struct intel_ring_buffer *to,
				   u32 seqno);

	/* our mbox written by others */
	u32		semaphore_register[I915_NUM_RINGS];
	/* mboxes this ring signals to */
	u32		signal_mbox[I915_NUM_RINGS];

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	u32 outstanding_lazy_request;
	bool gpu_caches_dirty;

	wait_queue_head_t irq_queue;

	/**
	 * Do an explicit TLB flush before MI_SET_CONTEXT
	 */
	bool itlb_before_ctx_switch;
	struct i915_hw_context *default_context;
	struct i915_hw_context *last_context;

	struct intel_ring_hangcheck hangcheck;

	void *private;
};

static inline bool
intel_ring_initialized(struct intel_ring_buffer *ring)
{
	return ring->obj != NULL;
}

static inline unsigned
intel_ring_flag(struct intel_ring_buffer *ring)
{
	return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_ring_buffer *ring,
		      struct intel_ring_buffer *other)
{
	int idx;

	/*
	 * rcs -> 0 = vcs, 1 = bcs, 2 = vecs;
	 * vcs -> 0 = bcs, 1 = vecs, 2 = rcs;
	 * bcs -> 0 = vecs, 1 = rcs, 2 = vcs;
	 * vecs -> 0 = rcs, 1 = vcs, 2 = bcs.
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}

static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
		       int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	barrier();
	return ring->status_page.page_addr[reg];
}

static inline void
intel_write_status_page(struct intel_ring_buffer *ring,
			int reg, u32 value)
{
	ring->status_page.page_addr[reg] = value;
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x20
#define I915_GEM_HWS_SCRATCH_INDEX	0x30
#define I915_GEM_HWS_SCRATCH_ADDR	(I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

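/*
 * Example (a sketch): the per-ring breadcrumb seqno lives at
 * I915_GEM_HWS_INDEX, so a ring's get_seqno/set_seqno hooks typically
 * reduce to:
 *
 *	seqno = intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 *	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
 */
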
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);

int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
				   u32 data)
{
	iowrite32(data, ring->virtual_start + ring->tail);
	ring->tail += 4;
}
void intel_ring_advance(struct intel_ring_buffer *ring);
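/*
 * Typical emission pattern (a minimal sketch): reserve space for a whole
 * number of dwords, emit them, then advance the tail so the GPU can see
 * the new commands:
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */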
int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);

static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
{
	return ring->tail;
}

static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
{
	BUG_ON(ring->outstanding_lazy_request == 0);
	return ring->outstanding_lazy_request;
}

static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
		ring->trace_irq_seqno = seqno;
}

/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);

#endif /* _INTEL_RINGBUFFER_H_ */