drm/i915: Write RING_TAIL once per-request
drivers/gpu/drm/i915/intel_ringbuffer.h
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64
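/*
 * The idea being that always leaving at least I915_RING_FREE_SPACE bytes
 * (one cacheline) of the ring unused keeps the ring from filling completely,
 * so head and tail cannot end up in the forbidden arrangement quoted above.
 */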

struct intel_hw_status_page {
	u32		*page_addr;	/* kernel virtual address of the page */
	unsigned int	gfx_addr;	/* its offset in the global GTT */
	struct		drm_i915_gem_object *obj;	/* backing object */
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring)  I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
enum intel_ring_hangcheck_action {
	HANGCHECK_IDLE = 0,
	HANGCHECK_WAIT,
	HANGCHECK_ACTIVE,
	HANGCHECK_KICK,
	HANGCHECK_HUNG,
};

struct intel_ring_hangcheck {
	bool deadlock;
	u32 seqno;
	u32 acthd;
	int score;
	enum intel_ring_hangcheck_action action;
};

struct intel_ring_buffer {
	const char	*name;
	enum intel_ring_id {
		RCS = 0x0,
		VCS,
		BCS,
		VECS,
	} id;
#define I915_NUM_RINGS 4
	u32		mmio_base;
	void		__iomem *virtual_start;
	struct		drm_device *dev;
	struct		drm_i915_gem_object *obj;

	u32		head;
	u32		tail;
	int		space;
	int		size;
	int		effective_size;
	struct intel_hw_status_page status_page;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we advance last_retired_head to that
	 * request's tail, as the GPU must have finished processing the
	 * request and so we know we can reuse the ringbuffer up to that
	 * position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32		last_retired_head;
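	/*
	 * Sketch of both halves (the retire and wait-for-space paths):
	 * retiring a request does "ring->last_retired_head = request->tail;"
	 * while the space-starved side consumes it with
	 * "ring->head = ring->last_retired_head; ring->last_retired_head = -1;"
	 */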

	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
	u32		trace_irq_seqno;
	u32		sync_seqno[I915_NUM_RINGS-1];
	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
	void		(*irq_put)(struct intel_ring_buffer *ring);

	int		(*init)(struct intel_ring_buffer *ring);

	void		(*write_tail)(struct intel_ring_buffer *ring,
				      u32 value);
	int __must_check (*flush)(struct intel_ring_buffer *ring,
				  u32 invalidate_domains,
				  u32 flush_domains);
	int		(*add_request)(struct intel_ring_buffer *ring);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	u32		(*get_seqno)(struct intel_ring_buffer *ring,
				     bool lazy_coherency);
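	/* e.g. request retirement can afford the cheap lazy read, whereas
	 * hangcheck passes lazy_coherency=false to force a coherent one. */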
	void		(*set_seqno)(struct intel_ring_buffer *ring,
				     u32 seqno);
	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
					       u32 offset, u32 length,
					       unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
	void		(*cleanup)(struct intel_ring_buffer *ring);
	int		(*sync_to)(struct intel_ring_buffer *ring,
				   struct intel_ring_buffer *to,
				   u32 seqno);

	/* our mbox written by others */
	u32		semaphore_register[I915_NUM_RINGS];
	/* mboxes this ring signals to */
	u32		signal_mbox[I915_NUM_RINGS];

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Do we have any not-yet-emitted requests outstanding?
	 */
	struct drm_i915_gem_request *preallocated_lazy_request;
	u32 outstanding_lazy_seqno;
	bool gpu_caches_dirty;
	bool fbc_dirty;

	wait_queue_head_t irq_queue;

	/**
	 * Do an explicit TLB flush before MI_SET_CONTEXT
	 */
	bool itlb_before_ctx_switch;
	struct i915_hw_context *default_context;
	struct i915_hw_context *last_context;

	struct intel_ring_hangcheck hangcheck;

	struct {
		struct drm_i915_gem_object *obj;
		u32 gtt_offset;
		volatile u32 *cpu_page;
	} scratch;
};

static inline bool
intel_ring_initialized(struct intel_ring_buffer *ring)
{
	return ring->obj != NULL;
}

static inline unsigned
intel_ring_flag(struct intel_ring_buffer *ring)
{
	return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_ring_buffer *ring,
		      struct intel_ring_buffer *other)
{
	int idx;

	/*
	 * cs -> 0 = vcs, 1 = bcs
	 * vcs -> 0 = bcs, 1 = cs,
	 * bcs -> 0 = cs, 1 = vcs.
	 */
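	/*
	 * Worked example: for ring == vcs (id 1) and other == bcs (id 2),
	 * idx = (2 - 1) - 1 = 0, matching "vcs -> 0 = bcs" above; with
	 * vecs present the same modular rule covers all four rings.
	 */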

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}

static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
		       int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	barrier();
	return ring->status_page.page_addr[reg];
}

static inline void
intel_write_status_page(struct intel_ring_buffer *ring,
			int reg, u32 value)
{
	ring->status_page.page_addr[reg] = value;
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x20
#define I915_GEM_HWS_SCRATCH_INDEX	0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
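/*
 * For example, add_request() stores the request's seqno at dword
 * I915_GEM_HWS_INDEX via MI_STORE_DWORD_INDEX, and get_seqno() reads it
 * back with intel_read_status_page(ring, I915_GEM_HWS_INDEX).
 */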

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);

int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
				   u32 data)
{
	iowrite32(data, ring->virtual_start + ring->tail);
	ring->tail += 4;
}
static inline void intel_ring_advance(struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
}
void __intel_ring_advance(struct intel_ring_buffer *ring);
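/*
 * Typical emission pattern (a sketch; the caller's error handling elided):
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_FLUSH);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 *
 * Note that intel_ring_advance() only wraps the software tail;
 * __intel_ring_advance() is what actually writes RING_TAIL, and it is
 * called once per request (hence the commit subject above).
 */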

int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);

static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
{
	return ring->tail;
}

static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
{
	BUG_ON(ring->outstanding_lazy_seqno == 0);
	return ring->outstanding_lazy_seqno;
}

static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
		ring->trace_irq_seqno = seqno;
}

/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);

#endif /* _INTEL_RINGBUFFER_H_ */