#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64
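
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): one way the cacheline rule above plays out.  Keeping at
 * least I915_RING_FREE_SPACE bytes between tail and head when
 * computing free space ensures the head pointer never ends up past
 * the tail on the same cacheline; the driver's real space accounting
 * lives in intel_ringbuffer.c.
 */
static inline int example_ring_space(u32 head, u32 tail, int size)
{
	/* bytes available between tail and head, minus the slack */
	int space = head - (tail + I915_RING_FREE_SPACE);

	if (space < 0)
		space += size;	/* head has wrapped around */
	return space;
}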

struct intel_hw_status_page {
	u32 *page_addr;
	unsigned int gfx_addr;
	struct drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))

enum intel_ring_hangcheck_action {
	wait,	/* engine is waiting (e.g. on a semaphore) */
	active,	/* engine appears to be making forward progress */
	kick,	/* engine looks stuck, but a kick may unwedge it */
	hung,	/* engine has made no progress; consider it hung */
};

struct intel_ring_hangcheck {
	bool deadlock;
	u32 seqno;
	u32 acthd;
	int score;
	enum intel_ring_hangcheck_action action;
};

struct intel_ring_buffer {
	const char *name;
	enum intel_ring_id {
		RCS = 0x0,
		VCS,
		BCS,
		VECS,
	} id;
#define I915_NUM_RINGS 4
	u32 mmio_base;
	void __iomem *virtual_start;
	struct drm_device *dev;
	struct drm_i915_gem_object *obj;

	u32 head;
	u32 tail;
	int space;
	int size;
	int effective_size;
	struct intel_hw_status_page status_page;

	/**
	 * We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32 last_retired_head;

	struct {
		u32 gt;	/* protected by dev_priv->irq_lock */
		u32 pm;	/* protected by dev_priv->rps.lock (sucks) */
	} irq_refcount;
	u32 irq_enable_mask;	/* bitmask to enable ring interrupt */
	u32 trace_irq_seqno;
	u32 sync_seqno[I915_NUM_RINGS-1];
	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
	void (*irq_put)(struct intel_ring_buffer *ring);

	int (*init)(struct intel_ring_buffer *ring);

	void (*write_tail)(struct intel_ring_buffer *ring,
			   u32 value);
	int __must_check (*flush)(struct intel_ring_buffer *ring,
				  u32 invalidate_domains,
				  u32 flush_domains);
	int (*add_request)(struct intel_ring_buffer *ring);
	/*
	 * Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	u32 (*get_seqno)(struct intel_ring_buffer *ring,
			 bool lazy_coherency);
	void (*set_seqno)(struct intel_ring_buffer *ring,
			  u32 seqno);
	int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
				   u32 offset, u32 length,
				   unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
	void (*cleanup)(struct intel_ring_buffer *ring);
	int (*sync_to)(struct intel_ring_buffer *ring,
		       struct intel_ring_buffer *to,
		       u32 seqno);

	/* our mbox written by others */
	u32 semaphore_register[I915_NUM_RINGS];
	/* mboxes this ring signals to */
	u32 signal_mbox[I915_NUM_RINGS];

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	u32 outstanding_lazy_request;
	bool gpu_caches_dirty;
	bool fbc_dirty;

	wait_queue_head_t irq_queue;

	/**
	 * Do an explicit TLB flush before MI_SET_CONTEXT
	 */
	bool itlb_before_ctx_switch;
	struct i915_hw_context *default_context;
	struct i915_hw_context *last_context;

	struct intel_ring_hangcheck hangcheck;

	void *private;
};
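
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): callers that only need a "has this seqno passed yet?"
 * answer can tolerate a stale read and pass lazy_coherency=true,
 * skipping the expensive kick described above.  The wraparound-safe
 * comparison below mirrors i915_seqno_passed() in i915_drv.h and
 * relies on the seqno being monotonic even when not coherent.
 */
static inline bool example_seqno_passed(struct intel_ring_buffer *ring,
					u32 seqno, bool lazy_coherency)
{
	return (s32)(ring->get_seqno(ring, lazy_coherency) - seqno) >= 0;
}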

static inline bool
intel_ring_initialized(struct intel_ring_buffer *ring)
{
	return ring->obj != NULL;
}

static inline unsigned
intel_ring_flag(struct intel_ring_buffer *ring)
{
	return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_ring_buffer *ring,
		      struct intel_ring_buffer *other)
{
	int idx;

	/*
	 * rcs -> 0 = vcs, 1 = bcs, 2 = vecs;
	 * vcs -> 0 = bcs, 1 = vecs, 2 = rcs;
	 * bcs -> 0 = vecs, 1 = rcs, 2 = vcs;
	 * vecs -> 0 = rcs, 1 = vcs, 2 = bcs;
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}
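
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): the per-ring sync_seqno[] cache is indexed with
 * intel_ring_sync_index(), e.g. when deciding whether a ring-to-ring
 * sync up to @seqno is already in place, along the lines of what
 * i915_gem_object_sync() does.
 */
static inline bool example_sync_needed(struct intel_ring_buffer *from,
				       struct intel_ring_buffer *to,
				       u32 seqno)
{
	return seqno > from->sync_seqno[intel_ring_sync_index(from, to)];
}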

static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
		       int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	barrier();
	return ring->status_page.page_addr[reg];
}

static inline void
intel_write_status_page(struct intel_ring_buffer *ring,
			int reg, u32 value)
{
	ring->status_page.page_addr[reg] = value;
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x20
#define I915_GEM_HWS_SCRATCH_INDEX	0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
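
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): the breadcrumb seqno written by each ring's add_request
 * hook lives in the driver-usage area at I915_GEM_HWS_INDEX and can
 * be read back through the status-page accessor above, much as the
 * get_seqno implementations in intel_ringbuffer.c do.
 */
static inline u32 example_last_breadcrumb(struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}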

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);

int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
				   u32 data)
{
	iowrite32(data, ring->virtual_start + ring->tail);
	ring->tail += 4;
}
void intel_ring_advance(struct intel_ring_buffer *ring);
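
/*
 * Illustrative sketch of the begin/emit/advance pattern (hypothetical
 * helper, not part of the original header): reserve dwords, write
 * them, then advance the tail.  MI_NOOP comes from i915_reg.h; an
 * even dword count preserves the ring's qword alignment.
 */
static inline int example_emit_noops(struct intel_ring_buffer *ring)
{
	int ret;

	ret = intel_ring_begin(ring, 2);	/* reserve two dwords */
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);		/* publish the new tail */

	return 0;
}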

int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);

static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
{
	return ring->tail;
}

static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
{
	BUG_ON(ring->outstanding_lazy_request == 0);
	return ring->outstanding_lazy_request;
}

static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
		ring->trace_irq_seqno = seqno;
}

/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);

#endif /* _INTEL_RINGBUFFER_H_ */