drm/i915: Fix HSW parity test
[deliverable/linux.git] / drivers/gpu/drm/i915/intel_ringbuffer.c
/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"

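/*
 * Note (added for clarity): ring->head and ring->tail are byte offsets into
 * the circular ring buffer.  Free space is the gap from the hardware read
 * pointer (head, masked by HEAD_ADDR) back to the software write pointer,
 * less a small I915_RING_FREE_SPACE reserve so the tail never quite catches
 * the head; a negative result simply means the gap wraps past the end of the
 * buffer, hence the "+ ring->size" fixup below.
 */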
static inline int ring_space(struct intel_ring_buffer *ring)
{
	int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
	if (space < 0)
		space += ring->size;
	return space;
}

void __intel_ring_advance(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	ring->tail &= ring->size - 1;
	if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
		return;
	ring->write_tail(ring, ring->tail);
}

static int
gen2_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	u32 cmd;
	int ret;

	cmd = MI_FLUSH;
	if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
		cmd |= MI_NO_WRITE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
		cmd |= MI_READ_FLUSH;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen4_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	struct drm_device *dev = ring->dev;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
{
	u32 scratch_addr = ring->scratch.gtt_offset + 128;
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0); /* low dword */
	intel_ring_emit(ring, 0); /* high dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	u32 scratch_addr = ring->scratch.gtt_offset + 128;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(ring);
	if (ret)
		return ret;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	}

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
{
	int ret;

	if (!ring->fbc_dirty)
		return 0;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;
	intel_ring_emit(ring, MI_NOOP);
	/* WaFbcNukeOn3DBlt:ivb/hsw */
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, MSG_FBC_REND_STATE);
	intel_ring_emit(ring, value);
	intel_ring_advance(ring);

	ring->fbc_dirty = false;
	return 0;
}

static int
gen7_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	u32 scratch_addr = ring->scratch.gtt_offset + 128;
	int ret;

	/*
	 * Ensure that any following seqno writes only happen when the render
	 * cache is indeed flushed.
	 *
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 */
	flags |= PIPE_CONTROL_CS_STALL;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		/* Workaround: we must issue a pipe_control with CS-stall bit
		 * set before a pipe_control command that has the state cache
		 * invalidate bit set. */
		gen7_render_ring_cs_stall_wa(ring);
	}

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	if (flush_domains)
		return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);

	return 0;
}

static void ring_write_tail(struct intel_ring_buffer *ring,
			    u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
			RING_ACTHD(ring->mmio_base) : ACTHD;

	return I915_READ(acthd_reg);
}

static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_INFO(ring->dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}

static int init_ring_common(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = ring->obj;
	int ret = 0;
	u32 head;

	if (HAS_FORCE_WAKE(dev))
		gen6_gt_force_wake_get(dev_priv);

	if (I915_NEED_GFX_HWS(dev))
		intel_ring_setup_status_page(ring);
	else
		ring_setup_phys_status_page(ring);

	/* Stop the ring if it's running. */
	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	head = I915_READ_HEAD(ring) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		I915_WRITE_HEAD(ring, 0);

		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
		}
	}

	/* Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values. */
	I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
	I915_WRITE_CTL(ring,
			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
		     I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ_CTL(ring),
			  I915_READ_HEAD(ring),
			  I915_READ_TAIL(ring),
			  I915_READ_START(ring));
		ret = -EIO;
		goto out;
	}

	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
		i915_kernel_lost_context(ring->dev);
	else {
		ring->head = I915_READ_HEAD(ring);
		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring_space(ring);
		ring->last_retired_head = -1;
	}

	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));

out:
	if (HAS_FORCE_WAKE(dev))
		gen6_gt_force_wake_put(dev_priv);

	return ret;
}

static int
init_pipe_control(struct intel_ring_buffer *ring)
{
	int ret;

	if (ring->scratch.obj)
		return 0;

	ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
	if (ring->scratch.obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);

	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false);
	if (ret)
		goto err_unref;

	ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
	ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
	if (ring->scratch.cpu_page == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
			 ring->name, ring->scratch.gtt_offset);
	return 0;

err_unpin:
	i915_gem_object_unpin(ring->scratch.obj);
err_unref:
	drm_gem_object_unreference(&ring->scratch.obj->base);
err:
	return ret;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = init_ring_common(ring);

	if (INTEL_INFO(dev)->gen > 3)
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
	 */
	if (INTEL_INFO(dev)->gen >= 6)
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	/* Required for the hardware to program scanline values for waiting */
	if (INTEL_INFO(dev)->gen == 6)
		I915_WRITE(GFX_MODE,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));

	if (IS_GEN7(dev))
		I915_WRITE(GFX_MODE_GEN7,
			   _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));

	if (INTEL_INFO(dev)->gen >= 5) {
		ret = init_pipe_control(ring);
		if (ret)
			return ret;
	}

	if (IS_GEN6(dev)) {
		/* From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset.  LRA replacement
		 *  policy is not supported."
		 */
		I915_WRITE(CACHE_MODE_0,
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

		/* This is not explicitly set for GEN6, so read the register.
		 * see intel_ring_mi_set_context() for why we care.
		 * TODO: consider explicitly setting the bit for GEN5
		 */
		ring->itlb_before_ctx_switch =
			!!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
	}

	if (INTEL_INFO(dev)->gen >= 6)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	if (HAS_L3_GPU_CACHE(dev))
		I915_WRITE_IMR(ring, ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);

	return ret;
}

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;

	if (ring->scratch.obj == NULL)
		return;

	if (INTEL_INFO(dev)->gen >= 5) {
		kunmap(sg_page(ring->scratch.obj->pages->sgl));
		i915_gem_object_unpin(ring->scratch.obj);
	}

	drm_gem_object_unreference(&ring->scratch.obj->base);
	ring->scratch.obj = NULL;
}

static void
update_mboxes(struct intel_ring_buffer *ring,
	      u32 mmio_offset)
{
/* NB: In order to be able to do semaphore MBOX updates for varying number
 * of rings, it's easiest if we round up each individual update to a
 * multiple of 2 (since ring updates must always be a multiple of 2)
 * even though the actual update only requires 3 dwords.
 */
#define MBOX_UPDATE_DWORDS 4
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, mmio_offset);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, MI_NOOP);
}

/**
 * gen6_add_request - Update the semaphore mailbox registers
 *
 * @ring - ring that is adding a request
 * @seqno - return seqno stuck into the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int
gen6_add_request(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *useless;
	int i, ret;

	ret = intel_ring_begin(ring, ((I915_NUM_RINGS-1) *
				      MBOX_UPDATE_DWORDS) +
				      4);
	if (ret)
		return ret;
#undef MBOX_UPDATE_DWORDS

	for_each_ring(useless, dev_priv, i) {
		u32 mbox_reg = ring->signal_mbox[i];
		if (mbox_reg != GEN6_NOSYNC)
			update_mboxes(ring, mbox_reg);
	}

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	__intel_ring_advance(ring);

	return 0;
}

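/*
 * Note (added for clarity): seqnos are 32-bit and eventually wrap.
 * dev_priv->last_seqno tracks the most recently allocated value, so a
 * "wrapped" seqno is simply one numerically larger than it.  Callers such
 * as gen6_ring_sync() use this to skip the hardware semaphore wait across
 * a wrap, since the MI_SEMAPHORE comparison is a plain unsigned compare.
 */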
static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
					      u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return dev_priv->last_seqno < seqno;
}

/**
 * intel_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter - ring that is waiting
 * @signaller - ring which has, or will signal
 * @seqno - seqno which the waiter will block on
 */
static int
gen6_ring_sync(struct intel_ring_buffer *waiter,
	       struct intel_ring_buffer *signaller,
	       u32 seqno)
{
	int ret;
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;

	/* Throughout all of the GEM code, seqno passed implies our current
	 * seqno is >= the last seqno executed. However for hardware the
	 * comparison is strictly greater than.
	 */
	seqno -= 1;

	WARN_ON(signaller->semaphore_register[waiter->id] ==
		MI_SEMAPHORE_SYNC_INVALID);

	ret = intel_ring_begin(waiter, 4);
	if (ret)
		return ret;

	/* If seqno wrap happened, omit the wait with no-ops */
	if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
		intel_ring_emit(waiter,
				dw1 |
				signaller->semaphore_register[waiter->id]);
		intel_ring_emit(waiter, seqno);
		intel_ring_emit(waiter, 0);
		intel_ring_emit(waiter, MI_NOOP);
	} else {
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
	}
	intel_ring_advance(waiter);

	return 0;
}

#define PIPE_CONTROL_FLUSH(ring__, addr__)				\
do {									\
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |	\
		 PIPE_CONTROL_DEPTH_STALL);				\
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);	\
	intel_ring_emit(ring__, 0);					\
	intel_ring_emit(ring__, 0);					\
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring)
{
	u32 scratch_addr = ring->scratch.gtt_offset + 128;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to workaround the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
	 */
	ret = intel_ring_begin(ring, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, 0);
	__intel_ring_advance(ring);

	return 0;
}

static u32
gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
	/* Workaround to force correct ordering between irq and seqno writes on
	 * ivb (and maybe also on snb) by reading from a CS register (like
	 * ACTHD) before reading the status page. */
	if (!lazy_coherency)
		intel_ring_get_active_head(ring);
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void
ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
	return ring->scratch.cpu_page[0];
}

static void
pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
	ring->scratch.cpu_page[0] = seqno;
}

static bool
gen5_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0)
		ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
gen5_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0)
		ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
i9xx_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
i9xx_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		dev_priv->irq_mask |= ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

864
c2798b19
CW
865static bool
866i8xx_ring_get_irq(struct intel_ring_buffer *ring)
867{
868 struct drm_device *dev = ring->dev;
869 drm_i915_private_t *dev_priv = dev->dev_private;
7338aefa 870 unsigned long flags;
c2798b19
CW
871
872 if (!dev->irq_enabled)
873 return false;
874
7338aefa 875 spin_lock_irqsave(&dev_priv->irq_lock, flags);
c7113cc3 876 if (ring->irq_refcount++ == 0) {
c2798b19
CW
877 dev_priv->irq_mask &= ~ring->irq_enable_mask;
878 I915_WRITE16(IMR, dev_priv->irq_mask);
879 POSTING_READ16(IMR);
880 }
7338aefa 881 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
c2798b19
CW
882
883 return true;
884}
885
886static void
887i8xx_ring_put_irq(struct intel_ring_buffer *ring)
888{
889 struct drm_device *dev = ring->dev;
890 drm_i915_private_t *dev_priv = dev->dev_private;
7338aefa 891 unsigned long flags;
c2798b19 892
7338aefa 893 spin_lock_irqsave(&dev_priv->irq_lock, flags);
c7113cc3 894 if (--ring->irq_refcount == 0) {
c2798b19
CW
895 dev_priv->irq_mask |= ring->irq_enable_mask;
896 I915_WRITE16(IMR, dev_priv->irq_mask);
897 POSTING_READ16(IMR);
898 }
7338aefa 899 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
c2798b19
CW
900}
901
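/*
 * Note (added for clarity): the hardware status page (HWS) is a page of
 * memory the GPU writes status into, most importantly the last completed
 * seqno at I915_GEM_HWS_INDEX (see the *_get_seqno() callbacks above).
 * The function below points the hardware at that page via the per-ring
 * HWS_PGA register and then flushes the TLB entry for it on gen6+.
 */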
void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 mmio = 0;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev)) {
		switch (ring->id) {
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		case VECS:
			mmio = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(ring->dev)) {
		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
	} else {
		mmio = RING_HWS_PGA(ring->mmio_base);
	}

	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);

	/* Flush the TLB for this page */
	if (INTEL_INFO(dev)->gen >= 6) {
		u32 reg = RING_INSTPM(ring->mmio_base);
		I915_WRITE(reg,
			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					      INSTPM_SYNC_FLUSH));
		if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
			     1000))
			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
				  ring->name);
	}
}

static int
bsd_ring_flush(struct intel_ring_buffer *ring,
	       u32 invalidate_domains,
	       u32 flush_domains)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
i9xx_add_request(struct intel_ring_buffer *ring)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	__intel_ring_advance(ring);

	return 0;
}

static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	/* It looks like we need to prevent the gt from suspending while waiting
	 * for a notify irq, otherwise irqs seem to get lost on at least the
	 * blt/bsd rings on ivb. */
	gen6_gt_force_wake_get(dev_priv);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
			I915_WRITE_IMR(ring,
				       ~(ring->irq_enable_mask |
					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
		else
			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
gen6_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
			I915_WRITE_IMR(ring,
				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
		else
			I915_WRITE_IMR(ring, ~0);
		ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	gen6_gt_force_wake_put(dev_priv);
}

static bool
hsw_vebox_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		snb_enable_pm_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
hsw_vebox_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		I915_WRITE_IMR(ring, ~0);
		snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static int
i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
			 u32 offset, u32 length,
			 unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			MI_BATCH_GTT |
			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT (256*1024)
static int
i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
			 u32 offset, u32 len,
			 unsigned flags)
{
	int ret;

	if (flags & I915_DISPATCH_PINNED) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER);
		intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
		intel_ring_emit(ring, offset + len - 8);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);
	} else {
		u32 cs_offset = ring->scratch.gtt_offset;

		if (len > I830_BATCH_LIMIT)
			return -ENOSPC;

		ret = intel_ring_begin(ring, 9+3);
		if (ret)
			return ret;
		/* Blit the batch (which has now all relocs applied) to the stable batch
		 * scratch bo area (so that the CS never stumbles over its tlb
		 * invalidation bug) ... */
		intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
				XY_SRC_COPY_BLT_WRITE_ALPHA |
				XY_SRC_COPY_BLT_WRITE_RGB);
		intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
		intel_ring_emit(ring, cs_offset);
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, 4096);
		intel_ring_emit(ring, offset);
		intel_ring_emit(ring, MI_FLUSH);

		/* ... and execute it. */
		intel_ring_emit(ring, MI_BATCH_BUFFER);
		intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
		intel_ring_emit(ring, cs_offset + len - 8);
		intel_ring_advance(ring);
	}

	return 0;
}

static int
i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
			 u32 offset, u32 len,
			 unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
	intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
	intel_ring_advance(ring);

	return 0;
}

static void cleanup_status_page(struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;

	kunmap(sg_page(obj->pages->sgl));
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);
	ring->status_page.obj = NULL;
}

static int init_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
	if (ret != 0) {
		goto err_unref;
	}

	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
	ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
	if (ring->status_page.page_addr == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	return ret;
}

static int init_phys_status_page(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (!dev_priv->status_page_dmah) {
		dev_priv->status_page_dmah =
			drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
		if (!dev_priv->status_page_dmah)
			return -ENOMEM;
	}

	ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	return 0;
}

static int intel_init_ring_buffer(struct drm_device *dev,
				  struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	ring->size = 32 * PAGE_SIZE;
	memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));

	init_waitqueue_head(&ring->irq_queue);

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(ring);
		if (ret)
			return ret;
	} else {
		BUG_ON(ring->id != RCS);
		ret = init_phys_status_page(ring);
		if (ret)
			return ret;
	}

	obj = NULL;
	if (!HAS_LLC(dev))
		obj = i915_gem_object_create_stolen(dev, ring->size);
	if (obj == NULL)
		obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->obj = obj;

	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false);
	if (ret)
		goto err_unref;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto err_unpin;

	ring->virtual_start =
		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
			   ring->size);
	if (ring->virtual_start == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ret = ring->init(ring);
	if (ret)
		goto err_unmap;

	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev) || IS_845G(ring->dev))
		ring->effective_size -= 128;

	return 0;

err_unmap:
	iounmap(ring->virtual_start);
err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
	ring->obj = NULL;
err_hws:
	cleanup_status_page(ring);
	return ret;
}

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv;
	int ret;

	if (ring->obj == NULL)
		return;

	/* Disable the ring buffer. The ring must be idle at this point */
	dev_priv = ring->dev->dev_private;
	ret = intel_ring_idle(ring);
	if (ret)
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	I915_WRITE_CTL(ring, 0);

	iounmap(ring->virtual_start);

	i915_gem_object_unpin(ring->obj);
	drm_gem_object_unreference(&ring->obj->base);
	ring->obj = NULL;

	if (ring->cleanup)
		ring->cleanup(ring);

	cleanup_status_page(ring);
}

static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
	int ret;

	ret = i915_wait_seqno(ring, seqno);
	if (!ret)
		i915_gem_retire_requests_ring(ring);

	return ret;
}

static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
{
	struct drm_i915_gem_request *request;
	u32 seqno = 0;
	int ret;

	i915_gem_retire_requests_ring(ring);

	if (ring->last_retired_head != -1) {
		ring->head = ring->last_retired_head;
		ring->last_retired_head = -1;
		ring->space = ring_space(ring);
		if (ring->space >= n)
			return 0;
	}

	list_for_each_entry(request, &ring->request_list, list) {
		int space;

		if (request->tail == -1)
			continue;

		space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
		if (space < 0)
			space += ring->size;
		if (space >= n) {
			seqno = request->seqno;
			break;
		}

		/* Consume this request in case we need more space than
		 * is available and so need to prevent a race between
		 * updating last_retired_head and direct reads of
		 * I915_RING_HEAD. It also provides a nice sanity check.
		 */
		request->tail = -1;
	}

	if (seqno == 0)
		return -ENOSPC;

	ret = intel_ring_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	if (WARN_ON(ring->last_retired_head == -1))
		return -ENOSPC;

	ring->head = ring->last_retired_head;
	ring->last_retired_head = -1;
	ring->space = ring_space(ring);
	if (WARN_ON(ring->space < n))
		return -ENOSPC;

	return 0;
}

static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	int ret;

	ret = intel_ring_wait_request(ring, n);
	if (ret != -ENOSPC)
		return ret;

	/* force the tail write in case we have been skipping them */
	__intel_ring_advance(ring);

	trace_i915_ring_wait_begin(ring);
	/* With GEM the hangcheck timer should kick us out of the loop,
	 * leaving it early runs the risk of corrupting GEM state (due
	 * to running on almost untested codepaths). But on resume
	 * timers don't work yet, so prevent a complete hang in that
	 * case by choosing an insanely large timeout. */
	end = jiffies + 60 * HZ;

	do {
		ring->head = I915_READ_HEAD(ring);
		ring->space = ring_space(ring);
		if (ring->space >= n) {
			trace_i915_ring_wait_end(ring);
			return 0;
		}

		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		msleep(1);

		ret = i915_gem_check_wedge(&dev_priv->gpu_error,
					   dev_priv->mm.interruptible);
		if (ret)
			return ret;
	} while (!time_after(jiffies, end));
	trace_i915_ring_wait_end(ring);
	return -EBUSY;
}

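/*
 * Note (added for clarity): when a request would not fit in the space left
 * before the end of the buffer, the remainder up to the end is padded with
 * MI_NOOP and the tail wraps back to offset 0, so the command streamer never
 * executes stale data across the wrap point.
 */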
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
	uint32_t __iomem *virt;
	int rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = ring_wait_for_space(ring, rem);
		if (ret)
			return ret;
	}

	virt = ring->virtual_start + ring->tail;
	rem /= 4;
	while (rem--)
		iowrite32(MI_NOOP, virt++);

	ring->tail = 0;
	ring->space = ring_space(ring);

	return 0;
}

int intel_ring_idle(struct intel_ring_buffer *ring)
{
	u32 seqno;
	int ret;

	/* We need to add any requests required to flush the objects and ring */
	if (ring->outstanding_lazy_seqno) {
		ret = i915_add_request(ring, NULL);
		if (ret)
			return ret;
	}

	/* Wait upon the last request to be completed */
	if (list_empty(&ring->request_list))
		return 0;

	seqno = list_entry(ring->request_list.prev,
			   struct drm_i915_gem_request,
			   list)->seqno;

	return i915_wait_seqno(ring, seqno);
}

static int
intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
{
	if (ring->outstanding_lazy_seqno)
		return 0;

	if (ring->preallocated_lazy_request == NULL) {
		struct drm_i915_gem_request *request;

		request = kmalloc(sizeof(*request), GFP_KERNEL);
		if (request == NULL)
			return -ENOMEM;

		ring->preallocated_lazy_request = request;
	}

	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
}

static int __intel_ring_begin(struct intel_ring_buffer *ring,
			      int bytes)
{
	int ret;

	if (unlikely(ring->tail + bytes > ring->effective_size)) {
		ret = intel_wrap_ring_buffer(ring);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ring->space < bytes)) {
		ret = ring_wait_for_space(ring, bytes);
		if (unlikely(ret))
			return ret;
	}

	ring->space -= bytes;
	return 0;
}

int intel_ring_begin(struct intel_ring_buffer *ring,
		     int num_dwords)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	int ret;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
				   dev_priv->mm.interruptible);
	if (ret)
		return ret;

	/* Preallocate the olr before touching the ring */
	ret = intel_ring_alloc_seqno(ring);
	if (ret)
		return ret;

	return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
}
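
/*
 * Note (added for clarity): the usual emission pattern, as used throughout
 * this file, is to reserve space first and then emit exactly that many
 * dwords before advancing the tail, e.g.:
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_FLUSH);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */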

void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	BUG_ON(ring->outstanding_lazy_seqno);

	if (INTEL_INFO(ring->dev)->gen >= 6) {
		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
		if (HAS_VEBOX(ring->dev))
			I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
	}

	ring->set_seqno(ring, seqno);
	ring->hangcheck.seqno = seqno;
}

static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
				     u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	I915_WRITE64(GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	I915_WRITE_TAIL(ring, value);
	POSTING_READ(RING_TAIL(ring->mmio_base));

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
}

static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
			       u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	/*
	 * Bspec vol 1c.5 - video engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (invalidate & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
			MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			     u32 offset, u32 len,
			     unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			      u32 offset, u32 len,
			      unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

/* Blitter support (SandyBridge+) */

static int gen6_ring_flush(struct intel_ring_buffer *ring,
			   u32 invalidate, u32 flush)
{
	struct drm_device *dev = ring->dev;
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	/*
	 * Bspec vol 1c.3 - blitter engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (invalidate & I915_GEM_DOMAIN_RENDER)
		cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
			MI_FLUSH_DW_OP_STOREDW;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	if (IS_GEN7(dev) && flush)
		return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);

	return 0;
}

5c1143bb
XH
1722int intel_init_render_ring_buffer(struct drm_device *dev)
1723{
1724 drm_i915_private_t *dev_priv = dev->dev_private;
1ec14ad3 1725 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
5c1143bb 1726
59465b5f
DV
1727 ring->name = "render ring";
1728 ring->id = RCS;
1729 ring->mmio_base = RENDER_RING_BASE;
1730
1ec14ad3
CW
1731 if (INTEL_INFO(dev)->gen >= 6) {
1732 ring->add_request = gen6_add_request;
4772eaeb 1733 ring->flush = gen7_render_ring_flush;
6c6cf5aa 1734 if (INTEL_INFO(dev)->gen == 6)
b3111509 1735 ring->flush = gen6_render_ring_flush;
25c06300
BW
1736 ring->irq_get = gen6_ring_get_irq;
1737 ring->irq_put = gen6_ring_put_irq;
cc609d5d 1738 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
4cd53c0c 1739 ring->get_seqno = gen6_ring_get_seqno;
b70ec5bf 1740 ring->set_seqno = ring_set_seqno;
686cb5f9 1741 ring->sync_to = gen6_ring_sync;
5586181f
BW
1742 ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_INVALID;
1743 ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_RV;
1744 ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_RB;
1950de14 1745 ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_RVE;
ad776f8b
BW
1746 ring->signal_mbox[RCS] = GEN6_NOSYNC;
1747 ring->signal_mbox[VCS] = GEN6_VRSYNC;
1748 ring->signal_mbox[BCS] = GEN6_BRSYNC;
1950de14 1749 ring->signal_mbox[VECS] = GEN6_VERSYNC;
c6df541c
CW
1750 } else if (IS_GEN5(dev)) {
1751 ring->add_request = pc_render_add_request;
46f0f8d1 1752 ring->flush = gen4_render_ring_flush;
c6df541c 1753 ring->get_seqno = pc_render_get_seqno;
b70ec5bf 1754 ring->set_seqno = pc_render_set_seqno;
e48d8634
DV
1755 ring->irq_get = gen5_ring_get_irq;
1756 ring->irq_put = gen5_ring_put_irq;
cc609d5d
BW
1757 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
1758 GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
59465b5f 1759 } else {
8620a3a9 1760 ring->add_request = i9xx_add_request;
46f0f8d1
CW
1761 if (INTEL_INFO(dev)->gen < 4)
1762 ring->flush = gen2_render_ring_flush;
1763 else
1764 ring->flush = gen4_render_ring_flush;
59465b5f 1765 ring->get_seqno = ring_get_seqno;
b70ec5bf 1766 ring->set_seqno = ring_set_seqno;
c2798b19
CW
1767 if (IS_GEN2(dev)) {
1768 ring->irq_get = i8xx_ring_get_irq;
1769 ring->irq_put = i8xx_ring_put_irq;
1770 } else {
1771 ring->irq_get = i9xx_ring_get_irq;
1772 ring->irq_put = i9xx_ring_put_irq;
1773 }
e3670319 1774 ring->irq_enable_mask = I915_USER_INTERRUPT;
1ec14ad3 1775 }
59465b5f 1776 ring->write_tail = ring_write_tail;
d7d4eedd
CW
1777 if (IS_HASWELL(dev))
1778 ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
1779 else if (INTEL_INFO(dev)->gen >= 6)
fb3256da
DV
1780 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1781 else if (INTEL_INFO(dev)->gen >= 4)
1782 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
1783 else if (IS_I830(dev) || IS_845G(dev))
1784 ring->dispatch_execbuffer = i830_dispatch_execbuffer;
1785 else
1786 ring->dispatch_execbuffer = i915_dispatch_execbuffer;
59465b5f
DV
1787 ring->init = init_render_ring;
1788 ring->cleanup = render_ring_cleanup;
1789
b45305fc
DV
1790 /* Workaround batchbuffer to combat CS tlb bug. */
1791 if (HAS_BROKEN_CS_TLB(dev)) {
1792 struct drm_i915_gem_object *obj;
1793 int ret;
1794
1795 obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
1796 if (obj == NULL) {
1797 DRM_ERROR("Failed to allocate batch bo\n");
1798 return -ENOMEM;
1799 }
1800
c37e2204 1801 ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
b45305fc
DV
1802 if (ret != 0) {
1803 drm_gem_object_unreference(&obj->base);
			DRM_ERROR("Failed to pin batch bo\n");
			return ret;
		}

		ring->scratch.obj = obj;
		ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
	}

	return intel_init_ring_buffer(dev, ring);
}
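
/*
 * Editor's note (hedged, not part of the original file): on HAS_BROKEN_CS_TLB
 * platforms the scratch object pinned above stays resident in the global GTT
 * for the lifetime of the ring; its offset is stashed in
 * ring->scratch.gtt_offset so the i830 dispatch path can use it as a fixed
 * batch target when working around the CS TLB bug, with I830_BATCH_LIMIT
 * bounding the batch size that can be handled this way.
 */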

int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	int ret;

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 6) {
		/* non-kms not supported on gen6+ */
		return -ENODEV;
	}

	/* Note: gem is not supported on gen5/ilk without kms (the corresponding
	 * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
	 * the special gen5 functions. */
	ring->add_request = i9xx_add_request;
	if (INTEL_INFO(dev)->gen < 4)
		ring->flush = gen2_render_ring_flush;
	else
		ring->flush = gen4_render_ring_flush;
	ring->get_seqno = ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	if (IS_GEN2(dev)) {
		ring->irq_get = i8xx_ring_get_irq;
		ring->irq_put = i8xx_ring_put_irq;
	} else {
		ring->irq_get = i9xx_ring_get_irq;
		ring->irq_put = i9xx_ring_put_irq;
	}
	ring->irq_enable_mask = I915_USER_INTERRUPT;
	ring->write_tail = ring_write_tail;
	if (INTEL_INFO(dev)->gen >= 4)
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	else if (IS_I830(dev) || IS_845G(dev))
		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
	else
		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
	ring->init = init_render_ring;
	ring->cleanup = render_ring_cleanup;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);

	ring->size = size;
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev) || IS_845G(ring->dev))
		ring->effective_size -= 128;

	ring->virtual_start = ioremap_wc(start, size);
	if (ring->virtual_start == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	if (!I915_NEED_GFX_HWS(dev)) {
		ret = init_phys_status_page(ring);
		if (ret)
			return ret;
	}

	return 0;
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

	ring->name = "bsd ring";
	ring->id = VCS;

	ring->write_tail = ring_write_tail;
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		ring->mmio_base = GEN6_BSD_RING_BASE;
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN6(dev))
			ring->write_tail = gen6_bsd_ring_write_tail;
		ring->flush = gen6_bsd_ring_flush;
		ring->add_request = gen6_add_request;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
		ring->irq_get = gen6_ring_get_irq;
		ring->irq_put = gen6_ring_put_irq;
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
		ring->sync_to = gen6_ring_sync;
		ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
		ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
		ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VB;
		ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_VVE;
		ring->signal_mbox[RCS] = GEN6_RVSYNC;
		ring->signal_mbox[VCS] = GEN6_NOSYNC;
		ring->signal_mbox[BCS] = GEN6_BVSYNC;
		ring->signal_mbox[VECS] = GEN6_VEVSYNC;
	} else {
		ring->mmio_base = BSD_RING_BASE;
		ring->flush = bsd_ring_flush;
		ring->add_request = i9xx_add_request;
		ring->get_seqno = ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (IS_GEN5(dev)) {
			ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
			ring->irq_get = gen5_ring_get_irq;
			ring->irq_put = gen5_ring_put_irq;
		} else {
			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
			ring->irq_get = i9xx_ring_get_irq;
			ring->irq_put = i9xx_ring_put_irq;
		}
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	}
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];

	ring->name = "blitter ring";
	ring->id = BCS;

	ring->mmio_base = BLT_RING_BASE;
	ring->write_tail = ring_write_tail;
	ring->flush = gen6_ring_flush;
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
	ring->irq_get = gen6_ring_get_irq;
	ring->irq_put = gen6_ring_put_irq;
	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	ring->sync_to = gen6_ring_sync;
	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
	ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_INVALID;
	ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_BVE;
	ring->signal_mbox[RCS] = GEN6_RBSYNC;
	ring->signal_mbox[VCS] = GEN6_VBSYNC;
	ring->signal_mbox[BCS] = GEN6_NOSYNC;
	ring->signal_mbox[VECS] = GEN6_VEBSYNC;
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_vebox_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[VECS];

	ring->name = "video enhancement ring";
	ring->id = VECS;

	ring->mmio_base = VEBOX_RING_BASE;
	ring->write_tail = ring_write_tail;
	ring->flush = gen6_ring_flush;
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
	ring->irq_get = hsw_vebox_get_irq;
	ring->irq_put = hsw_vebox_put_irq;
	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	ring->sync_to = gen6_ring_sync;
	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;
	ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VEB;
	ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_INVALID;
	ring->signal_mbox[RCS] = GEN6_RVESYNC;
	ring->signal_mbox[VCS] = GEN6_VVESYNC;
	ring->signal_mbox[BCS] = GEN6_BVESYNC;
	ring->signal_mbox[VECS] = GEN6_NOSYNC;
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}
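
/*
 * Editor's note (sketch, not part of the original file): the
 * semaphore_register[] and signal_mbox[] tables filled in by the gen6+ init
 * paths above form a per-ring-pair matrix.  They record, for every other
 * ring, which MI_SEMAPHORE_SYNC_* selector gen6_ring_sync() should emit and
 * which mailbox register the seqno is broadcast to when signalling; a ring's
 * own slot is MI_SEMAPHORE_SYNC_INVALID / GEN6_NOSYNC because a ring never
 * synchronises with itself.
 */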

int
intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
{
	int ret;

	if (!ring->gpu_caches_dirty)
		return 0;

	ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);

	ring->gpu_caches_dirty = false;
	return 0;
}

int
intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
{
	uint32_t flush_domains;
	int ret;

	flush_domains = 0;
	if (ring->gpu_caches_dirty)
		flush_domains = I915_GEM_GPU_DOMAINS;

	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
	if (ret)
		return ret;

	trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);

	ring->gpu_caches_dirty = false;
	return 0;
}
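
/*
 * Editor's usage sketch (hypothetical caller, not part of the original file):
 * a submission path would typically invalidate caches before emitting
 * commands that sample new buffers, and mark them dirty again once the batch
 * has written through them, e.g.:
 *
 *	ret = intel_ring_invalidate_all_caches(ring);
 *	if (ret)
 *		return ret;
 *	... emit and dispatch the batch via ring->dispatch_execbuffer() ...
 *	ring->gpu_caches_dirty = true;
 *	ret = intel_ring_flush_all_caches(ring);
 *
 * intel_ring_flush_all_caches() returns immediately while gpu_caches_dirty
 * is unset, so it is cheap to call unconditionally.
 */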