drivers/gpu/drm/i915/intel_ringbuffer.c
1 /*
2 * Copyright © 2008-2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Zou Nan hai <nanhai.zou@intel.com>
26 * Xiang Hai hao <haihao.xiang@intel.com>
27 *
28 */
29
30 #include <drm/drmP.h>
31 #include "i915_drv.h"
32 #include <drm/i915_drm.h>
33 #include "i915_trace.h"
34 #include "intel_drv.h"
35
36 /* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
37 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
38 * to give some indication as to some of the magic values used in the various
39 * workarounds!
40 */
41 #define CACHELINE_BYTES 64
42
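/*
 * Free-space helper for the circular ring buffer: the writable gap is the
 * distance from tail forward to head, minus a small reserve
 * (I915_RING_FREE_SPACE) that keeps the hardware head from ever running
 * right up against the tail. A negative result just means the gap wraps
 * past the end of the buffer, hence the "+ size" fixup.
 *
 * Worked example with hypothetical values (assuming a 64-byte reserve):
 * head = 0x80, tail = 0xc0, size = 0x1000 gives
 * space = 0x80 - (0xc0 + 64) = -0x80, which wraps to 0xf80 bytes free.
 */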
43 static inline int __ring_space(int head, int tail, int size)
44 {
45 int space = head - (tail + I915_RING_FREE_SPACE);
46 if (space < 0)
47 space += size;
48 return space;
49 }
50
51 static inline int ring_space(struct intel_ringbuffer *ringbuf)
52 {
53 return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
54 }
55
56 static bool intel_ring_stopped(struct intel_engine_cs *ring)
57 {
58 struct drm_i915_private *dev_priv = ring->dev->dev_private;
59 return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
60 }
61
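/*
 * Commit the software tail to the hardware: mask the tail back into the
 * ring (the buffer size is a power of two) and write it to the TAIL
 * register, unless the ring has been stopped via the gpu_error.stop_rings
 * debug facility, in which case the hardware never sees the new commands.
 */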
62 void __intel_ring_advance(struct intel_engine_cs *ring)
63 {
64 struct intel_ringbuffer *ringbuf = ring->buffer;
65 ringbuf->tail &= ringbuf->size - 1;
66 if (intel_ring_stopped(ring))
67 return;
68 ring->write_tail(ring, ringbuf->tail);
69 }
70
71 static int
72 gen2_render_ring_flush(struct intel_engine_cs *ring,
73 u32 invalidate_domains,
74 u32 flush_domains)
75 {
76 u32 cmd;
77 int ret;
78
79 cmd = MI_FLUSH;
80 if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
81 cmd |= MI_NO_WRITE_FLUSH;
82
83 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
84 cmd |= MI_READ_FLUSH;
85
86 ret = intel_ring_begin(ring, 2);
87 if (ret)
88 return ret;
89
90 intel_ring_emit(ring, cmd);
91 intel_ring_emit(ring, MI_NOOP);
92 intel_ring_advance(ring);
93
94 return 0;
95 }
96
97 static int
98 gen4_render_ring_flush(struct intel_engine_cs *ring,
99 u32 invalidate_domains,
100 u32 flush_domains)
101 {
102 struct drm_device *dev = ring->dev;
103 u32 cmd;
104 int ret;
105
106 /*
107 * read/write caches:
108 *
109 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
110 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
111 * also flushed at 2d versus 3d pipeline switches.
112 *
113 * read-only caches:
114 *
115 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
116 * MI_READ_FLUSH is set, and is always flushed on 965.
117 *
118 * I915_GEM_DOMAIN_COMMAND may not exist?
119 *
120 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
121 * invalidated when MI_EXE_FLUSH is set.
122 *
123 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
124 * invalidated with every MI_FLUSH.
125 *
126 * TLBs:
127 *
128 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
129 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
130 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
131 * are flushed at any MI_FLUSH.
132 */
133
134 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
135 if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
136 cmd &= ~MI_NO_WRITE_FLUSH;
137 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
138 cmd |= MI_EXE_FLUSH;
139
140 if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
141 (IS_G4X(dev) || IS_GEN5(dev)))
142 cmd |= MI_INVALIDATE_ISP;
143
144 ret = intel_ring_begin(ring, 2);
145 if (ret)
146 return ret;
147
148 intel_ring_emit(ring, cmd);
149 intel_ring_emit(ring, MI_NOOP);
150 intel_ring_advance(ring);
151
152 return 0;
153 }
154
155 /**
156 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
157 * implementing two workarounds on gen6. From section 1.4.7.1
158 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
159 *
160 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
161 * produced by non-pipelined state commands), software needs to first
162 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
163 * 0.
164 *
165 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
166 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
167 *
168 * And the workaround for these two requires this workaround first:
169 *
170 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
171 * BEFORE the pipe-control with a post-sync op and no write-cache
172 * flushes.
173 *
174 * And this last workaround is tricky because of the requirements on
175 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
176 * volume 2 part 1:
177 *
178 * "1 of the following must also be set:
179 * - Render Target Cache Flush Enable ([12] of DW1)
180 * - Depth Cache Flush Enable ([0] of DW1)
181 * - Stall at Pixel Scoreboard ([1] of DW1)
182 * - Depth Stall ([13] of DW1)
183 * - Post-Sync Operation ([13] of DW1)
184 * - Notify Enable ([8] of DW1)"
185 *
186 * The cache flushes require the workaround flush that triggered this
187 * one, so we can't use it. Depth stall would trigger the same.
188 * Post-sync nonzero is what triggered this second workaround, so we
189 * can't use that one either. Notify enable is IRQs, which aren't
190 * really our business. That leaves only stall at scoreboard.
191 */
192 static int
193 intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
194 {
195 u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
196 int ret;
197
198
199 ret = intel_ring_begin(ring, 6);
200 if (ret)
201 return ret;
202
203 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
204 intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
205 PIPE_CONTROL_STALL_AT_SCOREBOARD);
206 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
207 intel_ring_emit(ring, 0); /* low dword */
208 intel_ring_emit(ring, 0); /* high dword */
209 intel_ring_emit(ring, MI_NOOP);
210 intel_ring_advance(ring);
211
212 ret = intel_ring_begin(ring, 6);
213 if (ret)
214 return ret;
215
216 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
217 intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
218 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
219 intel_ring_emit(ring, 0);
220 intel_ring_emit(ring, 0);
221 intel_ring_emit(ring, MI_NOOP);
222 intel_ring_advance(ring);
223
224 return 0;
225 }
226
227 static int
228 gen6_render_ring_flush(struct intel_engine_cs *ring,
229 u32 invalidate_domains, u32 flush_domains)
230 {
231 u32 flags = 0;
232 u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
233 int ret;
234
235 /* Force SNB workarounds for PIPE_CONTROL flushes */
236 ret = intel_emit_post_sync_nonzero_flush(ring);
237 if (ret)
238 return ret;
239
240 /* Just flush everything. Experiments have shown that reducing the
241 * number of bits based on the write domains has little performance
242 * impact.
243 */
244 if (flush_domains) {
245 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
246 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
247 /*
248 * Ensure that any following seqno writes only happen
249 * when the render cache is indeed flushed.
250 */
251 flags |= PIPE_CONTROL_CS_STALL;
252 }
253 if (invalidate_domains) {
254 flags |= PIPE_CONTROL_TLB_INVALIDATE;
255 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
256 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
257 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
258 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
259 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
260 /*
261 * TLB invalidate requires a post-sync write.
262 */
263 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
264 }
265
266 ret = intel_ring_begin(ring, 4);
267 if (ret)
268 return ret;
269
270 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
271 intel_ring_emit(ring, flags);
272 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
273 intel_ring_emit(ring, 0);
274 intel_ring_advance(ring);
275
276 return 0;
277 }
278
279 static int
280 gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
281 {
282 int ret;
283
284 ret = intel_ring_begin(ring, 4);
285 if (ret)
286 return ret;
287
288 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
289 intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
290 PIPE_CONTROL_STALL_AT_SCOREBOARD);
291 intel_ring_emit(ring, 0);
292 intel_ring_emit(ring, 0);
293 intel_ring_advance(ring);
294
295 return 0;
296 }
297
298 static int gen7_ring_fbc_flush(struct intel_engine_cs *ring, u32 value)
299 {
300 int ret;
301
302 if (!ring->fbc_dirty)
303 return 0;
304
305 ret = intel_ring_begin(ring, 6);
306 if (ret)
307 return ret;
308 /* WaFbcNukeOn3DBlt:ivb/hsw */
309 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
310 intel_ring_emit(ring, MSG_FBC_REND_STATE);
311 intel_ring_emit(ring, value);
312 intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT);
313 intel_ring_emit(ring, MSG_FBC_REND_STATE);
314 intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
315 intel_ring_advance(ring);
316
317 ring->fbc_dirty = false;
318 return 0;
319 }
320
321 static int
322 gen7_render_ring_flush(struct intel_engine_cs *ring,
323 u32 invalidate_domains, u32 flush_domains)
324 {
325 u32 flags = 0;
326 u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
327 int ret;
328
329 /*
330 * Ensure that any following seqno writes only happen when the render
331 * cache is indeed flushed.
332 *
333 * Workaround: 4th PIPE_CONTROL command (except the ones with only
334 * read-cache invalidate bits set) must have the CS_STALL bit set. We
335 * don't try to be clever and just set it unconditionally.
336 */
337 flags |= PIPE_CONTROL_CS_STALL;
338
339 /* Just flush everything. Experiments have shown that reducing the
340 * number of bits based on the write domains has little performance
341 * impact.
342 */
343 if (flush_domains) {
344 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
345 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
346 }
347 if (invalidate_domains) {
348 flags |= PIPE_CONTROL_TLB_INVALIDATE;
349 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
350 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
351 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
352 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
353 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
354 /*
355 * TLB invalidate requires a post-sync write.
356 */
357 flags |= PIPE_CONTROL_QW_WRITE;
358 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
359
360 /* Workaround: we must issue a pipe_control with CS-stall bit
361 * set before a pipe_control command that has the state cache
362 * invalidate bit set. */
363 gen7_render_ring_cs_stall_wa(ring);
364 }
365
366 ret = intel_ring_begin(ring, 4);
367 if (ret)
368 return ret;
369
370 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
371 intel_ring_emit(ring, flags);
372 intel_ring_emit(ring, scratch_addr);
373 intel_ring_emit(ring, 0);
374 intel_ring_advance(ring);
375
376 if (!invalidate_domains && flush_domains)
377 return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
378
379 return 0;
380 }
381
382 static int
383 gen8_render_ring_flush(struct intel_engine_cs *ring,
384 u32 invalidate_domains, u32 flush_domains)
385 {
386 u32 flags = 0;
387 u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
388 int ret;
389
390 flags |= PIPE_CONTROL_CS_STALL;
391
392 if (flush_domains) {
393 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
394 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
395 }
396 if (invalidate_domains) {
397 flags |= PIPE_CONTROL_TLB_INVALIDATE;
398 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
399 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
400 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
401 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
402 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
403 flags |= PIPE_CONTROL_QW_WRITE;
404 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
405 }
406
407 ret = intel_ring_begin(ring, 6);
408 if (ret)
409 return ret;
410
411 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
412 intel_ring_emit(ring, flags);
413 intel_ring_emit(ring, scratch_addr);
414 intel_ring_emit(ring, 0);
415 intel_ring_emit(ring, 0);
416 intel_ring_emit(ring, 0);
417 intel_ring_advance(ring);
418
419 return 0;
420
421 }
422
423 static void ring_write_tail(struct intel_engine_cs *ring,
424 u32 value)
425 {
426 struct drm_i915_private *dev_priv = ring->dev->dev_private;
427 I915_WRITE_TAIL(ring, value);
428 }
429
430 u64 intel_ring_get_active_head(struct intel_engine_cs *ring)
431 {
432 struct drm_i915_private *dev_priv = ring->dev->dev_private;
433 u64 acthd;
434
435 if (INTEL_INFO(ring->dev)->gen >= 8)
436 acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base),
437 RING_ACTHD_UDW(ring->mmio_base));
438 else if (INTEL_INFO(ring->dev)->gen >= 4)
439 acthd = I915_READ(RING_ACTHD(ring->mmio_base));
440 else
441 acthd = I915_READ(ACTHD);
442
443 return acthd;
444 }
445
446 static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
447 {
448 struct drm_i915_private *dev_priv = ring->dev->dev_private;
449 u32 addr;
450
451 addr = dev_priv->status_page_dmah->busaddr;
452 if (INTEL_INFO(ring->dev)->gen >= 4)
453 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
454 I915_WRITE(HWS_PGA, addr);
455 }
456
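/*
 * Ask the hardware to stop the ring: on gen3+ set the STOP_RING bit in the
 * ring's MI_MODE register and wait for MODE_IDLE, then clear CTL, HEAD and
 * TAIL. Returns true only if HEAD reads back as zero, which the caller uses
 * to detect rings that refuse to reset.
 */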
457 static bool stop_ring(struct intel_engine_cs *ring)
458 {
459 struct drm_i915_private *dev_priv = to_i915(ring->dev);
460
461 if (!IS_GEN2(ring->dev)) {
462 I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
463 if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
464 DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
465 return false;
466 }
467 }
468
469 I915_WRITE_CTL(ring, 0);
470 I915_WRITE_HEAD(ring, 0);
471 ring->write_tail(ring, 0);
472
473 if (!IS_GEN2(ring->dev)) {
474 (void)I915_READ_CTL(ring);
475 I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
476 }
477
478 return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
479 }
480
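/*
 * Common engine initialisation: stop the ring (retrying once, as G45 is
 * known to need it), point the hardware at the status page, program the
 * ring START and CTL registers, and verify that the engine comes back with
 * a valid, zeroed HEAD before restoring the software bookkeeping.
 */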
481 static int init_ring_common(struct intel_engine_cs *ring)
482 {
483 struct drm_device *dev = ring->dev;
484 struct drm_i915_private *dev_priv = dev->dev_private;
485 struct intel_ringbuffer *ringbuf = ring->buffer;
486 struct drm_i915_gem_object *obj = ringbuf->obj;
487 int ret = 0;
488
489 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
490
491 if (!stop_ring(ring)) {
492 /* G45 ring initialization often fails to reset head to zero */
493 DRM_DEBUG_KMS("%s head not reset to zero "
494 "ctl %08x head %08x tail %08x start %08x\n",
495 ring->name,
496 I915_READ_CTL(ring),
497 I915_READ_HEAD(ring),
498 I915_READ_TAIL(ring),
499 I915_READ_START(ring));
500
501 if (!stop_ring(ring)) {
502 DRM_ERROR("failed to set %s head to zero "
503 "ctl %08x head %08x tail %08x start %08x\n",
504 ring->name,
505 I915_READ_CTL(ring),
506 I915_READ_HEAD(ring),
507 I915_READ_TAIL(ring),
508 I915_READ_START(ring));
509 ret = -EIO;
510 goto out;
511 }
512 }
513
514 if (I915_NEED_GFX_HWS(dev))
515 intel_ring_setup_status_page(ring);
516 else
517 ring_setup_phys_status_page(ring);
518
519 /* Initialize the ring. This must happen _after_ we've cleared the ring
520 * registers with the above sequence (the readback of the HEAD registers
521 * also enforces ordering), otherwise the hw might lose the new ring
522 * register values. */
523 I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
524 I915_WRITE_CTL(ring,
525 ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
526 | RING_VALID);
527
528 /* If the head is still not zero, the ring is dead */
529 if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
530 I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
531 (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
532 DRM_ERROR("%s initialization failed "
533 "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
534 ring->name,
535 I915_READ_CTL(ring), I915_READ_CTL(ring) & RING_VALID,
536 I915_READ_HEAD(ring), I915_READ_TAIL(ring),
537 I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj));
538 ret = -EIO;
539 goto out;
540 }
541
542 if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
543 i915_kernel_lost_context(ring->dev);
544 else {
545 ringbuf->head = I915_READ_HEAD(ring);
546 ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
547 ringbuf->space = ring_space(ringbuf);
548 ringbuf->last_retired_head = -1;
549 }
550
551 memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
552
553 out:
554 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
555
556 return ret;
557 }
558
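/*
 * Allocate and pin a 4KiB scratch page in the GGTT. It serves as the write
 * target for PIPE_CONTROL post-sync operations and related workarounds; on
 * the pc_render path the first dword of this page also holds the current
 * seqno (see pc_render_get_seqno()).
 */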
559 static int
560 init_pipe_control(struct intel_engine_cs *ring)
561 {
562 int ret;
563
564 if (ring->scratch.obj)
565 return 0;
566
567 ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
568 if (ring->scratch.obj == NULL) {
569 DRM_ERROR("Failed to allocate seqno page\n");
570 ret = -ENOMEM;
571 goto err;
572 }
573
574 ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
575 if (ret)
576 goto err_unref;
577
578 ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
579 if (ret)
580 goto err_unref;
581
582 ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
583 ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
584 if (ring->scratch.cpu_page == NULL) {
585 ret = -ENOMEM;
586 goto err_unpin;
587 }
588
589 DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
590 ring->name, ring->scratch.gtt_offset);
591 return 0;
592
593 err_unpin:
594 i915_gem_object_ggtt_unpin(ring->scratch.obj);
595 err_unref:
596 drm_gem_object_unreference(&ring->scratch.obj->base);
597 err:
598 return ret;
599 }
600
601 static int init_render_ring(struct intel_engine_cs *ring)
602 {
603 struct drm_device *dev = ring->dev;
604 struct drm_i915_private *dev_priv = dev->dev_private;
605 int ret = init_ring_common(ring);
606 if (ret)
607 return ret;
608
609 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
610 if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
611 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
612
613 /* We need to disable the AsyncFlip performance optimisations in order
614 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
615 * programmed to '1' on all products.
616 *
617 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
618 */
619 if (INTEL_INFO(dev)->gen >= 6)
620 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
621
622 /* Required for the hardware to program scanline values for waiting */
623 /* WaEnableFlushTlbInvalidationMode:snb */
624 if (INTEL_INFO(dev)->gen == 6)
625 I915_WRITE(GFX_MODE,
626 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
627
628 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
629 if (IS_GEN7(dev))
630 I915_WRITE(GFX_MODE_GEN7,
631 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
632 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
633
634 if (INTEL_INFO(dev)->gen >= 5) {
635 ret = init_pipe_control(ring);
636 if (ret)
637 return ret;
638 }
639
640 if (IS_GEN6(dev)) {
641 /* From the Sandybridge PRM, volume 1 part 3, page 24:
642 * "If this bit is set, STCunit will have LRA as replacement
643 * policy. [...] This bit must be reset. LRA replacement
644 * policy is not supported."
645 */
646 I915_WRITE(CACHE_MODE_0,
647 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
648 }
649
650 if (INTEL_INFO(dev)->gen >= 6)
651 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
652
653 if (HAS_L3_DPF(dev))
654 I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
655
656 return ret;
657 }
658
659 static void render_ring_cleanup(struct intel_engine_cs *ring)
660 {
661 struct drm_device *dev = ring->dev;
662 struct drm_i915_private *dev_priv = dev->dev_private;
663
664 if (dev_priv->semaphore_obj) {
665 i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
666 drm_gem_object_unreference(&dev_priv->semaphore_obj->base);
667 dev_priv->semaphore_obj = NULL;
668 }
669
670 if (ring->scratch.obj == NULL)
671 return;
672
673 if (INTEL_INFO(dev)->gen >= 5) {
674 kunmap(sg_page(ring->scratch.obj->pages->sgl));
675 i915_gem_object_ggtt_unpin(ring->scratch.obj);
676 }
677
678 drm_gem_object_unreference(&ring->scratch.obj->base);
679 ring->scratch.obj = NULL;
680 }
681
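/*
 * Gen8 render-ring semaphore signalling: for every other engine, emit a
 * PIPE_CONTROL that performs a flushed quad-word write of the outstanding
 * seqno into that waiter's semaphore slot in the GGTT, followed by an
 * MI_SEMAPHORE_SIGNAL targeting the waiter (eight dwords per mailbox,
 * hence MBOX_UPDATE_DWORDS below).
 */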
682 static int gen8_rcs_signal(struct intel_engine_cs *signaller,
683 unsigned int num_dwords)
684 {
685 #define MBOX_UPDATE_DWORDS 8
686 struct drm_device *dev = signaller->dev;
687 struct drm_i915_private *dev_priv = dev->dev_private;
688 struct intel_engine_cs *waiter;
689 int i, ret, num_rings;
690
691 num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
692 num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
693 #undef MBOX_UPDATE_DWORDS
694
695 ret = intel_ring_begin(signaller, num_dwords);
696 if (ret)
697 return ret;
698
699 for_each_ring(waiter, dev_priv, i) {
700 u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
701 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
702 continue;
703
704 intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
705 intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
706 PIPE_CONTROL_QW_WRITE |
707 PIPE_CONTROL_FLUSH_ENABLE);
708 intel_ring_emit(signaller, lower_32_bits(gtt_offset));
709 intel_ring_emit(signaller, upper_32_bits(gtt_offset));
710 intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
711 intel_ring_emit(signaller, 0);
712 intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
713 MI_SEMAPHORE_TARGET(waiter->id));
714 intel_ring_emit(signaller, 0);
715 }
716
717 return 0;
718 }
719
720 static int gen8_xcs_signal(struct intel_engine_cs *signaller,
721 unsigned int num_dwords)
722 {
723 #define MBOX_UPDATE_DWORDS 6
724 struct drm_device *dev = signaller->dev;
725 struct drm_i915_private *dev_priv = dev->dev_private;
726 struct intel_engine_cs *waiter;
727 int i, ret, num_rings;
728
729 num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
730 num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
731 #undef MBOX_UPDATE_DWORDS
732
733 ret = intel_ring_begin(signaller, num_dwords);
734 if (ret)
735 return ret;
736
737 for_each_ring(waiter, dev_priv, i) {
738 u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
739 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
740 continue;
741
742 intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
743 MI_FLUSH_DW_OP_STOREDW);
744 intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
745 MI_FLUSH_DW_USE_GTT);
746 intel_ring_emit(signaller, upper_32_bits(gtt_offset));
747 intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
748 intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
749 MI_SEMAPHORE_TARGET(waiter->id));
750 intel_ring_emit(signaller, 0);
751 }
752
753 return 0;
754 }
755
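/*
 * Gen6/gen7 semaphore signalling: write the outstanding seqno into the
 * signalling mailbox register of every other engine using
 * MI_LOAD_REGISTER_IMM (three dwords per mailbox). The reservation below
 * rounds the mailbox dwords up to an even count, and the trailing MI_NOOP
 * fills that padding slot when the rounding actually added one, so the tail
 * ends up exactly where intel_ring_begin() expects it.
 */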
756 static int gen6_signal(struct intel_engine_cs *signaller,
757 unsigned int num_dwords)
758 {
759 struct drm_device *dev = signaller->dev;
760 struct drm_i915_private *dev_priv = dev->dev_private;
761 struct intel_engine_cs *useless;
762 int i, ret, num_rings;
763
764 #define MBOX_UPDATE_DWORDS 3
765 num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
766 num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
767 #undef MBOX_UPDATE_DWORDS
768
769 ret = intel_ring_begin(signaller, num_dwords);
770 if (ret)
771 return ret;
772
773 for_each_ring(useless, dev_priv, i) {
774 u32 mbox_reg = signaller->semaphore.mbox.signal[i];
775 if (mbox_reg != GEN6_NOSYNC) {
776 intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
777 intel_ring_emit(signaller, mbox_reg);
778 intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
779 }
780 }
781
782 /* If num_dwords was rounded, make sure the tail pointer is correct */
783 if (num_rings % 2 == 0)
784 intel_ring_emit(signaller, MI_NOOP);
785
786 return 0;
787 }
788
789 /**
790 * gen6_add_request - Update the semaphore mailbox registers
791 *
792 * @ring: ring that is adding a request
793 * @seqno: return seqno stuck into the ring
794 *
795 * Update the mailbox registers in the *other* rings with the current seqno.
796 * This acts like a signal in the canonical semaphore.
797 */
798 static int
799 gen6_add_request(struct intel_engine_cs *ring)
800 {
801 int ret;
802
803 if (ring->semaphore.signal)
804 ret = ring->semaphore.signal(ring, 4);
805 else
806 ret = intel_ring_begin(ring, 4);
807
808 if (ret)
809 return ret;
810
811 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
812 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
813 intel_ring_emit(ring, ring->outstanding_lazy_seqno);
814 intel_ring_emit(ring, MI_USER_INTERRUPT);
815 __intel_ring_advance(ring);
816
817 return 0;
818 }
819
820 static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
821 u32 seqno)
822 {
823 struct drm_i915_private *dev_priv = dev->dev_private;
824 return dev_priv->last_seqno < seqno;
825 }
826
827 /**
828 * intel_ring_sync - sync the waiter to the signaller on seqno
829 *
830 * @waiter: ring that is waiting
831 * @signaller: ring which has, or will signal
832 * @seqno: seqno which the waiter will block on
833 */
834
835 static int
836 gen8_ring_sync(struct intel_engine_cs *waiter,
837 struct intel_engine_cs *signaller,
838 u32 seqno)
839 {
840 struct drm_i915_private *dev_priv = waiter->dev->dev_private;
841 int ret;
842
843 ret = intel_ring_begin(waiter, 4);
844 if (ret)
845 return ret;
846
847 intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
848 MI_SEMAPHORE_GLOBAL_GTT |
849 MI_SEMAPHORE_POLL |
850 MI_SEMAPHORE_SAD_GTE_SDD);
851 intel_ring_emit(waiter, seqno);
852 intel_ring_emit(waiter,
853 lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
854 intel_ring_emit(waiter,
855 upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
856 intel_ring_advance(waiter);
857 return 0;
858 }
859
860 static int
861 gen6_ring_sync(struct intel_engine_cs *waiter,
862 struct intel_engine_cs *signaller,
863 u32 seqno)
864 {
865 u32 dw1 = MI_SEMAPHORE_MBOX |
866 MI_SEMAPHORE_COMPARE |
867 MI_SEMAPHORE_REGISTER;
868 u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
869 int ret;
870
871 /* Throughout all of the GEM code, seqno passed implies our current
872 * seqno is >= the last seqno executed. However for hardware the
873 * comparison is strictly greater than.
874 */
875 seqno -= 1;
876
877 WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
878
879 ret = intel_ring_begin(waiter, 4);
880 if (ret)
881 return ret;
882
883 /* If seqno wrap happened, omit the wait with no-ops */
884 if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
885 intel_ring_emit(waiter, dw1 | wait_mbox);
886 intel_ring_emit(waiter, seqno);
887 intel_ring_emit(waiter, 0);
888 intel_ring_emit(waiter, MI_NOOP);
889 } else {
890 intel_ring_emit(waiter, MI_NOOP);
891 intel_ring_emit(waiter, MI_NOOP);
892 intel_ring_emit(waiter, MI_NOOP);
893 intel_ring_emit(waiter, MI_NOOP);
894 }
895 intel_ring_advance(waiter);
896
897 return 0;
898 }
899
900 #define PIPE_CONTROL_FLUSH(ring__, addr__) \
901 do { \
902 intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
903 PIPE_CONTROL_DEPTH_STALL); \
904 intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
905 intel_ring_emit(ring__, 0); \
906 intel_ring_emit(ring__, 0); \
907 } while (0)
908
909 static int
910 pc_render_add_request(struct intel_engine_cs *ring)
911 {
912 u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
913 int ret;
914
915 /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
916 * incoherent with writes to memory, i.e. completely fubar,
917 * so we need to use PIPE_NOTIFY instead.
918 *
919 * However, we also need to workaround the qword write
920 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
921 * memory before requesting an interrupt.
922 */
923 ret = intel_ring_begin(ring, 32);
924 if (ret)
925 return ret;
926
927 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
928 PIPE_CONTROL_WRITE_FLUSH |
929 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
930 intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
931 intel_ring_emit(ring, ring->outstanding_lazy_seqno);
932 intel_ring_emit(ring, 0);
933 PIPE_CONTROL_FLUSH(ring, scratch_addr);
934 scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
935 PIPE_CONTROL_FLUSH(ring, scratch_addr);
936 scratch_addr += 2 * CACHELINE_BYTES;
937 PIPE_CONTROL_FLUSH(ring, scratch_addr);
938 scratch_addr += 2 * CACHELINE_BYTES;
939 PIPE_CONTROL_FLUSH(ring, scratch_addr);
940 scratch_addr += 2 * CACHELINE_BYTES;
941 PIPE_CONTROL_FLUSH(ring, scratch_addr);
942 scratch_addr += 2 * CACHELINE_BYTES;
943 PIPE_CONTROL_FLUSH(ring, scratch_addr);
944
945 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
946 PIPE_CONTROL_WRITE_FLUSH |
947 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
948 PIPE_CONTROL_NOTIFY);
949 intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
950 intel_ring_emit(ring, ring->outstanding_lazy_seqno);
951 intel_ring_emit(ring, 0);
952 __intel_ring_advance(ring);
953
954 return 0;
955 }
956
957 static u32
958 gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
959 {
960 /* Workaround to force correct ordering between irq and seqno writes on
961 * ivb (and maybe also on snb) by reading from a CS register (like
962 * ACTHD) before reading the status page. */
963 if (!lazy_coherency) {
964 struct drm_i915_private *dev_priv = ring->dev->dev_private;
965 POSTING_READ(RING_ACTHD(ring->mmio_base));
966 }
967
968 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
969 }
970
971 static u32
972 ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
973 {
974 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
975 }
976
977 static void
978 ring_set_seqno(struct intel_engine_cs *ring, u32 seqno)
979 {
980 intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
981 }
982
983 static u32
984 pc_render_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
985 {
986 return ring->scratch.cpu_page[0];
987 }
988
989 static void
990 pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno)
991 {
992 ring->scratch.cpu_page[0] = seqno;
993 }
994
995 static bool
996 gen5_ring_get_irq(struct intel_engine_cs *ring)
997 {
998 struct drm_device *dev = ring->dev;
999 struct drm_i915_private *dev_priv = dev->dev_private;
1000 unsigned long flags;
1001
1002 if (!dev->irq_enabled)
1003 return false;
1004
1005 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1006 if (ring->irq_refcount++ == 0)
1007 gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
1008 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1009
1010 return true;
1011 }
1012
1013 static void
1014 gen5_ring_put_irq(struct intel_engine_cs *ring)
1015 {
1016 struct drm_device *dev = ring->dev;
1017 struct drm_i915_private *dev_priv = dev->dev_private;
1018 unsigned long flags;
1019
1020 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1021 if (--ring->irq_refcount == 0)
1022 gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
1023 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1024 }
1025
1026 static bool
1027 i9xx_ring_get_irq(struct intel_engine_cs *ring)
1028 {
1029 struct drm_device *dev = ring->dev;
1030 struct drm_i915_private *dev_priv = dev->dev_private;
1031 unsigned long flags;
1032
1033 if (!dev->irq_enabled)
1034 return false;
1035
1036 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1037 if (ring->irq_refcount++ == 0) {
1038 dev_priv->irq_mask &= ~ring->irq_enable_mask;
1039 I915_WRITE(IMR, dev_priv->irq_mask);
1040 POSTING_READ(IMR);
1041 }
1042 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1043
1044 return true;
1045 }
1046
1047 static void
1048 i9xx_ring_put_irq(struct intel_engine_cs *ring)
1049 {
1050 struct drm_device *dev = ring->dev;
1051 struct drm_i915_private *dev_priv = dev->dev_private;
1052 unsigned long flags;
1053
1054 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1055 if (--ring->irq_refcount == 0) {
1056 dev_priv->irq_mask |= ring->irq_enable_mask;
1057 I915_WRITE(IMR, dev_priv->irq_mask);
1058 POSTING_READ(IMR);
1059 }
1060 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1061 }
1062
1063 static bool
1064 i8xx_ring_get_irq(struct intel_engine_cs *ring)
1065 {
1066 struct drm_device *dev = ring->dev;
1067 struct drm_i915_private *dev_priv = dev->dev_private;
1068 unsigned long flags;
1069
1070 if (!dev->irq_enabled)
1071 return false;
1072
1073 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1074 if (ring->irq_refcount++ == 0) {
1075 dev_priv->irq_mask &= ~ring->irq_enable_mask;
1076 I915_WRITE16(IMR, dev_priv->irq_mask);
1077 POSTING_READ16(IMR);
1078 }
1079 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1080
1081 return true;
1082 }
1083
1084 static void
1085 i8xx_ring_put_irq(struct intel_engine_cs *ring)
1086 {
1087 struct drm_device *dev = ring->dev;
1088 struct drm_i915_private *dev_priv = dev->dev_private;
1089 unsigned long flags;
1090
1091 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1092 if (--ring->irq_refcount == 0) {
1093 dev_priv->irq_mask |= ring->irq_enable_mask;
1094 I915_WRITE16(IMR, dev_priv->irq_mask);
1095 POSTING_READ16(IMR);
1096 }
1097 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1098 }
1099
1100 void intel_ring_setup_status_page(struct intel_engine_cs *ring)
1101 {
1102 struct drm_device *dev = ring->dev;
1103 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1104 u32 mmio = 0;
1105
1106 /* The ring status page addresses are no longer next to the rest of
1107 * the ring registers as of gen7.
1108 */
1109 if (IS_GEN7(dev)) {
1110 switch (ring->id) {
1111 case RCS:
1112 mmio = RENDER_HWS_PGA_GEN7;
1113 break;
1114 case BCS:
1115 mmio = BLT_HWS_PGA_GEN7;
1116 break;
1117 /*
1118 * VCS2 does not exist on Gen7; it is listed here only to
1119 * silence the gcc switch-check warning.
1120 */
1121 case VCS2:
1122 case VCS:
1123 mmio = BSD_HWS_PGA_GEN7;
1124 break;
1125 case VECS:
1126 mmio = VEBOX_HWS_PGA_GEN7;
1127 break;
1128 }
1129 } else if (IS_GEN6(ring->dev)) {
1130 mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
1131 } else {
1132 /* XXX: gen8 returns to sanity */
1133 mmio = RING_HWS_PGA(ring->mmio_base);
1134 }
1135
1136 I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
1137 POSTING_READ(mmio);
1138
1139 /*
1140 * Flush the TLB for this page
1141 *
1142 * FIXME: These two bits have disappeared on gen8, so a question
1143 * arises: do we still need this and if so how should we go about
1144 * invalidating the TLB?
1145 */
1146 if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
1147 u32 reg = RING_INSTPM(ring->mmio_base);
1148
1149 /* The ring should be idle before issuing a sync flush */
1150 WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
1151
1152 I915_WRITE(reg,
1153 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
1154 INSTPM_SYNC_FLUSH));
1155 if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
1156 1000))
1157 DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
1158 ring->name);
1159 }
1160 }
1161
1162 static int
1163 bsd_ring_flush(struct intel_engine_cs *ring,
1164 u32 invalidate_domains,
1165 u32 flush_domains)
1166 {
1167 int ret;
1168
1169 ret = intel_ring_begin(ring, 2);
1170 if (ret)
1171 return ret;
1172
1173 intel_ring_emit(ring, MI_FLUSH);
1174 intel_ring_emit(ring, MI_NOOP);
1175 intel_ring_advance(ring);
1176 return 0;
1177 }
1178
1179 static int
1180 i9xx_add_request(struct intel_engine_cs *ring)
1181 {
1182 int ret;
1183
1184 ret = intel_ring_begin(ring, 4);
1185 if (ret)
1186 return ret;
1187
1188 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
1189 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1190 intel_ring_emit(ring, ring->outstanding_lazy_seqno);
1191 intel_ring_emit(ring, MI_USER_INTERRUPT);
1192 __intel_ring_advance(ring);
1193
1194 return 0;
1195 }
1196
1197 static bool
1198 gen6_ring_get_irq(struct intel_engine_cs *ring)
1199 {
1200 struct drm_device *dev = ring->dev;
1201 struct drm_i915_private *dev_priv = dev->dev_private;
1202 unsigned long flags;
1203
1204 if (!dev->irq_enabled)
1205 return false;
1206
1207 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1208 if (ring->irq_refcount++ == 0) {
1209 if (HAS_L3_DPF(dev) && ring->id == RCS)
1210 I915_WRITE_IMR(ring,
1211 ~(ring->irq_enable_mask |
1212 GT_PARITY_ERROR(dev)));
1213 else
1214 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1215 gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
1216 }
1217 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1218
1219 return true;
1220 }
1221
1222 static void
1223 gen6_ring_put_irq(struct intel_engine_cs *ring)
1224 {
1225 struct drm_device *dev = ring->dev;
1226 struct drm_i915_private *dev_priv = dev->dev_private;
1227 unsigned long flags;
1228
1229 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1230 if (--ring->irq_refcount == 0) {
1231 if (HAS_L3_DPF(dev) && ring->id == RCS)
1232 I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
1233 else
1234 I915_WRITE_IMR(ring, ~0);
1235 gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
1236 }
1237 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1238 }
1239
1240 static bool
1241 hsw_vebox_get_irq(struct intel_engine_cs *ring)
1242 {
1243 struct drm_device *dev = ring->dev;
1244 struct drm_i915_private *dev_priv = dev->dev_private;
1245 unsigned long flags;
1246
1247 if (!dev->irq_enabled)
1248 return false;
1249
1250 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1251 if (ring->irq_refcount++ == 0) {
1252 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1253 gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask);
1254 }
1255 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1256
1257 return true;
1258 }
1259
1260 static void
1261 hsw_vebox_put_irq(struct intel_engine_cs *ring)
1262 {
1263 struct drm_device *dev = ring->dev;
1264 struct drm_i915_private *dev_priv = dev->dev_private;
1265 unsigned long flags;
1266
1267 if (!dev->irq_enabled)
1268 return;
1269
1270 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1271 if (--ring->irq_refcount == 0) {
1272 I915_WRITE_IMR(ring, ~0);
1273 gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);
1274 }
1275 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1276 }
1277
1278 static bool
1279 gen8_ring_get_irq(struct intel_engine_cs *ring)
1280 {
1281 struct drm_device *dev = ring->dev;
1282 struct drm_i915_private *dev_priv = dev->dev_private;
1283 unsigned long flags;
1284
1285 if (!dev->irq_enabled)
1286 return false;
1287
1288 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1289 if (ring->irq_refcount++ == 0) {
1290 if (HAS_L3_DPF(dev) && ring->id == RCS) {
1291 I915_WRITE_IMR(ring,
1292 ~(ring->irq_enable_mask |
1293 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
1294 } else {
1295 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1296 }
1297 POSTING_READ(RING_IMR(ring->mmio_base));
1298 }
1299 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1300
1301 return true;
1302 }
1303
1304 static void
1305 gen8_ring_put_irq(struct intel_engine_cs *ring)
1306 {
1307 struct drm_device *dev = ring->dev;
1308 struct drm_i915_private *dev_priv = dev->dev_private;
1309 unsigned long flags;
1310
1311 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1312 if (--ring->irq_refcount == 0) {
1313 if (HAS_L3_DPF(dev) && ring->id == RCS) {
1314 I915_WRITE_IMR(ring,
1315 ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
1316 } else {
1317 I915_WRITE_IMR(ring, ~0);
1318 }
1319 POSTING_READ(RING_IMR(ring->mmio_base));
1320 }
1321 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1322 }
1323
1324 static int
1325 i965_dispatch_execbuffer(struct intel_engine_cs *ring,
1326 u64 offset, u32 length,
1327 unsigned flags)
1328 {
1329 int ret;
1330
1331 ret = intel_ring_begin(ring, 2);
1332 if (ret)
1333 return ret;
1334
1335 intel_ring_emit(ring,
1336 MI_BATCH_BUFFER_START |
1337 MI_BATCH_GTT |
1338 (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
1339 intel_ring_emit(ring, offset);
1340 intel_ring_advance(ring);
1341
1342 return 0;
1343 }
1344
1345 /* Just a userspace ABI convention to limit the wa batch bo to a reasonable size */
1346 #define I830_BATCH_LIMIT (256*1024)
1347 static int
1348 i830_dispatch_execbuffer(struct intel_engine_cs *ring,
1349 u64 offset, u32 len,
1350 unsigned flags)
1351 {
1352 int ret;
1353
1354 if (flags & I915_DISPATCH_PINNED) {
1355 ret = intel_ring_begin(ring, 4);
1356 if (ret)
1357 return ret;
1358
1359 intel_ring_emit(ring, MI_BATCH_BUFFER);
1360 intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1361 intel_ring_emit(ring, offset + len - 8);
1362 intel_ring_emit(ring, MI_NOOP);
1363 intel_ring_advance(ring);
1364 } else {
1365 u32 cs_offset = ring->scratch.gtt_offset;
1366
1367 if (len > I830_BATCH_LIMIT)
1368 return -ENOSPC;
1369
1370 ret = intel_ring_begin(ring, 9+3);
1371 if (ret)
1372 return ret;
1373 /* Blit the batch (which now has all relocs applied) to the stable batch
1374 * scratch bo area (so that the CS never stumbles over its TLB
1375 * invalidation bug) ... */
1376 intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
1377 XY_SRC_COPY_BLT_WRITE_ALPHA |
1378 XY_SRC_COPY_BLT_WRITE_RGB);
1379 intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
1380 intel_ring_emit(ring, 0);
1381 intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
1382 intel_ring_emit(ring, cs_offset);
1383 intel_ring_emit(ring, 0);
1384 intel_ring_emit(ring, 4096);
1385 intel_ring_emit(ring, offset);
1386 intel_ring_emit(ring, MI_FLUSH);
1387
1388 /* ... and execute it. */
1389 intel_ring_emit(ring, MI_BATCH_BUFFER);
1390 intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1391 intel_ring_emit(ring, cs_offset + len - 8);
1392 intel_ring_advance(ring);
1393 }
1394
1395 return 0;
1396 }
1397
1398 static int
1399 i915_dispatch_execbuffer(struct intel_engine_cs *ring,
1400 u64 offset, u32 len,
1401 unsigned flags)
1402 {
1403 int ret;
1404
1405 ret = intel_ring_begin(ring, 2);
1406 if (ret)
1407 return ret;
1408
1409 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
1410 intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1411 intel_ring_advance(ring);
1412
1413 return 0;
1414 }
1415
1416 static void cleanup_status_page(struct intel_engine_cs *ring)
1417 {
1418 struct drm_i915_gem_object *obj;
1419
1420 obj = ring->status_page.obj;
1421 if (obj == NULL)
1422 return;
1423
1424 kunmap(sg_page(obj->pages->sgl));
1425 i915_gem_object_ggtt_unpin(obj);
1426 drm_gem_object_unreference(&obj->base);
1427 ring->status_page.obj = NULL;
1428 }
1429
1430 static int init_status_page(struct intel_engine_cs *ring)
1431 {
1432 struct drm_i915_gem_object *obj;
1433
1434 if ((obj = ring->status_page.obj) == NULL) {
1435 unsigned flags;
1436 int ret;
1437
1438 obj = i915_gem_alloc_object(ring->dev, 4096);
1439 if (obj == NULL) {
1440 DRM_ERROR("Failed to allocate status page\n");
1441 return -ENOMEM;
1442 }
1443
1444 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
1445 if (ret)
1446 goto err_unref;
1447
1448 flags = 0;
1449 if (!HAS_LLC(ring->dev))
1450 /* On g33, we cannot place HWS above 256MiB, so
1451 * restrict its pinning to the low mappable arena.
1452 * Though this restriction is not documented for
1453 * gen4, gen5, or byt, they also behave similarly
1454 * and hang if the HWS is placed at the top of the
1455 * GTT. To generalise, it appears that all !llc
1456 * platforms have issues with us placing the HWS
1457 * above the mappable region (even though we never
1458 * actually map it).
1459 */
1460 flags |= PIN_MAPPABLE;
1461 ret = i915_gem_obj_ggtt_pin(obj, 4096, flags);
1462 if (ret) {
1463 err_unref:
1464 drm_gem_object_unreference(&obj->base);
1465 return ret;
1466 }
1467
1468 ring->status_page.obj = obj;
1469 }
1470
1471 ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
1472 ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
1473 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1474
1475 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
1476 ring->name, ring->status_page.gfx_addr);
1477
1478 return 0;
1479 }
1480
1481 static int init_phys_status_page(struct intel_engine_cs *ring)
1482 {
1483 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1484
1485 if (!dev_priv->status_page_dmah) {
1486 dev_priv->status_page_dmah =
1487 drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
1488 if (!dev_priv->status_page_dmah)
1489 return -ENOMEM;
1490 }
1491
1492 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1493 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1494
1495 return 0;
1496 }
1497
1498 static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
1499 {
1500 if (!ringbuf->obj)
1501 return;
1502
1503 iounmap(ringbuf->virtual_start);
1504 i915_gem_object_ggtt_unpin(ringbuf->obj);
1505 drm_gem_object_unreference(&ringbuf->obj->base);
1506 ringbuf->obj = NULL;
1507 }
1508
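/*
 * Back the ringbuffer with a GEM object: prefer stolen memory on !LLC
 * platforms, otherwise allocate from shmem, mark the object read-only to
 * the GPU, pin it into the mappable GGTT aperture and map it
 * write-combined so the CPU can emit commands into it.
 */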
1509 static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
1510 struct intel_ringbuffer *ringbuf)
1511 {
1512 struct drm_i915_private *dev_priv = to_i915(dev);
1513 struct drm_i915_gem_object *obj;
1514 int ret;
1515
1516 if (ringbuf->obj)
1517 return 0;
1518
1519 obj = NULL;
1520 if (!HAS_LLC(dev))
1521 obj = i915_gem_object_create_stolen(dev, ringbuf->size);
1522 if (obj == NULL)
1523 obj = i915_gem_alloc_object(dev, ringbuf->size);
1524 if (obj == NULL)
1525 return -ENOMEM;
1526
1527 /* mark ring buffers as read-only from GPU side by default */
1528 obj->gt_ro = 1;
1529
1530 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
1531 if (ret)
1532 goto err_unref;
1533
1534 ret = i915_gem_object_set_to_gtt_domain(obj, true);
1535 if (ret)
1536 goto err_unpin;
1537
1538 ringbuf->virtual_start =
1539 ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
1540 ringbuf->size);
1541 if (ringbuf->virtual_start == NULL) {
1542 ret = -EINVAL;
1543 goto err_unpin;
1544 }
1545
1546 ringbuf->obj = obj;
1547 return 0;
1548
1549 err_unpin:
1550 i915_gem_object_ggtt_unpin(obj);
1551 err_unref:
1552 drm_gem_object_unreference(&obj->base);
1553 return ret;
1554 }
1555
1556 static int intel_init_ring_buffer(struct drm_device *dev,
1557 struct intel_engine_cs *ring)
1558 {
1559 struct intel_ringbuffer *ringbuf = ring->buffer;
1560 int ret;
1561
1562 if (ringbuf == NULL) {
1563 ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
1564 if (!ringbuf)
1565 return -ENOMEM;
1566 ring->buffer = ringbuf;
1567 }
1568
1569 ring->dev = dev;
1570 INIT_LIST_HEAD(&ring->active_list);
1571 INIT_LIST_HEAD(&ring->request_list);
1572 ringbuf->size = 32 * PAGE_SIZE;
1573 memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
1574
1575 init_waitqueue_head(&ring->irq_queue);
1576
1577 if (I915_NEED_GFX_HWS(dev)) {
1578 ret = init_status_page(ring);
1579 if (ret)
1580 goto error;
1581 } else {
1582 BUG_ON(ring->id != RCS);
1583 ret = init_phys_status_page(ring);
1584 if (ret)
1585 goto error;
1586 }
1587
1588 ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
1589 if (ret) {
1590 DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
1591 goto error;
1592 }
1593
1594 /* Workaround an erratum on the i830 which causes a hang if
1595 * the TAIL pointer points to within the last 2 cachelines
1596 * of the buffer.
1597 */
1598 ringbuf->effective_size = ringbuf->size;
1599 if (IS_I830(dev) || IS_845G(dev))
1600 ringbuf->effective_size -= 2 * CACHELINE_BYTES;
1601
1602 ret = i915_cmd_parser_init_ring(ring);
1603 if (ret)
1604 goto error;
1605
1606 ret = ring->init(ring);
1607 if (ret)
1608 goto error;
1609
1610 return 0;
1611
1612 error:
1613 kfree(ringbuf);
1614 ring->buffer = NULL;
1615 return ret;
1616 }
1617
1618 void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
1619 {
1620 struct drm_i915_private *dev_priv = to_i915(ring->dev);
1621 struct intel_ringbuffer *ringbuf = ring->buffer;
1622
1623 if (!intel_ring_initialized(ring))
1624 return;
1625
1626 intel_stop_ring_buffer(ring);
1627 WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
1628
1629 intel_destroy_ringbuffer_obj(ringbuf);
1630 ring->preallocated_lazy_request = NULL;
1631 ring->outstanding_lazy_seqno = 0;
1632
1633 if (ring->cleanup)
1634 ring->cleanup(ring);
1635
1636 cleanup_status_page(ring);
1637
1638 i915_cmd_parser_fini_ring(ring);
1639
1640 kfree(ringbuf);
1641 ring->buffer = NULL;
1642 }
1643
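/*
 * Try to free ring space by retiring completed requests: first check
 * whether the space reclaimed by the last retirement already suffices,
 * otherwise find the oldest request whose completion frees at least @n
 * bytes, wait for its seqno and retire up to it.
 */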
1644 static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
1645 {
1646 struct intel_ringbuffer *ringbuf = ring->buffer;
1647 struct drm_i915_gem_request *request;
1648 u32 seqno = 0;
1649 int ret;
1650
1651 if (ringbuf->last_retired_head != -1) {
1652 ringbuf->head = ringbuf->last_retired_head;
1653 ringbuf->last_retired_head = -1;
1654
1655 ringbuf->space = ring_space(ringbuf);
1656 if (ringbuf->space >= n)
1657 return 0;
1658 }
1659
1660 list_for_each_entry(request, &ring->request_list, list) {
1661 if (__ring_space(request->tail, ringbuf->tail, ringbuf->size) >= n) {
1662 seqno = request->seqno;
1663 break;
1664 }
1665 }
1666
1667 if (seqno == 0)
1668 return -ENOSPC;
1669
1670 ret = i915_wait_seqno(ring, seqno);
1671 if (ret)
1672 return ret;
1673
1674 i915_gem_retire_requests_ring(ring);
1675 ringbuf->head = ringbuf->last_retired_head;
1676 ringbuf->last_retired_head = -1;
1677
1678 ringbuf->space = ring_space(ringbuf);
1679 return 0;
1680 }
1681
1682 static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
1683 {
1684 struct drm_device *dev = ring->dev;
1685 struct drm_i915_private *dev_priv = dev->dev_private;
1686 struct intel_ringbuffer *ringbuf = ring->buffer;
1687 unsigned long end;
1688 int ret;
1689
1690 ret = intel_ring_wait_request(ring, n);
1691 if (ret != -ENOSPC)
1692 return ret;
1693
1694 /* force the tail write in case we have been skipping them */
1695 __intel_ring_advance(ring);
1696
1697 /* With GEM the hangcheck timer should kick us out of the loop,
1698 * leaving it early runs the risk of corrupting GEM state (due
1699 * to running on almost untested codepaths). But on resume
1700 * timers don't work yet, so prevent a complete hang in that
1701 * case by choosing an insanely large timeout. */
1702 end = jiffies + 60 * HZ;
1703
1704 trace_i915_ring_wait_begin(ring);
1705 do {
1706 ringbuf->head = I915_READ_HEAD(ring);
1707 ringbuf->space = ring_space(ringbuf);
1708 if (ringbuf->space >= n) {
1709 ret = 0;
1710 break;
1711 }
1712
1713 if (!drm_core_check_feature(dev, DRIVER_MODESET) &&
1714 dev->primary->master) {
1715 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1716 if (master_priv->sarea_priv)
1717 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1718 }
1719
1720 msleep(1);
1721
1722 if (dev_priv->mm.interruptible && signal_pending(current)) {
1723 ret = -ERESTARTSYS;
1724 break;
1725 }
1726
1727 ret = i915_gem_check_wedge(&dev_priv->gpu_error,
1728 dev_priv->mm.interruptible);
1729 if (ret)
1730 break;
1731
1732 if (time_after(jiffies, end)) {
1733 ret = -EBUSY;
1734 break;
1735 }
1736 } while (1);
1737 trace_i915_ring_wait_end(ring);
1738 return ret;
1739 }
1740
1741 static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
1742 {
1743 uint32_t __iomem *virt;
1744 struct intel_ringbuffer *ringbuf = ring->buffer;
1745 int rem = ringbuf->size - ringbuf->tail;
1746
1747 if (ringbuf->space < rem) {
1748 int ret = ring_wait_for_space(ring, rem);
1749 if (ret)
1750 return ret;
1751 }
1752
1753 virt = ringbuf->virtual_start + ringbuf->tail;
1754 rem /= 4;
1755 while (rem--)
1756 iowrite32(MI_NOOP, virt++);
1757
1758 ringbuf->tail = 0;
1759 ringbuf->space = ring_space(ringbuf);
1760
1761 return 0;
1762 }
1763
1764 int intel_ring_idle(struct intel_engine_cs *ring)
1765 {
1766 u32 seqno;
1767 int ret;
1768
1769 /* We need to add any requests required to flush the objects and ring */
1770 if (ring->outstanding_lazy_seqno) {
1771 ret = i915_add_request(ring, NULL);
1772 if (ret)
1773 return ret;
1774 }
1775
1776 /* Wait upon the last request to be completed */
1777 if (list_empty(&ring->request_list))
1778 return 0;
1779
1780 seqno = list_entry(ring->request_list.prev,
1781 struct drm_i915_gem_request,
1782 list)->seqno;
1783
1784 return i915_wait_seqno(ring, seqno);
1785 }
1786
1787 static int
1788 intel_ring_alloc_seqno(struct intel_engine_cs *ring)
1789 {
1790 if (ring->outstanding_lazy_seqno)
1791 return 0;
1792
1793 if (ring->preallocated_lazy_request == NULL) {
1794 struct drm_i915_gem_request *request;
1795
1796 request = kmalloc(sizeof(*request), GFP_KERNEL);
1797 if (request == NULL)
1798 return -ENOMEM;
1799
1800 ring->preallocated_lazy_request = request;
1801 }
1802
1803 return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
1804 }
1805
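/*
 * Make sure the next @bytes of commands can be emitted contiguously: if
 * they would run past the effective end of the buffer, pad the remainder
 * with MI_NOOPs and wrap to the start first, then wait until enough space
 * has been freed.
 */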
1806 static int __intel_ring_prepare(struct intel_engine_cs *ring,
1807 int bytes)
1808 {
1809 struct intel_ringbuffer *ringbuf = ring->buffer;
1810 int ret;
1811
1812 if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
1813 ret = intel_wrap_ring_buffer(ring);
1814 if (unlikely(ret))
1815 return ret;
1816 }
1817
1818 if (unlikely(ringbuf->space < bytes)) {
1819 ret = ring_wait_for_space(ring, bytes);
1820 if (unlikely(ret))
1821 return ret;
1822 }
1823
1824 return 0;
1825 }
1826
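/*
 * Reserve @num_dwords of ring space for the caller: bail out early if the
 * GPU is wedged, ensure the space is available (wrapping if necessary) and
 * preallocate the lazy request/seqno so that later steps of request
 * emission cannot fail after commands have already been written.
 */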
1827 int intel_ring_begin(struct intel_engine_cs *ring,
1828 int num_dwords)
1829 {
1830 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1831 int ret;
1832
1833 ret = i915_gem_check_wedge(&dev_priv->gpu_error,
1834 dev_priv->mm.interruptible);
1835 if (ret)
1836 return ret;
1837
1838 ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
1839 if (ret)
1840 return ret;
1841
1842 /* Preallocate the olr before touching the ring */
1843 ret = intel_ring_alloc_seqno(ring);
1844 if (ret)
1845 return ret;
1846
1847 ring->buffer->space -= num_dwords * sizeof(uint32_t);
1848 return 0;
1849 }
1850
1851 /* Align the ring tail to a cacheline boundary */
1852 int intel_ring_cacheline_align(struct intel_engine_cs *ring)
1853 {
1854 int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
1855 int ret;
1856
1857 if (num_dwords == 0)
1858 return 0;
1859
1860 num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
1861 ret = intel_ring_begin(ring, num_dwords);
1862 if (ret)
1863 return ret;
1864
1865 while (num_dwords--)
1866 intel_ring_emit(ring, MI_NOOP);
1867
1868 intel_ring_advance(ring);
1869
1870 return 0;
1871 }
1872
1873 void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
1874 {
1875 struct drm_device *dev = ring->dev;
1876 struct drm_i915_private *dev_priv = dev->dev_private;
1877
1878 BUG_ON(ring->outstanding_lazy_seqno);
1879
1880 if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
1881 I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
1882 I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
1883 if (HAS_VEBOX(dev))
1884 I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
1885 }
1886
1887 ring->set_seqno(ring, seqno);
1888 ring->hangcheck.seqno = seqno;
1889 }
1890
1891 static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
1892 u32 value)
1893 {
1894 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1895
1896 /* Every tail move must follow the sequence below */
1897
1898 /* Disable notification that the ring is IDLE. The GT
1899 * will then assume that it is busy and bring it out of rc6.
1900 */
1901 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1902 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1903
1904 /* Clear the context id. Here be magic! */
1905 I915_WRITE64(GEN6_BSD_RNCID, 0x0);
1906
1907 /* Wait for the ring not to be idle, i.e. for it to wake up. */
1908 if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
1909 GEN6_BSD_SLEEP_INDICATOR) == 0,
1910 50))
1911 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
1912
1913 /* Now that the ring is fully powered up, update the tail */
1914 I915_WRITE_TAIL(ring, value);
1915 POSTING_READ(RING_TAIL(ring->mmio_base));
1916
1917 /* Let the ring send IDLE messages to the GT again,
1918 * and so let it sleep to conserve power when idle.
1919 */
1920 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1921 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1922 }
1923
1924 static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
1925 u32 invalidate, u32 flush)
1926 {
1927 uint32_t cmd;
1928 int ret;
1929
1930 ret = intel_ring_begin(ring, 4);
1931 if (ret)
1932 return ret;
1933
1934 cmd = MI_FLUSH_DW;
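	/*
	 * On gen8 MI_FLUSH_DW carries a 64-bit post-sync address, so the
	 * command is one dword longer; hence the length bump here and the
	 * extra "upper addr" dword emitted below.
	 */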
1935 if (INTEL_INFO(ring->dev)->gen >= 8)
1936 cmd += 1;
1937 /*
1938 * Bspec vol 1c.5 - video engine command streamer:
1939 * "If ENABLED, all TLBs will be invalidated once the flush
1940 * operation is complete. This bit is only valid when the
1941 * Post-Sync Operation field is a value of 1h or 3h."
1942 */
1943 if (invalidate & I915_GEM_GPU_DOMAINS)
1944 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
1945 MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1946 intel_ring_emit(ring, cmd);
1947 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
1948 if (INTEL_INFO(ring->dev)->gen >= 8) {
1949 intel_ring_emit(ring, 0); /* upper addr */
1950 intel_ring_emit(ring, 0); /* value */
1951 } else {
1952 intel_ring_emit(ring, 0);
1953 intel_ring_emit(ring, MI_NOOP);
1954 }
1955 intel_ring_advance(ring);
1956 return 0;
1957 }
1958
1959 static int
1960 gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
1961 u64 offset, u32 len,
1962 unsigned flags)
1963 {
1964 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1965 bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
1966 !(flags & I915_DISPATCH_SECURE);
1967 int ret;
1968
1969 ret = intel_ring_begin(ring, 4);
1970 if (ret)
1971 return ret;
1972
1973 /* FIXME(BDW): Address space and security selectors. */
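	/* Bit 8 of MI_BATCH_BUFFER_START selects the PPGTT address space;
	 * secure dispatches are executed from the GGTT and so leave it clear.
	 */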
1974 intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
1975 intel_ring_emit(ring, lower_32_bits(offset));
1976 intel_ring_emit(ring, upper_32_bits(offset));
1977 intel_ring_emit(ring, MI_NOOP);
1978 intel_ring_advance(ring);
1979
1980 return 0;
1981 }
1982
1983 static int
1984 hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
1985 u64 offset, u32 len,
1986 unsigned flags)
1987 {
1988 int ret;
1989
1990 ret = intel_ring_begin(ring, 2);
1991 if (ret)
1992 return ret;
1993
1994 intel_ring_emit(ring,
1995 MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
1996 (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
1997 	/* bits 0-7 encode the command length on GEN6+ */
1998 intel_ring_emit(ring, offset);
1999 intel_ring_advance(ring);
2000
2001 return 0;
2002 }
2003
2004 static int
2005 gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
2006 u64 offset, u32 len,
2007 unsigned flags)
2008 {
2009 int ret;
2010
2011 ret = intel_ring_begin(ring, 2);
2012 if (ret)
2013 return ret;
2014
2015 intel_ring_emit(ring,
2016 MI_BATCH_BUFFER_START |
2017 (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
2018 	/* bits 0-7 encode the command length on GEN6+ */
2019 intel_ring_emit(ring, offset);
2020 intel_ring_advance(ring);
2021
2022 return 0;
2023 }
2024
2025 /* Blitter support (SandyBridge+) */
2026
2027 static int gen6_ring_flush(struct intel_engine_cs *ring,
2028 u32 invalidate, u32 flush)
2029 {
2030 struct drm_device *dev = ring->dev;
2031 uint32_t cmd;
2032 int ret;
2033
2034 ret = intel_ring_begin(ring, 4);
2035 if (ret)
2036 return ret;
2037
2038 cmd = MI_FLUSH_DW;
2039 if (INTEL_INFO(ring->dev)->gen >= 8)
2040 cmd += 1;
2041 /*
2042 * Bspec vol 1c.3 - blitter engine command streamer:
2043 * "If ENABLED, all TLBs will be invalidated once the flush
2044 * operation is complete. This bit is only valid when the
2045 * Post-Sync Operation field is a value of 1h or 3h."
2046 */
2047 if (invalidate & I915_GEM_DOMAIN_RENDER)
2048 cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
2049 MI_FLUSH_DW_OP_STOREDW;
2050 intel_ring_emit(ring, cmd);
2051 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
2052 if (INTEL_INFO(ring->dev)->gen >= 8) {
2053 intel_ring_emit(ring, 0); /* upper addr */
2054 intel_ring_emit(ring, 0); /* value */
2055 } else {
2056 intel_ring_emit(ring, 0);
2057 intel_ring_emit(ring, MI_NOOP);
2058 }
2059 intel_ring_advance(ring);
2060
2061 if (IS_GEN7(dev) && !invalidate && flush)
2062 return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
2063
2064 return 0;
2065 }
2066
2067 int intel_init_render_ring_buffer(struct drm_device *dev)
2068 {
2069 struct drm_i915_private *dev_priv = dev->dev_private;
2070 struct intel_engine_cs *ring = &dev_priv->ring[RCS];
2071 struct drm_i915_gem_object *obj;
2072 int ret;
2073
2074 ring->name = "render ring";
2075 ring->id = RCS;
2076 ring->mmio_base = RENDER_RING_BASE;
2077
2078 if (INTEL_INFO(dev)->gen >= 8) {
2079 if (i915_semaphore_is_enabled(dev)) {
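			/*
			 * Gen8 semaphores signal by writing seqnos into a
			 * shared GGTT object; if it cannot be allocated or
			 * pinned below, semaphores are simply disabled.
			 */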
2080 obj = i915_gem_alloc_object(dev, 4096);
2081 if (obj == NULL) {
2082 DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
2083 i915.semaphores = 0;
2084 } else {
2085 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
2086 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
2087 if (ret != 0) {
2088 drm_gem_object_unreference(&obj->base);
2089 DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
2090 i915.semaphores = 0;
2091 } else
2092 dev_priv->semaphore_obj = obj;
2093 }
2094 }
2095 ring->add_request = gen6_add_request;
2096 ring->flush = gen8_render_ring_flush;
2097 ring->irq_get = gen8_ring_get_irq;
2098 ring->irq_put = gen8_ring_put_irq;
2099 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
2100 ring->get_seqno = gen6_ring_get_seqno;
2101 ring->set_seqno = ring_set_seqno;
2102 if (i915_semaphore_is_enabled(dev)) {
2103 WARN_ON(!dev_priv->semaphore_obj);
2104 ring->semaphore.sync_to = gen8_ring_sync;
2105 ring->semaphore.signal = gen8_rcs_signal;
2106 GEN8_RING_SEMAPHORE_INIT;
2107 }
2108 } else if (INTEL_INFO(dev)->gen >= 6) {
2109 ring->add_request = gen6_add_request;
2110 ring->flush = gen7_render_ring_flush;
2111 if (INTEL_INFO(dev)->gen == 6)
2112 ring->flush = gen6_render_ring_flush;
2113 ring->irq_get = gen6_ring_get_irq;
2114 ring->irq_put = gen6_ring_put_irq;
2115 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
2116 ring->get_seqno = gen6_ring_get_seqno;
2117 ring->set_seqno = ring_set_seqno;
2118 if (i915_semaphore_is_enabled(dev)) {
2119 ring->semaphore.sync_to = gen6_ring_sync;
2120 ring->semaphore.signal = gen6_signal;
2121 			/*
2122 			 * These mailbox semaphores are only used on pre-gen8
2123 			 * platforms, and there is no VCS2 ring before gen8,
2124 			 * so the RCS/VCS2 slot is initialized as INVALID.
2125 			 * Gen8 initializes the semaphore between VCS2 and
2126 			 * RCS separately.
2127 			 */
2128 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
2129 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
2130 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
2131 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
2132 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2133 ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
2134 ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
2135 ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
2136 ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
2137 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2138 }
2139 } else if (IS_GEN5(dev)) {
2140 ring->add_request = pc_render_add_request;
2141 ring->flush = gen4_render_ring_flush;
2142 ring->get_seqno = pc_render_get_seqno;
2143 ring->set_seqno = pc_render_set_seqno;
2144 ring->irq_get = gen5_ring_get_irq;
2145 ring->irq_put = gen5_ring_put_irq;
2146 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
2147 GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
2148 } else {
2149 ring->add_request = i9xx_add_request;
2150 if (INTEL_INFO(dev)->gen < 4)
2151 ring->flush = gen2_render_ring_flush;
2152 else
2153 ring->flush = gen4_render_ring_flush;
2154 ring->get_seqno = ring_get_seqno;
2155 ring->set_seqno = ring_set_seqno;
2156 if (IS_GEN2(dev)) {
2157 ring->irq_get = i8xx_ring_get_irq;
2158 ring->irq_put = i8xx_ring_put_irq;
2159 } else {
2160 ring->irq_get = i9xx_ring_get_irq;
2161 ring->irq_put = i9xx_ring_put_irq;
2162 }
2163 ring->irq_enable_mask = I915_USER_INTERRUPT;
2164 }
2165 ring->write_tail = ring_write_tail;
2166
2167 if (IS_HASWELL(dev))
2168 ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
2169 else if (IS_GEN8(dev))
2170 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2171 else if (INTEL_INFO(dev)->gen >= 6)
2172 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2173 else if (INTEL_INFO(dev)->gen >= 4)
2174 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
2175 else if (IS_I830(dev) || IS_845G(dev))
2176 ring->dispatch_execbuffer = i830_dispatch_execbuffer;
2177 else
2178 ring->dispatch_execbuffer = i915_dispatch_execbuffer;
2179 ring->init = init_render_ring;
2180 ring->cleanup = render_ring_cleanup;
2181
2182 /* Workaround batchbuffer to combat CS tlb bug. */
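	/* On 830/845 (HAS_BROKEN_CS_TLB) the command streamer's TLB handling is
	 * broken, so user batches are copied into this pinned scratch object and
	 * executed from there instead (see i830_dispatch_execbuffer).
	 */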
2183 if (HAS_BROKEN_CS_TLB(dev)) {
2184 obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
2185 if (obj == NULL) {
2186 DRM_ERROR("Failed to allocate batch bo\n");
2187 return -ENOMEM;
2188 }
2189
2190 ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
2191 if (ret != 0) {
2192 drm_gem_object_unreference(&obj->base);
2193 			DRM_ERROR("Failed to pin batch bo\n");
2194 return ret;
2195 }
2196
2197 ring->scratch.obj = obj;
2198 ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
2199 }
2200
2201 return intel_init_ring_buffer(dev, ring);
2202 }
2203
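/*
 * Legacy DRI1/UMS initialization: the ring lives at a caller-supplied
 * physical address rather than in a GEM object, so it is mapped with
 * ioremap_wc() here instead of going through intel_init_ring_buffer().
 */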
2204 int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
2205 {
2206 struct drm_i915_private *dev_priv = dev->dev_private;
2207 struct intel_engine_cs *ring = &dev_priv->ring[RCS];
2208 struct intel_ringbuffer *ringbuf = ring->buffer;
2209 int ret;
2210
2211 if (ringbuf == NULL) {
2212 ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
2213 if (!ringbuf)
2214 return -ENOMEM;
2215 ring->buffer = ringbuf;
2216 }
2217
2218 ring->name = "render ring";
2219 ring->id = RCS;
2220 ring->mmio_base = RENDER_RING_BASE;
2221
2222 if (INTEL_INFO(dev)->gen >= 6) {
2223 /* non-kms not supported on gen6+ */
2224 ret = -ENODEV;
2225 goto err_ringbuf;
2226 }
2227
2228 /* Note: gem is not supported on gen5/ilk without kms (the corresponding
2229 * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
2230 * the special gen5 functions. */
2231 ring->add_request = i9xx_add_request;
2232 if (INTEL_INFO(dev)->gen < 4)
2233 ring->flush = gen2_render_ring_flush;
2234 else
2235 ring->flush = gen4_render_ring_flush;
2236 ring->get_seqno = ring_get_seqno;
2237 ring->set_seqno = ring_set_seqno;
2238 if (IS_GEN2(dev)) {
2239 ring->irq_get = i8xx_ring_get_irq;
2240 ring->irq_put = i8xx_ring_put_irq;
2241 } else {
2242 ring->irq_get = i9xx_ring_get_irq;
2243 ring->irq_put = i9xx_ring_put_irq;
2244 }
2245 ring->irq_enable_mask = I915_USER_INTERRUPT;
2246 ring->write_tail = ring_write_tail;
2247 if (INTEL_INFO(dev)->gen >= 4)
2248 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
2249 else if (IS_I830(dev) || IS_845G(dev))
2250 ring->dispatch_execbuffer = i830_dispatch_execbuffer;
2251 else
2252 ring->dispatch_execbuffer = i915_dispatch_execbuffer;
2253 ring->init = init_render_ring;
2254 ring->cleanup = render_ring_cleanup;
2255
2256 ring->dev = dev;
2257 INIT_LIST_HEAD(&ring->active_list);
2258 INIT_LIST_HEAD(&ring->request_list);
2259
2260 ringbuf->size = size;
2261 ringbuf->effective_size = ringbuf->size;
2262 if (IS_I830(ring->dev) || IS_845G(ring->dev))
2263 ringbuf->effective_size -= 2 * CACHELINE_BYTES;
2264
2265 ringbuf->virtual_start = ioremap_wc(start, size);
2266 if (ringbuf->virtual_start == NULL) {
2267 		DRM_ERROR("cannot ioremap virtual address for"
2268 " ring buffer\n");
2269 ret = -ENOMEM;
2270 goto err_ringbuf;
2271 }
2272
2273 if (!I915_NEED_GFX_HWS(dev)) {
2274 ret = init_phys_status_page(ring);
2275 if (ret)
2276 goto err_vstart;
2277 }
2278
2279 return 0;
2280
2281 err_vstart:
2282 iounmap(ringbuf->virtual_start);
2283 err_ringbuf:
2284 kfree(ringbuf);
2285 ring->buffer = NULL;
2286 return ret;
2287 }
2288
2289 int intel_init_bsd_ring_buffer(struct drm_device *dev)
2290 {
2291 struct drm_i915_private *dev_priv = dev->dev_private;
2292 struct intel_engine_cs *ring = &dev_priv->ring[VCS];
2293
2294 ring->name = "bsd ring";
2295 ring->id = VCS;
2296
2297 ring->write_tail = ring_write_tail;
2298 if (INTEL_INFO(dev)->gen >= 6) {
2299 ring->mmio_base = GEN6_BSD_RING_BASE;
2300 		/* gen6 bsd needs a special workaround for tail updates */
2301 if (IS_GEN6(dev))
2302 ring->write_tail = gen6_bsd_ring_write_tail;
2303 ring->flush = gen6_bsd_ring_flush;
2304 ring->add_request = gen6_add_request;
2305 ring->get_seqno = gen6_ring_get_seqno;
2306 ring->set_seqno = ring_set_seqno;
2307 if (INTEL_INFO(dev)->gen >= 8) {
2308 ring->irq_enable_mask =
2309 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
2310 ring->irq_get = gen8_ring_get_irq;
2311 ring->irq_put = gen8_ring_put_irq;
2312 ring->dispatch_execbuffer =
2313 gen8_ring_dispatch_execbuffer;
2314 if (i915_semaphore_is_enabled(dev)) {
2315 ring->semaphore.sync_to = gen8_ring_sync;
2316 ring->semaphore.signal = gen8_xcs_signal;
2317 GEN8_RING_SEMAPHORE_INIT;
2318 }
2319 } else {
2320 ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
2321 ring->irq_get = gen6_ring_get_irq;
2322 ring->irq_put = gen6_ring_put_irq;
2323 ring->dispatch_execbuffer =
2324 gen6_ring_dispatch_execbuffer;
2325 if (i915_semaphore_is_enabled(dev)) {
2326 ring->semaphore.sync_to = gen6_ring_sync;
2327 ring->semaphore.signal = gen6_signal;
2328 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
2329 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
2330 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
2331 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
2332 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2333 ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
2334 ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
2335 ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
2336 ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
2337 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2338 }
2339 }
2340 } else {
2341 ring->mmio_base = BSD_RING_BASE;
2342 ring->flush = bsd_ring_flush;
2343 ring->add_request = i9xx_add_request;
2344 ring->get_seqno = ring_get_seqno;
2345 ring->set_seqno = ring_set_seqno;
2346 if (IS_GEN5(dev)) {
2347 ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
2348 ring->irq_get = gen5_ring_get_irq;
2349 ring->irq_put = gen5_ring_put_irq;
2350 } else {
2351 ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
2352 ring->irq_get = i9xx_ring_get_irq;
2353 ring->irq_put = i9xx_ring_put_irq;
2354 }
2355 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
2356 }
2357 ring->init = init_ring_common;
2358
2359 return intel_init_ring_buffer(dev, ring);
2360 }
2361
2362 /**
2363  * Initialize the second BSD ring.
2364  * Note that this ring only exists on Broadwell GT3.
2365 */
2366 int intel_init_bsd2_ring_buffer(struct drm_device *dev)
2367 {
2368 struct drm_i915_private *dev_priv = dev->dev_private;
2369 struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
2370
2371 	if (INTEL_INFO(dev)->gen != 8) {
2372 DRM_ERROR("No dual-BSD ring on non-BDW machine\n");
2373 return -EINVAL;
2374 }
2375
2376 ring->name = "bsd2 ring";
2377 ring->id = VCS2;
2378
2379 ring->write_tail = ring_write_tail;
2380 ring->mmio_base = GEN8_BSD2_RING_BASE;
2381 ring->flush = gen6_bsd_ring_flush;
2382 ring->add_request = gen6_add_request;
2383 ring->get_seqno = gen6_ring_get_seqno;
2384 ring->set_seqno = ring_set_seqno;
2385 ring->irq_enable_mask =
2386 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
2387 ring->irq_get = gen8_ring_get_irq;
2388 ring->irq_put = gen8_ring_put_irq;
2389 ring->dispatch_execbuffer =
2390 gen8_ring_dispatch_execbuffer;
2391 if (i915_semaphore_is_enabled(dev)) {
2392 ring->semaphore.sync_to = gen8_ring_sync;
2393 ring->semaphore.signal = gen8_xcs_signal;
2394 GEN8_RING_SEMAPHORE_INIT;
2395 }
2396 ring->init = init_ring_common;
2397
2398 return intel_init_ring_buffer(dev, ring);
2399 }
2400
2401 int intel_init_blt_ring_buffer(struct drm_device *dev)
2402 {
2403 struct drm_i915_private *dev_priv = dev->dev_private;
2404 struct intel_engine_cs *ring = &dev_priv->ring[BCS];
2405
2406 ring->name = "blitter ring";
2407 ring->id = BCS;
2408
2409 ring->mmio_base = BLT_RING_BASE;
2410 ring->write_tail = ring_write_tail;
2411 ring->flush = gen6_ring_flush;
2412 ring->add_request = gen6_add_request;
2413 ring->get_seqno = gen6_ring_get_seqno;
2414 ring->set_seqno = ring_set_seqno;
2415 if (INTEL_INFO(dev)->gen >= 8) {
2416 ring->irq_enable_mask =
2417 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
2418 ring->irq_get = gen8_ring_get_irq;
2419 ring->irq_put = gen8_ring_put_irq;
2420 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2421 if (i915_semaphore_is_enabled(dev)) {
2422 ring->semaphore.sync_to = gen8_ring_sync;
2423 ring->semaphore.signal = gen8_xcs_signal;
2424 GEN8_RING_SEMAPHORE_INIT;
2425 }
2426 } else {
2427 ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
2428 ring->irq_get = gen6_ring_get_irq;
2429 ring->irq_put = gen6_ring_put_irq;
2430 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2431 if (i915_semaphore_is_enabled(dev)) {
2432 ring->semaphore.signal = gen6_signal;
2433 ring->semaphore.sync_to = gen6_ring_sync;
2434 			/*
2435 			 * These mailbox semaphores are only used on pre-gen8
2436 			 * platforms, and there is no VCS2 ring before gen8,
2437 			 * so the BCS/VCS2 slot is initialized as INVALID.
2438 			 * Gen8 initializes the semaphore between BCS and
2439 			 * VCS2 separately.
2440 			 */
2441 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
2442 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
2443 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
2444 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
2445 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2446 ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
2447 ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
2448 ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
2449 ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
2450 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2451 }
2452 }
2453 ring->init = init_ring_common;
2454
2455 return intel_init_ring_buffer(dev, ring);
2456 }
2457
2458 int intel_init_vebox_ring_buffer(struct drm_device *dev)
2459 {
2460 struct drm_i915_private *dev_priv = dev->dev_private;
2461 struct intel_engine_cs *ring = &dev_priv->ring[VECS];
2462
2463 ring->name = "video enhancement ring";
2464 ring->id = VECS;
2465
2466 ring->mmio_base = VEBOX_RING_BASE;
2467 ring->write_tail = ring_write_tail;
2468 ring->flush = gen6_ring_flush;
2469 ring->add_request = gen6_add_request;
2470 ring->get_seqno = gen6_ring_get_seqno;
2471 ring->set_seqno = ring_set_seqno;
2472
2473 if (INTEL_INFO(dev)->gen >= 8) {
2474 ring->irq_enable_mask =
2475 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
2476 ring->irq_get = gen8_ring_get_irq;
2477 ring->irq_put = gen8_ring_put_irq;
2478 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2479 if (i915_semaphore_is_enabled(dev)) {
2480 ring->semaphore.sync_to = gen8_ring_sync;
2481 ring->semaphore.signal = gen8_xcs_signal;
2482 GEN8_RING_SEMAPHORE_INIT;
2483 }
2484 } else {
2485 ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
2486 ring->irq_get = hsw_vebox_get_irq;
2487 ring->irq_put = hsw_vebox_put_irq;
2488 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2489 if (i915_semaphore_is_enabled(dev)) {
2490 ring->semaphore.sync_to = gen6_ring_sync;
2491 ring->semaphore.signal = gen6_signal;
2492 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
2493 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
2494 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
2495 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
2496 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2497 ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
2498 ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
2499 ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
2500 ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
2501 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2502 }
2503 }
2504 ring->init = init_ring_common;
2505
2506 return intel_init_ring_buffer(dev, ring);
2507 }
2508
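/*
 * Flush any writes still sitting in the GPU caches out to memory. This is
 * a no-op unless something has marked the caches dirty since the last
 * flush.
 */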
2509 int
2510 intel_ring_flush_all_caches(struct intel_engine_cs *ring)
2511 {
2512 int ret;
2513
2514 if (!ring->gpu_caches_dirty)
2515 return 0;
2516
2517 ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
2518 if (ret)
2519 return ret;
2520
2521 trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
2522
2523 ring->gpu_caches_dirty = false;
2524 return 0;
2525 }
2526
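/*
 * Invalidate the GPU read caches (flushing first if they are dirty) so
 * that the next batch does not sample stale data.
 */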
2527 int
2528 intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
2529 {
2530 uint32_t flush_domains;
2531 int ret;
2532
2533 flush_domains = 0;
2534 if (ring->gpu_caches_dirty)
2535 flush_domains = I915_GEM_GPU_DOMAINS;
2536
2537 ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
2538 if (ret)
2539 return ret;
2540
2541 trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
2542
2543 ring->gpu_caches_dirty = false;
2544 return 0;
2545 }
2546
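/*
 * Wait for the ring to drain and then stop it; used when tearing the ring
 * down. A failure to idle is only reported if no GPU reset is already in
 * progress.
 */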
2547 void
2548 intel_stop_ring_buffer(struct intel_engine_cs *ring)
2549 {
2550 int ret;
2551
2552 if (!intel_ring_initialized(ring))
2553 return;
2554
2555 ret = intel_ring_idle(ring);
2556 if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
2557 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
2558 ring->name, ret);
2559
2560 stop_ring(ring);
2561 }