drm/i915: Ensure OLS & PLR are always in sync
drivers/gpu/drm/i915/intel_ringbuffer.c
/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao<haihao.xiang@intel.com>
 *
 */

#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"

bool
intel_ring_initialized(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;

	if (!dev)
		return false;

	if (i915.enable_execlists) {
		struct intel_context *dctx = ring->default_context;
		struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf;

		return ringbuf->obj;
	} else
		return ring->buffer && ring->buffer->obj;
}

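/*
 * Ring space helpers: free space is the gap between the hardware HEAD and
 * the software TAIL, less I915_RING_FREE_SPACE so the ring is never filled
 * completely; a negative result below simply means the gap wraps around.
 */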
int __intel_ring_space(int head, int tail, int size)
{
	int space = head - (tail + I915_RING_FREE_SPACE);
	if (space < 0)
		space += size;
	return space;
}

int intel_ring_space(struct intel_ringbuffer *ringbuf)
{
	return __intel_ring_space(ringbuf->head & HEAD_ADDR,
				  ringbuf->tail, ringbuf->size);
}

bool intel_ring_stopped(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
}

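/*
 * Wrap the software tail to the ring size and write it to the hardware tail
 * register, unless this ring has been stopped for testing via
 * gpu_error.stop_rings (see intel_ring_stopped() above).
 */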
void __intel_ring_advance(struct intel_engine_cs *ring)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	ringbuf->tail &= ringbuf->size - 1;
	if (intel_ring_stopped(ring))
		return;
	ring->write_tail(ring, ringbuf->tail);
}

static int
gen2_render_ring_flush(struct intel_engine_cs *ring,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	u32 cmd;
	int ret;

	cmd = MI_FLUSH;
	if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
		cmd |= MI_NO_WRITE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
		cmd |= MI_READ_FLUSH;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen4_render_ring_flush(struct intel_engine_cs *ring,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	struct drm_device *dev = ring->dev;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
{
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0); /* low dword */
	intel_ring_emit(ring, 0); /* high dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_render_ring_flush(struct intel_engine_cs *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(ring);
	if (ret)
		return ret;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	}

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			      PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int gen7_ring_fbc_flush(struct intel_engine_cs *ring, u32 value)
{
	int ret;

	if (!ring->fbc_dirty)
		return 0;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;
	/* WaFbcNukeOn3DBlt:ivb/hsw */
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, MSG_FBC_REND_STATE);
	intel_ring_emit(ring, value);
	intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT);
	intel_ring_emit(ring, MSG_FBC_REND_STATE);
	intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
	intel_ring_advance(ring);

	ring->fbc_dirty = false;
	return 0;
}

static int
gen7_render_ring_flush(struct intel_engine_cs *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	/*
	 * Ensure that any following seqno writes only happen when the render
	 * cache is indeed flushed.
	 *
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 */
	flags |= PIPE_CONTROL_CS_STALL;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		/* Workaround: we must issue a pipe_control with CS-stall bit
		 * set before a pipe_control command that has the state cache
		 * invalidate bit set. */
		gen7_render_ring_cs_stall_wa(ring);
	}

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	if (!invalidate_domains && flush_domains)
		return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);

	return 0;
}

static int
gen8_emit_pipe_control(struct intel_engine_cs *ring,
		       u32 flags, u32 scratch_addr)
{
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen8_render_ring_flush(struct intel_engine_cs *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	flags |= PIPE_CONTROL_CS_STALL;

	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
		ret = gen8_emit_pipe_control(ring,
					     PIPE_CONTROL_CS_STALL |
					     PIPE_CONTROL_STALL_AT_SCOREBOARD,
					     0);
		if (ret)
			return ret;
	}

	ret = gen8_emit_pipe_control(ring, flags, scratch_addr);
	if (ret)
		return ret;

	if (!invalidate_domains && flush_domains)
		return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);

	return 0;
}

static void ring_write_tail(struct intel_engine_cs *ring,
			    u32 value)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u64 intel_ring_get_active_head(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u64 acthd;

	if (INTEL_INFO(ring->dev)->gen >= 8)
		acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base),
					 RING_ACTHD_UDW(ring->mmio_base));
	else if (INTEL_INFO(ring->dev)->gen >= 4)
		acthd = I915_READ(RING_ACTHD(ring->mmio_base));
	else
		acthd = I915_READ(ACTHD);

	return acthd;
}

static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_INFO(ring->dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}

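/*
 * Ask the hardware to stop the ring: set STOP_RING, wait for MODE_IDLE,
 * then clear the CTL, HEAD and TAIL registers.  Returns true if the head
 * ended up back at zero.
 */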
static bool stop_ring(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = to_i915(ring->dev);

	if (!IS_GEN2(ring->dev)) {
		I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
		if (wait_for((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
			DRM_ERROR("%s : timed out trying to stop ring\n", ring->name);
			/* Sometimes we observe that the idle flag is not
			 * set even though the ring is empty. So double
			 * check before giving up.
			 */
			if (I915_READ_HEAD(ring) != I915_READ_TAIL(ring))
				return false;
		}
	}

	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	if (!IS_GEN2(ring->dev)) {
		(void)I915_READ_CTL(ring);
		I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
	}

	return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
}

static int init_ring_common(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ringbuffer *ringbuf = ring->buffer;
	struct drm_i915_gem_object *obj = ringbuf->obj;
	int ret = 0;

	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	if (!stop_ring(ring)) {
		/* G45 ring initialization often fails to reset head to zero */
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		if (!stop_ring(ring)) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
			ret = -EIO;
			goto out;
		}
	}

	if (I915_NEED_GFX_HWS(dev))
		intel_ring_setup_status_page(ring);
	else
		ring_setup_phys_status_page(ring);

	/* Enforce ordering by reading HEAD register back */
	I915_READ_HEAD(ring);

	/* Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values. */
	I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));

	/* WaClearRingBufHeadRegAtInit:ctg,elk */
	if (I915_READ_HEAD(ring))
		DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
			  ring->name, I915_READ_HEAD(ring));
	I915_WRITE_HEAD(ring, 0);
	(void)I915_READ_HEAD(ring);

	I915_WRITE_CTL(ring,
			((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
		     I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
			  ring->name,
			  I915_READ_CTL(ring), I915_READ_CTL(ring) & RING_VALID,
			  I915_READ_HEAD(ring), I915_READ_TAIL(ring),
			  I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj));
		ret = -EIO;
		goto out;
	}

	ringbuf->head = I915_READ_HEAD(ring);
	ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	ringbuf->space = intel_ring_space(ringbuf);
	ringbuf->last_retired_head = -1;

	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));

out:
	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

void
intel_fini_pipe_control(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;

	if (ring->scratch.obj == NULL)
		return;

	if (INTEL_INFO(dev)->gen >= 5) {
		kunmap(sg_page(ring->scratch.obj->pages->sgl));
		i915_gem_object_ggtt_unpin(ring->scratch.obj);
	}

	drm_gem_object_unreference(&ring->scratch.obj->base);
	ring->scratch.obj = NULL;
}

int
intel_init_pipe_control(struct intel_engine_cs *ring)
{
	int ret;

	if (ring->scratch.obj)
		return 0;

	ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
	if (ring->scratch.obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}

	ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
	if (ret)
		goto err_unref;

	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
	if (ret)
		goto err_unref;

	ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
	ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
	if (ring->scratch.cpu_page == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
			 ring->name, ring->scratch.gtt_offset);
	return 0;

err_unpin:
	i915_gem_object_ggtt_unpin(ring->scratch.obj);
err_unref:
	drm_gem_object_unreference(&ring->scratch.obj->base);
err:
	return ret;
}

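/*
 * Replay the register workarounds collected in dev_priv->workarounds as a
 * single MI_LOAD_REGISTER_IMM block, flushing GPU caches before and after.
 */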
static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
				       struct intel_context *ctx)
{
	int ret, i;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_workarounds *w = &dev_priv->workarounds;

	if (WARN_ON(w->count == 0))
		return 0;

	ring->gpu_caches_dirty = true;
	ret = intel_ring_flush_all_caches(ring);
	if (ret)
		return ret;

	ret = intel_ring_begin(ring, (w->count * 2 + 2));
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
	for (i = 0; i < w->count; i++) {
		intel_ring_emit(ring, w->reg[i].addr);
		intel_ring_emit(ring, w->reg[i].value);
	}
	intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);

	ring->gpu_caches_dirty = true;
	ret = intel_ring_flush_all_caches(ring);
	if (ret)
		return ret;

	DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);

	return 0;
}

static int wa_add(struct drm_i915_private *dev_priv,
		  const u32 addr, const u32 val, const u32 mask)
{
	const u32 idx = dev_priv->workarounds.count;

	if (WARN_ON(idx >= I915_MAX_WA_REGS))
		return -ENOSPC;

	dev_priv->workarounds.reg[idx].addr = addr;
	dev_priv->workarounds.reg[idx].value = val;
	dev_priv->workarounds.reg[idx].mask = mask;

	dev_priv->workarounds.count++;

	return 0;
}

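/*
 * Helpers for building the workaround list: WA_REG() appends one
 * register/value/mask entry via wa_add() and returns early on overflow;
 * the wrappers below cover the common masked set/clear and full-write cases.
 */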
#define WA_REG(addr, val, mask) { \
		const int r = wa_add(dev_priv, (addr), (val), (mask)); \
		if (r) \
			return r; \
	}

#define WA_SET_BIT_MASKED(addr, mask) \
	WA_REG(addr, _MASKED_BIT_ENABLE(mask), (mask) & 0xffff)

#define WA_CLR_BIT_MASKED(addr, mask) \
	WA_REG(addr, _MASKED_BIT_DISABLE(mask), (mask) & 0xffff)

#define WA_SET_BIT(addr, mask) WA_REG(addr, I915_READ(addr) | (mask), mask)
#define WA_CLR_BIT(addr, mask) WA_REG(addr, I915_READ(addr) & ~(mask), mask)

#define WA_WRITE(addr, val) WA_REG(addr, val, 0xffffffff)

static int bdw_init_workarounds(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* WaDisablePartialInstShootdown:bdw */
	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
			  STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
			  DOP_CLOCK_GATING_DISABLE);

	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN8_SAMPLER_POWER_BYPASS_DIS);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_NON_COHERENT |
			  (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));

	/* Wa4x4STCOptimizationDisable:bdw */
	WA_SET_BIT_MASKED(CACHE_MODE_1,
			  GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	WA_SET_BIT_MASKED(GEN7_GT_MODE,
			  GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);

	return 0;
}

static int chv_init_workarounds(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* WaDisablePartialInstShootdown:chv */
	/* WaDisableThreadStallDopClockGating:chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
			  STALL_DOP_GATING_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:chv */
	/* WaHdcDisableFetchWhenMasked:chv */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_NON_COHERENT |
			  HDC_DONOT_FETCH_MEM_WHEN_MASKED);

	return 0;
}

int init_workarounds_ring(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(ring->id != RCS);

	dev_priv->workarounds.count = 0;

	if (IS_BROADWELL(dev))
		return bdw_init_workarounds(ring);

	if (IS_CHERRYVIEW(dev))
		return chv_init_workarounds(ring);

	return 0;
}

static int init_render_ring(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = init_ring_common(ring);
	if (ret)
		return ret;

	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
	if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
	 */
	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 9)
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	/* Required for the hardware to program scanline values for waiting */
	/* WaEnableFlushTlbInvalidationMode:snb */
	if (INTEL_INFO(dev)->gen == 6)
		I915_WRITE(GFX_MODE,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));

	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
	if (IS_GEN7(dev))
		I915_WRITE(GFX_MODE_GEN7,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));

	if (INTEL_INFO(dev)->gen >= 5) {
		ret = intel_init_pipe_control(ring);
		if (ret)
			return ret;
	}

	if (IS_GEN6(dev)) {
		/* From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset. LRA replacement
		 *  policy is not supported."
		 */
		I915_WRITE(CACHE_MODE_0,
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
	}

	if (INTEL_INFO(dev)->gen >= 6)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	if (HAS_L3_DPF(dev))
		I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));

	return init_workarounds_ring(ring);
}

static void render_ring_cleanup(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->semaphore_obj) {
		i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
		drm_gem_object_unreference(&dev_priv->semaphore_obj->base);
		dev_priv->semaphore_obj = NULL;
	}

	intel_fini_pipe_control(ring);
}

static int gen8_rcs_signal(struct intel_engine_cs *signaller,
			   unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 8
	struct drm_device *dev = signaller->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *waiter;
	int i, ret, num_rings;

	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS

	ret = intel_ring_begin(signaller, num_dwords);
	if (ret)
		return ret;

	for_each_ring(waiter, dev_priv, i) {
		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
			continue;

		intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
		intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
					   PIPE_CONTROL_QW_WRITE |
					   PIPE_CONTROL_FLUSH_ENABLE);
		intel_ring_emit(signaller, lower_32_bits(gtt_offset));
		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
		intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
		intel_ring_emit(signaller, 0);
		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
					   MI_SEMAPHORE_TARGET(waiter->id));
		intel_ring_emit(signaller, 0);
	}

	return 0;
}

static int gen8_xcs_signal(struct intel_engine_cs *signaller,
			   unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 6
	struct drm_device *dev = signaller->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *waiter;
	int i, ret, num_rings;

	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS

	ret = intel_ring_begin(signaller, num_dwords);
	if (ret)
		return ret;

	for_each_ring(waiter, dev_priv, i) {
		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
			continue;

		intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
					   MI_FLUSH_DW_OP_STOREDW);
		intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
					   MI_FLUSH_DW_USE_GTT);
		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
		intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
					   MI_SEMAPHORE_TARGET(waiter->id));
		intel_ring_emit(signaller, 0);
	}

	return 0;
}

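/*
 * gen6/gen7 semaphore signalling: write the about-to-be-emitted seqno into
 * every other ring's mailbox register with MI_LOAD_REGISTER_IMM, padding
 * with an MI_NOOP when the dword count was rounded up.
 */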
static int gen6_signal(struct intel_engine_cs *signaller,
		       unsigned int num_dwords)
{
	struct drm_device *dev = signaller->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *useless;
	int i, ret, num_rings;

#define MBOX_UPDATE_DWORDS 3
	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
	num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
#undef MBOX_UPDATE_DWORDS

	ret = intel_ring_begin(signaller, num_dwords);
	if (ret)
		return ret;

	for_each_ring(useless, dev_priv, i) {
		u32 mbox_reg = signaller->semaphore.mbox.signal[i];
		if (mbox_reg != GEN6_NOSYNC) {
			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
			intel_ring_emit(signaller, mbox_reg);
			intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
		}
	}

	/* If num_dwords was rounded, make sure the tail pointer is correct */
	if (num_rings % 2 == 0)
		intel_ring_emit(signaller, MI_NOOP);

	return 0;
}

/**
 * gen6_add_request - Update the semaphore mailbox registers
 *
 * @ring - ring that is adding a request
 * @seqno - return seqno stuck into the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int
gen6_add_request(struct intel_engine_cs *ring)
{
	int ret;

	if (ring->semaphore.signal)
		ret = ring->semaphore.signal(ring, 4);
	else
		ret = intel_ring_begin(ring, 4);

	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	__intel_ring_advance(ring);

	return 0;
}

static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
					      u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return dev_priv->last_seqno < seqno;
}

/**
 * intel_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter - ring that is waiting
 * @signaller - ring which has, or will signal
 * @seqno - seqno which the waiter will block on
 */

static int
gen8_ring_sync(struct intel_engine_cs *waiter,
	       struct intel_engine_cs *signaller,
	       u32 seqno)
{
	struct drm_i915_private *dev_priv = waiter->dev->dev_private;
	int ret;

	ret = intel_ring_begin(waiter, 4);
	if (ret)
		return ret;

	intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
				MI_SEMAPHORE_GLOBAL_GTT |
				MI_SEMAPHORE_POLL |
				MI_SEMAPHORE_SAD_GTE_SDD);
	intel_ring_emit(waiter, seqno);
	intel_ring_emit(waiter,
			lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
	intel_ring_emit(waiter,
			upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
	intel_ring_advance(waiter);
	return 0;
}

static int
gen6_ring_sync(struct intel_engine_cs *waiter,
	       struct intel_engine_cs *signaller,
	       u32 seqno)
{
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;
	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
	int ret;

	/* Throughout all of the GEM code, seqno passed implies our current
	 * seqno is >= the last seqno executed. However for hardware the
	 * comparison is strictly greater than.
	 */
	seqno -= 1;

	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);

	ret = intel_ring_begin(waiter, 4);
	if (ret)
		return ret;

	/* If seqno wrap happened, omit the wait with no-ops */
	if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
		intel_ring_emit(waiter, dw1 | wait_mbox);
		intel_ring_emit(waiter, seqno);
		intel_ring_emit(waiter, 0);
		intel_ring_emit(waiter, MI_NOOP);
	} else {
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
	}
	intel_ring_advance(waiter);

	return 0;
}

#define PIPE_CONTROL_FLUSH(ring__, addr__)					\
do {									\
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |	\
		 PIPE_CONTROL_DEPTH_STALL);				\
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);	\
	intel_ring_emit(ring__, 0);					\
	intel_ring_emit(ring__, 0);					\
} while (0)

static int
pc_render_add_request(struct intel_engine_cs *ring)
{
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to workaround the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
	 */
	ret = intel_ring_begin(ring, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, 0);
	__intel_ring_advance(ring);

	return 0;
}

static u32
gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
	/* Workaround to force correct ordering between irq and seqno writes on
	 * ivb (and maybe also on snb) by reading from a CS register (like
	 * ACTHD) before reading the status page. */
	if (!lazy_coherency) {
		struct drm_i915_private *dev_priv = ring->dev->dev_private;
		POSTING_READ(RING_ACTHD(ring->mmio_base));
	}

	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void
ring_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}

static u32
pc_render_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
	return ring->scratch.cpu_page[0];
}

static void
pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
	ring->scratch.cpu_page[0] = seqno;
}

static bool
gen5_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0)
		gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
gen5_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0)
		gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
i9xx_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!intel_irqs_enabled(dev_priv))
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
i9xx_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		dev_priv->irq_mask |= ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
i8xx_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!intel_irqs_enabled(dev_priv))
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
i8xx_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		dev_priv->irq_mask |= ring->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

void intel_ring_setup_status_page(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 mmio = 0;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev)) {
		switch (ring->id) {
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		/*
		 * VCS2 actually doesn't exist on Gen7. Only shut up
		 * gcc switch check warning
		 */
		case VCS2:
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		case VECS:
			mmio = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(ring->dev)) {
		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
	} else {
		/* XXX: gen8 returns to sanity */
		mmio = RING_HWS_PGA(ring->mmio_base);
	}

	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);

	/*
	 * Flush the TLB for this page
	 *
	 * FIXME: These two bits have disappeared on gen8, so a question
	 * arises: do we still need this and if so how should we go about
	 * invalidating the TLB?
	 */
	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
		u32 reg = RING_INSTPM(ring->mmio_base);

		/* ring should be idle before issuing a sync flush */
		WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);

		I915_WRITE(reg,
			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					      INSTPM_SYNC_FLUSH));
		if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
			     1000))
			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
				  ring->name);
	}
}

static int
bsd_ring_flush(struct intel_engine_cs *ring,
	       u32 invalidate_domains,
	       u32 flush_domains)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
i9xx_add_request(struct intel_engine_cs *ring)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	__intel_ring_advance(ring);

	return 0;
}

static bool
gen6_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS)
			I915_WRITE_IMR(ring,
				       ~(ring->irq_enable_mask |
					 GT_PARITY_ERROR(dev)));
		else
			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
gen6_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS)
			I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
		else
			I915_WRITE_IMR(ring, ~0);
		gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
hsw_vebox_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
hsw_vebox_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		I915_WRITE_IMR(ring, ~0);
		gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
gen8_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS) {
			I915_WRITE_IMR(ring,
				       ~(ring->irq_enable_mask |
					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
		} else {
			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		}
		POSTING_READ(RING_IMR(ring->mmio_base));
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
gen8_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS) {
			I915_WRITE_IMR(ring,
				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
		} else {
			I915_WRITE_IMR(ring, ~0);
		}
		POSTING_READ(RING_IMR(ring->mmio_base));
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static int
i965_dispatch_execbuffer(struct intel_engine_cs *ring,
			 u64 offset, u32 length,
			 unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			MI_BATCH_GTT |
			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT (256*1024)
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
i830_dispatch_execbuffer(struct intel_engine_cs *ring,
			 u64 offset, u32 len,
			 unsigned flags)
{
	u32 cs_offset = ring->scratch.gtt_offset;
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	/* Evict the invalid PTE TLBs */
	intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
	intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
	intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
	intel_ring_emit(ring, cs_offset);
	intel_ring_emit(ring, 0xdeadbeef);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	if ((flags & I915_DISPATCH_PINNED) == 0) {
		if (len > I830_BATCH_LIMIT)
			return -ENOSPC;

		ret = intel_ring_begin(ring, 6 + 2);
		if (ret)
			return ret;

		/* Blit the batch (which has now all relocs applied) to the
		 * stable batch scratch bo area (so that the CS never
		 * stumbles over its tlb invalidation bug) ...
		 */
		intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
		intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
		intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
		intel_ring_emit(ring, cs_offset);
		intel_ring_emit(ring, 4096);
		intel_ring_emit(ring, offset);

		intel_ring_emit(ring, MI_FLUSH);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);

		/* ... and execute it. */
		offset = cs_offset;
	}

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER);
	intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
	intel_ring_emit(ring, offset + len - 8);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
i915_dispatch_execbuffer(struct intel_engine_cs *ring,
			 u64 offset, u32 len,
			 unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
	intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
	intel_ring_advance(ring);

	return 0;
}

static void cleanup_status_page(struct intel_engine_cs *ring)
{
	struct drm_i915_gem_object *obj;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;

	kunmap(sg_page(obj->pages->sgl));
	i915_gem_object_ggtt_unpin(obj);
	drm_gem_object_unreference(&obj->base);
	ring->status_page.obj = NULL;
}

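/*
 * Allocate (once) and pin the hardware status page object, then kmap it and
 * clear it; on !LLC platforms it is kept in the low mappable GGTT.
 */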
a4872ba6 1653static int init_status_page(struct intel_engine_cs *ring)
62fdfeaf 1654{
05394f39 1655 struct drm_i915_gem_object *obj;
62fdfeaf 1656
e3efda49 1657 if ((obj = ring->status_page.obj) == NULL) {
1f767e02 1658 unsigned flags;
e3efda49 1659 int ret;
e4ffd173 1660
e3efda49
CW
1661 obj = i915_gem_alloc_object(ring->dev, 4096);
1662 if (obj == NULL) {
1663 DRM_ERROR("Failed to allocate status page\n");
1664 return -ENOMEM;
1665 }
62fdfeaf 1666
e3efda49
CW
1667 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
1668 if (ret)
1669 goto err_unref;
1670
1f767e02
CW
1671 flags = 0;
1672 if (!HAS_LLC(ring->dev))
1673 /* On g33, we cannot place HWS above 256MiB, so
1674 * restrict its pinning to the low mappable arena.
1675 * Though this restriction is not documented for
1676 * gen4, gen5, or byt, they also behave similarly
1677 * and hang if the HWS is placed at the top of the
1678 * GTT. To generalise, it appears that all !llc
1679 * platforms have issues with us placing the HWS
1680 * above the mappable region (even though we never
 1681 * actually map it).
1682 */
1683 flags |= PIN_MAPPABLE;
1684 ret = i915_gem_obj_ggtt_pin(obj, 4096, flags);
e3efda49
CW
1685 if (ret) {
1686err_unref:
1687 drm_gem_object_unreference(&obj->base);
1688 return ret;
1689 }
1690
1691 ring->status_page.obj = obj;
1692 }
62fdfeaf 1693
f343c5f6 1694 ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
9da3da66 1695 ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
8187a2b7 1696 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
62fdfeaf 1697
8187a2b7
ZN
1698 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
1699 ring->name, ring->status_page.gfx_addr);
62fdfeaf
EA
1700
1701 return 0;
62fdfeaf
EA
1702}
1703
a4872ba6 1704static int init_phys_status_page(struct intel_engine_cs *ring)
6b8294a4
CW
1705{
1706 struct drm_i915_private *dev_priv = ring->dev->dev_private;
6b8294a4
CW
1707
1708 if (!dev_priv->status_page_dmah) {
1709 dev_priv->status_page_dmah =
1710 drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
1711 if (!dev_priv->status_page_dmah)
1712 return -ENOMEM;
1713 }
1714
6b8294a4
CW
1715 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1716 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1717
1718 return 0;
1719}
1720
7ba717cf 1721void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
2919d291 1722{
2919d291 1723 iounmap(ringbuf->virtual_start);
7ba717cf 1724 ringbuf->virtual_start = NULL;
2919d291 1725 i915_gem_object_ggtt_unpin(ringbuf->obj);
7ba717cf
TD
1726}
1727
1728int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
1729 struct intel_ringbuffer *ringbuf)
1730{
1731 struct drm_i915_private *dev_priv = to_i915(dev);
1732 struct drm_i915_gem_object *obj = ringbuf->obj;
1733 int ret;
1734
1735 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
1736 if (ret)
1737 return ret;
1738
1739 ret = i915_gem_object_set_to_gtt_domain(obj, true);
1740 if (ret) {
1741 i915_gem_object_ggtt_unpin(obj);
1742 return ret;
1743 }
1744
1745 ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
1746 i915_gem_obj_ggtt_offset(obj), ringbuf->size);
1747 if (ringbuf->virtual_start == NULL) {
1748 i915_gem_object_ggtt_unpin(obj);
1749 return -EINVAL;
1750 }
1751
1752 return 0;
1753}
1754
1755void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
1756{
2919d291
OM
1757 drm_gem_object_unreference(&ringbuf->obj->base);
1758 ringbuf->obj = NULL;
1759}
1760
84c2377f
OM
1761int intel_alloc_ringbuffer_obj(struct drm_device *dev,
1762 struct intel_ringbuffer *ringbuf)
62fdfeaf 1763{
05394f39 1764 struct drm_i915_gem_object *obj;
62fdfeaf 1765
ebc052e0
CW
1766 obj = NULL;
1767 if (!HAS_LLC(dev))
93b0a4e0 1768 obj = i915_gem_object_create_stolen(dev, ringbuf->size);
ebc052e0 1769 if (obj == NULL)
93b0a4e0 1770 obj = i915_gem_alloc_object(dev, ringbuf->size);
e3efda49
CW
1771 if (obj == NULL)
1772 return -ENOMEM;
8187a2b7 1773
24f3a8cf
AG
1774 /* mark ring buffers as read-only from GPU side by default */
1775 obj->gt_ro = 1;
1776
93b0a4e0 1777 ringbuf->obj = obj;
e3efda49 1778
7ba717cf 1779 return 0;
e3efda49
CW
1780}
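/*
 * A minimal sketch of how the helpers above combine, assuming the
 * declarations from intel_ringbuffer.h; example_setup_ringbuffer() is a
 * hypothetical wrapper (the real pairing is done by intel_init_ring_buffer()
 * below): allocate the backing object, then pin and map it, unwinding the
 * allocation on failure.
 */
static int example_setup_ringbuffer(struct drm_device *dev,
				    struct intel_ringbuffer *ringbuf)
{
	int ret;

	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
	if (ret)
		return ret;

	ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
	if (ret) {
		intel_destroy_ringbuffer_obj(ringbuf);
		return ret;
	}

	return 0;
}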
1781
1782static int intel_init_ring_buffer(struct drm_device *dev,
a4872ba6 1783 struct intel_engine_cs *ring)
e3efda49 1784{
8ee14975 1785 struct intel_ringbuffer *ringbuf = ring->buffer;
e3efda49
CW
1786 int ret;
1787
8ee14975
OM
1788 if (ringbuf == NULL) {
1789 ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
1790 if (!ringbuf)
1791 return -ENOMEM;
1792 ring->buffer = ringbuf;
1793 }
1794
e3efda49
CW
1795 ring->dev = dev;
1796 INIT_LIST_HEAD(&ring->active_list);
1797 INIT_LIST_HEAD(&ring->request_list);
cc9130be 1798 INIT_LIST_HEAD(&ring->execlist_queue);
93b0a4e0 1799 ringbuf->size = 32 * PAGE_SIZE;
0c7dd53b 1800 ringbuf->ring = ring;
ebc348b2 1801 memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
e3efda49
CW
1802
1803 init_waitqueue_head(&ring->irq_queue);
1804
1805 if (I915_NEED_GFX_HWS(dev)) {
1806 ret = init_status_page(ring);
1807 if (ret)
8ee14975 1808 goto error;
e3efda49
CW
1809 } else {
1810 BUG_ON(ring->id != RCS);
1811 ret = init_phys_status_page(ring);
1812 if (ret)
8ee14975 1813 goto error;
e3efda49
CW
1814 }
1815
7ba717cf
TD
1816 if (ringbuf->obj == NULL) {
1817 ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
1818 if (ret) {
1819 DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
1820 ring->name, ret);
1821 goto error;
1822 }
1823
1824 ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
1825 if (ret) {
1826 DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
1827 ring->name, ret);
1828 intel_destroy_ringbuffer_obj(ringbuf);
1829 goto error;
1830 }
e3efda49 1831 }
62fdfeaf 1832
55249baa
CW
1833 /* Workaround an erratum on the i830 which causes a hang if
1834 * the TAIL pointer points to within the last 2 cachelines
1835 * of the buffer.
1836 */
93b0a4e0 1837 ringbuf->effective_size = ringbuf->size;
e3efda49 1838 if (IS_I830(dev) || IS_845G(dev))
93b0a4e0 1839 ringbuf->effective_size -= 2 * CACHELINE_BYTES;
55249baa 1840
44e895a8
BV
1841 ret = i915_cmd_parser_init_ring(ring);
1842 if (ret)
8ee14975
OM
1843 goto error;
1844
1845 ret = ring->init(ring);
1846 if (ret)
1847 goto error;
1848
1849 return 0;
351e3db2 1850
8ee14975
OM
1851error:
1852 kfree(ringbuf);
1853 ring->buffer = NULL;
1854 return ret;
62fdfeaf
EA
1855}
1856
a4872ba6 1857void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
62fdfeaf 1858{
6402c330
JH
1859 struct drm_i915_private *dev_priv;
1860 struct intel_ringbuffer *ringbuf;
33626e6a 1861
93b0a4e0 1862 if (!intel_ring_initialized(ring))
62fdfeaf
EA
1863 return;
1864
6402c330
JH
1865 dev_priv = to_i915(ring->dev);
1866 ringbuf = ring->buffer;
1867
e3efda49 1868 intel_stop_ring_buffer(ring);
de8f0a50 1869 WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
33626e6a 1870
7ba717cf 1871 intel_unpin_ringbuffer_obj(ringbuf);
2919d291 1872 intel_destroy_ringbuffer_obj(ringbuf);
3d57e5bd
BW
1873 ring->preallocated_lazy_request = NULL;
1874 ring->outstanding_lazy_seqno = 0;
78501eac 1875
8d19215b
ZN
1876 if (ring->cleanup)
1877 ring->cleanup(ring);
1878
78501eac 1879 cleanup_status_page(ring);
44e895a8
BV
1880
1881 i915_cmd_parser_fini_ring(ring);
8ee14975 1882
93b0a4e0 1883 kfree(ringbuf);
8ee14975 1884 ring->buffer = NULL;
62fdfeaf
EA
1885}
1886
a4872ba6 1887static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
a71d8d94 1888{
93b0a4e0 1889 struct intel_ringbuffer *ringbuf = ring->buffer;
a71d8d94 1890 struct drm_i915_gem_request *request;
1cf0ba14 1891 u32 seqno = 0;
a71d8d94
CW
1892 int ret;
1893
93b0a4e0
OM
1894 if (ringbuf->last_retired_head != -1) {
1895 ringbuf->head = ringbuf->last_retired_head;
1896 ringbuf->last_retired_head = -1;
1f70999f 1897
82e104cc 1898 ringbuf->space = intel_ring_space(ringbuf);
93b0a4e0 1899 if (ringbuf->space >= n)
a71d8d94
CW
1900 return 0;
1901 }
1902
1903 list_for_each_entry(request, &ring->request_list, list) {
82e104cc
OM
1904 if (__intel_ring_space(request->tail, ringbuf->tail,
1905 ringbuf->size) >= n) {
a71d8d94
CW
1906 seqno = request->seqno;
1907 break;
1908 }
a71d8d94
CW
1909 }
1910
1911 if (seqno == 0)
1912 return -ENOSPC;
1913
1f70999f 1914 ret = i915_wait_seqno(ring, seqno);
a71d8d94
CW
1915 if (ret)
1916 return ret;
1917
1cf0ba14 1918 i915_gem_retire_requests_ring(ring);
93b0a4e0
OM
1919 ringbuf->head = ringbuf->last_retired_head;
1920 ringbuf->last_retired_head = -1;
a71d8d94 1921
82e104cc 1922 ringbuf->space = intel_ring_space(ringbuf);
a71d8d94
CW
1923 return 0;
1924}
1925
a4872ba6 1926static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
62fdfeaf 1927{
78501eac 1928 struct drm_device *dev = ring->dev;
cae5852d 1929 struct drm_i915_private *dev_priv = dev->dev_private;
93b0a4e0 1930 struct intel_ringbuffer *ringbuf = ring->buffer;
78501eac 1931 unsigned long end;
a71d8d94 1932 int ret;
c7dca47b 1933
a71d8d94
CW
1934 ret = intel_ring_wait_request(ring, n);
1935 if (ret != -ENOSPC)
1936 return ret;
1937
09246732
CW
1938 /* force the tail write in case we have been skipping them */
1939 __intel_ring_advance(ring);
1940
63ed2cb2
DV
 1941 /* With GEM the hangcheck timer should kick us out of the loop;
1942 * leaving it early runs the risk of corrupting GEM state (due
1943 * to running on almost untested codepaths). But on resume
1944 * timers don't work yet, so prevent a complete hang in that
1945 * case by choosing an insanely large timeout. */
1946 end = jiffies + 60 * HZ;
e6bfaf85 1947
dcfe0506 1948 trace_i915_ring_wait_begin(ring);
8187a2b7 1949 do {
93b0a4e0 1950 ringbuf->head = I915_READ_HEAD(ring);
82e104cc 1951 ringbuf->space = intel_ring_space(ringbuf);
93b0a4e0 1952 if (ringbuf->space >= n) {
dcfe0506
CW
1953 ret = 0;
1954 break;
62fdfeaf
EA
1955 }
1956
e60a0b10 1957 msleep(1);
d6b2c790 1958
dcfe0506
CW
1959 if (dev_priv->mm.interruptible && signal_pending(current)) {
1960 ret = -ERESTARTSYS;
1961 break;
1962 }
1963
33196ded
DV
1964 ret = i915_gem_check_wedge(&dev_priv->gpu_error,
1965 dev_priv->mm.interruptible);
d6b2c790 1966 if (ret)
dcfe0506
CW
1967 break;
1968
1969 if (time_after(jiffies, end)) {
1970 ret = -EBUSY;
1971 break;
1972 }
1973 } while (1);
db53a302 1974 trace_i915_ring_wait_end(ring);
dcfe0506 1975 return ret;
8187a2b7 1976}
62fdfeaf 1977
a4872ba6 1978static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
3e960501
CW
1979{
1980 uint32_t __iomem *virt;
93b0a4e0
OM
1981 struct intel_ringbuffer *ringbuf = ring->buffer;
1982 int rem = ringbuf->size - ringbuf->tail;
3e960501 1983
93b0a4e0 1984 if (ringbuf->space < rem) {
3e960501
CW
1985 int ret = ring_wait_for_space(ring, rem);
1986 if (ret)
1987 return ret;
1988 }
1989
93b0a4e0 1990 virt = ringbuf->virtual_start + ringbuf->tail;
3e960501
CW
1991 rem /= 4;
1992 while (rem--)
1993 iowrite32(MI_NOOP, virt++);
1994
93b0a4e0 1995 ringbuf->tail = 0;
82e104cc 1996 ringbuf->space = intel_ring_space(ringbuf);
3e960501
CW
1997
1998 return 0;
1999}
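/*
 * Worked example of the wrap above (illustrative numbers, assuming the
 * default 32 * PAGE_SIZE ring set up in intel_init_ring_buffer()): with
 * ringbuf->size == 0x20000 and ringbuf->tail == 0x1f800, rem is 0x800
 * bytes; once that much space is free, 0x800 / 4 == 512 MI_NOOPs are
 * written and the tail wraps back to 0.
 */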
2000
a4872ba6 2001int intel_ring_idle(struct intel_engine_cs *ring)
3e960501
CW
2002{
2003 u32 seqno;
2004 int ret;
2005
2006 /* We need to add any requests required to flush the objects and ring */
1823521d 2007 if (ring->outstanding_lazy_seqno) {
0025c077 2008 ret = i915_add_request(ring, NULL);
3e960501
CW
2009 if (ret)
2010 return ret;
2011 }
2012
2013 /* Wait upon the last request to be completed */
2014 if (list_empty(&ring->request_list))
2015 return 0;
2016
2017 seqno = list_entry(ring->request_list.prev,
2018 struct drm_i915_gem_request,
2019 list)->seqno;
2020
2021 return i915_wait_seqno(ring, seqno);
2022}
2023
9d773091 2024static int
a4872ba6 2025intel_ring_alloc_seqno(struct intel_engine_cs *ring)
9d773091 2026{
9eba5d4a
JH
2027 int ret;
2028 struct drm_i915_gem_request *request;
2029
2030 /* XXX: The aim is to replace seqno values with request structures.
2031 * A step along the way is to switch to using the PLR in preference
2032 * to the OLS. That requires the PLR to only be valid when the OLS
2033 * is also valid. I.e., the two must be kept in step. */
2034
2035 if (ring->outstanding_lazy_seqno) {
2036 WARN_ON(ring->preallocated_lazy_request == NULL);
9d773091 2037 return 0;
9eba5d4a 2038 }
9d773091 2039
9eba5d4a 2040 WARN_ON(ring->preallocated_lazy_request != NULL);
3c0e234c 2041
9eba5d4a
JH
2042 request = kmalloc(sizeof(*request), GFP_KERNEL);
2043 if (request == NULL)
2044 return -ENOMEM;
3c0e234c 2045
9eba5d4a
JH
2046 ret = i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
2047 if (ret) {
2048 kfree(request);
2049 return ret;
3c0e234c
CW
2050 }
2051
9eba5d4a
JH
2052 ring->preallocated_lazy_request = request;
2053 return 0;
9d773091
CW
2054}
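/*
 * A minimal sketch of the invariant intel_ring_alloc_seqno() maintains:
 * the outstanding lazy seqno (OLS) and the preallocated lazy request (PLR)
 * are either both set or both clear.  olr_state_consistent() is a
 * hypothetical helper used only to restate the WARN_ONs above, not a
 * function in the driver.
 */
static inline bool olr_state_consistent(struct intel_engine_cs *ring)
{
	return (ring->outstanding_lazy_seqno != 0) ==
	       (ring->preallocated_lazy_request != NULL);
}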
2055
a4872ba6 2056static int __intel_ring_prepare(struct intel_engine_cs *ring,
304d695c 2057 int bytes)
cbcc80df 2058{
93b0a4e0 2059 struct intel_ringbuffer *ringbuf = ring->buffer;
cbcc80df
MK
2060 int ret;
2061
93b0a4e0 2062 if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
cbcc80df
MK
2063 ret = intel_wrap_ring_buffer(ring);
2064 if (unlikely(ret))
2065 return ret;
2066 }
2067
93b0a4e0 2068 if (unlikely(ringbuf->space < bytes)) {
cbcc80df
MK
2069 ret = ring_wait_for_space(ring, bytes);
2070 if (unlikely(ret))
2071 return ret;
2072 }
2073
cbcc80df
MK
2074 return 0;
2075}
2076
a4872ba6 2077int intel_ring_begin(struct intel_engine_cs *ring,
e1f99ce6 2078 int num_dwords)
8187a2b7 2079{
4640c4ff 2080 struct drm_i915_private *dev_priv = ring->dev->dev_private;
e1f99ce6 2081 int ret;
78501eac 2082
33196ded
DV
2083 ret = i915_gem_check_wedge(&dev_priv->gpu_error,
2084 dev_priv->mm.interruptible);
de2b9985
DV
2085 if (ret)
2086 return ret;
21dd3734 2087
304d695c
CW
2088 ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
2089 if (ret)
2090 return ret;
2091
9d773091
CW
2092 /* Preallocate the olr before touching the ring */
2093 ret = intel_ring_alloc_seqno(ring);
2094 if (ret)
2095 return ret;
2096
ee1b1e5e 2097 ring->buffer->space -= num_dwords * sizeof(uint32_t);
304d695c 2098 return 0;
8187a2b7 2099}
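/*
 * A minimal usage sketch of the intel_ring_begin()/intel_ring_emit()/
 * intel_ring_advance() pattern, assuming the declarations from
 * intel_ringbuffer.h.  emit_two_noops() is a hypothetical example rather
 * than driver code: reserve the dwords first, emit exactly that many, then
 * advance the tail.
 */
static int emit_two_noops(struct intel_engine_cs *ring)
{
	int ret;

	ret = intel_ring_begin(ring, 2);	/* reserve 2 dwords */
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);		/* commit the new tail */

	return 0;
}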
78501eac 2100
753b1ad4 2101/* Align the ring tail to a cacheline boundary */
a4872ba6 2102int intel_ring_cacheline_align(struct intel_engine_cs *ring)
753b1ad4 2103{
ee1b1e5e 2104 int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
753b1ad4
VS
2105 int ret;
2106
2107 if (num_dwords == 0)
2108 return 0;
2109
18393f63 2110 num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
753b1ad4
VS
2111 ret = intel_ring_begin(ring, num_dwords);
2112 if (ret)
2113 return ret;
2114
2115 while (num_dwords--)
2116 intel_ring_emit(ring, MI_NOOP);
2117
2118 intel_ring_advance(ring);
2119
2120 return 0;
2121}
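/*
 * Worked example of the padding arithmetic above, assuming
 * CACHELINE_BYTES == 64 (i.e. 16 dwords per cacheline): with the tail at
 * byte 0x1234, (0x1234 & 63) / 4 == 13 dwords of the current cacheline are
 * used, so 16 - 13 == 3 MI_NOOPs are emitted and the tail lands on the
 * aligned offset 0x1240.
 */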
2122
a4872ba6 2123void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
498d2ac1 2124{
3b2cc8ab
OM
2125 struct drm_device *dev = ring->dev;
2126 struct drm_i915_private *dev_priv = dev->dev_private;
498d2ac1 2127
1823521d 2128 BUG_ON(ring->outstanding_lazy_seqno);
498d2ac1 2129
3b2cc8ab 2130 if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
f7e98ad4
MK
2131 I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
2132 I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
3b2cc8ab 2133 if (HAS_VEBOX(dev))
5020150b 2134 I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
e1f99ce6 2135 }
d97ed339 2136
f7e98ad4 2137 ring->set_seqno(ring, seqno);
92cab734 2138 ring->hangcheck.seqno = seqno;
8187a2b7 2139}
62fdfeaf 2140
a4872ba6 2141static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
297b0c5b 2142 u32 value)
881f47b6 2143{
4640c4ff 2144 struct drm_i915_private *dev_priv = ring->dev->dev_private;
881f47b6
XH
2145
2146 /* Every tail move must follow the sequence below */
12f55818
CW
2147
2148 /* Disable notification that the ring is IDLE. The GT
2149 * will then assume that it is busy and bring it out of rc6.
2150 */
0206e353 2151 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
12f55818
CW
2152 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
2153
2154 /* Clear the context id. Here be magic! */
2155 I915_WRITE64(GEN6_BSD_RNCID, 0x0);
0206e353 2156
12f55818 2157 /* Wait for the ring not to be idle, i.e. for it to wake up. */
0206e353 2158 if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
12f55818
CW
2159 GEN6_BSD_SLEEP_INDICATOR) == 0,
2160 50))
2161 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
0206e353 2162
12f55818 2163 /* Now that the ring is fully powered up, update the tail */
0206e353 2164 I915_WRITE_TAIL(ring, value);
12f55818
CW
2165 POSTING_READ(RING_TAIL(ring->mmio_base));
2166
2167 /* Let the ring send IDLE messages to the GT again,
2168 * and so let it sleep to conserve power when idle.
2169 */
0206e353 2170 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
12f55818 2171 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
881f47b6
XH
2172}
2173
a4872ba6 2174static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
ea251324 2175 u32 invalidate, u32 flush)
881f47b6 2176{
71a77e07 2177 uint32_t cmd;
b72f3acb
CW
2178 int ret;
2179
b72f3acb
CW
2180 ret = intel_ring_begin(ring, 4);
2181 if (ret)
2182 return ret;
2183
71a77e07 2184 cmd = MI_FLUSH_DW;
075b3bba
BW
2185 if (INTEL_INFO(ring->dev)->gen >= 8)
2186 cmd += 1;
9a289771
JB
2187 /*
2188 * Bspec vol 1c.5 - video engine command streamer:
2189 * "If ENABLED, all TLBs will be invalidated once the flush
2190 * operation is complete. This bit is only valid when the
2191 * Post-Sync Operation field is a value of 1h or 3h."
2192 */
71a77e07 2193 if (invalidate & I915_GEM_GPU_DOMAINS)
9a289771
JB
2194 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
2195 MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
71a77e07 2196 intel_ring_emit(ring, cmd);
9a289771 2197 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
075b3bba
BW
2198 if (INTEL_INFO(ring->dev)->gen >= 8) {
2199 intel_ring_emit(ring, 0); /* upper addr */
2200 intel_ring_emit(ring, 0); /* value */
2201 } else {
2202 intel_ring_emit(ring, 0);
2203 intel_ring_emit(ring, MI_NOOP);
2204 }
b72f3acb
CW
2205 intel_ring_advance(ring);
2206 return 0;
881f47b6
XH
2207}
2208
1c7a0623 2209static int
a4872ba6 2210gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
9bcb144c 2211 u64 offset, u32 len,
1c7a0623
BW
2212 unsigned flags)
2213{
896ab1a5 2214 bool ppgtt = USES_PPGTT(ring->dev) && !(flags & I915_DISPATCH_SECURE);
1c7a0623
BW
2215 int ret;
2216
2217 ret = intel_ring_begin(ring, 4);
2218 if (ret)
2219 return ret;
2220
2221 /* FIXME(BDW): Address space and security selectors. */
28cf5415 2222 intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
9bcb144c
BW
2223 intel_ring_emit(ring, lower_32_bits(offset));
2224 intel_ring_emit(ring, upper_32_bits(offset));
1c7a0623
BW
2225 intel_ring_emit(ring, MI_NOOP);
2226 intel_ring_advance(ring);
2227
2228 return 0;
2229}
2230
d7d4eedd 2231static int
a4872ba6 2232hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
9bcb144c 2233 u64 offset, u32 len,
d7d4eedd
CW
2234 unsigned flags)
2235{
2236 int ret;
2237
2238 ret = intel_ring_begin(ring, 2);
2239 if (ret)
2240 return ret;
2241
2242 intel_ring_emit(ring,
77072258
CW
2243 MI_BATCH_BUFFER_START |
2244 (flags & I915_DISPATCH_SECURE ?
2245 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
d7d4eedd
CW
2246 /* bit0-7 is the length on GEN6+ */
2247 intel_ring_emit(ring, offset);
2248 intel_ring_advance(ring);
2249
2250 return 0;
2251}
2252
881f47b6 2253static int
a4872ba6 2254gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
9bcb144c 2255 u64 offset, u32 len,
d7d4eedd 2256 unsigned flags)
881f47b6 2257{
0206e353 2258 int ret;
ab6f8e32 2259
0206e353
AJ
2260 ret = intel_ring_begin(ring, 2);
2261 if (ret)
2262 return ret;
e1f99ce6 2263
d7d4eedd
CW
2264 intel_ring_emit(ring,
2265 MI_BATCH_BUFFER_START |
2266 (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
0206e353
AJ
2267 /* bit0-7 is the length on GEN6+ */
2268 intel_ring_emit(ring, offset);
2269 intel_ring_advance(ring);
ab6f8e32 2270
0206e353 2271 return 0;
881f47b6
XH
2272}
2273
549f7365
CW
2274/* Blitter support (SandyBridge+) */
2275
a4872ba6 2276static int gen6_ring_flush(struct intel_engine_cs *ring,
ea251324 2277 u32 invalidate, u32 flush)
8d19215b 2278{
fd3da6c9 2279 struct drm_device *dev = ring->dev;
1d73c2a8 2280 struct drm_i915_private *dev_priv = dev->dev_private;
71a77e07 2281 uint32_t cmd;
b72f3acb
CW
2282 int ret;
2283
6a233c78 2284 ret = intel_ring_begin(ring, 4);
b72f3acb
CW
2285 if (ret)
2286 return ret;
2287
71a77e07 2288 cmd = MI_FLUSH_DW;
075b3bba
BW
2289 if (INTEL_INFO(ring->dev)->gen >= 8)
2290 cmd += 1;
9a289771
JB
2291 /*
2292 * Bspec vol 1c.3 - blitter engine command streamer:
2293 * "If ENABLED, all TLBs will be invalidated once the flush
2294 * operation is complete. This bit is only valid when the
2295 * Post-Sync Operation field is a value of 1h or 3h."
2296 */
71a77e07 2297 if (invalidate & I915_GEM_DOMAIN_RENDER)
9a289771 2298 cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
b3fcabb1 2299 MI_FLUSH_DW_OP_STOREDW;
71a77e07 2300 intel_ring_emit(ring, cmd);
9a289771 2301 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
075b3bba
BW
2302 if (INTEL_INFO(ring->dev)->gen >= 8) {
2303 intel_ring_emit(ring, 0); /* upper addr */
2304 intel_ring_emit(ring, 0); /* value */
2305 } else {
2306 intel_ring_emit(ring, 0);
2307 intel_ring_emit(ring, MI_NOOP);
2308 }
b72f3acb 2309 intel_ring_advance(ring);
fd3da6c9 2310
1d73c2a8
RV
2311 if (!invalidate && flush) {
2312 if (IS_GEN7(dev))
2313 return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
2314 else if (IS_BROADWELL(dev))
2315 dev_priv->fbc.need_sw_cache_clean = true;
2316 }
fd3da6c9 2317
b72f3acb 2318 return 0;
8d19215b
ZN
2319}
2320
5c1143bb
XH
2321int intel_init_render_ring_buffer(struct drm_device *dev)
2322{
4640c4ff 2323 struct drm_i915_private *dev_priv = dev->dev_private;
a4872ba6 2324 struct intel_engine_cs *ring = &dev_priv->ring[RCS];
3e78998a
BW
2325 struct drm_i915_gem_object *obj;
2326 int ret;
5c1143bb 2327
59465b5f
DV
2328 ring->name = "render ring";
2329 ring->id = RCS;
2330 ring->mmio_base = RENDER_RING_BASE;
2331
707d9cf9 2332 if (INTEL_INFO(dev)->gen >= 8) {
3e78998a
BW
2333 if (i915_semaphore_is_enabled(dev)) {
2334 obj = i915_gem_alloc_object(dev, 4096);
2335 if (obj == NULL) {
2336 DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
2337 i915.semaphores = 0;
2338 } else {
2339 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
2340 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
2341 if (ret != 0) {
2342 drm_gem_object_unreference(&obj->base);
2343 DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
2344 i915.semaphores = 0;
2345 } else
2346 dev_priv->semaphore_obj = obj;
2347 }
2348 }
7225342a
MK
2349
2350 ring->init_context = intel_ring_workarounds_emit;
707d9cf9
BW
2351 ring->add_request = gen6_add_request;
2352 ring->flush = gen8_render_ring_flush;
2353 ring->irq_get = gen8_ring_get_irq;
2354 ring->irq_put = gen8_ring_put_irq;
2355 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
2356 ring->get_seqno = gen6_ring_get_seqno;
2357 ring->set_seqno = ring_set_seqno;
2358 if (i915_semaphore_is_enabled(dev)) {
3e78998a 2359 WARN_ON(!dev_priv->semaphore_obj);
5ee426ca 2360 ring->semaphore.sync_to = gen8_ring_sync;
3e78998a
BW
2361 ring->semaphore.signal = gen8_rcs_signal;
2362 GEN8_RING_SEMAPHORE_INIT;
707d9cf9
BW
2363 }
2364 } else if (INTEL_INFO(dev)->gen >= 6) {
1ec14ad3 2365 ring->add_request = gen6_add_request;
4772eaeb 2366 ring->flush = gen7_render_ring_flush;
6c6cf5aa 2367 if (INTEL_INFO(dev)->gen == 6)
b3111509 2368 ring->flush = gen6_render_ring_flush;
707d9cf9
BW
2369 ring->irq_get = gen6_ring_get_irq;
2370 ring->irq_put = gen6_ring_put_irq;
cc609d5d 2371 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
4cd53c0c 2372 ring->get_seqno = gen6_ring_get_seqno;
b70ec5bf 2373 ring->set_seqno = ring_set_seqno;
707d9cf9
BW
2374 if (i915_semaphore_is_enabled(dev)) {
2375 ring->semaphore.sync_to = gen6_ring_sync;
2376 ring->semaphore.signal = gen6_signal;
2377 /*
2378 * The current semaphore is only applied on pre-gen8
2379 * platform. And there is no VCS2 ring on the pre-gen8
2380 * platform. So the semaphore between RCS and VCS2 is
2381 * initialized as INVALID. Gen8 will initialize the
2382 * sema between VCS2 and RCS later.
2383 */
2384 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
2385 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
2386 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
2387 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
2388 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2389 ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
2390 ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
2391 ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
2392 ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
2393 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2394 }
c6df541c
CW
2395 } else if (IS_GEN5(dev)) {
2396 ring->add_request = pc_render_add_request;
46f0f8d1 2397 ring->flush = gen4_render_ring_flush;
c6df541c 2398 ring->get_seqno = pc_render_get_seqno;
b70ec5bf 2399 ring->set_seqno = pc_render_set_seqno;
e48d8634
DV
2400 ring->irq_get = gen5_ring_get_irq;
2401 ring->irq_put = gen5_ring_put_irq;
cc609d5d
BW
2402 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
2403 GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
59465b5f 2404 } else {
8620a3a9 2405 ring->add_request = i9xx_add_request;
46f0f8d1
CW
2406 if (INTEL_INFO(dev)->gen < 4)
2407 ring->flush = gen2_render_ring_flush;
2408 else
2409 ring->flush = gen4_render_ring_flush;
59465b5f 2410 ring->get_seqno = ring_get_seqno;
b70ec5bf 2411 ring->set_seqno = ring_set_seqno;
c2798b19
CW
2412 if (IS_GEN2(dev)) {
2413 ring->irq_get = i8xx_ring_get_irq;
2414 ring->irq_put = i8xx_ring_put_irq;
2415 } else {
2416 ring->irq_get = i9xx_ring_get_irq;
2417 ring->irq_put = i9xx_ring_put_irq;
2418 }
e3670319 2419 ring->irq_enable_mask = I915_USER_INTERRUPT;
1ec14ad3 2420 }
59465b5f 2421 ring->write_tail = ring_write_tail;
707d9cf9 2422
d7d4eedd
CW
2423 if (IS_HASWELL(dev))
2424 ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
1c7a0623
BW
2425 else if (IS_GEN8(dev))
2426 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
d7d4eedd 2427 else if (INTEL_INFO(dev)->gen >= 6)
fb3256da
DV
2428 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2429 else if (INTEL_INFO(dev)->gen >= 4)
2430 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
2431 else if (IS_I830(dev) || IS_845G(dev))
2432 ring->dispatch_execbuffer = i830_dispatch_execbuffer;
2433 else
2434 ring->dispatch_execbuffer = i915_dispatch_execbuffer;
59465b5f
DV
2435 ring->init = init_render_ring;
2436 ring->cleanup = render_ring_cleanup;
2437
b45305fc
DV
2438 /* Workaround batchbuffer to combat CS tlb bug. */
2439 if (HAS_BROKEN_CS_TLB(dev)) {
c4d69da1 2440 obj = i915_gem_alloc_object(dev, I830_WA_SIZE);
b45305fc
DV
2441 if (obj == NULL) {
2442 DRM_ERROR("Failed to allocate batch bo\n");
2443 return -ENOMEM;
2444 }
2445
be1fa129 2446 ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
b45305fc
DV
2447 if (ret != 0) {
2448 drm_gem_object_unreference(&obj->base);
2449 DRM_ERROR("Failed to ping batch bo\n");
2450 return ret;
2451 }
2452
0d1aacac
CW
2453 ring->scratch.obj = obj;
2454 ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
b45305fc
DV
2455 }
2456
1ec14ad3 2457 return intel_init_ring_buffer(dev, ring);
5c1143bb
XH
2458}
2459
2460int intel_init_bsd_ring_buffer(struct drm_device *dev)
2461{
4640c4ff 2462 struct drm_i915_private *dev_priv = dev->dev_private;
a4872ba6 2463 struct intel_engine_cs *ring = &dev_priv->ring[VCS];
5c1143bb 2464
58fa3835
DV
2465 ring->name = "bsd ring";
2466 ring->id = VCS;
2467
0fd2c201 2468 ring->write_tail = ring_write_tail;
780f18c8 2469 if (INTEL_INFO(dev)->gen >= 6) {
58fa3835 2470 ring->mmio_base = GEN6_BSD_RING_BASE;
0fd2c201
DV
2471 /* gen6 bsd needs a special wa for tail updates */
2472 if (IS_GEN6(dev))
2473 ring->write_tail = gen6_bsd_ring_write_tail;
ea251324 2474 ring->flush = gen6_bsd_ring_flush;
58fa3835
DV
2475 ring->add_request = gen6_add_request;
2476 ring->get_seqno = gen6_ring_get_seqno;
b70ec5bf 2477 ring->set_seqno = ring_set_seqno;
abd58f01
BW
2478 if (INTEL_INFO(dev)->gen >= 8) {
2479 ring->irq_enable_mask =
2480 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
2481 ring->irq_get = gen8_ring_get_irq;
2482 ring->irq_put = gen8_ring_put_irq;
1c7a0623
BW
2483 ring->dispatch_execbuffer =
2484 gen8_ring_dispatch_execbuffer;
707d9cf9 2485 if (i915_semaphore_is_enabled(dev)) {
5ee426ca 2486 ring->semaphore.sync_to = gen8_ring_sync;
3e78998a
BW
2487 ring->semaphore.signal = gen8_xcs_signal;
2488 GEN8_RING_SEMAPHORE_INIT;
707d9cf9 2489 }
abd58f01
BW
2490 } else {
2491 ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
2492 ring->irq_get = gen6_ring_get_irq;
2493 ring->irq_put = gen6_ring_put_irq;
1c7a0623
BW
2494 ring->dispatch_execbuffer =
2495 gen6_ring_dispatch_execbuffer;
707d9cf9
BW
2496 if (i915_semaphore_is_enabled(dev)) {
2497 ring->semaphore.sync_to = gen6_ring_sync;
2498 ring->semaphore.signal = gen6_signal;
2499 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
2500 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
2501 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
2502 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
2503 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2504 ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
2505 ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
2506 ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
2507 ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
2508 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2509 }
abd58f01 2510 }
58fa3835
DV
2511 } else {
2512 ring->mmio_base = BSD_RING_BASE;
58fa3835 2513 ring->flush = bsd_ring_flush;
8620a3a9 2514 ring->add_request = i9xx_add_request;
58fa3835 2515 ring->get_seqno = ring_get_seqno;
b70ec5bf 2516 ring->set_seqno = ring_set_seqno;
e48d8634 2517 if (IS_GEN5(dev)) {
cc609d5d 2518 ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
e48d8634
DV
2519 ring->irq_get = gen5_ring_get_irq;
2520 ring->irq_put = gen5_ring_put_irq;
2521 } else {
e3670319 2522 ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
e48d8634
DV
2523 ring->irq_get = i9xx_ring_get_irq;
2524 ring->irq_put = i9xx_ring_put_irq;
2525 }
fb3256da 2526 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
58fa3835
DV
2527 }
2528 ring->init = init_ring_common;
2529
1ec14ad3 2530 return intel_init_ring_buffer(dev, ring);
5c1143bb 2531}
549f7365 2532
845f74a7
ZY
2533/**
2534 * Initialize the second BSD ring for Broadwell GT3.
 2535 * Note that this ring only exists on Broadwell GT3.
2536 */
2537int intel_init_bsd2_ring_buffer(struct drm_device *dev)
2538{
2539 struct drm_i915_private *dev_priv = dev->dev_private;
a4872ba6 2540 struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
845f74a7
ZY
2541
2542 if ((INTEL_INFO(dev)->gen != 8)) {
2543 DRM_ERROR("No dual-BSD ring on non-BDW machine\n");
2544 return -EINVAL;
2545 }
2546
f7b64236 2547 ring->name = "bsd2 ring";
845f74a7
ZY
2548 ring->id = VCS2;
2549
2550 ring->write_tail = ring_write_tail;
2551 ring->mmio_base = GEN8_BSD2_RING_BASE;
2552 ring->flush = gen6_bsd_ring_flush;
2553 ring->add_request = gen6_add_request;
2554 ring->get_seqno = gen6_ring_get_seqno;
2555 ring->set_seqno = ring_set_seqno;
2556 ring->irq_enable_mask =
2557 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
2558 ring->irq_get = gen8_ring_get_irq;
2559 ring->irq_put = gen8_ring_put_irq;
2560 ring->dispatch_execbuffer =
2561 gen8_ring_dispatch_execbuffer;
3e78998a 2562 if (i915_semaphore_is_enabled(dev)) {
5ee426ca 2563 ring->semaphore.sync_to = gen8_ring_sync;
3e78998a
BW
2564 ring->semaphore.signal = gen8_xcs_signal;
2565 GEN8_RING_SEMAPHORE_INIT;
2566 }
845f74a7
ZY
2567 ring->init = init_ring_common;
2568
2569 return intel_init_ring_buffer(dev, ring);
2570}
2571
549f7365
CW
2572int intel_init_blt_ring_buffer(struct drm_device *dev)
2573{
4640c4ff 2574 struct drm_i915_private *dev_priv = dev->dev_private;
a4872ba6 2575 struct intel_engine_cs *ring = &dev_priv->ring[BCS];
549f7365 2576
3535d9dd
DV
2577 ring->name = "blitter ring";
2578 ring->id = BCS;
2579
2580 ring->mmio_base = BLT_RING_BASE;
2581 ring->write_tail = ring_write_tail;
ea251324 2582 ring->flush = gen6_ring_flush;
3535d9dd
DV
2583 ring->add_request = gen6_add_request;
2584 ring->get_seqno = gen6_ring_get_seqno;
b70ec5bf 2585 ring->set_seqno = ring_set_seqno;
abd58f01
BW
2586 if (INTEL_INFO(dev)->gen >= 8) {
2587 ring->irq_enable_mask =
2588 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
2589 ring->irq_get = gen8_ring_get_irq;
2590 ring->irq_put = gen8_ring_put_irq;
1c7a0623 2591 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
707d9cf9 2592 if (i915_semaphore_is_enabled(dev)) {
5ee426ca 2593 ring->semaphore.sync_to = gen8_ring_sync;
3e78998a
BW
2594 ring->semaphore.signal = gen8_xcs_signal;
2595 GEN8_RING_SEMAPHORE_INIT;
707d9cf9 2596 }
abd58f01
BW
2597 } else {
2598 ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
2599 ring->irq_get = gen6_ring_get_irq;
2600 ring->irq_put = gen6_ring_put_irq;
1c7a0623 2601 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
707d9cf9
BW
2602 if (i915_semaphore_is_enabled(dev)) {
2603 ring->semaphore.signal = gen6_signal;
2604 ring->semaphore.sync_to = gen6_ring_sync;
2605 /*
2606 * The current semaphore is only applied on pre-gen8
2607 * platform. And there is no VCS2 ring on the pre-gen8
2608 * platform. So the semaphore between BCS and VCS2 is
2609 * initialized as INVALID. Gen8 will initialize the
2610 * sema between BCS and VCS2 later.
2611 */
2612 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
2613 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
2614 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
2615 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
2616 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2617 ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
2618 ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
2619 ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
2620 ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
2621 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2622 }
abd58f01 2623 }
3535d9dd 2624 ring->init = init_ring_common;
549f7365 2625
1ec14ad3 2626 return intel_init_ring_buffer(dev, ring);
549f7365 2627}
a7b9761d 2628
9a8a2213
BW
2629int intel_init_vebox_ring_buffer(struct drm_device *dev)
2630{
4640c4ff 2631 struct drm_i915_private *dev_priv = dev->dev_private;
a4872ba6 2632 struct intel_engine_cs *ring = &dev_priv->ring[VECS];
9a8a2213
BW
2633
2634 ring->name = "video enhancement ring";
2635 ring->id = VECS;
2636
2637 ring->mmio_base = VEBOX_RING_BASE;
2638 ring->write_tail = ring_write_tail;
2639 ring->flush = gen6_ring_flush;
2640 ring->add_request = gen6_add_request;
2641 ring->get_seqno = gen6_ring_get_seqno;
2642 ring->set_seqno = ring_set_seqno;
abd58f01
BW
2643
2644 if (INTEL_INFO(dev)->gen >= 8) {
2645 ring->irq_enable_mask =
40c499f9 2646 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
abd58f01
BW
2647 ring->irq_get = gen8_ring_get_irq;
2648 ring->irq_put = gen8_ring_put_irq;
1c7a0623 2649 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
707d9cf9 2650 if (i915_semaphore_is_enabled(dev)) {
5ee426ca 2651 ring->semaphore.sync_to = gen8_ring_sync;
3e78998a
BW
2652 ring->semaphore.signal = gen8_xcs_signal;
2653 GEN8_RING_SEMAPHORE_INIT;
707d9cf9 2654 }
abd58f01
BW
2655 } else {
2656 ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
2657 ring->irq_get = hsw_vebox_get_irq;
2658 ring->irq_put = hsw_vebox_put_irq;
1c7a0623 2659 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
707d9cf9
BW
2660 if (i915_semaphore_is_enabled(dev)) {
2661 ring->semaphore.sync_to = gen6_ring_sync;
2662 ring->semaphore.signal = gen6_signal;
2663 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
2664 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
2665 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
2666 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
2667 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2668 ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
2669 ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
2670 ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
2671 ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
2672 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2673 }
abd58f01 2674 }
9a8a2213
BW
2675 ring->init = init_ring_common;
2676
2677 return intel_init_ring_buffer(dev, ring);
2678}
2679
a7b9761d 2680int
a4872ba6 2681intel_ring_flush_all_caches(struct intel_engine_cs *ring)
a7b9761d
CW
2682{
2683 int ret;
2684
2685 if (!ring->gpu_caches_dirty)
2686 return 0;
2687
2688 ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
2689 if (ret)
2690 return ret;
2691
2692 trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
2693
2694 ring->gpu_caches_dirty = false;
2695 return 0;
2696}
2697
2698int
a4872ba6 2699intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
a7b9761d
CW
2700{
2701 uint32_t flush_domains;
2702 int ret;
2703
2704 flush_domains = 0;
2705 if (ring->gpu_caches_dirty)
2706 flush_domains = I915_GEM_GPU_DOMAINS;
2707
2708 ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
2709 if (ret)
2710 return ret;
2711
2712 trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
2713
2714 ring->gpu_caches_dirty = false;
2715 return 0;
2716}
e3efda49
CW
2717
2718void
a4872ba6 2719intel_stop_ring_buffer(struct intel_engine_cs *ring)
e3efda49
CW
2720{
2721 int ret;
2722
2723 if (!intel_ring_initialized(ring))
2724 return;
2725
2726 ret = intel_ring_idle(ring);
2727 if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
2728 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
2729 ring->name, ret);
2730
2731 stop_ring(ring);
2732}