drm/i915/bdw: Help out the ctx switch interrupt handler
drivers/gpu/drm/i915/intel_lrc.c
1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Ben Widawsky <ben@bwidawsk.net>
25 * Michel Thierry <michel.thierry@intel.com>
26 * Thomas Daniel <thomas.daniel@intel.com>
27 * Oscar Mateo <oscar.mateo@intel.com>
28 *
29 */
30
31 /*
32 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
33 * These expanded contexts enable a number of new abilities, especially
34 * "Execlists" (also implemented in this file).
35 *
36 * Execlists are the new method by which, on gen8+ hardware, workloads are
37 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
38 */
39
40 #include <drm/drmP.h>
41 #include <drm/i915_drm.h>
42 #include "i915_drv.h"
43
44 #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
45 #define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
46
47 #define GEN8_LR_CONTEXT_ALIGN 4096
48
49 #define RING_ELSP(ring) ((ring)->mmio_base+0x230)
50 #define RING_EXECLIST_STATUS(ring) ((ring)->mmio_base+0x234)
51 #define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244)
52 #define RING_CONTEXT_STATUS_BUF(ring) ((ring)->mmio_base+0x370)
53 #define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0)
54
55 #define RING_EXECLIST_QFULL (1 << 0x2)
56 #define RING_EXECLIST1_VALID (1 << 0x3)
57 #define RING_EXECLIST0_VALID (1 << 0x4)
58 #define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE)
59 #define RING_EXECLIST1_ACTIVE (1 << 0x11)
60 #define RING_EXECLIST0_ACTIVE (1 << 0x12)
61
62 #define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0)
63 #define GEN8_CTX_STATUS_PREEMPTED (1 << 1)
64 #define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2)
65 #define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3)
66 #define GEN8_CTX_STATUS_COMPLETE (1 << 4)
67 #define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15)
68
69 #define CTX_LRI_HEADER_0 0x01
70 #define CTX_CONTEXT_CONTROL 0x02
71 #define CTX_RING_HEAD 0x04
72 #define CTX_RING_TAIL 0x06
73 #define CTX_RING_BUFFER_START 0x08
74 #define CTX_RING_BUFFER_CONTROL 0x0a
75 #define CTX_BB_HEAD_U 0x0c
76 #define CTX_BB_HEAD_L 0x0e
77 #define CTX_BB_STATE 0x10
78 #define CTX_SECOND_BB_HEAD_U 0x12
79 #define CTX_SECOND_BB_HEAD_L 0x14
80 #define CTX_SECOND_BB_STATE 0x16
81 #define CTX_BB_PER_CTX_PTR 0x18
82 #define CTX_RCS_INDIRECT_CTX 0x1a
83 #define CTX_RCS_INDIRECT_CTX_OFFSET 0x1c
84 #define CTX_LRI_HEADER_1 0x21
85 #define CTX_CTX_TIMESTAMP 0x22
86 #define CTX_PDP3_UDW 0x24
87 #define CTX_PDP3_LDW 0x26
88 #define CTX_PDP2_UDW 0x28
89 #define CTX_PDP2_LDW 0x2a
90 #define CTX_PDP1_UDW 0x2c
91 #define CTX_PDP1_LDW 0x2e
92 #define CTX_PDP0_UDW 0x30
93 #define CTX_PDP0_LDW 0x32
94 #define CTX_LRI_HEADER_2 0x41
95 #define CTX_R_PWR_CLK_STATE 0x42
96 #define CTX_GPGPU_CSR_BASE_ADDRESS 0x44
97
98 #define GEN8_CTX_VALID (1<<0)
99 #define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
100 #define GEN8_CTX_FORCE_RESTORE (1<<2)
101 #define GEN8_CTX_L3LLC_COHERENT (1<<5)
102 #define GEN8_CTX_PRIVILEGE (1<<8)
103 enum {
104 ADVANCED_CONTEXT = 0,
105 LEGACY_CONTEXT,
106 ADVANCED_AD_CONTEXT,
107 LEGACY_64B_CONTEXT
108 };
109 #define GEN8_CTX_MODE_SHIFT 3
110 enum {
111 FAULT_AND_HANG = 0,
112 FAULT_AND_HALT, /* Debug only */
113 FAULT_AND_STREAM,
114 FAULT_AND_CONTINUE /* Unsupported */
115 };
116 #define GEN8_CTX_ID_SHIFT 32
117
118 int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
119 {
120 WARN_ON(i915.enable_ppgtt == -1);
121
122 if (enable_execlists == 0)
123 return 0;
124
125 if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) &&
126 i915.use_mmio_flip >= 0)
127 return 1;
128
129 return 0;
130 }
131
132 u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
133 {
134 u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj);
135
136 	/* LRCA is required to be 4K aligned so the 20 most significant bits
137 * are globally unique */
138 return lrca >> 12;
139 }
140
141 static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_object *ctx_obj)
142 {
143 uint64_t desc;
144 uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj);
145
146 WARN_ON(lrca & 0xFFFFFFFF00000FFFULL);
147
148 desc = GEN8_CTX_VALID;
149 desc |= LEGACY_CONTEXT << GEN8_CTX_MODE_SHIFT;
150 desc |= GEN8_CTX_L3LLC_COHERENT;
151 desc |= GEN8_CTX_PRIVILEGE;
152 desc |= lrca;
153 desc |= (u64)intel_execlists_ctx_id(ctx_obj) << GEN8_CTX_ID_SHIFT;
154
155 /* TODO: WaDisableLiteRestore when we start using semaphore
156 * signalling between Command Streamers */
157 /* desc |= GEN8_CTX_FORCE_RESTORE; */
158
159 return desc;
160 }
161
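/*
 * Illustrative sketch, not driver code: how the 64-bit descriptor built by
 * execlists_ctx_descriptor() above is assembled for a hypothetical 4K-aligned
 * LRCA. Plain user-space C with no kernel dependencies; the bit positions
 * mirror the GEN8_CTX_* defines and GEN8_CTX_ID_SHIFT at the top of this file.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t lrca = 0x00234000ULL;	/* hypothetical GGTT offset, 4K aligned */
	uint64_t desc = 1ULL << 0;	/* GEN8_CTX_VALID */

	desc |= 1ULL << 3;		/* LEGACY_CONTEXT << GEN8_CTX_MODE_SHIFT */
	desc |= 1ULL << 5;		/* GEN8_CTX_L3LLC_COHERENT */
	desc |= 1ULL << 8;		/* GEN8_CTX_PRIVILEGE */
	desc |= lrca;			/* bits 31:12 carry the aligned address */
	desc |= (lrca >> 12) << 32;	/* context ID (LRCA >> 12) in the upper dword */

	printf("descriptor = 0x%016llx\n", (unsigned long long)desc);
	return 0;
}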
162 static void execlists_elsp_write(struct intel_engine_cs *ring,
163 struct drm_i915_gem_object *ctx_obj0,
164 struct drm_i915_gem_object *ctx_obj1)
165 {
166 struct drm_i915_private *dev_priv = ring->dev->dev_private;
167 uint64_t temp = 0;
168 uint32_t desc[4];
169 unsigned long flags;
170
171 /* XXX: You must always write both descriptors in the order below. */
172 if (ctx_obj1)
173 temp = execlists_ctx_descriptor(ctx_obj1);
174 else
175 temp = 0;
176 desc[1] = (u32)(temp >> 32);
177 desc[0] = (u32)temp;
178
179 temp = execlists_ctx_descriptor(ctx_obj0);
180 desc[3] = (u32)(temp >> 32);
181 desc[2] = (u32)temp;
182
183 /* Set Force Wakeup bit to prevent GT from entering C6 while ELSP writes
184 * are in progress.
185 *
186 	 * A further complication is that we can't simply call gen6_gt_force_wake_get()
187 * because that function calls intel_runtime_pm_get(), which might sleep.
188 * Instead, we do the runtime_pm_get/put when creating/destroying requests.
189 */
190 spin_lock_irqsave(&dev_priv->uncore.lock, flags);
191 if (dev_priv->uncore.forcewake_count++ == 0)
192 dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
193 spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
194
195 I915_WRITE(RING_ELSP(ring), desc[1]);
196 I915_WRITE(RING_ELSP(ring), desc[0]);
197 I915_WRITE(RING_ELSP(ring), desc[3]);
198 /* The context is automatically loaded after the following */
199 I915_WRITE(RING_ELSP(ring), desc[2]);
200
201 	/* ELSP is a write-only register, so use another nearby reg for posting instead */
202 POSTING_READ(RING_EXECLIST_STATUS(ring));
203
204 /* Release Force Wakeup (see the big comment above). */
205 spin_lock_irqsave(&dev_priv->uncore.lock, flags);
206 if (--dev_priv->uncore.forcewake_count == 0)
207 dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
208 spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
209 }
210
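/*
 * Illustrative sketch, not driver code: the dword ordering used by
 * execlists_elsp_write() above. ELSP takes four 32-bit writes, element 1
 * first (upper dword, then lower) and element 0 last; the final lower-dword
 * write of element 0 is what triggers the submission. The descriptor value
 * below is made up (it matches the layout sketched earlier in this file).
 */
#include <stdint.h>
#include <stdio.h>

static void show_elsp_order(uint64_t desc0, uint64_t desc1)
{
	uint32_t writes[4] = {
		(uint32_t)(desc1 >> 32),	/* element 1, upper dword */
		(uint32_t)desc1,		/* element 1, lower dword */
		(uint32_t)(desc0 >> 32),	/* element 0, upper dword */
		(uint32_t)desc0,		/* element 0, lower dword: submits */
	};
	int i;

	for (i = 0; i < 4; i++)
		printf("ELSP write %d: 0x%08x\n", i, writes[i]);
}

int main(void)
{
	/* one real context in slot 0, second slot left empty (all zeroes) */
	show_elsp_order(0x0000023400234129ULL, 0ULL);
	return 0;
}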
211 static int execlists_ctx_write_tail(struct drm_i915_gem_object *ctx_obj, u32 tail)
212 {
213 struct page *page;
214 uint32_t *reg_state;
215
216 page = i915_gem_object_get_page(ctx_obj, 1);
217 reg_state = kmap_atomic(page);
218
219 reg_state[CTX_RING_TAIL+1] = tail;
220
221 kunmap_atomic(reg_state);
222
223 return 0;
224 }
225
226 static int execlists_submit_context(struct intel_engine_cs *ring,
227 struct intel_context *to0, u32 tail0,
228 struct intel_context *to1, u32 tail1)
229 {
230 struct drm_i915_gem_object *ctx_obj0;
231 struct drm_i915_gem_object *ctx_obj1 = NULL;
232
233 ctx_obj0 = to0->engine[ring->id].state;
234 BUG_ON(!ctx_obj0);
235 WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0));
236
237 execlists_ctx_write_tail(ctx_obj0, tail0);
238
239 if (to1) {
240 ctx_obj1 = to1->engine[ring->id].state;
241 BUG_ON(!ctx_obj1);
242 WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1));
243
244 execlists_ctx_write_tail(ctx_obj1, tail1);
245 }
246
247 execlists_elsp_write(ring, ctx_obj0, ctx_obj1);
248
249 return 0;
250 }
251
252 static void execlists_context_unqueue(struct intel_engine_cs *ring)
253 {
254 struct intel_ctx_submit_request *req0 = NULL, *req1 = NULL;
255 struct intel_ctx_submit_request *cursor = NULL, *tmp = NULL;
256 struct drm_i915_private *dev_priv = ring->dev->dev_private;
257
258 assert_spin_locked(&ring->execlist_lock);
259
260 if (list_empty(&ring->execlist_queue))
261 return;
262
263 /* Try to read in pairs */
264 list_for_each_entry_safe(cursor, tmp, &ring->execlist_queue,
265 execlist_link) {
266 if (!req0) {
267 req0 = cursor;
268 } else if (req0->ctx == cursor->ctx) {
269 /* Same ctx: ignore first request, as second request
270 * will update tail past first request's workload */
271 cursor->elsp_submitted = req0->elsp_submitted;
272 list_del(&req0->execlist_link);
273 queue_work(dev_priv->wq, &req0->work);
274 req0 = cursor;
275 } else {
276 req1 = cursor;
277 break;
278 }
279 }
280
281 WARN_ON(req1 && req1->elsp_submitted);
282
283 WARN_ON(execlists_submit_context(ring, req0->ctx, req0->tail,
284 req1 ? req1->ctx : NULL,
285 req1 ? req1->tail : 0));
286
287 req0->elsp_submitted++;
288 if (req1)
289 req1->elsp_submitted++;
290 }
291
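/*
 * Illustrative sketch, not driver code: the pairing rule applied by
 * execlists_context_unqueue() above, shown over a plain array instead of the
 * execlist queue. The struct and values are made up; only the rule itself
 * comes from the function above: a later request for the same context
 * supersedes the earlier one (its tail covers that workload), and the first
 * request for a different context fills the second ELSP slot.
 */
#include <stdio.h>

struct fake_req {
	int ctx_id;
	unsigned int tail;
};

static void pick_pair(const struct fake_req *q, int n)
{
	const struct fake_req *req0 = NULL, *req1 = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if (!req0)
			req0 = &q[i];
		else if (q[i].ctx_id == req0->ctx_id)
			req0 = &q[i];		/* same ctx: keep only the later tail */
		else {
			req1 = &q[i];		/* different ctx: second slot */
			break;
		}
	}

	if (!req0)
		return;				/* empty queue: nothing to submit */

	printf("slot0: ctx %d, tail 0x%x\n", req0->ctx_id, req0->tail);
	if (req1)
		printf("slot1: ctx %d, tail 0x%x\n", req1->ctx_id, req1->tail);
}

int main(void)
{
	struct fake_req queue[] = {
		{ .ctx_id = 1, .tail = 0x100 },
		{ .ctx_id = 1, .tail = 0x180 },	/* supersedes the 0x100 entry */
		{ .ctx_id = 2, .tail = 0x040 },
	};

	pick_pair(queue, 3);
	return 0;
}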
292 static bool execlists_check_remove_request(struct intel_engine_cs *ring,
293 u32 request_id)
294 {
295 struct drm_i915_private *dev_priv = ring->dev->dev_private;
296 struct intel_ctx_submit_request *head_req;
297
298 assert_spin_locked(&ring->execlist_lock);
299
300 head_req = list_first_entry_or_null(&ring->execlist_queue,
301 struct intel_ctx_submit_request,
302 execlist_link);
303
304 if (head_req != NULL) {
305 struct drm_i915_gem_object *ctx_obj =
306 head_req->ctx->engine[ring->id].state;
307 if (intel_execlists_ctx_id(ctx_obj) == request_id) {
308 WARN(head_req->elsp_submitted == 0,
309 "Never submitted head request\n");
310
311 if (--head_req->elsp_submitted <= 0) {
312 list_del(&head_req->execlist_link);
313 queue_work(dev_priv->wq, &head_req->work);
314 return true;
315 }
316 }
317 }
318
319 return false;
320 }
321
322 void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring)
323 {
324 struct drm_i915_private *dev_priv = ring->dev->dev_private;
325 u32 status_pointer;
326 u8 read_pointer;
327 u8 write_pointer;
328 u32 status;
329 u32 status_id;
330 u32 submit_contexts = 0;
331
332 status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
333
334 read_pointer = ring->next_context_status_buffer;
335 write_pointer = status_pointer & 0x07;
336 if (read_pointer > write_pointer)
337 write_pointer += 6;
338
339 spin_lock(&ring->execlist_lock);
340
341 while (read_pointer < write_pointer) {
342 read_pointer++;
343 status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
344 (read_pointer % 6) * 8);
345 status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
346 (read_pointer % 6) * 8 + 4);
347
348 if (status & GEN8_CTX_STATUS_PREEMPTED) {
349 if (status & GEN8_CTX_STATUS_LITE_RESTORE) {
350 if (execlists_check_remove_request(ring, status_id))
351 WARN(1, "Lite Restored request removed from queue\n");
352 } else
353 WARN(1, "Preemption without Lite Restore\n");
354 }
355
356 if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) ||
357 (status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) {
358 if (execlists_check_remove_request(ring, status_id))
359 submit_contexts++;
360 }
361 }
362
363 if (submit_contexts != 0)
364 execlists_context_unqueue(ring);
365
366 spin_unlock(&ring->execlist_lock);
367
368 WARN(submit_contexts > 2, "More than two context complete events?\n");
369 ring->next_context_status_buffer = write_pointer % 6;
370
371 I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
372 ((u32)ring->next_context_status_buffer & 0x07) << 8);
373 }
374
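/*
 * Illustrative sketch, not driver code: the context status buffer (CSB)
 * pointer arithmetic used by intel_execlists_handle_ctx_events() above. The
 * buffer holds six entries and the hardware write pointer is a 3-bit field,
 * so a wrapped write pointer is unfolded by adding 6 before walking the
 * pending entries. The input values below are made up.
 */
#include <stdio.h>

#define CSB_ENTRIES 6

static void walk_csb(unsigned int read_pointer, unsigned int hw_status_ptr)
{
	unsigned int write_pointer = hw_status_ptr & 0x07;

	if (read_pointer > write_pointer)
		write_pointer += CSB_ENTRIES;		/* unfold the wrap */

	while (read_pointer < write_pointer) {
		read_pointer++;
		printf("consume CSB entry %u\n", read_pointer % CSB_ENTRIES);
	}

	printf("next_context_status_buffer = %u\n", write_pointer % CSB_ENTRIES);
}

int main(void)
{
	/* software last read entry 4, hardware has wrapped round to entry 1 */
	walk_csb(4, 1);
	return 0;
}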
375 static void execlists_free_request_task(struct work_struct *work)
376 {
377 struct intel_ctx_submit_request *req =
378 container_of(work, struct intel_ctx_submit_request, work);
379 struct drm_device *dev = req->ring->dev;
380 struct drm_i915_private *dev_priv = dev->dev_private;
381
382 intel_runtime_pm_put(dev_priv);
383
384 mutex_lock(&dev->struct_mutex);
385 i915_gem_context_unreference(req->ctx);
386 mutex_unlock(&dev->struct_mutex);
387
388 kfree(req);
389 }
390
391 static int execlists_context_queue(struct intel_engine_cs *ring,
392 struct intel_context *to,
393 u32 tail)
394 {
395 struct intel_ctx_submit_request *req = NULL, *cursor;
396 struct drm_i915_private *dev_priv = ring->dev->dev_private;
397 unsigned long flags;
398 int num_elements = 0;
399
400 req = kzalloc(sizeof(*req), GFP_KERNEL);
401 if (req == NULL)
402 return -ENOMEM;
403 req->ctx = to;
404 i915_gem_context_reference(req->ctx);
405 req->ring = ring;
406 req->tail = tail;
407 INIT_WORK(&req->work, execlists_free_request_task);
408
409 intel_runtime_pm_get(dev_priv);
410
411 spin_lock_irqsave(&ring->execlist_lock, flags);
412
413 list_for_each_entry(cursor, &ring->execlist_queue, execlist_link)
414 if (++num_elements > 2)
415 break;
416
417 if (num_elements > 2) {
418 struct intel_ctx_submit_request *tail_req;
419
420 tail_req = list_last_entry(&ring->execlist_queue,
421 struct intel_ctx_submit_request,
422 execlist_link);
423
424 if (to == tail_req->ctx) {
425 WARN(tail_req->elsp_submitted != 0,
426 "More than 2 already-submitted reqs queued\n");
427 list_del(&tail_req->execlist_link);
428 queue_work(dev_priv->wq, &tail_req->work);
429 }
430 }
431
432 list_add_tail(&req->execlist_link, &ring->execlist_queue);
433 if (num_elements == 0)
434 execlists_context_unqueue(ring);
435
436 spin_unlock_irqrestore(&ring->execlist_lock, flags);
437
438 return 0;
439 }
440
441 static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
442 {
443 struct intel_engine_cs *ring = ringbuf->ring;
444 uint32_t flush_domains;
445 int ret;
446
447 flush_domains = 0;
448 if (ring->gpu_caches_dirty)
449 flush_domains = I915_GEM_GPU_DOMAINS;
450
451 ret = ring->emit_flush(ringbuf, I915_GEM_GPU_DOMAINS, flush_domains);
452 if (ret)
453 return ret;
454
455 ring->gpu_caches_dirty = false;
456 return 0;
457 }
458
459 static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
460 struct list_head *vmas)
461 {
462 struct intel_engine_cs *ring = ringbuf->ring;
463 struct i915_vma *vma;
464 uint32_t flush_domains = 0;
465 bool flush_chipset = false;
466 int ret;
467
468 list_for_each_entry(vma, vmas, exec_list) {
469 struct drm_i915_gem_object *obj = vma->obj;
470
471 ret = i915_gem_object_sync(obj, ring);
472 if (ret)
473 return ret;
474
475 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
476 flush_chipset |= i915_gem_clflush_object(obj, false);
477
478 flush_domains |= obj->base.write_domain;
479 }
480
481 if (flush_domains & I915_GEM_DOMAIN_GTT)
482 wmb();
483
484 /* Unconditionally invalidate gpu caches and ensure that we do flush
485 * any residual writes from the previous batch.
486 */
487 return logical_ring_invalidate_all_caches(ringbuf);
488 }
489
490 int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
491 struct intel_engine_cs *ring,
492 struct intel_context *ctx,
493 struct drm_i915_gem_execbuffer2 *args,
494 struct list_head *vmas,
495 struct drm_i915_gem_object *batch_obj,
496 u64 exec_start, u32 flags)
497 {
498 struct drm_i915_private *dev_priv = dev->dev_private;
499 struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
500 int instp_mode;
501 u32 instp_mask;
502 int ret;
503
504 instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
505 instp_mask = I915_EXEC_CONSTANTS_MASK;
506 switch (instp_mode) {
507 case I915_EXEC_CONSTANTS_REL_GENERAL:
508 case I915_EXEC_CONSTANTS_ABSOLUTE:
509 case I915_EXEC_CONSTANTS_REL_SURFACE:
510 if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
511 DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
512 return -EINVAL;
513 }
514
515 if (instp_mode != dev_priv->relative_constants_mode) {
516 if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
517 DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
518 return -EINVAL;
519 }
520
521 /* The HW changed the meaning on this bit on gen6 */
522 instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
523 }
524 break;
525 default:
526 DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
527 return -EINVAL;
528 }
529
530 if (args->num_cliprects != 0) {
531 DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
532 return -EINVAL;
533 } else {
534 if (args->DR4 == 0xffffffff) {
535 DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
536 args->DR4 = 0;
537 }
538
539 if (args->DR1 || args->DR4 || args->cliprects_ptr) {
540 DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
541 return -EINVAL;
542 }
543 }
544
545 if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
546 DRM_DEBUG("sol reset is gen7 only\n");
547 return -EINVAL;
548 }
549
550 ret = execlists_move_to_gpu(ringbuf, vmas);
551 if (ret)
552 return ret;
553
554 if (ring == &dev_priv->ring[RCS] &&
555 instp_mode != dev_priv->relative_constants_mode) {
556 ret = intel_logical_ring_begin(ringbuf, 4);
557 if (ret)
558 return ret;
559
560 intel_logical_ring_emit(ringbuf, MI_NOOP);
561 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
562 intel_logical_ring_emit(ringbuf, INSTPM);
563 intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
564 intel_logical_ring_advance(ringbuf);
565
566 dev_priv->relative_constants_mode = instp_mode;
567 }
568
569 ret = ring->emit_bb_start(ringbuf, exec_start, flags);
570 if (ret)
571 return ret;
572
573 i915_gem_execbuffer_move_to_active(vmas, ring);
574 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
575
576 return 0;
577 }
578
579 void intel_logical_ring_stop(struct intel_engine_cs *ring)
580 {
581 struct drm_i915_private *dev_priv = ring->dev->dev_private;
582 int ret;
583
584 if (!intel_ring_initialized(ring))
585 return;
586
587 ret = intel_ring_idle(ring);
588 if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
589 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
590 ring->name, ret);
591
592 /* TODO: Is this correct with Execlists enabled? */
593 I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
594 if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
595 		DRM_ERROR("%s: timed out trying to stop ring\n", ring->name);
596 return;
597 }
598 I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
599 }
600
601 int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf)
602 {
603 struct intel_engine_cs *ring = ringbuf->ring;
604 int ret;
605
606 if (!ring->gpu_caches_dirty)
607 return 0;
608
609 ret = ring->emit_flush(ringbuf, 0, I915_GEM_GPU_DOMAINS);
610 if (ret)
611 return ret;
612
613 ring->gpu_caches_dirty = false;
614 return 0;
615 }
616
617 void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf)
618 {
619 struct intel_engine_cs *ring = ringbuf->ring;
620 struct intel_context *ctx = ringbuf->FIXME_lrc_ctx;
621
622 intel_logical_ring_advance(ringbuf);
623
624 if (intel_ring_stopped(ring))
625 return;
626
627 execlists_context_queue(ring, ctx, ringbuf->tail);
628 }
629
630 static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
631 struct intel_context *ctx)
632 {
633 if (ring->outstanding_lazy_seqno)
634 return 0;
635
636 if (ring->preallocated_lazy_request == NULL) {
637 struct drm_i915_gem_request *request;
638
639 request = kmalloc(sizeof(*request), GFP_KERNEL);
640 if (request == NULL)
641 return -ENOMEM;
642
643 /* Hold a reference to the context this request belongs to
644 * (we will need it when the time comes to emit/retire the
645 * request).
646 */
647 request->ctx = ctx;
648 i915_gem_context_reference(request->ctx);
649
650 ring->preallocated_lazy_request = request;
651 }
652
653 return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
654 }
655
656 static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
657 int bytes)
658 {
659 struct intel_engine_cs *ring = ringbuf->ring;
660 struct drm_i915_gem_request *request;
661 u32 seqno = 0;
662 int ret;
663
664 if (ringbuf->last_retired_head != -1) {
665 ringbuf->head = ringbuf->last_retired_head;
666 ringbuf->last_retired_head = -1;
667
668 ringbuf->space = intel_ring_space(ringbuf);
669 if (ringbuf->space >= bytes)
670 return 0;
671 }
672
673 list_for_each_entry(request, &ring->request_list, list) {
674 if (__intel_ring_space(request->tail, ringbuf->tail,
675 ringbuf->size) >= bytes) {
676 seqno = request->seqno;
677 break;
678 }
679 }
680
681 if (seqno == 0)
682 return -ENOSPC;
683
684 ret = i915_wait_seqno(ring, seqno);
685 if (ret)
686 return ret;
687
688 i915_gem_retire_requests_ring(ring);
689 ringbuf->head = ringbuf->last_retired_head;
690 ringbuf->last_retired_head = -1;
691
692 ringbuf->space = intel_ring_space(ringbuf);
693 return 0;
694 }
695
696 static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
697 int bytes)
698 {
699 struct intel_engine_cs *ring = ringbuf->ring;
700 struct drm_device *dev = ring->dev;
701 struct drm_i915_private *dev_priv = dev->dev_private;
702 unsigned long end;
703 int ret;
704
705 ret = logical_ring_wait_request(ringbuf, bytes);
706 if (ret != -ENOSPC)
707 return ret;
708
709 /* Force the context submission in case we have been skipping it */
710 intel_logical_ring_advance_and_submit(ringbuf);
711
712 /* With GEM the hangcheck timer should kick us out of the loop,
713 * leaving it early runs the risk of corrupting GEM state (due
714 * to running on almost untested codepaths). But on resume
715 * timers don't work yet, so prevent a complete hang in that
716 * case by choosing an insanely large timeout. */
717 end = jiffies + 60 * HZ;
718
719 do {
720 ringbuf->head = I915_READ_HEAD(ring);
721 ringbuf->space = intel_ring_space(ringbuf);
722 if (ringbuf->space >= bytes) {
723 ret = 0;
724 break;
725 }
726
727 msleep(1);
728
729 if (dev_priv->mm.interruptible && signal_pending(current)) {
730 ret = -ERESTARTSYS;
731 break;
732 }
733
734 ret = i915_gem_check_wedge(&dev_priv->gpu_error,
735 dev_priv->mm.interruptible);
736 if (ret)
737 break;
738
739 if (time_after(jiffies, end)) {
740 ret = -EBUSY;
741 break;
742 }
743 } while (1);
744
745 return ret;
746 }
747
748 static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf)
749 {
750 uint32_t __iomem *virt;
751 int rem = ringbuf->size - ringbuf->tail;
752
753 if (ringbuf->space < rem) {
754 int ret = logical_ring_wait_for_space(ringbuf, rem);
755
756 if (ret)
757 return ret;
758 }
759
760 virt = ringbuf->virtual_start + ringbuf->tail;
761 rem /= 4;
762 while (rem--)
763 iowrite32(MI_NOOP, virt++);
764
765 ringbuf->tail = 0;
766 ringbuf->space = intel_ring_space(ringbuf);
767
768 return 0;
769 }
770
771 static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes)
772 {
773 int ret;
774
775 if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
776 ret = logical_ring_wrap_buffer(ringbuf);
777 if (unlikely(ret))
778 return ret;
779 }
780
781 if (unlikely(ringbuf->space < bytes)) {
782 ret = logical_ring_wait_for_space(ringbuf, bytes);
783 if (unlikely(ret))
784 return ret;
785 }
786
787 return 0;
788 }
789
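/*
 * Illustrative sketch, not driver code: the wrap decision made by
 * logical_ring_prepare() above. When the requested write would run past the
 * effective end of the ring, the remainder up to the physical end is padded
 * with MI_NOOPs and the tail wraps to zero before the free-space check.
 * Sizes below are made up.
 */
#include <stdio.h>

static unsigned int prepare_tail(unsigned int tail, unsigned int bytes,
				 unsigned int size, unsigned int effective_size)
{
	if (tail + bytes > effective_size) {
		unsigned int pad = size - tail;	/* bytes filled with MI_NOOP */

		printf("wrapping: pad %u bytes, tail -> 0\n", pad);
		tail = 0;
	}
	return tail;
}

int main(void)
{
	/* 32 KiB ring, tail a few bytes shy of the end: a 64-byte write wraps */
	unsigned int tail = prepare_tail(0x7ff0, 64, 0x8000, 0x8000);

	printf("new tail = 0x%x\n", tail);
	return 0;
}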
790 int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
791 {
792 struct intel_engine_cs *ring = ringbuf->ring;
793 struct drm_device *dev = ring->dev;
794 struct drm_i915_private *dev_priv = dev->dev_private;
795 int ret;
796
797 ret = i915_gem_check_wedge(&dev_priv->gpu_error,
798 dev_priv->mm.interruptible);
799 if (ret)
800 return ret;
801
802 ret = logical_ring_prepare(ringbuf, num_dwords * sizeof(uint32_t));
803 if (ret)
804 return ret;
805
806 /* Preallocate the olr before touching the ring */
807 ret = logical_ring_alloc_seqno(ring, ringbuf->FIXME_lrc_ctx);
808 if (ret)
809 return ret;
810
811 ringbuf->space -= num_dwords * sizeof(uint32_t);
812 return 0;
813 }
814
815 static int gen8_init_common_ring(struct intel_engine_cs *ring)
816 {
817 struct drm_device *dev = ring->dev;
818 struct drm_i915_private *dev_priv = dev->dev_private;
819
820 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
821 I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
822
823 I915_WRITE(RING_MODE_GEN7(ring),
824 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
825 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
826 POSTING_READ(RING_MODE_GEN7(ring));
827 DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);
828
829 memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
830
831 return 0;
832 }
833
834 static int gen8_init_render_ring(struct intel_engine_cs *ring)
835 {
836 struct drm_device *dev = ring->dev;
837 struct drm_i915_private *dev_priv = dev->dev_private;
838 int ret;
839
840 ret = gen8_init_common_ring(ring);
841 if (ret)
842 return ret;
843
844 /* We need to disable the AsyncFlip performance optimisations in order
845 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
846 * programmed to '1' on all products.
847 *
848 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
849 */
850 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
851
852 ret = intel_init_pipe_control(ring);
853 if (ret)
854 return ret;
855
856 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
857
858 return ret;
859 }
860
861 static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
862 u64 offset, unsigned flags)
863 {
864 bool ppgtt = !(flags & I915_DISPATCH_SECURE);
865 int ret;
866
867 ret = intel_logical_ring_begin(ringbuf, 4);
868 if (ret)
869 return ret;
870
871 /* FIXME(BDW): Address space and security selectors. */
872 intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
873 intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
874 intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
875 intel_logical_ring_emit(ringbuf, MI_NOOP);
876 intel_logical_ring_advance(ringbuf);
877
878 return 0;
879 }
880
881 static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
882 {
883 struct drm_device *dev = ring->dev;
884 struct drm_i915_private *dev_priv = dev->dev_private;
885 unsigned long flags;
886
887 if (!dev->irq_enabled)
888 return false;
889
890 spin_lock_irqsave(&dev_priv->irq_lock, flags);
891 if (ring->irq_refcount++ == 0) {
892 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
893 POSTING_READ(RING_IMR(ring->mmio_base));
894 }
895 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
896
897 return true;
898 }
899
900 static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
901 {
902 struct drm_device *dev = ring->dev;
903 struct drm_i915_private *dev_priv = dev->dev_private;
904 unsigned long flags;
905
906 spin_lock_irqsave(&dev_priv->irq_lock, flags);
907 if (--ring->irq_refcount == 0) {
908 I915_WRITE_IMR(ring, ~ring->irq_keep_mask);
909 POSTING_READ(RING_IMR(ring->mmio_base));
910 }
911 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
912 }
913
914 static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
915 u32 invalidate_domains,
916 u32 unused)
917 {
918 struct intel_engine_cs *ring = ringbuf->ring;
919 struct drm_device *dev = ring->dev;
920 struct drm_i915_private *dev_priv = dev->dev_private;
921 uint32_t cmd;
922 int ret;
923
924 ret = intel_logical_ring_begin(ringbuf, 4);
925 if (ret)
926 return ret;
927
928 cmd = MI_FLUSH_DW + 1;
929
930 if (ring == &dev_priv->ring[VCS]) {
931 if (invalidate_domains & I915_GEM_GPU_DOMAINS)
932 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
933 MI_FLUSH_DW_STORE_INDEX |
934 MI_FLUSH_DW_OP_STOREDW;
935 } else {
936 if (invalidate_domains & I915_GEM_DOMAIN_RENDER)
937 cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
938 MI_FLUSH_DW_OP_STOREDW;
939 }
940
941 intel_logical_ring_emit(ringbuf, cmd);
942 intel_logical_ring_emit(ringbuf,
943 I915_GEM_HWS_SCRATCH_ADDR |
944 MI_FLUSH_DW_USE_GTT);
945 intel_logical_ring_emit(ringbuf, 0); /* upper addr */
946 intel_logical_ring_emit(ringbuf, 0); /* value */
947 intel_logical_ring_advance(ringbuf);
948
949 return 0;
950 }
951
952 static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
953 u32 invalidate_domains,
954 u32 flush_domains)
955 {
956 struct intel_engine_cs *ring = ringbuf->ring;
957 u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
958 u32 flags = 0;
959 int ret;
960
961 flags |= PIPE_CONTROL_CS_STALL;
962
963 if (flush_domains) {
964 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
965 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
966 }
967
968 if (invalidate_domains) {
969 flags |= PIPE_CONTROL_TLB_INVALIDATE;
970 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
971 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
972 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
973 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
974 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
975 flags |= PIPE_CONTROL_QW_WRITE;
976 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
977 }
978
979 ret = intel_logical_ring_begin(ringbuf, 6);
980 if (ret)
981 return ret;
982
983 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
984 intel_logical_ring_emit(ringbuf, flags);
985 intel_logical_ring_emit(ringbuf, scratch_addr);
986 intel_logical_ring_emit(ringbuf, 0);
987 intel_logical_ring_emit(ringbuf, 0);
988 intel_logical_ring_emit(ringbuf, 0);
989 intel_logical_ring_advance(ringbuf);
990
991 return 0;
992 }
993
994 static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
995 {
996 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
997 }
998
999 static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
1000 {
1001 intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
1002 }
1003
1004 static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
1005 {
1006 struct intel_engine_cs *ring = ringbuf->ring;
1007 u32 cmd;
1008 int ret;
1009
1010 ret = intel_logical_ring_begin(ringbuf, 6);
1011 if (ret)
1012 return ret;
1013
1014 cmd = MI_STORE_DWORD_IMM_GEN8;
1015 cmd |= MI_GLOBAL_GTT;
1016
1017 intel_logical_ring_emit(ringbuf, cmd);
1018 intel_logical_ring_emit(ringbuf,
1019 (ring->status_page.gfx_addr +
1020 (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
1021 intel_logical_ring_emit(ringbuf, 0);
1022 intel_logical_ring_emit(ringbuf, ring->outstanding_lazy_seqno);
1023 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
1024 intel_logical_ring_emit(ringbuf, MI_NOOP);
1025 intel_logical_ring_advance_and_submit(ringbuf);
1026
1027 return 0;
1028 }
1029
1030 void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
1031 {
1032 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1033
1034 if (!intel_ring_initialized(ring))
1035 return;
1036
1037 intel_logical_ring_stop(ring);
1038 WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
1039 ring->preallocated_lazy_request = NULL;
1040 ring->outstanding_lazy_seqno = 0;
1041
1042 if (ring->cleanup)
1043 ring->cleanup(ring);
1044
1045 i915_cmd_parser_fini_ring(ring);
1046
1047 if (ring->status_page.obj) {
1048 kunmap(sg_page(ring->status_page.obj->pages->sgl));
1049 ring->status_page.obj = NULL;
1050 }
1051 }
1052
1053 static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
1054 {
1055 int ret;
1056 struct intel_context *dctx = ring->default_context;
1057 struct drm_i915_gem_object *dctx_obj;
1058
1059 /* Intentionally left blank. */
1060 ring->buffer = NULL;
1061
1062 ring->dev = dev;
1063 INIT_LIST_HEAD(&ring->active_list);
1064 INIT_LIST_HEAD(&ring->request_list);
1065 init_waitqueue_head(&ring->irq_queue);
1066
1067 INIT_LIST_HEAD(&ring->execlist_queue);
1068 spin_lock_init(&ring->execlist_lock);
1069 ring->next_context_status_buffer = 0;
1070
1071 ret = intel_lr_context_deferred_create(dctx, ring);
1072 if (ret)
1073 return ret;
1074
1075 /* The status page is offset 0 from the context object in LRCs. */
1076 dctx_obj = dctx->engine[ring->id].state;
1077 ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj);
1078 ring->status_page.page_addr = kmap(sg_page(dctx_obj->pages->sgl));
1079 if (ring->status_page.page_addr == NULL)
1080 return -ENOMEM;
1081 ring->status_page.obj = dctx_obj;
1082
1083 ret = i915_cmd_parser_init_ring(ring);
1084 if (ret)
1085 return ret;
1086
1087 if (ring->init) {
1088 ret = ring->init(ring);
1089 if (ret)
1090 return ret;
1091 }
1092
1093 return 0;
1094 }
1095
1096 static int logical_render_ring_init(struct drm_device *dev)
1097 {
1098 struct drm_i915_private *dev_priv = dev->dev_private;
1099 struct intel_engine_cs *ring = &dev_priv->ring[RCS];
1100
1101 ring->name = "render ring";
1102 ring->id = RCS;
1103 ring->mmio_base = RENDER_RING_BASE;
1104 ring->irq_enable_mask =
1105 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
1106 ring->irq_keep_mask =
1107 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
1108 if (HAS_L3_DPF(dev))
1109 ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
1110
1111 ring->init = gen8_init_render_ring;
1112 ring->cleanup = intel_fini_pipe_control;
1113 ring->get_seqno = gen8_get_seqno;
1114 ring->set_seqno = gen8_set_seqno;
1115 ring->emit_request = gen8_emit_request;
1116 ring->emit_flush = gen8_emit_flush_render;
1117 ring->irq_get = gen8_logical_ring_get_irq;
1118 ring->irq_put = gen8_logical_ring_put_irq;
1119 ring->emit_bb_start = gen8_emit_bb_start;
1120
1121 return logical_ring_init(dev, ring);
1122 }
1123
1124 static int logical_bsd_ring_init(struct drm_device *dev)
1125 {
1126 struct drm_i915_private *dev_priv = dev->dev_private;
1127 struct intel_engine_cs *ring = &dev_priv->ring[VCS];
1128
1129 ring->name = "bsd ring";
1130 ring->id = VCS;
1131 ring->mmio_base = GEN6_BSD_RING_BASE;
1132 ring->irq_enable_mask =
1133 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
1134 ring->irq_keep_mask =
1135 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
1136
1137 ring->init = gen8_init_common_ring;
1138 ring->get_seqno = gen8_get_seqno;
1139 ring->set_seqno = gen8_set_seqno;
1140 ring->emit_request = gen8_emit_request;
1141 ring->emit_flush = gen8_emit_flush;
1142 ring->irq_get = gen8_logical_ring_get_irq;
1143 ring->irq_put = gen8_logical_ring_put_irq;
1144 ring->emit_bb_start = gen8_emit_bb_start;
1145
1146 return logical_ring_init(dev, ring);
1147 }
1148
1149 static int logical_bsd2_ring_init(struct drm_device *dev)
1150 {
1151 struct drm_i915_private *dev_priv = dev->dev_private;
1152 struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
1153
1154 	ring->name = "bsd2 ring";
1155 ring->id = VCS2;
1156 ring->mmio_base = GEN8_BSD2_RING_BASE;
1157 ring->irq_enable_mask =
1158 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
1159 ring->irq_keep_mask =
1160 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
1161
1162 ring->init = gen8_init_common_ring;
1163 ring->get_seqno = gen8_get_seqno;
1164 ring->set_seqno = gen8_set_seqno;
1165 ring->emit_request = gen8_emit_request;
1166 ring->emit_flush = gen8_emit_flush;
1167 ring->irq_get = gen8_logical_ring_get_irq;
1168 ring->irq_put = gen8_logical_ring_put_irq;
1169 ring->emit_bb_start = gen8_emit_bb_start;
1170
1171 return logical_ring_init(dev, ring);
1172 }
1173
1174 static int logical_blt_ring_init(struct drm_device *dev)
1175 {
1176 struct drm_i915_private *dev_priv = dev->dev_private;
1177 struct intel_engine_cs *ring = &dev_priv->ring[BCS];
1178
1179 ring->name = "blitter ring";
1180 ring->id = BCS;
1181 ring->mmio_base = BLT_RING_BASE;
1182 ring->irq_enable_mask =
1183 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
1184 ring->irq_keep_mask =
1185 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
1186
1187 ring->init = gen8_init_common_ring;
1188 ring->get_seqno = gen8_get_seqno;
1189 ring->set_seqno = gen8_set_seqno;
1190 ring->emit_request = gen8_emit_request;
1191 ring->emit_flush = gen8_emit_flush;
1192 ring->irq_get = gen8_logical_ring_get_irq;
1193 ring->irq_put = gen8_logical_ring_put_irq;
1194 ring->emit_bb_start = gen8_emit_bb_start;
1195
1196 return logical_ring_init(dev, ring);
1197 }
1198
1199 static int logical_vebox_ring_init(struct drm_device *dev)
1200 {
1201 struct drm_i915_private *dev_priv = dev->dev_private;
1202 struct intel_engine_cs *ring = &dev_priv->ring[VECS];
1203
1204 ring->name = "video enhancement ring";
1205 ring->id = VECS;
1206 ring->mmio_base = VEBOX_RING_BASE;
1207 ring->irq_enable_mask =
1208 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
1209 ring->irq_keep_mask =
1210 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
1211
1212 ring->init = gen8_init_common_ring;
1213 ring->get_seqno = gen8_get_seqno;
1214 ring->set_seqno = gen8_set_seqno;
1215 ring->emit_request = gen8_emit_request;
1216 ring->emit_flush = gen8_emit_flush;
1217 ring->irq_get = gen8_logical_ring_get_irq;
1218 ring->irq_put = gen8_logical_ring_put_irq;
1219 ring->emit_bb_start = gen8_emit_bb_start;
1220
1221 return logical_ring_init(dev, ring);
1222 }
1223
1224 int intel_logical_rings_init(struct drm_device *dev)
1225 {
1226 struct drm_i915_private *dev_priv = dev->dev_private;
1227 int ret;
1228
1229 ret = logical_render_ring_init(dev);
1230 if (ret)
1231 return ret;
1232
1233 if (HAS_BSD(dev)) {
1234 ret = logical_bsd_ring_init(dev);
1235 if (ret)
1236 goto cleanup_render_ring;
1237 }
1238
1239 if (HAS_BLT(dev)) {
1240 ret = logical_blt_ring_init(dev);
1241 if (ret)
1242 goto cleanup_bsd_ring;
1243 }
1244
1245 if (HAS_VEBOX(dev)) {
1246 ret = logical_vebox_ring_init(dev);
1247 if (ret)
1248 goto cleanup_blt_ring;
1249 }
1250
1251 if (HAS_BSD2(dev)) {
1252 ret = logical_bsd2_ring_init(dev);
1253 if (ret)
1254 goto cleanup_vebox_ring;
1255 }
1256
1257 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
1258 if (ret)
1259 goto cleanup_bsd2_ring;
1260
1261 return 0;
1262
1263 cleanup_bsd2_ring:
1264 intel_logical_ring_cleanup(&dev_priv->ring[VCS2]);
1265 cleanup_vebox_ring:
1266 intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
1267 cleanup_blt_ring:
1268 intel_logical_ring_cleanup(&dev_priv->ring[BCS]);
1269 cleanup_bsd_ring:
1270 intel_logical_ring_cleanup(&dev_priv->ring[VCS]);
1271 cleanup_render_ring:
1272 intel_logical_ring_cleanup(&dev_priv->ring[RCS]);
1273
1274 return ret;
1275 }
1276
1277 static int
1278 populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
1279 struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
1280 {
1281 struct drm_i915_gem_object *ring_obj = ringbuf->obj;
1282 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
1283 struct page *page;
1284 uint32_t *reg_state;
1285 int ret;
1286
1287 ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
1288 if (ret) {
1289 DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
1290 return ret;
1291 }
1292
1293 ret = i915_gem_object_get_pages(ctx_obj);
1294 if (ret) {
1295 DRM_DEBUG_DRIVER("Could not get object pages\n");
1296 return ret;
1297 }
1298
1299 i915_gem_object_pin_pages(ctx_obj);
1300
1301 /* The second page of the context object contains some fields which must
1302 * be set up prior to the first execution. */
1303 page = i915_gem_object_get_page(ctx_obj, 1);
1304 reg_state = kmap_atomic(page);
1305
1306 /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
1307 * commands followed by (reg, value) pairs. The values we are setting here are
1308 * only for the first context restore: on a subsequent save, the GPU will
1309 * recreate this batchbuffer with new values (including all the missing
1310 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
1311 if (ring->id == RCS)
1312 reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(14);
1313 else
1314 reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(11);
1315 reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED;
1316 reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
1317 reg_state[CTX_CONTEXT_CONTROL+1] =
1318 _MASKED_BIT_ENABLE((1<<3) | MI_RESTORE_INHIBIT);
1319 reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
1320 reg_state[CTX_RING_HEAD+1] = 0;
1321 reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
1322 reg_state[CTX_RING_TAIL+1] = 0;
1323 reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
1324 reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
1325 reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
1326 reg_state[CTX_RING_BUFFER_CONTROL+1] =
1327 ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
1328 reg_state[CTX_BB_HEAD_U] = ring->mmio_base + 0x168;
1329 reg_state[CTX_BB_HEAD_U+1] = 0;
1330 reg_state[CTX_BB_HEAD_L] = ring->mmio_base + 0x140;
1331 reg_state[CTX_BB_HEAD_L+1] = 0;
1332 reg_state[CTX_BB_STATE] = ring->mmio_base + 0x110;
1333 reg_state[CTX_BB_STATE+1] = (1<<5);
1334 reg_state[CTX_SECOND_BB_HEAD_U] = ring->mmio_base + 0x11c;
1335 reg_state[CTX_SECOND_BB_HEAD_U+1] = 0;
1336 reg_state[CTX_SECOND_BB_HEAD_L] = ring->mmio_base + 0x114;
1337 reg_state[CTX_SECOND_BB_HEAD_L+1] = 0;
1338 reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
1339 reg_state[CTX_SECOND_BB_STATE+1] = 0;
1340 if (ring->id == RCS) {
1341 /* TODO: according to BSpec, the register state context
1342 * for CHV does not have these. OTOH, these registers do
1343 * exist in CHV. I'm waiting for a clarification */
1344 reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
1345 reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
1346 reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
1347 reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
1348 reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
1349 reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
1350 }
1351 reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
1352 reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
1353 reg_state[CTX_CTX_TIMESTAMP] = ring->mmio_base + 0x3a8;
1354 reg_state[CTX_CTX_TIMESTAMP+1] = 0;
1355 reg_state[CTX_PDP3_UDW] = GEN8_RING_PDP_UDW(ring, 3);
1356 reg_state[CTX_PDP3_LDW] = GEN8_RING_PDP_LDW(ring, 3);
1357 reg_state[CTX_PDP2_UDW] = GEN8_RING_PDP_UDW(ring, 2);
1358 reg_state[CTX_PDP2_LDW] = GEN8_RING_PDP_LDW(ring, 2);
1359 reg_state[CTX_PDP1_UDW] = GEN8_RING_PDP_UDW(ring, 1);
1360 reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
1361 reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
1362 reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
1363 reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[3]);
1364 reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[3]);
1365 reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[2]);
1366 reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[2]);
1367 reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[1]);
1368 reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[1]);
1369 reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[0]);
1370 reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[0]);
1371 if (ring->id == RCS) {
1372 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
1373 reg_state[CTX_R_PWR_CLK_STATE] = 0x20c8;
1374 reg_state[CTX_R_PWR_CLK_STATE+1] = 0;
1375 }
1376
1377 kunmap_atomic(reg_state);
1378
1379 ctx_obj->dirty = 1;
1380 set_page_dirty(page);
1381 i915_gem_object_unpin_pages(ctx_obj);
1382
1383 return 0;
1384 }
1385
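/*
 * Illustrative sketch, not driver code: the (register, value) pair layout
 * that populate_lr_context() above fills in. A CTX_* offset holds the MMIO
 * register address and the following slot holds the value that
 * MI_LOAD_REGISTER_IMM will load on context restore. The offset and register
 * address below are hypothetical stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

#define FAKE_CTX_RING_TAIL	0x06	/* mirrors CTX_RING_TAIL above */

static void set_ctx_reg(uint32_t *reg_state, unsigned int idx,
			uint32_t mmio_reg, uint32_t value)
{
	reg_state[idx] = mmio_reg;	/* which register to restore */
	reg_state[idx + 1] = value;	/* what to load into it */
}

int main(void)
{
	uint32_t reg_state[0x60] = { 0 };

	/* e.g. the ring tail register of a ring with a hypothetical mmio base */
	set_ctx_reg(reg_state, FAKE_CTX_RING_TAIL, 0x2000 + 0x30, 0);

	printf("reg 0x%x <- 0x%x\n",
	       reg_state[FAKE_CTX_RING_TAIL], reg_state[FAKE_CTX_RING_TAIL + 1]);
	return 0;
}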
1386 void intel_lr_context_free(struct intel_context *ctx)
1387 {
1388 int i;
1389
1390 for (i = 0; i < I915_NUM_RINGS; i++) {
1391 struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
1392 struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
1393
1394 if (ctx_obj) {
1395 intel_destroy_ringbuffer_obj(ringbuf);
1396 kfree(ringbuf);
1397 i915_gem_object_ggtt_unpin(ctx_obj);
1398 drm_gem_object_unreference(&ctx_obj->base);
1399 }
1400 }
1401 }
1402
1403 static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
1404 {
1405 int ret = 0;
1406
1407 WARN_ON(INTEL_INFO(ring->dev)->gen != 8);
1408
1409 switch (ring->id) {
1410 case RCS:
1411 ret = GEN8_LR_CONTEXT_RENDER_SIZE;
1412 break;
1413 case VCS:
1414 case BCS:
1415 case VECS:
1416 case VCS2:
1417 ret = GEN8_LR_CONTEXT_OTHER_SIZE;
1418 break;
1419 }
1420
1421 return ret;
1422 }
1423
1424 int intel_lr_context_deferred_create(struct intel_context *ctx,
1425 struct intel_engine_cs *ring)
1426 {
1427 struct drm_device *dev = ring->dev;
1428 struct drm_i915_gem_object *ctx_obj;
1429 uint32_t context_size;
1430 struct intel_ringbuffer *ringbuf;
1431 int ret;
1432
1433 WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
1434 if (ctx->engine[ring->id].state)
1435 return 0;
1436
1437 context_size = round_up(get_lr_context_size(ring), 4096);
1438
1439 ctx_obj = i915_gem_alloc_context_obj(dev, context_size);
1440 if (IS_ERR(ctx_obj)) {
1441 ret = PTR_ERR(ctx_obj);
1442 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed: %d\n", ret);
1443 return ret;
1444 }
1445
1446 ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
1447 if (ret) {
1448 DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", ret);
1449 drm_gem_object_unreference(&ctx_obj->base);
1450 return ret;
1451 }
1452
1453 ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
1454 if (!ringbuf) {
1455 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
1456 ring->name);
1457 i915_gem_object_ggtt_unpin(ctx_obj);
1458 drm_gem_object_unreference(&ctx_obj->base);
1459 ret = -ENOMEM;
1460 return ret;
1461 }
1462
1463 ringbuf->ring = ring;
1464 ringbuf->FIXME_lrc_ctx = ctx;
1465
1466 ringbuf->size = 32 * PAGE_SIZE;
1467 ringbuf->effective_size = ringbuf->size;
1468 ringbuf->head = 0;
1469 ringbuf->tail = 0;
1470 ringbuf->space = ringbuf->size;
1471 ringbuf->last_retired_head = -1;
1472
1473 /* TODO: For now we put this in the mappable region so that we can reuse
1474 * the existing ringbuffer code which ioremaps it. When we start
1475 * creating many contexts, this will no longer work and we must switch
1476 * to a kmapish interface.
1477 */
1478 ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
1479 if (ret) {
1480 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n",
1481 ring->name, ret);
1482 goto error;
1483 }
1484
1485 ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
1486 if (ret) {
1487 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
1488 intel_destroy_ringbuffer_obj(ringbuf);
1489 goto error;
1490 }
1491
1492 ctx->engine[ring->id].ringbuf = ringbuf;
1493 ctx->engine[ring->id].state = ctx_obj;
1494
1495 return 0;
1496
1497 error:
1498 kfree(ringbuf);
1499 i915_gem_object_ggtt_unpin(ctx_obj);
1500 drm_gem_object_unreference(&ctx_obj->base);
1501 return ret;
1502 }