drm/i915/bdw: Handle context switch events
drivers/gpu/drm/i915/intel_lrc.c
1/*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Ben Widawsky <ben@bwidawsk.net>
25 * Michel Thierry <michel.thierry@intel.com>
26 * Thomas Daniel <thomas.daniel@intel.com>
27 * Oscar Mateo <oscar.mateo@intel.com>
28 *
29 */
30
31/*
32 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
33 * These expanded contexts enable a number of new abilities, especially
34 * "Execlists" (also implemented in this file).
35 *
36 * Execlists are the new method by which, on gen8+ hardware, workloads are
37 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
38 */
39
40#include <drm/drmP.h>
41#include <drm/i915_drm.h>
42#include "i915_drv.h"
43
44#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
45#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
46
47#define GEN8_LR_CONTEXT_ALIGN 4096
48
49#define RING_ELSP(ring)			((ring)->mmio_base+0x230)
50#define RING_EXECLIST_STATUS(ring)	((ring)->mmio_base+0x234)
51#define RING_CONTEXT_CONTROL(ring)	((ring)->mmio_base+0x244)
52#define RING_CONTEXT_STATUS_BUF(ring) ((ring)->mmio_base+0x370)
53#define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0)
54
55#define RING_EXECLIST_QFULL (1 << 0x2)
56#define RING_EXECLIST1_VALID (1 << 0x3)
57#define RING_EXECLIST0_VALID (1 << 0x4)
58#define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE)
59#define RING_EXECLIST1_ACTIVE (1 << 0x11)
60#define RING_EXECLIST0_ACTIVE (1 << 0x12)
61
62#define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0)
63#define GEN8_CTX_STATUS_PREEMPTED (1 << 1)
64#define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2)
65#define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3)
66#define GEN8_CTX_STATUS_COMPLETE (1 << 4)
67#define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15)
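/*
 * The GEN8_CTX_STATUS_* bits above are the event types the hardware reports
 * through the per-ring Context Status Buffer (CSB). At this point only
 * GEN8_CTX_STATUS_COMPLETE entries are acted upon, in
 * intel_execlists_handle_ctx_events() below.
 */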
68
69#define CTX_LRI_HEADER_0 0x01
70#define CTX_CONTEXT_CONTROL 0x02
71#define CTX_RING_HEAD 0x04
72#define CTX_RING_TAIL 0x06
73#define CTX_RING_BUFFER_START 0x08
74#define CTX_RING_BUFFER_CONTROL 0x0a
75#define CTX_BB_HEAD_U 0x0c
76#define CTX_BB_HEAD_L 0x0e
77#define CTX_BB_STATE 0x10
78#define CTX_SECOND_BB_HEAD_U 0x12
79#define CTX_SECOND_BB_HEAD_L 0x14
80#define CTX_SECOND_BB_STATE 0x16
81#define CTX_BB_PER_CTX_PTR 0x18
82#define CTX_RCS_INDIRECT_CTX 0x1a
83#define CTX_RCS_INDIRECT_CTX_OFFSET 0x1c
84#define CTX_LRI_HEADER_1 0x21
85#define CTX_CTX_TIMESTAMP 0x22
86#define CTX_PDP3_UDW 0x24
87#define CTX_PDP3_LDW 0x26
88#define CTX_PDP2_UDW 0x28
89#define CTX_PDP2_LDW 0x2a
90#define CTX_PDP1_UDW 0x2c
91#define CTX_PDP1_LDW 0x2e
92#define CTX_PDP0_UDW 0x30
93#define CTX_PDP0_LDW 0x32
94#define CTX_LRI_HEADER_2 0x41
95#define CTX_R_PWR_CLK_STATE 0x42
96#define CTX_GPGPU_CSR_BASE_ADDRESS 0x44
97
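/*
 * The CTX_* values above are dword offsets into the logical ring context
 * image (starting at the second page of the context object). Each entry
 * holds a register offset, with the corresponding value stored at
 * offset + 1; populate_lr_context() fills them in below.
 */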
98#define GEN8_CTX_VALID (1<<0)
99#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
100#define GEN8_CTX_FORCE_RESTORE (1<<2)
101#define GEN8_CTX_L3LLC_COHERENT (1<<5)
102#define GEN8_CTX_PRIVILEGE (1<<8)
103enum {
104 ADVANCED_CONTEXT = 0,
105 LEGACY_CONTEXT,
106 ADVANCED_AD_CONTEXT,
107 LEGACY_64B_CONTEXT
108};
109#define GEN8_CTX_MODE_SHIFT 3
110enum {
111 FAULT_AND_HANG = 0,
112 FAULT_AND_HALT, /* Debug only */
113 FAULT_AND_STREAM,
114 FAULT_AND_CONTINUE /* Unsupported */
115};
116#define GEN8_CTX_ID_SHIFT 32
117
118int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
119{
120 WARN_ON(i915.enable_ppgtt == -1);
121
122 if (enable_execlists == 0)
123 return 0;
124
125 if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) &&
126 i915.use_mmio_flip >= 0)
127 return 1;
128
129 return 0;
130}
131
132u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
133{
134 u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj);
135
136 /* LRCA is required to be 4K aligned so the more significant 20 bits
137 * are globally unique */
138 return lrca >> 12;
139}
140
141static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_object *ctx_obj)
142{
143 uint64_t desc;
144 uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj);
145
146 WARN_ON(lrca & 0xFFFFFFFF00000FFFULL);
147
148 desc = GEN8_CTX_VALID;
149 desc |= LEGACY_CONTEXT << GEN8_CTX_MODE_SHIFT;
150 desc |= GEN8_CTX_L3LLC_COHERENT;
151 desc |= GEN8_CTX_PRIVILEGE;
152 desc |= lrca;
153 desc |= (u64)intel_execlists_ctx_id(ctx_obj) << GEN8_CTX_ID_SHIFT;
154
155 /* TODO: WaDisableLiteRestore when we start using semaphore
156 * signalling between Command Streamers */
157 /* desc |= GEN8_CTX_FORCE_RESTORE; */
158
159 return desc;
160}
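/*
 * The resulting descriptor, going by the definitions above: bit 0 valid,
 * bits 3-4 the context mode (LEGACY_CONTEXT here), bit 5 L3LLC coherency,
 * bit 8 privilege, bits 31-12 the page-aligned LRCA, and the upper 32 bits
 * the context ID (which is simply the LRCA shifted down by 12 again).
 */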
161
162static void execlists_elsp_write(struct intel_engine_cs *ring,
163 struct drm_i915_gem_object *ctx_obj0,
164 struct drm_i915_gem_object *ctx_obj1)
165{
166 struct drm_i915_private *dev_priv = ring->dev->dev_private;
167 uint64_t temp = 0;
168 uint32_t desc[4];
169	unsigned long flags;
170
171 /* XXX: You must always write both descriptors in the order below. */
172 if (ctx_obj1)
173 temp = execlists_ctx_descriptor(ctx_obj1);
174 else
175 temp = 0;
176 desc[1] = (u32)(temp >> 32);
177 desc[0] = (u32)temp;
178
179 temp = execlists_ctx_descriptor(ctx_obj0);
180 desc[3] = (u32)(temp >> 32);
181 desc[2] = (u32)temp;
182
183 /* Set Force Wakeup bit to prevent GT from entering C6 while ELSP writes
184 * are in progress.
185 *
186 * The other problem is that we can't just call gen6_gt_force_wake_get()
187 * because that function calls intel_runtime_pm_get(), which might sleep.
188 * Instead, we do the runtime_pm_get/put when creating/destroying requests.
189 */
190 spin_lock_irqsave(&dev_priv->uncore.lock, flags);
191 if (dev_priv->uncore.forcewake_count++ == 0)
192 dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
193 spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
194
195 I915_WRITE(RING_ELSP(ring), desc[1]);
196 I915_WRITE(RING_ELSP(ring), desc[0]);
197 I915_WRITE(RING_ELSP(ring), desc[3]);
198 /* The context is automatically loaded after the following */
199 I915_WRITE(RING_ELSP(ring), desc[2]);
200
201 /* ELSP is a wo register, so use another nearby reg for posting instead */
202 POSTING_READ(RING_EXECLIST_STATUS(ring));
203
204 /* Release Force Wakeup (see the big comment above). */
205 spin_lock_irqsave(&dev_priv->uncore.lock, flags);
206 if (--dev_priv->uncore.forcewake_count == 0)
207 dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
208 spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
209}
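/*
 * Note the submission order above: the upper and lower dwords of execlist
 * element 1 (desc[1], desc[0]) are written first, then the upper dword of
 * element 0 (desc[3]); the final write of desc[2] (element 0, lower dword)
 * is what actually triggers the hardware to load the execlist.
 */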
210
211static int execlists_ctx_write_tail(struct drm_i915_gem_object *ctx_obj, u32 tail)
212{
213 struct page *page;
214 uint32_t *reg_state;
215
216 page = i915_gem_object_get_page(ctx_obj, 1);
217 reg_state = kmap_atomic(page);
218
219 reg_state[CTX_RING_TAIL+1] = tail;
220
221 kunmap_atomic(reg_state);
222
223 return 0;
224}
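/*
 * Rather than poking the RING_TAIL register directly, the new tail is
 * written into the saved register state in page 1 of the context object,
 * so the hardware picks it up when the context is (re)loaded via ELSP.
 */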
225
226static int execlists_submit_context(struct intel_engine_cs *ring,
227 struct intel_context *to0, u32 tail0,
228 struct intel_context *to1, u32 tail1)
229{
230 struct drm_i915_gem_object *ctx_obj0;
231 struct drm_i915_gem_object *ctx_obj1 = NULL;
232
233 ctx_obj0 = to0->engine[ring->id].state;
234 BUG_ON(!ctx_obj0);
235	WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0));
236
237 execlists_ctx_write_tail(ctx_obj0, tail0);
238
239 if (to1) {
240 ctx_obj1 = to1->engine[ring->id].state;
241 BUG_ON(!ctx_obj1);
242	WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1));
243
244 execlists_ctx_write_tail(ctx_obj1, tail1);
245 }
246
247 execlists_elsp_write(ring, ctx_obj0, ctx_obj1);
248
249 return 0;
250}
251
252static void execlists_context_unqueue(struct intel_engine_cs *ring)
253{
254 struct intel_ctx_submit_request *req0 = NULL, *req1 = NULL;
255 struct intel_ctx_submit_request *cursor = NULL, *tmp = NULL;
256 struct drm_i915_private *dev_priv = ring->dev->dev_private;
257
258 assert_spin_locked(&ring->execlist_lock);
259
260 if (list_empty(&ring->execlist_queue))
261 return;
262
263 /* Try to read in pairs */
264 list_for_each_entry_safe(cursor, tmp, &ring->execlist_queue,
265 execlist_link) {
266 if (!req0) {
267 req0 = cursor;
268 } else if (req0->ctx == cursor->ctx) {
269 /* Same ctx: ignore first request, as second request
270 * will update tail past first request's workload */
271 list_del(&req0->execlist_link);
272			queue_work(dev_priv->wq, &req0->work);
273 req0 = cursor;
274 } else {
275 req1 = cursor;
276 break;
277 }
278 }
279
280 WARN_ON(execlists_submit_context(ring, req0->ctx, req0->tail,
281 req1 ? req1->ctx : NULL,
282 req1 ? req1->tail : 0));
283}
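/*
 * The unqueue above submits up to two requests to the ELSP. Consecutive
 * requests for the same context are coalesced: only the most recent tail
 * is submitted and the superseded request is handed off to the worker for
 * freeing. Callers must hold execlist_lock.
 */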
284
285static bool execlists_check_remove_request(struct intel_engine_cs *ring,
286 u32 request_id)
287{
288 struct drm_i915_private *dev_priv = ring->dev->dev_private;
289 struct intel_ctx_submit_request *head_req;
290
291 assert_spin_locked(&ring->execlist_lock);
292
293 head_req = list_first_entry_or_null(&ring->execlist_queue,
294 struct intel_ctx_submit_request,
295 execlist_link);
296
297 if (head_req != NULL) {
298 struct drm_i915_gem_object *ctx_obj =
299 head_req->ctx->engine[ring->id].state;
300 if (intel_execlists_ctx_id(ctx_obj) == request_id) {
301 list_del(&head_req->execlist_link);
302 queue_work(dev_priv->wq, &head_req->work);
303 return true;
304 }
305 }
306
307 return false;
308}
309
310void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring)
311{
312 struct drm_i915_private *dev_priv = ring->dev->dev_private;
313 u32 status_pointer;
314 u8 read_pointer;
315 u8 write_pointer;
316 u32 status;
317 u32 status_id;
318 u32 submit_contexts = 0;
319
320 status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
321
322 read_pointer = ring->next_context_status_buffer;
323 write_pointer = status_pointer & 0x07;
324 if (read_pointer > write_pointer)
325 write_pointer += 6;
326
327 spin_lock(&ring->execlist_lock);
328
329 while (read_pointer < write_pointer) {
330 read_pointer++;
331 status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
332 (read_pointer % 6) * 8);
333 status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
334 (read_pointer % 6) * 8 + 4);
335
336 if (status & GEN8_CTX_STATUS_COMPLETE) {
337 if (execlists_check_remove_request(ring, status_id))
338 submit_contexts++;
339 }
340 }
341
342 if (submit_contexts != 0)
343 execlists_context_unqueue(ring);
344
345 spin_unlock(&ring->execlist_lock);
346
347 WARN(submit_contexts > 2, "More than two context complete events?\n");
348 ring->next_context_status_buffer = write_pointer % 6;
349
350 I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
351 ((u32)ring->next_context_status_buffer & 0x07) << 8);
352}
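/*
 * The CSB has six entries and both pointers wrap modulo 6. For example,
 * with next_context_status_buffer == 5 and a hardware write pointer of 1,
 * write_pointer becomes 7 and entries (6 % 6) == 0 and (7 % 6) == 1 are
 * consumed; next_context_status_buffer is then stored back as 7 % 6 == 1.
 */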
353
354static void execlists_free_request_task(struct work_struct *work)
355{
356 struct intel_ctx_submit_request *req =
357 container_of(work, struct intel_ctx_submit_request, work);
358 struct drm_device *dev = req->ring->dev;
359 struct drm_i915_private *dev_priv = dev->dev_private;
360
361 intel_runtime_pm_put(dev_priv);
362
363 mutex_lock(&dev->struct_mutex);
364 i915_gem_context_unreference(req->ctx);
365 mutex_unlock(&dev->struct_mutex);
366
367 kfree(req);
368}
369
370static int execlists_context_queue(struct intel_engine_cs *ring,
371 struct intel_context *to,
372 u32 tail)
373{
374 struct intel_ctx_submit_request *req = NULL;
375	struct drm_i915_private *dev_priv = ring->dev->dev_private;
376 unsigned long flags;
377 bool was_empty;
378
379 req = kzalloc(sizeof(*req), GFP_KERNEL);
380 if (req == NULL)
381 return -ENOMEM;
382 req->ctx = to;
383 i915_gem_context_reference(req->ctx);
384 req->ring = ring;
385 req->tail = tail;
386 INIT_WORK(&req->work, execlists_free_request_task);
387
388 intel_runtime_pm_get(dev_priv);
389
390 spin_lock_irqsave(&ring->execlist_lock, flags);
391
392 was_empty = list_empty(&ring->execlist_queue);
393 list_add_tail(&req->execlist_link, &ring->execlist_queue);
394 if (was_empty)
395 execlists_context_unqueue(ring);
396
397 spin_unlock_irqrestore(&ring->execlist_lock, flags);
398
399 return 0;
400}
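/*
 * Each queued request holds a reference on its context plus a runtime PM
 * reference; both are dropped in execlists_free_request_task(). The request
 * is only submitted to the ELSP immediately if the queue was empty,
 * otherwise the context-switch interrupt path unqueues it later.
 */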
401
402static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
403{
404 struct intel_engine_cs *ring = ringbuf->ring;
405 uint32_t flush_domains;
406 int ret;
407
408 flush_domains = 0;
409 if (ring->gpu_caches_dirty)
410 flush_domains = I915_GEM_GPU_DOMAINS;
411
412 ret = ring->emit_flush(ringbuf, I915_GEM_GPU_DOMAINS, flush_domains);
413 if (ret)
414 return ret;
415
416 ring->gpu_caches_dirty = false;
417 return 0;
418}
419
420static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
421 struct list_head *vmas)
422{
423 struct intel_engine_cs *ring = ringbuf->ring;
424 struct i915_vma *vma;
425 uint32_t flush_domains = 0;
426 bool flush_chipset = false;
427 int ret;
428
429 list_for_each_entry(vma, vmas, exec_list) {
430 struct drm_i915_gem_object *obj = vma->obj;
431
432 ret = i915_gem_object_sync(obj, ring);
433 if (ret)
434 return ret;
435
436 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
437 flush_chipset |= i915_gem_clflush_object(obj, false);
438
439 flush_domains |= obj->base.write_domain;
440 }
441
442 if (flush_domains & I915_GEM_DOMAIN_GTT)
443 wmb();
444
445 /* Unconditionally invalidate gpu caches and ensure that we do flush
446 * any residual writes from the previous batch.
447 */
448 return logical_ring_invalidate_all_caches(ringbuf);
449}
450
451int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
452 struct intel_engine_cs *ring,
453 struct intel_context *ctx,
454 struct drm_i915_gem_execbuffer2 *args,
455 struct list_head *vmas,
456 struct drm_i915_gem_object *batch_obj,
457 u64 exec_start, u32 flags)
458{
459 struct drm_i915_private *dev_priv = dev->dev_private;
460 struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
461 int instp_mode;
462 u32 instp_mask;
463 int ret;
464
465 instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
466 instp_mask = I915_EXEC_CONSTANTS_MASK;
467 switch (instp_mode) {
468 case I915_EXEC_CONSTANTS_REL_GENERAL:
469 case I915_EXEC_CONSTANTS_ABSOLUTE:
470 case I915_EXEC_CONSTANTS_REL_SURFACE:
471 if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
472 DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
473 return -EINVAL;
474 }
475
476 if (instp_mode != dev_priv->relative_constants_mode) {
477 if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
478 DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
479 return -EINVAL;
480 }
481
482 /* The HW changed the meaning on this bit on gen6 */
483 instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
484 }
485 break;
486 default:
487 DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
488 return -EINVAL;
489 }
490
491 if (args->num_cliprects != 0) {
492 DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
493 return -EINVAL;
494 } else {
495 if (args->DR4 == 0xffffffff) {
496 DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
497 args->DR4 = 0;
498 }
499
500 if (args->DR1 || args->DR4 || args->cliprects_ptr) {
501 DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
502 return -EINVAL;
503 }
504 }
505
506 if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
507 DRM_DEBUG("sol reset is gen7 only\n");
508 return -EINVAL;
509 }
510
511 ret = execlists_move_to_gpu(ringbuf, vmas);
512 if (ret)
513 return ret;
514
515 if (ring == &dev_priv->ring[RCS] &&
516 instp_mode != dev_priv->relative_constants_mode) {
517 ret = intel_logical_ring_begin(ringbuf, 4);
518 if (ret)
519 return ret;
520
521 intel_logical_ring_emit(ringbuf, MI_NOOP);
522 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
523 intel_logical_ring_emit(ringbuf, INSTPM);
524 intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
525 intel_logical_ring_advance(ringbuf);
526
527 dev_priv->relative_constants_mode = instp_mode;
528 }
529
530 ret = ring->emit_bb_start(ringbuf, exec_start, flags);
531 if (ret)
532 return ret;
533
534 i915_gem_execbuffer_move_to_active(vmas, ring);
535 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
536
537 return 0;
538}
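/*
 * The submission path above mirrors the legacy execbuffer one: validate the
 * constants mode, cliprect and SOL-reset flags, sync and flush the objects
 * via execlists_move_to_gpu(), emit an INSTPM update if the relative
 * constants mode changed (RCS only), then emit the batch start and do the
 * usual move-to-active/retire bookkeeping.
 */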
539
540void intel_logical_ring_stop(struct intel_engine_cs *ring)
541{
542 struct drm_i915_private *dev_priv = ring->dev->dev_private;
543 int ret;
544
545 if (!intel_ring_initialized(ring))
546 return;
547
548 ret = intel_ring_idle(ring);
549 if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
550 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
551 ring->name, ret);
552
553 /* TODO: Is this correct with Execlists enabled? */
554 I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
555 if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
556		DRM_ERROR("%s: timed out trying to stop ring\n", ring->name);
557 return;
558 }
559 I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
560}
561
562int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf)
563{
564 struct intel_engine_cs *ring = ringbuf->ring;
565 int ret;
566
567 if (!ring->gpu_caches_dirty)
568 return 0;
569
570 ret = ring->emit_flush(ringbuf, 0, I915_GEM_GPU_DOMAINS);
571 if (ret)
572 return ret;
573
574 ring->gpu_caches_dirty = false;
575 return 0;
576}
577
578void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf)
579{
580 struct intel_engine_cs *ring = ringbuf->ring;
581 struct intel_context *ctx = ringbuf->FIXME_lrc_ctx;
582
583 intel_logical_ring_advance(ringbuf);
584
585	if (intel_ring_stopped(ring))
586 return;
587
588	execlists_context_queue(ring, ctx, ringbuf->tail);
589}
590
591static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
592 struct intel_context *ctx)
593{
594 if (ring->outstanding_lazy_seqno)
595 return 0;
596
597 if (ring->preallocated_lazy_request == NULL) {
598 struct drm_i915_gem_request *request;
599
600 request = kmalloc(sizeof(*request), GFP_KERNEL);
601 if (request == NULL)
602 return -ENOMEM;
603
604 /* Hold a reference to the context this request belongs to
605 * (we will need it when the time comes to emit/retire the
606 * request).
607 */
608 request->ctx = ctx;
609 i915_gem_context_reference(request->ctx);
610
611 ring->preallocated_lazy_request = request;
612 }
613
614 return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
615}
616
617static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
618 int bytes)
619{
620 struct intel_engine_cs *ring = ringbuf->ring;
621 struct drm_i915_gem_request *request;
622 u32 seqno = 0;
623 int ret;
624
625 if (ringbuf->last_retired_head != -1) {
626 ringbuf->head = ringbuf->last_retired_head;
627 ringbuf->last_retired_head = -1;
628
629 ringbuf->space = intel_ring_space(ringbuf);
630 if (ringbuf->space >= bytes)
631 return 0;
632 }
633
634 list_for_each_entry(request, &ring->request_list, list) {
635 if (__intel_ring_space(request->tail, ringbuf->tail,
636 ringbuf->size) >= bytes) {
637 seqno = request->seqno;
638 break;
639 }
640 }
641
642 if (seqno == 0)
643 return -ENOSPC;
644
645 ret = i915_wait_seqno(ring, seqno);
646 if (ret)
647 return ret;
648
649 i915_gem_retire_requests_ring(ring);
650 ringbuf->head = ringbuf->last_retired_head;
651 ringbuf->last_retired_head = -1;
652
653 ringbuf->space = intel_ring_space(ringbuf);
654 return 0;
655}
656
657static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
658 int bytes)
659{
660 struct intel_engine_cs *ring = ringbuf->ring;
661 struct drm_device *dev = ring->dev;
662 struct drm_i915_private *dev_priv = dev->dev_private;
663 unsigned long end;
664 int ret;
665
666 ret = logical_ring_wait_request(ringbuf, bytes);
667 if (ret != -ENOSPC)
668 return ret;
669
670 /* Force the context submission in case we have been skipping it */
671 intel_logical_ring_advance_and_submit(ringbuf);
672
673 /* With GEM the hangcheck timer should kick us out of the loop,
674 * leaving it early runs the risk of corrupting GEM state (due
675 * to running on almost untested codepaths). But on resume
676 * timers don't work yet, so prevent a complete hang in that
677 * case by choosing an insanely large timeout. */
678 end = jiffies + 60 * HZ;
679
680 do {
681 ringbuf->head = I915_READ_HEAD(ring);
682 ringbuf->space = intel_ring_space(ringbuf);
683 if (ringbuf->space >= bytes) {
684 ret = 0;
685 break;
686 }
687
688 msleep(1);
689
690 if (dev_priv->mm.interruptible && signal_pending(current)) {
691 ret = -ERESTARTSYS;
692 break;
693 }
694
695 ret = i915_gem_check_wedge(&dev_priv->gpu_error,
696 dev_priv->mm.interruptible);
697 if (ret)
698 break;
699
700 if (time_after(jiffies, end)) {
701 ret = -EBUSY;
702 break;
703 }
704 } while (1);
705
706 return ret;
707}
708
709static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf)
710{
711 uint32_t __iomem *virt;
712 int rem = ringbuf->size - ringbuf->tail;
713
714 if (ringbuf->space < rem) {
715 int ret = logical_ring_wait_for_space(ringbuf, rem);
716
717 if (ret)
718 return ret;
719 }
720
721 virt = ringbuf->virtual_start + ringbuf->tail;
722 rem /= 4;
723 while (rem--)
724 iowrite32(MI_NOOP, virt++);
725
726 ringbuf->tail = 0;
727 ringbuf->space = intel_ring_space(ringbuf);
728
729 return 0;
730}
731
732static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes)
733{
734 int ret;
735
736 if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
737 ret = logical_ring_wrap_buffer(ringbuf);
738 if (unlikely(ret))
739 return ret;
740 }
741
742 if (unlikely(ringbuf->space < bytes)) {
743 ret = logical_ring_wait_for_space(ringbuf, bytes);
744 if (unlikely(ret))
745 return ret;
746 }
747
748 return 0;
749}
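/*
 * Space management above: logical_ring_wait_request() first tries to free
 * space by waiting on an already-submitted request, and only then does
 * logical_ring_wait_for_space() fall back to polling the hardware head
 * (with a 60 second timeout). A wrap simply pads the tail with MI_NOOPs.
 */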
750
751int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
752{
753 struct intel_engine_cs *ring = ringbuf->ring;
754 struct drm_device *dev = ring->dev;
755 struct drm_i915_private *dev_priv = dev->dev_private;
756 int ret;
757
758 ret = i915_gem_check_wedge(&dev_priv->gpu_error,
759 dev_priv->mm.interruptible);
760 if (ret)
761 return ret;
762
763 ret = logical_ring_prepare(ringbuf, num_dwords * sizeof(uint32_t));
764 if (ret)
765 return ret;
766
767 /* Preallocate the olr before touching the ring */
768	ret = logical_ring_alloc_seqno(ring, ringbuf->FIXME_lrc_ctx);
769 if (ret)
770 return ret;
771
772 ringbuf->space -= num_dwords * sizeof(uint32_t);
773 return 0;
774}
775
776static int gen8_init_common_ring(struct intel_engine_cs *ring)
777{
778 struct drm_device *dev = ring->dev;
779 struct drm_i915_private *dev_priv = dev->dev_private;
780
781 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
782 I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
783
784 I915_WRITE(RING_MODE_GEN7(ring),
785 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
786 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
787 POSTING_READ(RING_MODE_GEN7(ring));
788 DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);
789
790 memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
791
792 return 0;
793}
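/*
 * GFX_RUN_LIST_ENABLE in RING_MODE_GEN7 is what actually puts the ring into
 * execlist mode. The IMR write unmasks both the user interrupt and the
 * interrupts in irq_keep_mask (i.e. the context switch interrupt).
 */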
794
795static int gen8_init_render_ring(struct intel_engine_cs *ring)
796{
797 struct drm_device *dev = ring->dev;
798 struct drm_i915_private *dev_priv = dev->dev_private;
799 int ret;
800
801 ret = gen8_init_common_ring(ring);
802 if (ret)
803 return ret;
804
805 /* We need to disable the AsyncFlip performance optimisations in order
806 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
807 * programmed to '1' on all products.
808 *
809 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
810 */
811 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
812
813 ret = intel_init_pipe_control(ring);
814 if (ret)
815 return ret;
816
817 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
818
819 return ret;
820}
821
822static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
823 u64 offset, unsigned flags)
824{
825 bool ppgtt = !(flags & I915_DISPATCH_SECURE);
826 int ret;
827
828 ret = intel_logical_ring_begin(ringbuf, 4);
829 if (ret)
830 return ret;
831
832 /* FIXME(BDW): Address space and security selectors. */
833 intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
834 intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
835 intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
836 intel_logical_ring_emit(ringbuf, MI_NOOP);
837 intel_logical_ring_advance(ringbuf);
838
839 return 0;
840}
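/*
 * Per the FIXME above, address space and security selection is still
 * provisional here: bit 8 of MI_BATCH_BUFFER_START_GEN8 (the ppgtt flag)
 * appears to select the PPGTT address space for non-secure batches, and the
 * 64-bit batch address is split across the next two dwords.
 */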
841
842static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
843{
844 struct drm_device *dev = ring->dev;
845 struct drm_i915_private *dev_priv = dev->dev_private;
846 unsigned long flags;
847
848 if (!dev->irq_enabled)
849 return false;
850
851 spin_lock_irqsave(&dev_priv->irq_lock, flags);
852 if (ring->irq_refcount++ == 0) {
853 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
854 POSTING_READ(RING_IMR(ring->mmio_base));
855 }
856 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
857
858 return true;
859}
860
861static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
862{
863 struct drm_device *dev = ring->dev;
864 struct drm_i915_private *dev_priv = dev->dev_private;
865 unsigned long flags;
866
867 spin_lock_irqsave(&dev_priv->irq_lock, flags);
868 if (--ring->irq_refcount == 0) {
869 I915_WRITE_IMR(ring, ~ring->irq_keep_mask);
870 POSTING_READ(RING_IMR(ring->mmio_base));
871 }
872 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
873}
874
875static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
876 u32 invalidate_domains,
877 u32 unused)
878{
879 struct intel_engine_cs *ring = ringbuf->ring;
880 struct drm_device *dev = ring->dev;
881 struct drm_i915_private *dev_priv = dev->dev_private;
882 uint32_t cmd;
883 int ret;
884
885 ret = intel_logical_ring_begin(ringbuf, 4);
886 if (ret)
887 return ret;
888
889 cmd = MI_FLUSH_DW + 1;
890
891 if (ring == &dev_priv->ring[VCS]) {
892 if (invalidate_domains & I915_GEM_GPU_DOMAINS)
893 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
894 MI_FLUSH_DW_STORE_INDEX |
895 MI_FLUSH_DW_OP_STOREDW;
896 } else {
897 if (invalidate_domains & I915_GEM_DOMAIN_RENDER)
898 cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
899 MI_FLUSH_DW_OP_STOREDW;
900 }
901
902 intel_logical_ring_emit(ringbuf, cmd);
903 intel_logical_ring_emit(ringbuf,
904 I915_GEM_HWS_SCRATCH_ADDR |
905 MI_FLUSH_DW_USE_GTT);
906 intel_logical_ring_emit(ringbuf, 0); /* upper addr */
907 intel_logical_ring_emit(ringbuf, 0); /* value */
908 intel_logical_ring_advance(ringbuf);
909
910 return 0;
911}
912
913static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
914 u32 invalidate_domains,
915 u32 flush_domains)
916{
917 struct intel_engine_cs *ring = ringbuf->ring;
918 u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
919 u32 flags = 0;
920 int ret;
921
922 flags |= PIPE_CONTROL_CS_STALL;
923
924 if (flush_domains) {
925 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
926 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
927 }
928
929 if (invalidate_domains) {
930 flags |= PIPE_CONTROL_TLB_INVALIDATE;
931 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
932 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
933 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
934 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
935 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
936 flags |= PIPE_CONTROL_QW_WRITE;
937 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
938 }
939
940 ret = intel_logical_ring_begin(ringbuf, 6);
941 if (ret)
942 return ret;
943
944 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
945 intel_logical_ring_emit(ringbuf, flags);
946 intel_logical_ring_emit(ringbuf, scratch_addr);
947 intel_logical_ring_emit(ringbuf, 0);
948 intel_logical_ring_emit(ringbuf, 0);
949 intel_logical_ring_emit(ringbuf, 0);
950 intel_logical_ring_advance(ringbuf);
951
952 return 0;
953}
954
955static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
956{
957 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
958}
959
960static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
961{
962 intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
963}
964
965static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
966{
967 struct intel_engine_cs *ring = ringbuf->ring;
968 u32 cmd;
969 int ret;
970
971 ret = intel_logical_ring_begin(ringbuf, 6);
972 if (ret)
973 return ret;
974
975 cmd = MI_STORE_DWORD_IMM_GEN8;
976 cmd |= MI_GLOBAL_GTT;
977
978 intel_logical_ring_emit(ringbuf, cmd);
979 intel_logical_ring_emit(ringbuf,
980 (ring->status_page.gfx_addr +
981 (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
982 intel_logical_ring_emit(ringbuf, 0);
983 intel_logical_ring_emit(ringbuf, ring->outstanding_lazy_seqno);
984 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
985 intel_logical_ring_emit(ringbuf, MI_NOOP);
986 intel_logical_ring_advance_and_submit(ringbuf);
987
988 return 0;
989}
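/*
 * The request above stores the seqno into the hardware status page at
 * I915_GEM_HWS_INDEX with MI_STORE_DWORD_IMM_GEN8 and raises
 * MI_USER_INTERRUPT; intel_logical_ring_advance_and_submit() then queues
 * the context so the writes actually reach the hardware.
 */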
990
991void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
992{
993 struct drm_i915_private *dev_priv = ring->dev->dev_private;
994
995 if (!intel_ring_initialized(ring))
996 return;
997
998 intel_logical_ring_stop(ring);
999 WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
1000 ring->preallocated_lazy_request = NULL;
1001 ring->outstanding_lazy_seqno = 0;
1002
1003 if (ring->cleanup)
1004 ring->cleanup(ring);
1005
1006 i915_cmd_parser_fini_ring(ring);
1007
1008 if (ring->status_page.obj) {
1009 kunmap(sg_page(ring->status_page.obj->pages->sgl));
1010 ring->status_page.obj = NULL;
1011 }
1012}
1013
1014static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
1015{
1016 int ret;
1017 struct intel_context *dctx = ring->default_context;
1018 struct drm_i915_gem_object *dctx_obj;
1019
1020 /* Intentionally left blank. */
1021 ring->buffer = NULL;
1022
1023 ring->dev = dev;
1024 INIT_LIST_HEAD(&ring->active_list);
1025 INIT_LIST_HEAD(&ring->request_list);
1026 init_waitqueue_head(&ring->irq_queue);
1027
1028 INIT_LIST_HEAD(&ring->execlist_queue);
1029 spin_lock_init(&ring->execlist_lock);
1030	ring->next_context_status_buffer = 0;
1031
1032 ret = intel_lr_context_deferred_create(dctx, ring);
1033 if (ret)
1034 return ret;
1035
1036 /* The status page is offset 0 from the context object in LRCs. */
1037 dctx_obj = dctx->engine[ring->id].state;
1038 ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj);
1039 ring->status_page.page_addr = kmap(sg_page(dctx_obj->pages->sgl));
1040 if (ring->status_page.page_addr == NULL)
1041 return -ENOMEM;
1042 ring->status_page.obj = dctx_obj;
1043
1044 ret = i915_cmd_parser_init_ring(ring);
1045 if (ret)
1046 return ret;
1047
1048 if (ring->init) {
1049 ret = ring->init(ring);
1050 if (ret)
1051 return ret;
1052 }
1053
1054 return 0;
1055}
1056
1057static int logical_render_ring_init(struct drm_device *dev)
1058{
1059 struct drm_i915_private *dev_priv = dev->dev_private;
1060 struct intel_engine_cs *ring = &dev_priv->ring[RCS];
1061
1062 ring->name = "render ring";
1063 ring->id = RCS;
1064 ring->mmio_base = RENDER_RING_BASE;
1065 ring->irq_enable_mask =
1066 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
1067 ring->irq_keep_mask =
1068 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
1069 if (HAS_L3_DPF(dev))
1070 ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
1071
1072 ring->init = gen8_init_render_ring;
1073 ring->cleanup = intel_fini_pipe_control;
1074 ring->get_seqno = gen8_get_seqno;
1075 ring->set_seqno = gen8_set_seqno;
1076	ring->emit_request = gen8_emit_request;
1077	ring->emit_flush = gen8_emit_flush_render;
1078 ring->irq_get = gen8_logical_ring_get_irq;
1079 ring->irq_put = gen8_logical_ring_put_irq;
1080	ring->emit_bb_start = gen8_emit_bb_start;
1081
1082 return logical_ring_init(dev, ring);
1083}
1084
1085static int logical_bsd_ring_init(struct drm_device *dev)
1086{
1087 struct drm_i915_private *dev_priv = dev->dev_private;
1088 struct intel_engine_cs *ring = &dev_priv->ring[VCS];
1089
1090 ring->name = "bsd ring";
1091 ring->id = VCS;
1092 ring->mmio_base = GEN6_BSD_RING_BASE;
1093 ring->irq_enable_mask =
1094 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
1095 ring->irq_keep_mask =
1096 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
1097
1098	ring->init = gen8_init_common_ring;
1099 ring->get_seqno = gen8_get_seqno;
1100 ring->set_seqno = gen8_set_seqno;
1101	ring->emit_request = gen8_emit_request;
1102	ring->emit_flush = gen8_emit_flush;
1103 ring->irq_get = gen8_logical_ring_get_irq;
1104 ring->irq_put = gen8_logical_ring_put_irq;
1105	ring->emit_bb_start = gen8_emit_bb_start;
1106
1107 return logical_ring_init(dev, ring);
1108}
1109
1110static int logical_bsd2_ring_init(struct drm_device *dev)
1111{
1112 struct drm_i915_private *dev_priv = dev->dev_private;
1113 struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
1114
1115	ring->name = "bsd2 ring";
1116 ring->id = VCS2;
1117 ring->mmio_base = GEN8_BSD2_RING_BASE;
1118 ring->irq_enable_mask =
1119 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
1120 ring->irq_keep_mask =
1121 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
1122
1123	ring->init = gen8_init_common_ring;
1124 ring->get_seqno = gen8_get_seqno;
1125 ring->set_seqno = gen8_set_seqno;
1126	ring->emit_request = gen8_emit_request;
1127	ring->emit_flush = gen8_emit_flush;
1128 ring->irq_get = gen8_logical_ring_get_irq;
1129 ring->irq_put = gen8_logical_ring_put_irq;
1130	ring->emit_bb_start = gen8_emit_bb_start;
1131
1132 return logical_ring_init(dev, ring);
1133}
1134
1135static int logical_blt_ring_init(struct drm_device *dev)
1136{
1137 struct drm_i915_private *dev_priv = dev->dev_private;
1138 struct intel_engine_cs *ring = &dev_priv->ring[BCS];
1139
1140 ring->name = "blitter ring";
1141 ring->id = BCS;
1142 ring->mmio_base = BLT_RING_BASE;
1143 ring->irq_enable_mask =
1144 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
1145 ring->irq_keep_mask =
1146 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
1147
1148	ring->init = gen8_init_common_ring;
1149 ring->get_seqno = gen8_get_seqno;
1150 ring->set_seqno = gen8_set_seqno;
1151	ring->emit_request = gen8_emit_request;
1152	ring->emit_flush = gen8_emit_flush;
1153 ring->irq_get = gen8_logical_ring_get_irq;
1154 ring->irq_put = gen8_logical_ring_put_irq;
1155	ring->emit_bb_start = gen8_emit_bb_start;
1156
1157 return logical_ring_init(dev, ring);
1158}
1159
1160static int logical_vebox_ring_init(struct drm_device *dev)
1161{
1162 struct drm_i915_private *dev_priv = dev->dev_private;
1163 struct intel_engine_cs *ring = &dev_priv->ring[VECS];
1164
1165 ring->name = "video enhancement ring";
1166 ring->id = VECS;
1167 ring->mmio_base = VEBOX_RING_BASE;
1168 ring->irq_enable_mask =
1169 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
1170 ring->irq_keep_mask =
1171 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
1172
1173	ring->init = gen8_init_common_ring;
1174 ring->get_seqno = gen8_get_seqno;
1175 ring->set_seqno = gen8_set_seqno;
1176	ring->emit_request = gen8_emit_request;
1177	ring->emit_flush = gen8_emit_flush;
1178 ring->irq_get = gen8_logical_ring_get_irq;
1179 ring->irq_put = gen8_logical_ring_put_irq;
1180	ring->emit_bb_start = gen8_emit_bb_start;
1181
1182 return logical_ring_init(dev, ring);
1183}
1184
1185int intel_logical_rings_init(struct drm_device *dev)
1186{
1187 struct drm_i915_private *dev_priv = dev->dev_private;
1188 int ret;
1189
1190 ret = logical_render_ring_init(dev);
1191 if (ret)
1192 return ret;
1193
1194 if (HAS_BSD(dev)) {
1195 ret = logical_bsd_ring_init(dev);
1196 if (ret)
1197 goto cleanup_render_ring;
1198 }
1199
1200 if (HAS_BLT(dev)) {
1201 ret = logical_blt_ring_init(dev);
1202 if (ret)
1203 goto cleanup_bsd_ring;
1204 }
1205
1206 if (HAS_VEBOX(dev)) {
1207 ret = logical_vebox_ring_init(dev);
1208 if (ret)
1209 goto cleanup_blt_ring;
1210 }
1211
1212 if (HAS_BSD2(dev)) {
1213 ret = logical_bsd2_ring_init(dev);
1214 if (ret)
1215 goto cleanup_vebox_ring;
1216 }
1217
1218 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
1219 if (ret)
1220 goto cleanup_bsd2_ring;
1221
1222 return 0;
1223
1224cleanup_bsd2_ring:
1225 intel_logical_ring_cleanup(&dev_priv->ring[VCS2]);
1226cleanup_vebox_ring:
1227 intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
1228cleanup_blt_ring:
1229 intel_logical_ring_cleanup(&dev_priv->ring[BCS]);
1230cleanup_bsd_ring:
1231 intel_logical_ring_cleanup(&dev_priv->ring[VCS]);
1232cleanup_render_ring:
1233 intel_logical_ring_cleanup(&dev_priv->ring[RCS]);
1234
1235 return ret;
1236}
1237
1238static int
1239populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
1240 struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
1241{
1242 struct drm_i915_gem_object *ring_obj = ringbuf->obj;
1243	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
1244 struct page *page;
1245 uint32_t *reg_state;
1246 int ret;
1247
1248 ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
1249 if (ret) {
1250 DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
1251 return ret;
1252 }
1253
1254 ret = i915_gem_object_get_pages(ctx_obj);
1255 if (ret) {
1256 DRM_DEBUG_DRIVER("Could not get object pages\n");
1257 return ret;
1258 }
1259
1260 i915_gem_object_pin_pages(ctx_obj);
1261
1262 /* The second page of the context object contains some fields which must
1263 * be set up prior to the first execution. */
1264 page = i915_gem_object_get_page(ctx_obj, 1);
1265 reg_state = kmap_atomic(page);
1266
1267 /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
1268 * commands followed by (reg, value) pairs. The values we are setting here are
1269 * only for the first context restore: on a subsequent save, the GPU will
1270 * recreate this batchbuffer with new values (including all the missing
1271 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
1272 if (ring->id == RCS)
1273 reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(14);
1274 else
1275 reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(11);
1276 reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED;
1277 reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
1278 reg_state[CTX_CONTEXT_CONTROL+1] =
1279 _MASKED_BIT_ENABLE((1<<3) | MI_RESTORE_INHIBIT);
1280 reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
1281 reg_state[CTX_RING_HEAD+1] = 0;
1282 reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
1283 reg_state[CTX_RING_TAIL+1] = 0;
1284 reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
1285 reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
1286 reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
1287 reg_state[CTX_RING_BUFFER_CONTROL+1] =
1288 ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
1289 reg_state[CTX_BB_HEAD_U] = ring->mmio_base + 0x168;
1290 reg_state[CTX_BB_HEAD_U+1] = 0;
1291 reg_state[CTX_BB_HEAD_L] = ring->mmio_base + 0x140;
1292 reg_state[CTX_BB_HEAD_L+1] = 0;
1293 reg_state[CTX_BB_STATE] = ring->mmio_base + 0x110;
1294 reg_state[CTX_BB_STATE+1] = (1<<5);
1295 reg_state[CTX_SECOND_BB_HEAD_U] = ring->mmio_base + 0x11c;
1296 reg_state[CTX_SECOND_BB_HEAD_U+1] = 0;
1297 reg_state[CTX_SECOND_BB_HEAD_L] = ring->mmio_base + 0x114;
1298 reg_state[CTX_SECOND_BB_HEAD_L+1] = 0;
1299 reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
1300 reg_state[CTX_SECOND_BB_STATE+1] = 0;
1301 if (ring->id == RCS) {
1302 /* TODO: according to BSpec, the register state context
1303 * for CHV does not have these. OTOH, these registers do
1304 * exist in CHV. I'm waiting for a clarification */
1305 reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
1306 reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
1307 reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
1308 reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
1309 reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
1310 reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
1311 }
1312 reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
1313 reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
1314 reg_state[CTX_CTX_TIMESTAMP] = ring->mmio_base + 0x3a8;
1315 reg_state[CTX_CTX_TIMESTAMP+1] = 0;
1316 reg_state[CTX_PDP3_UDW] = GEN8_RING_PDP_UDW(ring, 3);
1317 reg_state[CTX_PDP3_LDW] = GEN8_RING_PDP_LDW(ring, 3);
1318 reg_state[CTX_PDP2_UDW] = GEN8_RING_PDP_UDW(ring, 2);
1319 reg_state[CTX_PDP2_LDW] = GEN8_RING_PDP_LDW(ring, 2);
1320 reg_state[CTX_PDP1_UDW] = GEN8_RING_PDP_UDW(ring, 1);
1321 reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
1322 reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
1323 reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
1324 reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[3]);
1325 reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[3]);
1326 reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[2]);
1327 reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[2]);
1328 reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[1]);
1329 reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[1]);
1330 reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[0]);
1331 reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[0]);
1332 if (ring->id == RCS) {
1333 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
1334 reg_state[CTX_R_PWR_CLK_STATE] = 0x20c8;
1335 reg_state[CTX_R_PWR_CLK_STATE+1] = 0;
1336 }
1337
1338 kunmap_atomic(reg_state);
1339
1340 ctx_obj->dirty = 1;
1341 set_page_dirty(page);
1342 i915_gem_object_unpin_pages(ctx_obj);
1343
1344 return 0;
1345}
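/*
 * Note how the register state image above pairs each register offset
 * (reg_state[CTX_xxx]) with its value (reg_state[CTX_xxx + 1]). The ring
 * buffer start/control and the four PDP entries of the PPGTT are among the
 * fields that have to be valid before the context is submitted the first
 * time.
 */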
1346
1347void intel_lr_context_free(struct intel_context *ctx)
1348{
1349 int i;
1350
1351 for (i = 0; i < I915_NUM_RINGS; i++) {
1352 struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
1353 struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
1354
1355		if (ctx_obj) {
1356 intel_destroy_ringbuffer_obj(ringbuf);
1357 kfree(ringbuf);
1358 i915_gem_object_ggtt_unpin(ctx_obj);
1359 drm_gem_object_unreference(&ctx_obj->base);
1360 }
1361 }
1362}
1363
1364static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
1365{
1366 int ret = 0;
1367
1368 WARN_ON(INTEL_INFO(ring->dev)->gen != 8);
1369
1370 switch (ring->id) {
1371 case RCS:
1372 ret = GEN8_LR_CONTEXT_RENDER_SIZE;
1373 break;
1374 case VCS:
1375 case BCS:
1376 case VECS:
1377 case VCS2:
1378 ret = GEN8_LR_CONTEXT_OTHER_SIZE;
1379 break;
1380 }
1381
1382 return ret;
1383}
1384
1385int intel_lr_context_deferred_create(struct intel_context *ctx,
1386 struct intel_engine_cs *ring)
1387{
1388 struct drm_device *dev = ring->dev;
1389 struct drm_i915_gem_object *ctx_obj;
1390 uint32_t context_size;
1391	struct intel_ringbuffer *ringbuf;
1392 int ret;
1393
1394	WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
1395 if (ctx->engine[ring->id].state)
1396 return 0;
1397
1398 context_size = round_up(get_lr_context_size(ring), 4096);
1399
1400 ctx_obj = i915_gem_alloc_context_obj(dev, context_size);
1401 if (IS_ERR(ctx_obj)) {
1402 ret = PTR_ERR(ctx_obj);
1403 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed: %d\n", ret);
1404 return ret;
1405 }
1406
1407 ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
1408 if (ret) {
1409 DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", ret);
1410 drm_gem_object_unreference(&ctx_obj->base);
1411 return ret;
1412 }
1413
1414 ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
1415 if (!ringbuf) {
1416 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
1417 ring->name);
1418 i915_gem_object_ggtt_unpin(ctx_obj);
1419 drm_gem_object_unreference(&ctx_obj->base);
1420 ret = -ENOMEM;
1421 return ret;
1422 }
1423
1424	ringbuf->ring = ring;
1425 ringbuf->FIXME_lrc_ctx = ctx;
1426
1427 ringbuf->size = 32 * PAGE_SIZE;
1428 ringbuf->effective_size = ringbuf->size;
1429 ringbuf->head = 0;
1430 ringbuf->tail = 0;
1431 ringbuf->space = ringbuf->size;
1432 ringbuf->last_retired_head = -1;
1433
1434 /* TODO: For now we put this in the mappable region so that we can reuse
1435 * the existing ringbuffer code which ioremaps it. When we start
1436 * creating many contexts, this will no longer work and we must switch
1437 * to a kmapish interface.
1438 */
1439 ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
1440 if (ret) {
1441 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n",
1442 ring->name, ret);
1443 goto error;
1444 }
1445
1446 ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
1447 if (ret) {
1448 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
1449 intel_destroy_ringbuffer_obj(ringbuf);
1450 goto error;
1451 }
1452
1453 ctx->engine[ring->id].ringbuf = ringbuf;
1454	ctx->engine[ring->id].state = ctx_obj;
1455
1456 return 0;
1457
1458error:
1459 kfree(ringbuf);
1460 i915_gem_object_ggtt_unpin(ctx_obj);
1461 drm_gem_object_unreference(&ctx_obj->base);
1462 return ret;
1463}