/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 *
 */
/*
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy ringbuffer-based method).
 */
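/*
 * Note: with Execlists, work is submitted by writing context descriptors to
 * an engine's ExecList Submission Port (the RING_ELSP register defined
 * below), and the hardware switches between the submitted contexts on its
 * own, rather than by software emitting commands into a single shared ring.
 */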
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_ALIGN 4096
#define RING_ELSP(ring)			((ring)->mmio_base+0x230)
#define RING_CONTEXT_CONTROL(ring)	((ring)->mmio_base+0x244)
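/*
 * The CTX_* values below are dword offsets into the register state page of
 * a logical ring context (the second page of its backing object); see
 * populate_lr_context() for how they are filled in.
 */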
#define CTX_LRI_HEADER_0		0x01
#define CTX_CONTEXT_CONTROL		0x02
#define CTX_RING_HEAD			0x04
#define CTX_RING_TAIL			0x06
#define CTX_RING_BUFFER_START		0x08
#define CTX_RING_BUFFER_CONTROL		0x0a
#define CTX_BB_HEAD_U			0x0c
#define CTX_BB_HEAD_L			0x0e
#define CTX_BB_STATE			0x10
#define CTX_SECOND_BB_HEAD_U		0x12
#define CTX_SECOND_BB_HEAD_L		0x14
#define CTX_SECOND_BB_STATE		0x16
#define CTX_BB_PER_CTX_PTR		0x18
#define CTX_RCS_INDIRECT_CTX		0x1a
#define CTX_RCS_INDIRECT_CTX_OFFSET	0x1c
#define CTX_LRI_HEADER_1		0x21
#define CTX_CTX_TIMESTAMP		0x22
#define CTX_PDP3_UDW			0x24
#define CTX_PDP3_LDW			0x26
#define CTX_PDP2_UDW			0x28
#define CTX_PDP2_LDW			0x2a
#define CTX_PDP1_UDW			0x2c
#define CTX_PDP1_LDW			0x2e
#define CTX_PDP0_UDW			0x30
#define CTX_PDP0_LDW			0x32
#define CTX_LRI_HEADER_2		0x41
#define CTX_R_PWR_CLK_STATE		0x42
#define CTX_GPGPU_CSR_BASE_ADDRESS	0x44
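/*
 * Sanity-check helper for the execlists enable knob (presumably wired to
 * the i915.enable_execlists module parameter): Execlists are only allowed
 * when the hardware exposes logical ring contexts and the driver is
 * actually using PPGTT.
 */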
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
{
	WARN_ON(i915.enable_ppgtt == -1);

	if (enable_execlists == 0)
		return 0;

	if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev))
		return 1;

	return 0;
}
int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
			       struct intel_engine_cs *ring,
			       struct intel_context *ctx,
			       struct drm_i915_gem_execbuffer2 *args,
			       struct list_head *vmas,
			       struct drm_i915_gem_object *batch_obj,
			       u64 exec_start, u32 flags)
{
	/* TODO */
	return 0;
}
void intel_logical_ring_stop(struct intel_engine_cs *ring)
{
	/* TODO */
}
void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
{
	/* TODO */
}
static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
{
	/* TODO */
	return 0;
}
static int logical_render_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[RCS];

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT;

	return logical_ring_init(dev, ring);
}
static int logical_bsd_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS];

	ring->name = "bsd ring";
	ring->id = VCS;
	ring->mmio_base = GEN6_BSD_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;

	return logical_ring_init(dev, ring);
}
static int logical_bsd2_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS2];

	ring->name = "bsd2 ring";
	ring->id = VCS2;
	ring->mmio_base = GEN8_BSD2_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;

	return logical_ring_init(dev, ring);
}
static int logical_blt_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[BCS];

	ring->name = "blitter ring";
	ring->id = BCS;
	ring->mmio_base = BLT_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;

	return logical_ring_init(dev, ring);
}
static int logical_vebox_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VECS];

	ring->name = "video enhancement ring";
	ring->id = VECS;
	ring->mmio_base = VEBOX_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;

	return logical_ring_init(dev, ring);
}
int intel_logical_rings_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = logical_render_ring_init(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = logical_bsd_ring_init(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (HAS_BLT(dev)) {
		ret = logical_blt_ring_init(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	if (HAS_VEBOX(dev)) {
		ret = logical_vebox_ring_init(dev);
		if (ret)
			goto cleanup_blt_ring;
	}

	if (HAS_BSD2(dev)) {
		ret = logical_bsd2_ring_init(dev);
		if (ret)
			goto cleanup_vebox_ring;
	}

	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
	if (ret)
		goto cleanup_bsd2_ring;

	return 0;

cleanup_bsd2_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VCS2]);
cleanup_vebox_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
cleanup_blt_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[BCS]);
cleanup_bsd_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VCS]);
cleanup_render_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[RCS]);

	return ret;
}
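/*
 * populate_lr_context() writes the first-restore register state for a
 * logical ring context: it maps the second page of the context backing
 * object and fills in MI_LOAD_REGISTER_IMM headers plus (reg, value) pairs
 * at the CTX_* dword offsets defined near the top of this file.
 */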
static int
populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
		    struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
{
	struct drm_i915_gem_object *ring_obj = ringbuf->obj;
	struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx);
	struct page *page;
	uint32_t *reg_state;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
	if (ret) {
		DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
		return ret;
	}

	ret = i915_gem_object_get_pages(ctx_obj);
	if (ret) {
		DRM_DEBUG_DRIVER("Could not get object pages\n");
		return ret;
	}

	i915_gem_object_pin_pages(ctx_obj);
	/* The second page of the context object contains some fields which must
	 * be set up prior to the first execution. */
	page = i915_gem_object_get_page(ctx_obj, 1);
	reg_state = kmap_atomic(page);

	/* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
	 * commands followed by (reg, value) pairs. The values we are setting here are
	 * only for the first context restore: on a subsequent save, the GPU will
	 * recreate this batchbuffer with new values (including all the missing
	 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
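	/* For illustration, the start of the resulting dword layout is
	 * (offsets per the CTX_* defines; register offsets relative to the
	 * engine's mmio_base):
	 *
	 *   [0x01] MI_LOAD_REGISTER_IMM(n) | MI_LRI_FORCE_POSTED
	 *   [0x02] CONTEXT_CONTROL reg      [0x03] value
	 *   [0x04] RING_HEAD reg            [0x05] value
	 *   [0x06] RING_TAIL reg            [0x07] value
	 *   ...
	 */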
	if (ring->id == RCS)
		reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(14);
	else
		reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(11);
	reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED;
	reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
	reg_state[CTX_CONTEXT_CONTROL+1] =
			_MASKED_BIT_ENABLE((1<<3) | MI_RESTORE_INHIBIT);
	reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
	reg_state[CTX_RING_HEAD+1] = 0;
	reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
	reg_state[CTX_RING_TAIL+1] = 0;
	reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
	reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
	reg_state[CTX_RING_BUFFER_CONTROL+1] =
			((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
	reg_state[CTX_BB_HEAD_U] = ring->mmio_base + 0x168;
	reg_state[CTX_BB_HEAD_U+1] = 0;
	reg_state[CTX_BB_HEAD_L] = ring->mmio_base + 0x140;
	reg_state[CTX_BB_HEAD_L+1] = 0;
	reg_state[CTX_BB_STATE] = ring->mmio_base + 0x110;
	reg_state[CTX_BB_STATE+1] = (1<<5);
	reg_state[CTX_SECOND_BB_HEAD_U] = ring->mmio_base + 0x11c;
	reg_state[CTX_SECOND_BB_HEAD_U+1] = 0;
	reg_state[CTX_SECOND_BB_HEAD_L] = ring->mmio_base + 0x114;
	reg_state[CTX_SECOND_BB_HEAD_L+1] = 0;
	reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
	reg_state[CTX_SECOND_BB_STATE+1] = 0;
	if (ring->id == RCS) {
		/* TODO: according to BSpec, the register state context
		 * for CHV does not have these. OTOH, these registers do
		 * exist in CHV. I'm waiting for a clarification */
		reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
		reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
		reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
		reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
	}
	reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
	reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
	reg_state[CTX_CTX_TIMESTAMP] = ring->mmio_base + 0x3a8;
	reg_state[CTX_CTX_TIMESTAMP+1] = 0;
	reg_state[CTX_PDP3_UDW] = GEN8_RING_PDP_UDW(ring, 3);
	reg_state[CTX_PDP3_LDW] = GEN8_RING_PDP_LDW(ring, 3);
	reg_state[CTX_PDP2_UDW] = GEN8_RING_PDP_UDW(ring, 2);
	reg_state[CTX_PDP2_LDW] = GEN8_RING_PDP_LDW(ring, 2);
	reg_state[CTX_PDP1_UDW] = GEN8_RING_PDP_UDW(ring, 1);
	reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
	reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
	reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
	reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[3]);
	reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[3]);
	reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[2]);
	reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[2]);
	reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[1]);
	reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[1]);
	reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[0]);
	reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[0]);
	if (ring->id == RCS) {
		reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
		reg_state[CTX_R_PWR_CLK_STATE] = 0x20c8;
		reg_state[CTX_R_PWR_CLK_STATE+1] = 0;
	}

	kunmap_atomic(reg_state);

	set_page_dirty(page);
	i915_gem_object_unpin_pages(ctx_obj);

	return 0;
}
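/*
 * Teardown for all per-engine logical ring context state: for each engine
 * that got a backing object, the associated ringbuffer object is destroyed
 * and the context object is unpinned from the GGTT before its reference is
 * dropped.
 */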
void intel_lr_context_free(struct intel_context *ctx)
{
	int i;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
		struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;

		if (ctx_obj) {
			intel_destroy_ringbuffer_obj(ringbuf);
			kfree(ringbuf);
			i915_gem_object_ggtt_unpin(ctx_obj);
			drm_gem_object_unreference(&ctx_obj->base);
		}
	}
}
static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
{
	int ret = 0;

	WARN_ON(INTEL_INFO(ring->dev)->gen != 8);

	switch (ring->id) {
	case RCS:
		ret = GEN8_LR_CONTEXT_RENDER_SIZE;
		break;
	case VCS:
	case BCS:
	case VECS:
	case VCS2:
		ret = GEN8_LR_CONTEXT_OTHER_SIZE;
		break;
	}

	return ret;
}
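/*
 * "Deferred" here means logical ring contexts are created lazily, the first
 * time a context needs to run on a given engine, instead of at context
 * creation time; the WARN_ON below guards against a context that already
 * carries legacy render-ring state.
 */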
int intel_lr_context_deferred_create(struct intel_context *ctx,
				     struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_gem_object *ctx_obj;
	uint32_t context_size;
	struct intel_ringbuffer *ringbuf;
	int ret;

	WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);

	context_size = round_up(get_lr_context_size(ring), 4096);

	ctx_obj = i915_gem_alloc_context_obj(dev, context_size);
	if (IS_ERR(ctx_obj)) {
		ret = PTR_ERR(ctx_obj);
		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed: %d\n", ret);
		return ret;
	}

	ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
	if (ret) {
		DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", ret);
		drm_gem_object_unreference(&ctx_obj->base);
		return ret;
	}

	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
	if (!ringbuf) {
		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
				ring->name);
		i915_gem_object_ggtt_unpin(ctx_obj);
		drm_gem_object_unreference(&ctx_obj->base);
		ret = -ENOMEM;
		return ret;
	}

	ringbuf->ring = ring;
	ringbuf->size = 32 * PAGE_SIZE;
	ringbuf->effective_size = ringbuf->size;
	ringbuf->head = 0;
	ringbuf->tail = 0;
	ringbuf->space = ringbuf->size;
	ringbuf->last_retired_head = -1;

	/* TODO: For now we put this in the mappable region so that we can reuse
	 * the existing ringbuffer code which ioremaps it. When we start
	 * creating many contexts, this will no longer work and we must switch
	 * to a kmapish interface.
	 */
	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n",
				ring->name, ret);
		goto error;
	}

	ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
		intel_destroy_ringbuffer_obj(ringbuf);
		goto error;
	}

	ctx->engine[ring->id].ringbuf = ringbuf;
	ctx->engine[ring->id].state = ctx_obj;

	return 0;

error:
	kfree(ringbuf);
	i915_gem_object_ggtt_unpin(ctx_obj);
	drm_gem_object_unreference(&ctx_obj->base);
	return ret;
}