drm/i915/bdw: Skeleton for the new logical rings submission path
drivers/gpu/drm/i915/intel_lrc.c
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 *
 */

/*
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy ringbuffer-based method).
 */
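/*
 * Note: at this point in the series this file is only a skeleton. The
 * actual submission machinery (context descriptor formatting, writes to
 * the ELSP, context-switch interrupt handling) arrives in later patches;
 * what is already in place is the per-engine plumbing: each logical ring
 * gets a backing object holding its register state plus its own
 * ringbuffer object.
 */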

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define GEN8_LR_CONTEXT_RENDER_SIZE	(20 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE	(2 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_ALIGN		4096

#define RING_ELSP(ring)			((ring)->mmio_base + 0x230)
#define RING_CONTEXT_CONTROL(ring)	((ring)->mmio_base + 0x244)

#define CTX_LRI_HEADER_0		0x01
#define CTX_CONTEXT_CONTROL		0x02
#define CTX_RING_HEAD			0x04
#define CTX_RING_TAIL			0x06
#define CTX_RING_BUFFER_START		0x08
#define CTX_RING_BUFFER_CONTROL		0x0a
#define CTX_BB_HEAD_U			0x0c
#define CTX_BB_HEAD_L			0x0e
#define CTX_BB_STATE			0x10
#define CTX_SECOND_BB_HEAD_U		0x12
#define CTX_SECOND_BB_HEAD_L		0x14
#define CTX_SECOND_BB_STATE		0x16
#define CTX_BB_PER_CTX_PTR		0x18
#define CTX_RCS_INDIRECT_CTX		0x1a
#define CTX_RCS_INDIRECT_CTX_OFFSET	0x1c
#define CTX_LRI_HEADER_1		0x21
#define CTX_CTX_TIMESTAMP		0x22
#define CTX_PDP3_UDW			0x24
#define CTX_PDP3_LDW			0x26
#define CTX_PDP2_UDW			0x28
#define CTX_PDP2_LDW			0x2a
#define CTX_PDP1_UDW			0x2c
#define CTX_PDP1_LDW			0x2e
#define CTX_PDP0_UDW			0x30
#define CTX_PDP0_LDW			0x32
#define CTX_LRI_HEADER_2		0x41
#define CTX_R_PWR_CLK_STATE		0x42
#define CTX_GPGPU_CSR_BASE_ADDRESS	0x44

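/**
 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
 * @dev: DRM device.
 * @enable_execlists: value of the i915.enable_execlists module parameter.
 *
 * Execlists require Logical Ring Contexts and some form of PPGTT; on
 * anything else we fall back to the legacy ringbuffer submission path.
 *
 * Return: 1 if Execlists should be enabled, 0 otherwise.
 */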
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
{
	WARN_ON(i915.enable_ppgtt == -1);

	if (enable_execlists == 0)
		return 0;

	if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev))
		return 1;

	return 0;
}

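/*
 * The functions below are the Execlists entry points: submission (the
 * counterpart of i915_gem_do_execbuffer on the legacy path) and the
 * logical ring stop/cleanup/init hooks. They are intentionally left as
 * stubs here and are filled in by later patches in this series.
 */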
int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
			       struct intel_engine_cs *ring,
			       struct intel_context *ctx,
			       struct drm_i915_gem_execbuffer2 *args,
			       struct list_head *vmas,
			       struct drm_i915_gem_object *batch_obj,
			       u64 exec_start, u32 flags)
{
	/* TODO */
	return 0;
}

void intel_logical_ring_stop(struct intel_engine_cs *ring)
{
	/* TODO */
}

void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
{
	/* TODO */
}

static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
{
	/* TODO */
	return 0;
}

static int logical_render_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[RCS];

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT;

	return logical_ring_init(dev, ring);
}

static int logical_bsd_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS];

	ring->name = "bsd ring";
	ring->id = VCS;
	ring->mmio_base = GEN6_BSD_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;

	return logical_ring_init(dev, ring);
}

static int logical_bsd2_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
	ring->name = "bsd2 ring";
	ring->id = VCS2;
	ring->mmio_base = GEN8_BSD2_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;

	return logical_ring_init(dev, ring);
}

static int logical_blt_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[BCS];

	ring->name = "blitter ring";
	ring->id = BCS;
	ring->mmio_base = BLT_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;

	return logical_ring_init(dev, ring);
}

static int logical_vebox_ring_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VECS];

	ring->name = "video enhancement ring";
	ring->id = VECS;
	ring->mmio_base = VEBOX_RING_BASE;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;

	return logical_ring_init(dev, ring);
}

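/**
 * intel_logical_rings_init() - init the engines for Execlists submission
 * @dev: DRM device.
 *
 * The Execlists counterpart of i915_gem_init_rings(): initialize one
 * logical ring per engine present on the platform; on failure, unwind
 * the rings that were already initialized.
 *
 * Return: non-zero if the initialization failed.
 */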
int intel_logical_rings_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = logical_render_ring_init(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = logical_bsd_ring_init(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (HAS_BLT(dev)) {
		ret = logical_blt_ring_init(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	if (HAS_VEBOX(dev)) {
		ret = logical_vebox_ring_init(dev);
		if (ret)
			goto cleanup_blt_ring;
	}

	if (HAS_BSD2(dev)) {
		ret = logical_bsd2_ring_init(dev);
		if (ret)
			goto cleanup_vebox_ring;
	}

	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
	if (ret)
		goto cleanup_bsd2_ring;

	return 0;

cleanup_bsd2_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VCS2]);
cleanup_vebox_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
cleanup_blt_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[BCS]);
cleanup_bsd_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[VCS]);
cleanup_render_ring:
	intel_logical_ring_cleanup(&dev_priv->ring[RCS]);

	return ret;
}

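/*
 * populate_lr_context() fills in the register state page of the context
 * backing object: the MI_LOAD_REGISTER_IMM headers and the (reg, value)
 * pairs the GPU parses on the first context restore. Only the values
 * needed before the first run are set here; the GPU rewrites the page
 * itself on every context save.
 */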
static int
populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
		    struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
{
	struct drm_i915_gem_object *ring_obj = ringbuf->obj;
	struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx);
	struct page *page;
	uint32_t *reg_state;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
	if (ret) {
		DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
		return ret;
	}

	ret = i915_gem_object_get_pages(ctx_obj);
	if (ret) {
		DRM_DEBUG_DRIVER("Could not get object pages\n");
		return ret;
	}

	i915_gem_object_pin_pages(ctx_obj);

	/* The second page of the context object contains some fields which must
	 * be set up prior to the first execution. */
	page = i915_gem_object_get_page(ctx_obj, 1);
	reg_state = kmap_atomic(page);

	/* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
	 * commands followed by (reg, value) pairs. The values we are setting here are
	 * only for the first context restore: on a subsequent save, the GPU will
	 * recreate this batchbuffer with new values (including all the missing
	 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
	if (ring->id == RCS)
		reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(14);
	else
		reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(11);
	reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED;
	reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
	reg_state[CTX_CONTEXT_CONTROL+1] =
		_MASKED_BIT_ENABLE((1<<3) | MI_RESTORE_INHIBIT);
	reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
	reg_state[CTX_RING_HEAD+1] = 0;
	reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
	reg_state[CTX_RING_TAIL+1] = 0;
	reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
	reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
	reg_state[CTX_RING_BUFFER_CONTROL+1] =
		((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
	reg_state[CTX_BB_HEAD_U] = ring->mmio_base + 0x168;
	reg_state[CTX_BB_HEAD_U+1] = 0;
	reg_state[CTX_BB_HEAD_L] = ring->mmio_base + 0x140;
	reg_state[CTX_BB_HEAD_L+1] = 0;
	reg_state[CTX_BB_STATE] = ring->mmio_base + 0x110;
	reg_state[CTX_BB_STATE+1] = (1<<5);
	reg_state[CTX_SECOND_BB_HEAD_U] = ring->mmio_base + 0x11c;
	reg_state[CTX_SECOND_BB_HEAD_U+1] = 0;
	reg_state[CTX_SECOND_BB_HEAD_L] = ring->mmio_base + 0x114;
	reg_state[CTX_SECOND_BB_HEAD_L+1] = 0;
	reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
	reg_state[CTX_SECOND_BB_STATE+1] = 0;
	if (ring->id == RCS) {
		/* TODO: according to BSpec, the register state context
		 * for CHV does not have these. OTOH, these registers do
		 * exist in CHV. I'm waiting for a clarification */
		reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
		reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
		reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
		reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
	}
	reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
	reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
	reg_state[CTX_CTX_TIMESTAMP] = ring->mmio_base + 0x3a8;
	reg_state[CTX_CTX_TIMESTAMP+1] = 0;
	reg_state[CTX_PDP3_UDW] = GEN8_RING_PDP_UDW(ring, 3);
	reg_state[CTX_PDP3_LDW] = GEN8_RING_PDP_LDW(ring, 3);
	reg_state[CTX_PDP2_UDW] = GEN8_RING_PDP_UDW(ring, 2);
	reg_state[CTX_PDP2_LDW] = GEN8_RING_PDP_LDW(ring, 2);
	reg_state[CTX_PDP1_UDW] = GEN8_RING_PDP_UDW(ring, 1);
	reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
	reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
	reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
	reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[3]);
	reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[3]);
	reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[2]);
	reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[2]);
	reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[1]);
	reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[1]);
	reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[0]);
	reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[0]);
	if (ring->id == RCS) {
		reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
		reg_state[CTX_R_PWR_CLK_STATE] = 0x20c8;
		reg_state[CTX_R_PWR_CLK_STATE+1] = 0;
	}

	kunmap_atomic(reg_state);

	ctx_obj->dirty = 1;
	set_page_dirty(page);
	i915_gem_object_unpin_pages(ctx_obj);

	return 0;
}

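/**
 * intel_lr_context_free() - free the LRC-specific bits of a context
 * @ctx: the LR context to free.
 *
 * The main context freeing is handled by i915_gem_context_free(); here we
 * only release the LRC pieces: the per-engine register state backing
 * object and the logical ringbuffer.
 */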
void intel_lr_context_free(struct intel_context *ctx)
{
	int i;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
		struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;

		if (ctx_obj) {
			intel_destroy_ringbuffer_obj(ringbuf);
			kfree(ringbuf);
			i915_gem_object_ggtt_unpin(ctx_obj);
			drm_gem_object_unreference(&ctx_obj->base);
		}
	}
}

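/* The context backing object sizes are dictated by the hardware: the
 * render context holds considerably more state (e.g. the indirect and
 * per-context batch buffer pointers written above) than the contexts of
 * the other engines.
 */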
static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
{
	int ret = 0;

	WARN_ON(INTEL_INFO(ring->dev)->gen != 8);

	switch (ring->id) {
	case RCS:
		ret = GEN8_LR_CONTEXT_RENDER_SIZE;
		break;
	case VCS:
	case BCS:
	case VECS:
	case VCS2:
		ret = GEN8_LR_CONTEXT_OTHER_SIZE;
		break;
	}

	return ret;
}

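/**
 * intel_lr_context_deferred_create() - create the LRC-specific bits of a context
 * @ctx: LR context to create.
 * @ring: engine to be used with the context.
 *
 * The backing object and the ringbuffer consume a significant amount of
 * memory, so their creation is deferred until we know that a given engine
 * will actually be used with the context. This function may therefore be
 * called more than once for the same context, with different engines.
 *
 * Return: non-zero on error.
 */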
int intel_lr_context_deferred_create(struct intel_context *ctx,
				     struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_gem_object *ctx_obj;
	uint32_t context_size;
	struct intel_ringbuffer *ringbuf;
	int ret;

	WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);

	context_size = round_up(get_lr_context_size(ring), 4096);

	ctx_obj = i915_gem_alloc_context_obj(dev, context_size);
	if (IS_ERR(ctx_obj)) {
		ret = PTR_ERR(ctx_obj);
		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed: %d\n", ret);
		return ret;
	}

	ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
	if (ret) {
		DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", ret);
		drm_gem_object_unreference(&ctx_obj->base);
		return ret;
	}

	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
	if (!ringbuf) {
		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
				 ring->name);
		i915_gem_object_ggtt_unpin(ctx_obj);
		drm_gem_object_unreference(&ctx_obj->base);
		return -ENOMEM;
	}

	ringbuf->ring = ring;
	ringbuf->size = 32 * PAGE_SIZE;
	ringbuf->effective_size = ringbuf->size;
	ringbuf->head = 0;
	ringbuf->tail = 0;
	ringbuf->space = ringbuf->size;
	ringbuf->last_retired_head = -1;

	/* TODO: For now we put this in the mappable region so that we can reuse
	 * the existing ringbuffer code which ioremaps it. When we start
	 * creating many contexts, this will no longer work and we must switch
	 * to a kmapish interface.
	 */
	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n",
				 ring->name, ret);
		goto error;
	}

	ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
		intel_destroy_ringbuffer_obj(ringbuf);
		goto error;
	}

	ctx->engine[ring->id].ringbuf = ringbuf;
	ctx->engine[ring->id].state = ctx_obj;

	return 0;

error:
	kfree(ringbuf);
	i915_gem_object_ggtt_unpin(ctx_obj);
	drm_gem_object_unreference(&ctx_obj->base);
	return ret;
}