/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */
/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context in order to invoke a save of the context we actually care
 * about. In fact, the code could likely be constructed, albeit in a more
 * complicated fashion, to never use the default context, though that limits
 * the driver's ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware, and object
 * tracking works. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                           0            0           0
 * S1: context created                         1            0           0
 * S2: context is currently running            2            1           X
 * S3: GPU referenced, but not current         2            0           1
 * S4: context is current, but destroyed       1            1           0
 * S5: like S3, but destroyed                  1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and
 *  is on the active list waiting for the next context switch to occur. Until
 *  this happens, the object must remain at the same gtt offset. It is
 *  therefore possible to destroy a context, but it is still active.
 *
 */
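
/* A concrete walk-through of the table above (illustrative only): a client
 * creates a context (S1, refcount 1) and submits an execbuf with it; the
 * switch pins the BO and takes an extra reference for the GPU (S2). When
 * another context becomes current, this one stays on the active list (S3)
 * until its last request is retired, dropping it back to S1. Destroying it
 * while the hardware still references it (S4/S5) merely drops the user's
 * reference; the backing BO is freed only once retirement releases the GPU's
 * reference and the refcount reaches zero (S0).
 */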

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future proof as is.
 */
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096
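
/* Tear down a PPGTT. The aliasing PPGTT (or one with no bound VMAs) can be
 * cleaned up directly; otherwise any remaining VMAs must be unbound first so
 * the drm_mm is empty before the address space itself is taken down. */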
static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &ppgtt->base;

	if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
	    (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
		ppgtt->base.cleanup(&ppgtt->base);
		return;
	}

	/*
	 * Make sure vmas are unbound before we take down the drm_mm
	 *
	 * FIXME: Proper refcounting should take care of this, this shouldn't be
	 * needed at all.
	 */
	if (!list_empty(&vm->active_list)) {
		struct i915_vma *vma;

		list_for_each_entry(vma, &vm->active_list, mm_list)
			if (WARN_ON(list_empty(&vma->vma_link) ||
				    list_is_singular(&vma->vma_link)))
				break;

		i915_gem_evict_vm(&ppgtt->base, true);
	} else {
		i915_gem_retire_requests(dev);
		i915_gem_evict_vm(&ppgtt->base, false);
	}

	ppgtt->base.cleanup(&ppgtt->base);
}

static void ppgtt_release(struct kref *kref)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(kref, struct i915_hw_ppgtt, ref);

	do_ppgtt_cleanup(ppgtt);
	kfree(ppgtt);
}

static size_t get_context_alignment(struct drm_device *dev)
{
	if (IS_GEN6(dev))
		return GEN6_CONTEXT_ALIGN;

	return GEN7_CONTEXT_ALIGN;
}
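
/* Read the per-generation context image size from the hardware. The raw
 * register fields are scaled to bytes; Haswell and gen8 use fixed totals. */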
static int get_context_size(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	u32 reg;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
		if (IS_HASWELL(dev))
			ret = HSW_CXT_TOTAL_SIZE;
		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 8:
		ret = GEN8_CXT_TOTAL_SIZE;
		break;
	default:
		BUG();
	}

	return ret;
}
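
/* Final kref release for a context: frees the LR or legacy ring state and
 * drops the PPGTT reference taken at creation. */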
void i915_gem_context_free(struct kref *ctx_ref)
{
	struct intel_context *ctx = container_of(ctx_ref,
						 typeof(*ctx), ref);
	struct i915_hw_ppgtt *ppgtt = NULL;

	if (i915.enable_execlists) {
		ppgtt = ctx_to_ppgtt(ctx);
		intel_lr_context_free(ctx);
	} else if (ctx->legacy_hw_ctx.rcs_state) {
		/* We refcount even the aliasing PPGTT to keep the code symmetric */
		if (USES_PPGTT(ctx->legacy_hw_ctx.rcs_state->base.dev))
			ppgtt = ctx_to_ppgtt(ctx);
	}

	if (ppgtt)
		kref_put(&ppgtt->ref, ppgtt_release);
	if (ctx->legacy_hw_ctx.rcs_state)
		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
	list_del(&ctx->link);
	kfree(ctx);
}
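
/* Allocate the GEM object backing a legacy HW context image, preferring an
 * L3+LLC cache level where the platform supports it. */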
static struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 */
	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
		/* Failure shouldn't ever happen this early */
		if (WARN_ON(ret)) {
			drm_gem_object_unreference(&obj->base);
			return ERR_PTR(ret);
		}
	}

	return obj;
}

static struct i915_hw_ppgtt *
create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx)
{
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ret = i915_gem_init_ppgtt(dev, ppgtt);
	if (ret) {
		kfree(ppgtt);
		return ERR_PTR(ret);
	}

	ppgtt->ctx = ctx;
	return ppgtt;
}
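
/* Allocate and register a new intel_context, including its backing object
 * (when HW contexts are in use) and its per-file handle. The default context
 * (file_priv == NULL) always gets DEFAULT_CONTEXT_HANDLE. */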
static struct intel_context *
__create_hw_context(struct drm_device *dev,
		    struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->context_list);

	if (dev_priv->hw_context_size) {
		struct drm_i915_gem_object *obj =
				i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto err_out;
		}
		ctx->legacy_hw_ctx.rcs_state = obj;
	}

	/* Default context will never have a file_priv */
	if (file_priv != NULL) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
	} else
		ret = DEFAULT_CONTEXT_HANDLE;

	ctx->file_priv = file_priv;
	ctx->user_handle = ret;
	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;

	return ctx;

err_out:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

/**
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as an idle case.
 */
static struct intel_context *
i915_gem_create_context(struct drm_device *dev,
			struct drm_i915_file_private *file_priv,
			bool create_vm)
{
	const bool is_global_default_ctx = file_priv == NULL;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int ret = 0;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = __create_hw_context(dev, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
		/* We may need to do things with the shrinker which
		 * require us to immediately switch back to the default
		 * context. This can cause a problem as pinning the
		 * default context also requires GTT space which may not
		 * be available. To avoid this we always pin the default
		 * context.
		 */
		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
					    get_context_alignment(dev), 0);
		if (ret) {
			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
			goto err_destroy;
		}
	}

	if (create_vm) {
		struct i915_hw_ppgtt *ppgtt = create_vm_for_ctx(dev, ctx);

		if (IS_ERR_OR_NULL(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			ret = PTR_ERR(ppgtt);
			goto err_unpin;
		}

		ctx->vm = &ppgtt->base;

		/* This case is reserved for the global default context and
		 * should only happen once. */
		if (is_global_default_ctx) {
			if (WARN_ON(dev_priv->mm.aliasing_ppgtt)) {
				ret = -EEXIST;
				goto err_unpin;
			}

			dev_priv->mm.aliasing_ppgtt = ppgtt;
		}
	} else if (USES_PPGTT(dev)) {
		/* For platforms which only have aliasing PPGTT, we fake the
		 * address space and refcounting. */
		ctx->vm = &dev_priv->mm.aliasing_ppgtt->base;
		kref_get(&dev_priv->mm.aliasing_ppgtt->ref);
	} else
		ctx->vm = &dev_priv->gtt.base;

	return ctx;

err_unpin:
	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
err_destroy:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}
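
/* After a GPU hang, force every ring back onto its default context so the
 * hardware cannot restore the (possibly corrupt) last context image. */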
void i915_gem_context_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Prevent the hardware from restoring the last context (which hung) on
	 * the next switch */
	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];
		struct intel_context *dctx = ring->default_context;
		struct intel_context *lctx = ring->last_context;

		/* Do a fake switch to the default context */
		if (lctx == dctx)
			continue;

		if (!lctx)
			continue;

		if (dctx->legacy_hw_ctx.rcs_state && i == RCS) {
			WARN_ON(i915_gem_obj_ggtt_pin(dctx->legacy_hw_ctx.rcs_state,
						      get_context_alignment(dev), 0));
			/* Fake a finish/inactive */
			dctx->legacy_hw_ctx.rcs_state->base.write_domain = 0;
			dctx->legacy_hw_ctx.rcs_state->active = 0;
		}

		if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
			i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);

		i915_gem_context_unreference(lctx);
		i915_gem_context_reference(dctx);
		ring->last_context = dctx;
	}
}

int i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int i;

	/* Init should only be called once per module load. Eventually the
	 * restriction on the context_disabled check can be loosened. */
	if (WARN_ON(dev_priv->ring[RCS].default_context))
		return 0;

	if (i915.enable_execlists) {
		/* NB: intentionally left blank. We will allocate our own
		 * backing objects as we need them, thank you very much */
		dev_priv->hw_context_size = 0;
	} else if (HAS_HW_CONTEXTS(dev)) {
		dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
		if (dev_priv->hw_context_size > (1<<20)) {
			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
					 dev_priv->hw_context_size);
			dev_priv->hw_context_size = 0;
		}
	}

	ctx = i915_gem_create_context(dev, NULL, USES_PPGTT(dev));
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context (error %ld)\n",
			  PTR_ERR(ctx));
		return PTR_ERR(ctx);
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		/* NB: RCS will hold a ref for all rings */
		ring->default_context = ctx;

		/* FIXME: we really only want to do this for initialized rings */
		if (i915.enable_execlists)
			intel_lr_context_deferred_create(ctx, ring);
	}

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			i915.enable_execlists ? "LR" :
			dev_priv->hw_context_size ? "HW" : "fake");
	return 0;
}
void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *dctx = dev_priv->ring[RCS].default_context;
	int i;

	if (dctx->legacy_hw_ctx.rcs_state) {
		/* The only known way to stop the gpu from accessing the hw context is
		 * to reset it. Do this as the very last operation to avoid confusing
		 * other code, leading to spurious errors. */
		intel_gpu_reset(dev);

		/* When default context is created and switched to, base object refcount
		 * will be 2 (+1 from object creation and +1 from do_switch()).
		 * i915_gem_context_fini() will be called after gpu_idle() has switched
		 * to default context. So we need to unreference the base object once
		 * to offset the do_switch part, so that i915_gem_context_unreference()
		 * can then free the base object correctly. */
		WARN_ON(!dev_priv->ring[RCS].last_context);
		if (dev_priv->ring[RCS].last_context == dctx) {
			/* Fake switch to NULL context */
			WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
			i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
			i915_gem_context_unreference(dctx);
			dev_priv->ring[RCS].last_context = NULL;
		}

		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		if (ring->last_context)
			i915_gem_context_unreference(ring->last_context);

		ring->default_context = NULL;
		ring->last_context = NULL;
	}

	i915_gem_context_unreference(dctx);
}
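
/* Called from init/resume paths: enables the aliasing PPGTT and switches each
 * ring to its default context so initial hardware state gets programmed. */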
int i915_gem_context_enable(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int ret, i;

	/* This is the only place the aliasing PPGTT gets enabled, which means
	 * it has to happen before we bail on reset */
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
		ppgtt->enable(ppgtt);
	}

	/* FIXME: We should make this work, even in reset */
	if (i915_reset_in_progress(&dev_priv->gpu_error))
		return 0;

	BUG_ON(!dev_priv->ring[RCS].default_context);

	for_each_ring(ring, dev_priv, i) {
		ret = i915_switch_context(ring, ring->default_context);
		if (ret)
			return ret;
	}

	return 0;
}

static int context_idr_cleanup(int id, void *p, void *data)
{
	struct intel_context *ctx = p;

	i915_gem_context_unreference(ctx);
	return 0;
}
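
/* Per-file open hook: initialize the context idr and create this client's
 * own default context. */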
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&dev->struct_mutex);
	ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	return 0;
}

void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}

struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
	struct intel_context *ctx;

	ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
	if (!ctx)
		return ERR_PTR(-ENOENT);

	return ctx;
}
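
/* Emit the MI_SET_CONTEXT command that makes the hardware save the current
 * context image and load new_context, wrapped in the required workarounds
 * (TLB invalidation on gen6, MI_ARB_ON_OFF on gen7+, trailing MI_NOOP). */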
static inline int
mi_set_context(struct intel_engine_cs *ring,
	       struct intel_context *new_context,
	       u32 hw_flags)
{
	int ret;

	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
	if (IS_GEN6(ring->dev)) {
		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
		if (ret)
			return ret;
	}

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (INTEL_INFO(ring->dev)->gen >= 7)
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
	else
		intel_ring_emit(ring, MI_NOOP);

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
			MI_MM_SPACE_GTT |
			MI_SAVE_EXT_STATE_EN |
			MI_RESTORE_EXT_STATE_EN |
			hw_flags);
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	intel_ring_emit(ring, MI_NOOP);

	if (INTEL_INFO(ring->dev)->gen >= 7)
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	else
		intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);

	return ret;
}
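
/* Perform the actual legacy context switch on a ring: pin the new context,
 * switch the PPGTT if needed, emit MI_SET_CONTEXT, redo any pending L3
 * remaps, and hand the previous context over to active-object tracking. */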
static int do_switch(struct intel_engine_cs *ring,
		     struct intel_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_context *from = ring->last_context;
	struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
	u32 hw_flags = 0;
	bool uninitialized = false;
	int ret, i;

	if (from != NULL && ring == &dev_priv->ring[RCS]) {
		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
		BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
	}

	if (from == to && !to->remap_slice)
		return 0;

	/* Trying to pin first makes error handling easier. */
	if (ring == &dev_priv->ring[RCS]) {
		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
					    get_context_alignment(ring->dev), 0);
		if (ret)
			return ret;
	}

	/*
	 * Pin can switch back to the default context if we end up calling into
	 * evict_everything - as a last ditch gtt defrag effort that also
	 * switches to the default context. Hence we need to reload from here.
	 */
	from = ring->last_context;

	if (USES_FULL_PPGTT(ring->dev)) {
		ret = ppgtt->switch_mm(ppgtt, ring, false);
		if (ret)
			goto unpin_out;
	}

	if (ring != &dev_priv->ring[RCS]) {
		if (from)
			i915_gem_context_unreference(from);
		goto done;
	}

	/*
	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that thanks to write = false in this call and us not setting any gpu
	 * write domains when putting a context object onto the active list
	 * (when switching away from it), this won't block.
	 *
	 * XXX: We need a real interface to do this instead of trickery.
	 */
	ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
	if (ret)
		goto unpin_out;

	if (!to->legacy_hw_ctx.rcs_state->has_global_gtt_mapping) {
		struct i915_vma *vma = i915_gem_obj_to_vma(to->legacy_hw_ctx.rcs_state,
							   &dev_priv->gtt.base);
		vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level,
			      GLOBAL_BIND);
	}

	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
		hw_flags |= MI_RESTORE_INHIBIT;

	ret = mi_set_context(ring, to, hw_flags);
	if (ret)
		goto unpin_out;

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1<<i)))
			continue;

		ret = i915_gem_l3_remap(ring, i);
		/* If it failed, try again next round */
		if (ret)
			DRM_DEBUG_DRIVER("L3 remapping failed\n");
		else
			to->remap_slice &= ~(1<<i);
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from != NULL) {
		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from->legacy_hw_ctx.rcs_state->dirty = 1;
		BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring);

		/* obj is kept alive until the next request by its active ref */
		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
		i915_gem_context_unreference(from);
	}

	uninitialized = !to->legacy_hw_ctx.initialized && from == NULL;
	to->legacy_hw_ctx.initialized = true;

done:
	i915_gem_context_reference(to);
	ring->last_context = to;

	if (uninitialized) {
		ret = i915_gem_render_state_init(ring);
		if (ret)
			DRM_ERROR("init render state: %d\n", ret);
	}

	return 0;

unpin_out:
	if (ring->id == RCS)
		i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
	return ret;
}

/**
 * i915_switch_context() - perform a GPU context switch.
 * @ring: ring for which we'll execute the context switch
 * @to: the context to switch to
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 */
int i915_switch_context(struct intel_engine_cs *ring,
			struct intel_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

	if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
		if (to != ring->last_context) {
			i915_gem_context_reference(to);
			if (ring->last_context)
				i915_gem_context_unreference(ring->last_context);
			ring->last_context = to;
		}
		return 0;
	}

	return do_switch(ring, to);
}

static bool hw_context_enabled(struct drm_device *dev)
{
	return to_i915(dev)->hw_context_size;
}
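
/* Illustrative userspace usage of the two ioctls below (a sketch, not part
 * of this file; error handling omitted):
 *
 *	struct drm_i915_gem_context_create create = { 0 };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
 *	... use create.ctx_id in subsequent execbuffer2 calls ...
 *	struct drm_i915_gem_context_destroy destroy = {
 *		.ctx_id = create.ctx_id,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 */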
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	/* FIXME: allow user-created LR contexts as well */
	if (!hw_context_enabled(dev))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	args->ctx_id = ctx->user_handle;
	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

	return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
	i915_gem_context_unreference(ctx);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
	return 0;
}