/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */
/*
 * This file implements HW context support. On gen5+ a HW context consists of
 * an opaque GPU object which is referenced at times of context saves and
 * restores. With RC6 enabled, the context is also referenced as the GPU
 * enters and exits RC6 (the GPU has its own internal power context, except on
 * gen5). Though something like a context does exist for the media ring, the
 * code only supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU
 * state. The default context only exists to give the GPU some offset to load
 * as the current to invoke a save of the context we actually care about. In
 * fact, the code could likely be constructed, albeit in a more complicated
 * fashion, to never use the default context, though that limits the driver's
 * ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These
 * contexts store GPU state, and thus allow GPU clients to not re-emit state
 * (and potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 * (An illustrative userspace sketch of these transitions follows the
 * includes below.)
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and
 *  is on the active list waiting for the next context switch to occur. Until
 *  this happens, the object must remain at the same gtt offset. It is
 *  therefore possible to destroy a context while it is still active.
 */
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
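/*
 * Illustrative userspace sketch of the life cycle above (not part of this
 * file). The fd, the error handling and the drmIoctl() wrapper are
 * assumptions from libdrm; the structures and the execbuffer context-id
 * helper come from i915_drm.h:
 *
 *	struct drm_i915_gem_context_create create = { 0 };
 *	struct drm_i915_gem_context_destroy destroy = { 0 };
 *	struct drm_i915_gem_execbuffer2 execbuf = { 0 };
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);   // S0->S1
 *	i915_execbuffer2_set_context_id(execbuf, create.ctx_id);
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);     // S1->S2
 *	destroy.ctx_id = create.ctx_id;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy); // S2->S4 if current
 */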
/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future proof as is.
 */
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096
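/* These alignments feed the GGTT pin of the context object below, e.g.
 *	i915_gem_obj_ggtt_pin(obj, get_context_alignment(dev), 0);
 * so on gen6 a context BO ends up at a 64KiB-aligned GGTT offset. */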
static size_t get_context_alignment(struct drm_device *dev)
{
	if (IS_GEN6(dev))
		return GEN6_CONTEXT_ALIGN;

	return GEN7_CONTEXT_ALIGN;
}
static int get_context_size(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	u32 reg;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
		if (IS_HASWELL(dev))
			ret = HSW_CXT_TOTAL_SIZE;
		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 8:
		ret = GEN8_CXT_TOTAL_SIZE;
		break;
	default:
		BUG();
	}

	return ret;
}
void i915_gem_context_free(struct kref *ctx_ref)
{
	struct intel_context *ctx = container_of(ctx_ref,
						 typeof(*ctx), ref);

	if (i915.enable_execlists)
		intel_lr_context_free(ctx);

	i915_ppgtt_put(ctx->ppgtt);

	if (ctx->legacy_hw_ctx.rcs_state)
		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
	list_del(&ctx->link);
	kfree(ctx);
}
struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);
	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 */
	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
		/* Failure shouldn't ever happen this early */
		if (WARN_ON(ret)) {
			drm_gem_object_unreference(&obj->base);
			return ERR_PTR(ret);
		}
	}

	return obj;
}
static struct intel_context *
__create_hw_context(struct drm_device *dev,
		    struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);
	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->context_list);

	if (dev_priv->hw_context_size) {
		struct drm_i915_gem_object *obj =
				i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto err_out;
		}
		ctx->legacy_hw_ctx.rcs_state = obj;
	}
	/* Default context will never have a file_priv */
	if (file_priv != NULL) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
	} else
		ret = DEFAULT_CONTEXT_HANDLE;

	ctx->file_priv = file_priv;
	ctx->user_handle = ret;
	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
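	/* For instance, a hypothetical part with NUM_L3_SLICES(dev) == 2 gets
	 * remap_slice = (1 << 2) - 1 = 0x3, i.e. both slice 0 and slice 1
	 * flagged for remap on first load. */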
	return ctx;

err_out:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}
/**
 * The default context needs to exist per ring that uses contexts. It stores
 * the context state of the GPU for applications that don't utilize HW
 * contexts, as well as an idle case.
 */
static struct intel_context *
i915_gem_create_context(struct drm_device *dev,
			struct drm_i915_file_private *file_priv)
{
	const bool is_global_default_ctx = file_priv == NULL;
	struct intel_context *ctx;
	int ret = 0;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = __create_hw_context(dev, file_priv);
	if (IS_ERR(ctx))
		return ctx;
	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
		/* We may need to do things with the shrinker which
		 * require us to immediately switch back to the default
		 * context. This can cause a problem as pinning the
		 * default context also requires GTT space which may not
		 * be available. To avoid this we always pin the default
		 * context. */
		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
					    get_context_alignment(dev), 0);
		if (ret) {
			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
			goto err_destroy;
		}
	}
	if (USES_FULL_PPGTT(dev)) {
		struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);

		if (IS_ERR_OR_NULL(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			ret = PTR_ERR(ppgtt);
			goto err_unpin;
		}

		ctx->ppgtt = ppgtt;
	}

	return ctx;
err_unpin:
	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
err_destroy:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}
void i915_gem_context_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Prevent the hardware from restoring the last context (which hung) on
	 * the next switch */
	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];
		struct intel_context *dctx = ring->default_context;
		struct intel_context *lctx = ring->last_context;

		/* Do a fake switch to the default context */
		if (lctx == dctx)
			continue;

		if (!lctx)
			continue;
		if (dctx->legacy_hw_ctx.rcs_state && i == RCS) {
			WARN_ON(i915_gem_obj_ggtt_pin(dctx->legacy_hw_ctx.rcs_state,
						      get_context_alignment(dev), 0));
			/* Fake a finish/inactive */
			dctx->legacy_hw_ctx.rcs_state->base.write_domain = 0;
			dctx->legacy_hw_ctx.rcs_state->active = 0;
		}
		if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
			i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);

		i915_gem_context_unreference(lctx);
		i915_gem_context_reference(dctx);
		ring->last_context = dctx;
	}
}
int i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int i;

	/* Init should only be called once per module load. Eventually the
	 * restriction on the context_disabled check can be loosened. */
	if (WARN_ON(dev_priv->ring[RCS].default_context))
		return 0;
	if (i915.enable_execlists) {
		/* NB: intentionally left blank. We will allocate our own
		 * backing objects as we need them, thank you very much */
		dev_priv->hw_context_size = 0;
	} else if (HAS_HW_CONTEXTS(dev)) {
		dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
		if (dev_priv->hw_context_size > (1<<20)) {
			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
					 dev_priv->hw_context_size);
			dev_priv->hw_context_size = 0;
		}
	}
	ctx = i915_gem_create_context(dev, NULL);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context (error %ld)\n",
			  PTR_ERR(ctx));
		return PTR_ERR(ctx);
	}
	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		/* NB: RCS will hold a ref for all rings */
		ring->default_context = ctx;
	}
	DRM_DEBUG_DRIVER("%s context support initialized\n",
			 i915.enable_execlists ? "LR" :
			 dev_priv->hw_context_size ? "HW" : "fake");
	return 0;
}
void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *dctx = dev_priv->ring[RCS].default_context;
	int i;
	if (dctx->legacy_hw_ctx.rcs_state) {
		/* The only known way to stop the gpu from accessing the hw context is
		 * to reset it. Do this as the very last operation to avoid confusing
		 * other code, leading to spurious errors. */
		intel_gpu_reset(dev);
		/* When default context is created and switched to, base object refcount
		 * will be 2 (+1 from object creation and +1 from do_switch()).
		 * i915_gem_context_fini() will be called after gpu_idle() has switched
		 * to default context. So we need to unreference the base object once
		 * to offset the do_switch part, so that i915_gem_context_unreference()
		 * can then free the base object correctly. */
		WARN_ON(!dev_priv->ring[RCS].last_context);
		if (dev_priv->ring[RCS].last_context == dctx) {
			/* Fake switch to NULL context */
			WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
			i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
			i915_gem_context_unreference(dctx);
			dev_priv->ring[RCS].last_context = NULL;
		}

		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
	}
	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		if (ring->last_context)
			i915_gem_context_unreference(ring->last_context);

		ring->default_context = NULL;
		ring->last_context = NULL;
	}

	i915_gem_context_unreference(dctx);
}
int i915_gem_context_enable(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int ret, i;

	/* FIXME: We should make this work, even in reset */
	if (i915_reset_in_progress(&dev_priv->gpu_error))
		return 0;

	BUG_ON(!dev_priv->ring[RCS].default_context);

	for_each_ring(ring, dev_priv, i) {
		ret = i915_switch_context(ring, ring->default_context);
		if (ret)
			return ret;
	}

	return 0;
}
static int context_idr_cleanup(int id, void *p, void *data)
{
	struct intel_context *ctx = p;

	i915_gem_context_unreference(ctx);
	return 0;
}
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&dev->struct_mutex);
	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	return 0;
}
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}
struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
	struct intel_context *ctx;

	ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
	if (!ctx)
		return ERR_PTR(-ENOENT);

	return ctx;
}
static inline int
mi_set_context(struct intel_engine_cs *ring,
	       struct intel_context *new_context,
	       u32 hw_flags)
{
	int ret;

	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
	if (IS_GEN6(ring->dev)) {
		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
		if (ret)
			return ret;
	}
	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;
	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (INTEL_INFO(ring->dev)->gen >= 7)
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
	else
		intel_ring_emit(ring, MI_NOOP);

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
			MI_MM_SPACE_GTT |
			MI_SAVE_EXT_STATE_EN |
			MI_RESTORE_EXT_STATE_EN |
			hw_flags);
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	intel_ring_emit(ring, MI_NOOP);
	if (INTEL_INFO(ring->dev)->gen >= 7)
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	else
		intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);

	return ret;
}
static int do_switch(struct intel_engine_cs *ring,
		     struct intel_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_context *from = ring->last_context;
	u32 hw_flags = 0;
	bool uninitialized = false;
	int ret, i;

	if (from != NULL && ring == &dev_priv->ring[RCS]) {
		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
		BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
	}
	if (from == to && !to->remap_slice)
		return 0;
	/* Trying to pin first makes error handling easier. */
	if (ring == &dev_priv->ring[RCS]) {
		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
					    get_context_alignment(ring->dev), 0);
		if (ret)
			return ret;
	}
	/*
	 * Pin can switch back to the default context if we end up calling into
	 * evict_everything - as a last ditch gtt defrag effort that also
	 * switches to the default context. Hence we need to reload from here.
	 */
	from = ring->last_context;
	if (to->ppgtt) {
		ret = to->ppgtt->switch_mm(to->ppgtt, ring, false);
		if (ret)
			goto unpin_out;
	}
	if (ring != &dev_priv->ring[RCS]) {
		if (from)
			i915_gem_context_unreference(from);
		goto done;
	}
	/*
	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that thanks to write = false in this call and us not setting any gpu
	 * write domains when putting a context object onto the active list
	 * (when switching away from it), this won't block.
	 *
	 * XXX: We need a real interface to do this instead of trickery.
	 */
	ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
	if (ret)
		goto unpin_out;
	if (!to->legacy_hw_ctx.rcs_state->has_global_gtt_mapping) {
		struct i915_vma *vma = i915_gem_obj_to_vma(to->legacy_hw_ctx.rcs_state,
							   &dev_priv->gtt.base);
		vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level,
			      GLOBAL_BIND);
	}
	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
		hw_flags |= MI_RESTORE_INHIBIT;
	ret = mi_set_context(ring, to, hw_flags);
	if (ret)
		goto unpin_out;
	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1<<i)))
			continue;

		ret = i915_gem_l3_remap(ring, i);
		/* If it failed, try again next round */
		if (ret)
			DRM_DEBUG_DRIVER("L3 remapping failed\n");
		else
			to->remap_slice &= ~(1<<i);
	}
	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from != NULL) {
		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from->legacy_hw_ctx.rcs_state->dirty = 1;
		BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring);

		/* obj is kept alive until the next request by its active ref */
		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
		i915_gem_context_unreference(from);
	}
	uninitialized = !to->legacy_hw_ctx.initialized && from == NULL;
	to->legacy_hw_ctx.initialized = true;

done:
	i915_gem_context_reference(to);
	ring->last_context = to;
	if (uninitialized) {
		ret = i915_gem_render_state_init(ring);
		if (ret)
			DRM_ERROR("init render state: %d\n", ret);
	}

	return 0;

unpin_out:
	if (ring->id == RCS)
		i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
	return ret;
}
/**
 * i915_switch_context() - perform a GPU context switch.
 * @ring: ring for which we'll execute the context switch
 * @to: the context to switch to
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by one on create and destroy. If the context is in use by the
 * GPU, it will have a refcount > 1. This allows us to destroy the context
 * abstract object while letting the normal object tracking destroy the
 * backing BO.
 */
int i915_switch_context(struct intel_engine_cs *ring,
			struct intel_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

	if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
		if (to != ring->last_context) {
			i915_gem_context_reference(to);
			if (ring->last_context)
				i915_gem_context_unreference(ring->last_context);
			ring->last_context = to;
		}
		return 0;
	}

	return do_switch(ring, to);
}
static bool contexts_enabled(struct drm_device *dev)
{
	return i915.enable_execlists || to_i915(dev)->hw_context_size;
}
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (!contexts_enabled(dev))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	args->ctx_id = ctx->user_handle;
	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

	return 0;
}
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
	i915_gem_context_unreference(ctx);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
	return 0;
}
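/*
 * Userspace view of the two ioctls above (a sketch; the fd and libdrm's
 * drmIoctl() wrapper are assumptions). Note that DEFAULT_CONTEXT_HANDLE is
 * rejected by the destroy path with -ENOENT, so only explicitly created
 * contexts can be destroyed:
 *
 *	struct drm_i915_gem_context_create create = { 0 };
 *	struct drm_i915_gem_context_destroy destroy = { 0 };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create) == 0) {
 *		// ... submit work against create.ctx_id ...
 *		destroy.ctx_id = create.ctx_id;
 *		drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 *	}
 */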