/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * from RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current to invoke a save of the context we actually care about. In fact, the
 * code could likely be constructed, albeit in a more complicated fashion, to
 * never use the default context, though that limits the driver's ability to
 * swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware, and object
 * tracking works. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount   pincount   active
 * S0: initial state                          0          0         0
 * S1: context created                        1          0         0
 * S2: context is currently running           2          1         X
 * S3: GPU referenced, but not current        2          0         1
 * S4: context is current, but destroyed      1          1         0
 * S5: like S3, but destroyed                 1          0         1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and is
 *  on the active list waiting for the next context switch to occur. Until this
 *  happens, the object must remain at the same gtt offset. It is therefore
 *  possible to destroy a context while it is still active.
 */

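/*
 * A minimal userspace sketch of the life cycle above, using the context
 * create/destroy ioctls implemented at the bottom of this file (fd setup,
 * error handling and the execbuffer submission itself are omitted/assumed):
 *
 *	struct drm_i915_gem_context_create create = { };
 *	struct drm_i915_gem_context_destroy destroy = { };
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);   // S0->S1
 *	// an execbuffer2 submission carrying create.ctx_id (in rsvd1)
 *	// makes the context current: S1->S2, then S3 once another
 *	// context runs, and back to S1 when the BO retires
 *	destroy.ctx_id = create.ctx_id;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy); // ->S0
 */
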
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future proof as is.
 */
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096

static int do_switch(struct intel_ring_buffer *ring,
		     struct i915_hw_context *to);

static size_t get_context_alignment(struct drm_device *dev)
{
	if (IS_GEN6(dev))
		return GEN6_CONTEXT_ALIGN;

	return GEN7_CONTEXT_ALIGN;
}

static int get_context_size(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	u32 reg;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
		if (IS_HASWELL(dev))
			ret = HSW_CXT_TOTAL_SIZE;
		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 8:
		ret = GEN8_CXT_TOTAL_SIZE;
		break;
	default:
		BUG();
	}

	return ret;
}

void i915_gem_context_free(struct kref *ctx_ref)
{
	struct i915_hw_context *ctx = container_of(ctx_ref,
						   typeof(*ctx), ref);
	struct i915_hw_ppgtt *ppgtt = NULL;

	/* We refcount even the aliasing PPGTT to keep the code symmetric */
	if (USES_PPGTT(ctx->obj->base.dev))
		ppgtt = ctx_to_ppgtt(ctx);

	/* XXX: Free up the object before tearing down the address space, in
	 * case we're bound in the PPGTT */
	drm_gem_object_unreference(&ctx->obj->base);

	if (ppgtt)
		kref_put(&ppgtt->ref, ppgtt_release);
	list_del(&ctx->link);
	kfree(ctx);
}

static struct i915_hw_ppgtt *
create_vm_for_ctx(struct drm_device *dev, struct i915_hw_context *ctx)
{
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ret = i915_gem_init_ppgtt(dev, ppgtt);
	if (ret) {
		kfree(ppgtt);
		return ERR_PTR(ret);
	}

	return ppgtt;
}

static struct i915_hw_context *
__create_hw_context(struct drm_device *dev,
		    struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
	INIT_LIST_HEAD(&ctx->link);
	if (ctx->obj == NULL) {
		kfree(ctx);
		DRM_DEBUG_DRIVER("Context object allocation failed\n");
		return ERR_PTR(-ENOMEM);
	}

	if (INTEL_INFO(dev)->gen >= 7) {
		ret = i915_gem_object_set_cache_level(ctx->obj,
						      I915_CACHE_L3_LLC);
		/* Failure shouldn't ever happen this early */
		if (WARN_ON(ret))
			goto err_out;
	}

	list_add_tail(&ctx->link, &dev_priv->context_list);

	/* Default context will never have a file_priv */
	if (file_priv == NULL)
		return ctx;

	ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID, 0,
			GFP_KERNEL);
	if (ret < 0)
		goto err_out;

	ctx->file_priv = file_priv;
	ctx->id = ret;

	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;

	return ctx;

err_out:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

/*
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as an idle case.
 */
static struct i915_hw_context *
i915_gem_create_context(struct drm_device *dev,
			struct drm_i915_file_private *file_priv,
			bool create_vm)
{
	const bool is_global_default_ctx = file_priv == NULL;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_context *ctx;
	int ret = 0;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = __create_hw_context(dev, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (is_global_default_ctx) {
		/* We may need to do things with the shrinker which
		 * require us to immediately switch back to the default
		 * context. This can cause a problem as pinning the
		 * default context also requires GTT space which may not
		 * be available. To avoid this we always pin the default
		 * context.
		 */
		ret = i915_gem_obj_ggtt_pin(ctx->obj,
					    get_context_alignment(dev), 0);
		if (ret) {
			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
			goto err_destroy;
		}
	}

	if (create_vm) {
		struct i915_hw_ppgtt *ppgtt = create_vm_for_ctx(dev, ctx);

		if (IS_ERR_OR_NULL(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			ret = PTR_ERR(ppgtt);
			goto err_unpin;
		} else
			ctx->vm = &ppgtt->base;

		/* This case is reserved for the global default context and
		 * should only happen once. */
		if (is_global_default_ctx) {
			if (WARN_ON(dev_priv->mm.aliasing_ppgtt)) {
				ret = -EEXIST;
				goto err_unpin;
			}

			dev_priv->mm.aliasing_ppgtt = ppgtt;
		}
	} else if (USES_PPGTT(dev)) {
		/* For platforms which only have aliasing PPGTT, we fake the
		 * address space and refcounting. */
		ctx->vm = &dev_priv->mm.aliasing_ppgtt->base;
		kref_get(&dev_priv->mm.aliasing_ppgtt->ref);
	} else
		ctx->vm = &dev_priv->gtt.base;

	return ctx;

err_unpin:
	if (is_global_default_ctx)
		i915_gem_object_ggtt_unpin(ctx->obj);
err_destroy:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

void i915_gem_context_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	if (!HAS_HW_CONTEXTS(dev))
		return;

	/* Prevent the hardware from restoring the last context (which hung) on
	 * the next switch */
	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct i915_hw_context *dctx;
		if (!(INTEL_INFO(dev)->ring_mask & (1 << i)))
			continue;

		/* Do a fake switch to the default context */
		ring = &dev_priv->ring[i];
		dctx = ring->default_context;

		if (!ring->last_context)
			continue;

		if (ring->last_context == dctx)
			continue;

		if (i == RCS) {
			WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
						      get_context_alignment(dev), 0));
			/* Fake a finish/inactive */
			dctx->obj->base.write_domain = 0;
			dctx->obj->active = 0;
		}

		i915_gem_context_unreference(ring->last_context);
		i915_gem_context_reference(dctx);
		ring->last_context = dctx;
	}
}

int i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	if (!HAS_HW_CONTEXTS(dev))
		return 0;

	/* Init should only be called once per module load. Eventually the
	 * restriction on the context_disabled check can be loosened. */
	if (WARN_ON(dev_priv->ring[RCS].default_context))
		return 0;

	dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);

	if (dev_priv->hw_context_size > (1<<20)) {
		DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size\n");
		return -E2BIG;
	}

	dev_priv->ring[RCS].default_context =
		i915_gem_create_context(dev, NULL, USES_PPGTT(dev));

	if (IS_ERR_OR_NULL(dev_priv->ring[RCS].default_context)) {
		DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %ld\n",
				 PTR_ERR(dev_priv->ring[RCS].default_context));
		return PTR_ERR(dev_priv->ring[RCS].default_context);
	}

	for (i = RCS + 1; i < I915_NUM_RINGS; i++) {
		if (!(INTEL_INFO(dev)->ring_mask & (1 << i)))
			continue;

		ring = &dev_priv->ring[i];

		/* NB: RCS will hold a ref for all rings */
		ring->default_context = dev_priv->ring[RCS].default_context;
	}

	DRM_DEBUG_DRIVER("HW context support initialized\n");
	return 0;
}

void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
	int i;

	if (!HAS_HW_CONTEXTS(dev))
		return;

	/* The only known way to stop the gpu from accessing the hw context is
	 * to reset it. Do this as the very last operation to avoid confusing
	 * other code, leading to spurious errors. */
	intel_gpu_reset(dev);

	/* When default context is created and switched to, base object refcount
	 * will be 2 (+1 from object creation and +1 from do_switch()).
	 * i915_gem_context_fini() will be called after gpu_idle() has switched
	 * to default context. So we need to unreference the base object once
	 * to offset the do_switch part, so that i915_gem_context_unreference()
	 * can then free the base object correctly. */
	WARN_ON(!dev_priv->ring[RCS].last_context);
	if (dev_priv->ring[RCS].last_context == dctx) {
		/* Fake switch to NULL context */
		WARN_ON(dctx->obj->active);
		i915_gem_object_ggtt_unpin(dctx->obj);
		i915_gem_context_unreference(dctx);
		dev_priv->ring[RCS].last_context = NULL;
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_ring_buffer *ring = &dev_priv->ring[i];
		if (!(INTEL_INFO(dev)->ring_mask & (1 << i)))
			continue;

		if (ring->last_context)
			i915_gem_context_unreference(ring->last_context);

		ring->default_context = NULL;
		ring->last_context = NULL;
	}

	i915_gem_object_ggtt_unpin(dctx->obj);
	i915_gem_context_unreference(dctx);
	dev_priv->mm.aliasing_ppgtt = NULL;
}

int i915_gem_context_enable(struct drm_i915_private *dev_priv)
{
	struct intel_ring_buffer *ring;
	int ret, i;

	if (!HAS_HW_CONTEXTS(dev_priv->dev))
		return 0;

	/* This is the only place the aliasing PPGTT gets enabled, which means
	 * it has to happen before we bail on reset */
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
		ppgtt->enable(ppgtt);
	}

	/* FIXME: We should make this work, even in reset */
	if (i915_reset_in_progress(&dev_priv->gpu_error))
		return 0;

	BUG_ON(!dev_priv->ring[RCS].default_context);

	for_each_ring(ring, dev_priv, i) {
		ret = do_switch(ring, ring->default_context);
		if (ret)
			return ret;
	}

	return 0;
}

static int context_idr_cleanup(int id, void *p, void *data)
{
	struct i915_hw_context *ctx = p;

	/* Ignore the default context because close will handle it */
	if (i915_gem_context_is_default(ctx))
		return 0;

	i915_gem_context_unreference(ctx);
	return 0;
}

int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_HW_CONTEXTS(dev)) {
		/* Cheat for hang stats */
		file_priv->private_default_ctx =
			kzalloc(sizeof(struct i915_hw_context), GFP_KERNEL);

		if (file_priv->private_default_ctx == NULL)
			return -ENOMEM;

		file_priv->private_default_ctx->vm = &dev_priv->gtt.base;
		return 0;
	}

	idr_init(&file_priv->context_idr);

	mutex_lock(&dev->struct_mutex);
	file_priv->private_default_ctx =
		i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(file_priv->private_default_ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(file_priv->private_default_ctx);
	}

	return 0;
}

void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	if (!HAS_HW_CONTEXTS(dev)) {
		kfree(file_priv->private_default_ctx);
		return;
	}

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	i915_gem_context_unreference(file_priv->private_default_ctx);
	idr_destroy(&file_priv->context_idr);
}

struct i915_hw_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_hw_context *ctx;

	if (!HAS_HW_CONTEXTS(file_priv->dev_priv->dev))
		return file_priv->private_default_ctx;

	ctx = (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
	if (!ctx)
		return ERR_PTR(-ENOENT);

	return ctx;
}

static inline int
mi_set_context(struct intel_ring_buffer *ring,
	       struct i915_hw_context *new_context,
	       u32 hw_flags)
{
	int ret;

	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
	if (IS_GEN6(ring->dev) && ring->itlb_before_ctx_switch) {
		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
		if (ret)
			return ret;
	}

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw */
	if (IS_GEN7(ring->dev))
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
	else
		intel_ring_emit(ring, MI_NOOP);

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
			MI_MM_SPACE_GTT |
			MI_SAVE_EXT_STATE_EN |
			MI_RESTORE_EXT_STATE_EN |
			hw_flags);
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	intel_ring_emit(ring, MI_NOOP);

	if (IS_GEN7(ring->dev))
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	else
		intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);

	return ret;
}

static int do_switch(struct intel_ring_buffer *ring,
		     struct i915_hw_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct i915_hw_context *from = ring->last_context;
	struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
	u32 hw_flags = 0;
	int ret, i;

	if (from != NULL && ring == &dev_priv->ring[RCS]) {
		BUG_ON(from->obj == NULL);
		BUG_ON(!i915_gem_obj_is_pinned(from->obj));
	}

	if (from == to && from->last_ring == ring && !to->remap_slice)
		return 0;

	/* Trying to pin first makes error handling easier. */
	if (ring == &dev_priv->ring[RCS]) {
		ret = i915_gem_obj_ggtt_pin(to->obj,
					    get_context_alignment(ring->dev), 0);
		if (ret)
			return ret;
	}

	/*
	 * Pin can switch back to the default context if we end up calling into
	 * evict_everything - as a last ditch gtt defrag effort that also
	 * switches to the default context. Hence we need to reload from here.
	 */
	from = ring->last_context;

	if (USES_FULL_PPGTT(ring->dev)) {
		ret = ppgtt->switch_mm(ppgtt, ring, false);
		if (ret)
			goto unpin_out;
	}

	if (ring != &dev_priv->ring[RCS]) {
		if (from)
			i915_gem_context_unreference(from);
		goto done;
	}

	/*
	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that thanks to write = false in this call and us not setting any gpu
	 * write domains when putting a context object onto the active list
	 * (when switching away from it), this won't block.
	 *
	 * XXX: We need a real interface to do this instead of trickery.
	 */
	ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
	if (ret)
		goto unpin_out;

	if (!to->obj->has_global_gtt_mapping) {
		struct i915_vma *vma = i915_gem_obj_to_vma(to->obj,
							   &dev_priv->gtt.base);
		vma->bind_vma(vma, to->obj->cache_level, GLOBAL_BIND);
	}

	if (!to->is_initialized || i915_gem_context_is_default(to))
		hw_flags |= MI_RESTORE_INHIBIT;

	ret = mi_set_context(ring, to, hw_flags);
	if (ret)
		goto unpin_out;

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1 << i)))
			continue;

		ret = i915_gem_l3_remap(ring, i);
		/* If it failed, try again next round */
		if (ret)
			DRM_DEBUG_DRIVER("L3 remapping failed\n");
		else
			to->remap_slice &= ~(1 << i);
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from != NULL) {
		from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from->obj->dirty = 1;
		BUG_ON(from->obj->ring != ring);

		/* obj is kept alive until the next request by its active ref */
		i915_gem_object_ggtt_unpin(from->obj);
		i915_gem_context_unreference(from);
	}

	to->is_initialized = true;

done:
	i915_gem_context_reference(to);
	ring->last_context = to;
	to->last_ring = ring;

	return 0;

unpin_out:
	if (ring == &dev_priv->ring[RCS])
		i915_gem_object_ggtt_unpin(to->obj);
	return ret;
}

/**
 * i915_switch_context() - perform a GPU context switch.
 * @ring: ring for which we'll execute the context switch
 * @file: drm file associated with the context, may be NULL
 * @to: the context to switch to
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 */
int i915_switch_context(struct intel_ring_buffer *ring,
			struct drm_file *file,
			struct i915_hw_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

	BUG_ON(file && to == NULL);

	/* We have the fake context, but don't support switching. */
	if (!HAS_HW_CONTEXTS(ring->dev))
		return 0;

	return do_switch(ring, to);
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_hw_context *ctx;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	if (!HAS_HW_CONTEXTS(dev))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	args->ctx_id = ctx->id;
	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

	return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_hw_context *ctx;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	if (args->ctx_id == DEFAULT_CONTEXT_ID)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	idr_remove(&ctx->file_priv->context_idr, ctx->id);
	i915_gem_context_unreference(ctx);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
	return 0;
}