/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of
 * an opaque GPU object which is referenced at times of context saves and
 * restores. With RC6 enabled, the context is also referenced as the GPU enters
 * and exits RC6 (the GPU has its own internal power context, except on gen5).
 * Though something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored, to help prevent programming errors:
 * restoring it would amount to a client running with, and piggy-backing off,
 * another client's GPU state. The default context only exists to give the GPU
 * some offset to load as the current context, in order to invoke a save of
 * the context we actually care about. In fact, the code could likely be
 * constructed, albeit in a more complicated fashion, to never use the default
 * context, though that limits the driver's ability to swap out, and/or
 * destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These
 * contexts store GPU state, and thus allow GPU clients to not re-emit state
 * (and potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted. (A brief userspace-side
 * usage sketch follows this comment block.)
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: other client submits execbuf with context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
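 *
 * As a rough reading aid (not exhaustive), these transitions map onto the
 * code below as follows: S0->S1 is the kref_init() performed in
 * __create_hw_context(); S1->S2 is the pin plus i915_gem_context_reference()
 * done in do_switch(); and the destroy paths drop the last reference via
 * i915_gem_context_unreference(), which ends up in i915_gem_context_free().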
 *
 * There are two confusing terms used above:
 * The "current context" means the context which is currently running on the
 * GPU. The GPU has loaded its state already and has stored away the gtt
 * offset of the BO. The GPU is not actively referencing the data at this
 * offset, but it will on the next context switch. The only way to avoid this
 * is to do a GPU reset.
 *
 * An "active context" is one which was previously the "current context" and
 * is on the active list waiting for the next context switch to occur. Until
 * this happens, the object must remain at the same gtt offset. It is
 * therefore possible to destroy a context while it is still active.
 */
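
/*
 * Illustrative only, not part of the driver: a minimal sketch of how a
 * userspace client could exercise the create/destroy ioctls implemented at
 * the bottom of this file. drmIoctl() and the opened DRM fd come from libdrm
 * and are assumed here; error handling is omitted.
 *
 *	struct drm_i915_gem_context_create create = {};
 *	struct drm_i915_gem_context_destroy destroy = {};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
 *	(create.ctx_id now names the new context; an execbuf2 call can target
 *	it via i915_execbuffer2_set_context_id(eb2, create.ctx_id).)
 *	destroy.ctx_id = create.ctx_id;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 */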

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future proof as is.
 */
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096

static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &ppgtt->base;

	if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
	    (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
		ppgtt->base.cleanup(&ppgtt->base);
		return;
	}

	/*
	 * Make sure vmas are unbound before we take down the drm_mm
	 *
	 * FIXME: Proper refcounting should take care of this, this shouldn't be
	 * needed at all.
	 */
	if (!list_empty(&vm->active_list)) {
		struct i915_vma *vma;

		list_for_each_entry(vma, &vm->active_list, mm_list)
			if (WARN_ON(list_empty(&vma->vma_link) ||
				    list_is_singular(&vma->vma_link)))
				break;

		i915_gem_evict_vm(&ppgtt->base, true);
	} else {
		i915_gem_retire_requests(dev);
		i915_gem_evict_vm(&ppgtt->base, false);
	}

	ppgtt->base.cleanup(&ppgtt->base);
}

static void ppgtt_release(struct kref *kref)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(kref, struct i915_hw_ppgtt, ref);

	do_ppgtt_cleanup(ppgtt);
	kfree(ppgtt);
}

static size_t get_context_alignment(struct drm_device *dev)
{
	if (IS_GEN6(dev))
		return GEN6_CONTEXT_ALIGN;

	return GEN7_CONTEXT_ALIGN;
}

static int get_context_size(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	u32 reg;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
		if (IS_HASWELL(dev))
			ret = HSW_CXT_TOTAL_SIZE;
		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 8:
		ret = GEN8_CXT_TOTAL_SIZE;
		break;
	default:
		BUG();
	}

	return ret;
}

void i915_gem_context_free(struct kref *ctx_ref)
{
	struct intel_context *ctx = container_of(ctx_ref,
						 typeof(*ctx), ref);
	struct i915_hw_ppgtt *ppgtt = NULL;

	if (ctx->obj) {
		/* We refcount even the aliasing PPGTT to keep the code symmetric */
		if (USES_PPGTT(ctx->obj->base.dev))
			ppgtt = ctx_to_ppgtt(ctx);

		/* XXX: Free up the object before tearing down the address space, in
		 * case we're bound in the PPGTT */
		drm_gem_object_unreference(&ctx->obj->base);
	}

	if (ppgtt)
		kref_put(&ppgtt->ref, ppgtt_release);
	list_del(&ctx->link);
	kfree(ctx);
}

static struct i915_hw_ppgtt *
create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx)
{
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ret = i915_gem_init_ppgtt(dev, ppgtt);
	if (ret) {
		kfree(ppgtt);
		return ERR_PTR(ret);
	}

	ppgtt->ctx = ctx;
	return ppgtt;
}

static struct intel_context *
__create_hw_context(struct drm_device *dev,
		    struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->context_list);

	if (dev_priv->hw_context_size) {
		ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
		if (ctx->obj == NULL) {
			ret = -ENOMEM;
			goto err_out;
		}

		/*
		 * Try to make the context utilize L3 as well as LLC.
		 *
		 * On VLV we don't have L3 controls in the PTEs so we
		 * shouldn't touch the cache level, especially as that
		 * would make the object snooped which might have a
		 * negative performance impact.
		 */
		if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
			ret = i915_gem_object_set_cache_level(ctx->obj,
							      I915_CACHE_L3_LLC);
			/* Failure shouldn't ever happen this early */
			if (WARN_ON(ret))
				goto err_out;
		}
	}

	/* Default context will never have a file_priv */
	if (file_priv != NULL) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_ID, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
	} else
		ret = DEFAULT_CONTEXT_ID;

	ctx->file_priv = file_priv;
	ctx->id = ret;
	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;

	return ctx;

err_out:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

/**
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as an idle case.
 */
static struct intel_context *
i915_gem_create_context(struct drm_device *dev,
			struct drm_i915_file_private *file_priv,
			bool create_vm)
{
	const bool is_global_default_ctx = file_priv == NULL;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int ret = 0;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = __create_hw_context(dev, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (is_global_default_ctx && ctx->obj) {
		/* We may need to do things with the shrinker which
		 * require us to immediately switch back to the default
		 * context. This can cause a problem as pinning the
		 * default context also requires GTT space which may not
		 * be available. To avoid this we always pin the default
		 * context.
		 */
		ret = i915_gem_obj_ggtt_pin(ctx->obj,
					    get_context_alignment(dev), 0);
		if (ret) {
			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
			goto err_destroy;
		}
	}

	if (create_vm) {
		struct i915_hw_ppgtt *ppgtt = create_vm_for_ctx(dev, ctx);

		if (IS_ERR_OR_NULL(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			ret = PTR_ERR(ppgtt);
			goto err_unpin;
		} else
			ctx->vm = &ppgtt->base;

		/* This case is reserved for the global default context and
		 * should only happen once. */
		if (is_global_default_ctx) {
			if (WARN_ON(dev_priv->mm.aliasing_ppgtt)) {
				ret = -EEXIST;
				goto err_unpin;
			}

			dev_priv->mm.aliasing_ppgtt = ppgtt;
		}
	} else if (USES_PPGTT(dev)) {
		/* For platforms which only have aliasing PPGTT, we fake the
		 * address space and refcounting. */
		ctx->vm = &dev_priv->mm.aliasing_ppgtt->base;
		kref_get(&dev_priv->mm.aliasing_ppgtt->ref);
	} else
		ctx->vm = &dev_priv->gtt.base;

	return ctx;

err_unpin:
	if (is_global_default_ctx && ctx->obj)
		i915_gem_object_ggtt_unpin(ctx->obj);
err_destroy:
	i915_gem_context_unreference(ctx);
	return ERR_PTR(ret);
}

void i915_gem_context_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Prevent the hardware from restoring the last context (which hung) on
	 * the next switch */
	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];
		struct intel_context *dctx = ring->default_context;

		/* Do a fake switch to the default context */
		if (ring->last_context == dctx)
			continue;

		if (!ring->last_context)
			continue;

		if (dctx->obj && i == RCS) {
			WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
						      get_context_alignment(dev), 0));
			/* Fake a finish/inactive */
			dctx->obj->base.write_domain = 0;
			dctx->obj->active = 0;
		}

		i915_gem_context_unreference(ring->last_context);
		i915_gem_context_reference(dctx);
		ring->last_context = dctx;
	}
}

int i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *ctx;
	int i;

	/* Init should only be called once per module load. Eventually the
	 * restriction on the context_disabled check can be loosened. */
	if (WARN_ON(dev_priv->ring[RCS].default_context))
		return 0;

	if (HAS_HW_CONTEXTS(dev)) {
		dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
		if (dev_priv->hw_context_size > (1<<20)) {
			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
					 dev_priv->hw_context_size);
			dev_priv->hw_context_size = 0;
		}
	}

	ctx = i915_gem_create_context(dev, NULL, USES_PPGTT(dev));
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context (error %ld)\n",
			  PTR_ERR(ctx));
		return PTR_ERR(ctx);
	}

	/* NB: RCS will hold a ref for all rings */
	for (i = 0; i < I915_NUM_RINGS; i++)
		dev_priv->ring[i].default_context = ctx;

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			 dev_priv->hw_context_size ? "HW" : "fake");
	return 0;
}

void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_context *dctx = dev_priv->ring[RCS].default_context;
	int i;

	if (dctx->obj) {
		/* The only known way to stop the gpu from accessing the hw context is
		 * to reset it. Do this as the very last operation to avoid confusing
		 * other code, leading to spurious errors. */
		intel_gpu_reset(dev);

		/* When default context is created and switched to, base object refcount
		 * will be 2 (+1 from object creation and +1 from do_switch()).
		 * i915_gem_context_fini() will be called after gpu_idle() has switched
		 * to default context. So we need to unreference the base object once
		 * to offset the do_switch part, so that i915_gem_context_unreference()
		 * can then free the base object correctly. */
		WARN_ON(!dev_priv->ring[RCS].last_context);
		if (dev_priv->ring[RCS].last_context == dctx) {
			/* Fake switch to NULL context */
			WARN_ON(dctx->obj->active);
			i915_gem_object_ggtt_unpin(dctx->obj);
			i915_gem_context_unreference(dctx);
			dev_priv->ring[RCS].last_context = NULL;
		}

		i915_gem_object_ggtt_unpin(dctx->obj);
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_engine_cs *ring = &dev_priv->ring[i];

		if (ring->last_context)
			i915_gem_context_unreference(ring->last_context);

		ring->default_context = NULL;
		ring->last_context = NULL;
	}

	i915_gem_context_unreference(dctx);
}

int i915_gem_context_enable(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int ret, i;

	/* This is the only place the aliasing PPGTT gets enabled, which means
	 * it has to happen before we bail on reset */
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
		ppgtt->enable(ppgtt);
	}

	/* FIXME: We should make this work, even in reset */
	if (i915_reset_in_progress(&dev_priv->gpu_error))
		return 0;

	BUG_ON(!dev_priv->ring[RCS].default_context);

	for_each_ring(ring, dev_priv, i) {
		ret = i915_switch_context(ring, ring->default_context);
		if (ret)
			return ret;
	}

	return 0;
}

static int context_idr_cleanup(int id, void *p, void *data)
{
	struct intel_context *ctx = p;

	i915_gem_context_unreference(ctx);
	return 0;
}

int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&dev->struct_mutex);
	ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	return 0;
}

void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}

struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
	struct intel_context *ctx;

	ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
	if (!ctx)
		return ERR_PTR(-ENOENT);

	return ctx;
}

static inline int
mi_set_context(struct intel_engine_cs *ring,
	       struct intel_context *new_context,
	       u32 hw_flags)
{
	int ret;

	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
	if (IS_GEN6(ring->dev)) {
		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
		if (ret)
			return ret;
	}

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (INTEL_INFO(ring->dev)->gen >= 7)
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
	else
		intel_ring_emit(ring, MI_NOOP);

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
			MI_MM_SPACE_GTT |
			MI_SAVE_EXT_STATE_EN |
			MI_RESTORE_EXT_STATE_EN |
			hw_flags);
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	intel_ring_emit(ring, MI_NOOP);

	if (INTEL_INFO(ring->dev)->gen >= 7)
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	else
		intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);

	return ret;
}

static int do_switch(struct intel_engine_cs *ring,
		     struct intel_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_context *from = ring->last_context;
	struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
	u32 hw_flags = 0;
	bool uninitialized = false;
	int ret, i;

	if (from != NULL && ring == &dev_priv->ring[RCS]) {
		BUG_ON(from->obj == NULL);
		BUG_ON(!i915_gem_obj_is_pinned(from->obj));
	}

	if (from == to && !to->remap_slice)
		return 0;

	/* Trying to pin first makes error handling easier. */
	if (ring == &dev_priv->ring[RCS]) {
		ret = i915_gem_obj_ggtt_pin(to->obj,
					    get_context_alignment(ring->dev), 0);
		if (ret)
			return ret;
	}

	/*
	 * Pin can switch back to the default context if we end up calling into
	 * evict_everything - as a last ditch gtt defrag effort that also
	 * switches to the default context. Hence we need to reload from here.
	 */
	from = ring->last_context;

	if (USES_FULL_PPGTT(ring->dev)) {
		ret = ppgtt->switch_mm(ppgtt, ring, false);
		if (ret)
			goto unpin_out;
	}

	if (ring != &dev_priv->ring[RCS]) {
		if (from)
			i915_gem_context_unreference(from);
		goto done;
	}

	/*
	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that thanks to write = false in this call and us not setting any gpu
	 * write domains when putting a context object onto the active list
	 * (when switching away from it), this won't block.
	 *
	 * XXX: We need a real interface to do this instead of trickery.
	 */
	ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
	if (ret)
		goto unpin_out;

	if (!to->obj->has_global_gtt_mapping) {
		struct i915_vma *vma = i915_gem_obj_to_vma(to->obj,
							   &dev_priv->gtt.base);
		vma->bind_vma(vma, to->obj->cache_level, GLOBAL_BIND);
	}

	if (!to->is_initialized || i915_gem_context_is_default(to))
		hw_flags |= MI_RESTORE_INHIBIT;

	ret = mi_set_context(ring, to, hw_flags);
	if (ret)
		goto unpin_out;

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1<<i)))
			continue;

		ret = i915_gem_l3_remap(ring, i);
		/* If it failed, try again next round */
		if (ret)
			DRM_DEBUG_DRIVER("L3 remapping failed\n");
		else
			to->remap_slice &= ~(1<<i);
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from != NULL) {
		from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from->obj->dirty = 1;
		BUG_ON(from->obj->ring != ring);

		/* obj is kept alive until the next request by its active ref */
		i915_gem_object_ggtt_unpin(from->obj);
		i915_gem_context_unreference(from);
	}

	uninitialized = !to->is_initialized && from == NULL;
	to->is_initialized = true;

done:
	i915_gem_context_reference(to);
	ring->last_context = to;

	if (uninitialized) {
		ret = i915_gem_render_state_init(ring);
		if (ret)
			DRM_ERROR("init render state: %d\n", ret);
	}

	return 0;

unpin_out:
	if (ring->id == RCS)
		i915_gem_object_ggtt_unpin(to->obj);
	return ret;
}

/**
 * i915_switch_context() - perform a GPU context switch.
 * @ring: ring for which we'll execute the context switch
 * @to: the context to switch to
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 */
int i915_switch_context(struct intel_engine_cs *ring,
			struct intel_context *to)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

	if (to->obj == NULL) { /* We have the fake context */
		if (to != ring->last_context) {
			i915_gem_context_reference(to);
			if (ring->last_context)
				i915_gem_context_unreference(ring->last_context);
			ring->last_context = to;
		}
		return 0;
	}

	return do_switch(ring, to);
}
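
/*
 * Illustrative only: a simplified sketch of the typical in-kernel caller,
 * the execbuffer path, which looks up the context named in the request and
 * switches to it under struct_mutex before emitting commands. The exact
 * surrounding code is abbreviated here.
 *
 *	mutex_lock(&dev->struct_mutex);
 *	ctx = i915_gem_context_get(file->driver_priv, ctx_id);
 *	if (!IS_ERR(ctx))
 *		ret = i915_switch_context(ring, ctx);
 *	mutex_unlock(&dev->struct_mutex);
 */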

static bool hw_context_enabled(struct drm_device *dev)
{
	return to_i915(dev)->hw_context_size;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (!hw_context_enabled(dev))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	args->ctx_id = ctx->id;
	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

	return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct intel_context *ctx;
	int ret;

	if (args->ctx_id == DEFAULT_CONTEXT_ID)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	idr_remove(&ctx->file_priv->context_idr, ctx->id);
	i915_gem_context_unreference(ctx);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
	return 0;
}