/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of
 * an opaque GPU object which is referenced at times of context saves and
 * restores. With RC6 enabled, the context is also referenced as the GPU
 * enters and exits from RC6 (the GPU has its own internal power context,
 * except on gen5). Though something like a context does exist for the media
 * ring, the code only supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU
 * state. The default context only exists to give the GPU some offset to load
 * as the current to invoke a save of the context we actually care about. In
 * fact, the code could likely be constructed, albeit in a more complicated
 * fashion, to never use the default context, though that limits the driver's
 * ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These
 * contexts store GPU state, and thus allow GPU clients to not re-emit state
 * (and potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware, and object
 * tracking works. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and
 *  is on the active list waiting for the next context switch to occur. Until
 *  this happens, the object must remain at the same gtt offset. It is
 *  therefore possible to destroy a context, but it is still active.
 *
 */
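
/*
 * Illustrative sketch (not part of this file): from userspace, the S0->S1
 * and S2->S4 transitions above are driven through the DRM uapi. This is a
 * hedged example, assuming a render node at /dev/dri/renderD128 and the
 * definitions from include/uapi/drm/i915_drm.h:
 *
 *	int fd = open("/dev/dri/renderD128", O_RDWR);
 *	struct drm_i915_gem_context_create create = {};
 *
 *	// S0->S1: create a context; the kernel returns the new handle
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
 *
 *	// S1->S2: execbuffers submitted with create.ctx_id run on the context
 *
 *	// S2->S4 (or back to S0 if idle): drop the handle
 *	struct drm_i915_gem_context_destroy destroy = {
 *		.ctx_id = create.ctx_id,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 */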

#include <linux/log2.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"

#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
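
/*
 * Worked example: on a part where NUM_L3_SLICES() is 2, this evaluates to
 * (1 << 2) - 1 = 0x3, i.e. one bit set per slice. The outer parentheses
 * matter: without them an expression such as ALL_L3_SLICES(dev) & mask
 * would bind the subtraction incorrectly.
 */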

static void lut_close(struct i915_gem_context *ctx)
{
	struct i915_lut_handle *lut, *ln;
	struct radix_tree_iter iter;
	void __rcu **slot;

	list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) {
		list_del(&lut->obj_link);
		kmem_cache_free(ctx->i915->luts, lut);
	}

	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
		struct i915_vma *vma = rcu_dereference_raw(*slot);

		radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
		__i915_gem_object_release_unless_active(vma->obj);
	}
}

static void i915_gem_context_free(struct i915_gem_context *ctx)
{
	int i;

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));

	i915_ppgtt_put(ctx->ppgtt);

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct intel_context *ce = &ctx->engine[i];

		if (!ce->state)
			continue;

		WARN_ON(ce->pin_count);
		if (ce->ring)
			intel_ring_free(ce->ring);

		__i915_gem_object_release_unless_active(ce->state->obj);
	}

	kfree(ctx->jump_whitelist);

	kfree(ctx->name);
	put_pid(ctx->pid);

	list_del(&ctx->link);

	ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id);
	kfree_rcu(ctx, rcu);
}

static void contexts_free(struct drm_i915_private *i915)
{
	struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
	struct i915_gem_context *ctx, *cn;

	lockdep_assert_held(&i915->drm.struct_mutex);

	llist_for_each_entry_safe(ctx, cn, freed, free_link)
		i915_gem_context_free(ctx);
}

static void contexts_free_first(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx;
	struct llist_node *freed;

	lockdep_assert_held(&i915->drm.struct_mutex);

	freed = llist_del_first(&i915->contexts.free_list);
	if (!freed)
		return;

	ctx = container_of(freed, typeof(*ctx), free_link);
	i915_gem_context_free(ctx);
}

static void contexts_free_worker(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, typeof(*i915), contexts.free_work);

	mutex_lock(&i915->drm.struct_mutex);
	contexts_free(i915);
	mutex_unlock(&i915->drm.struct_mutex);
}

void i915_gem_context_release(struct kref *ref)
{
	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
	struct drm_i915_private *i915 = ctx->i915;

	trace_i915_context_free(ctx);
	if (llist_add(&ctx->free_link, &i915->contexts.free_list))
		queue_work(i915->wq, &i915->contexts.free_work);
}

static void context_close(struct i915_gem_context *ctx)
{
	i915_gem_context_set_closed(ctx);

	/*
	 * The LUT uses the VMA as a backpointer to unref the object,
	 * so we need to clear the LUT before we close all the VMA (inside
	 * the ppgtt).
	 */
	lut_close(ctx);
	if (ctx->ppgtt)
		i915_ppgtt_close(&ctx->ppgtt->base);

	ctx->file_priv = ERR_PTR(-EBADF);
	i915_gem_context_put(ctx);
}

static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
{
	int ret;

	ret = ida_simple_get(&dev_priv->contexts.hw_ida,
			     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
	if (ret < 0) {
		/* Contexts are only released when no longer active.
		 * Flush any pending retires to hopefully release some
		 * stale contexts and try again.
		 */
		i915_gem_retire_requests(dev_priv);
		ret = ida_simple_get(&dev_priv->contexts.hw_ida,
				     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	*out = ret;
	return 0;
}

static u32 default_desc_template(const struct drm_i915_private *i915,
				 const struct i915_hw_ppgtt *ppgtt)
{
	u32 address_mode;
	u32 desc;

	desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;

	address_mode = INTEL_LEGACY_32B_CONTEXT;
	if (ppgtt && i915_vm_is_48bit(&ppgtt->base))
		address_mode = INTEL_LEGACY_64B_CONTEXT;
	desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;

	if (IS_GEN8(i915))
		desc |= GEN8_CTX_L3LLC_COHERENT;

	/* TODO: WaDisableLiteRestore when we start using semaphore
	 * signalling between Command Streamers
	 * ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
	 */

	return desc;
}
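
/*
 * For example, a context backed by a full 48b ppgtt gets the descriptor
 * GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE |
 * (INTEL_LEGACY_64B_CONTEXT << GEN8_CTX_ADDRESSING_MODE_SHIFT); without a
 * ppgtt (or with a 32b one) the addressing-mode field holds
 * INTEL_LEGACY_32B_CONTEXT instead.
 */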

static struct i915_gem_context *
__create_hw_context(struct drm_i915_private *dev_priv,
		    struct drm_i915_file_private *file_priv)
{
	struct i915_gem_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	ret = assign_hw_id(dev_priv, &ctx->hw_id);
	if (ret) {
		kfree(ctx);
		return ERR_PTR(ret);
	}

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->contexts.list);
	ctx->i915 = dev_priv;
	ctx->priority = I915_PRIORITY_NORMAL;

	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
	INIT_LIST_HEAD(&ctx->handles_list);

	/* Default context will never have a file_priv */
	ret = DEFAULT_CONTEXT_HANDLE;
	if (file_priv) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_lut;
	}
	ctx->user_handle = ret;

	ctx->file_priv = file_priv;
	if (file_priv) {
		ctx->pid = get_task_pid(current, PIDTYPE_PID);
		ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x",
				      current->comm,
				      pid_nr(ctx->pid),
				      ctx->user_handle);
		if (!ctx->name) {
			ret = -ENOMEM;
			goto err_pid;
		}
	}

	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = ALL_L3_SLICES(dev_priv);

	i915_gem_context_set_bannable(ctx);
	ctx->ring_size = 4 * PAGE_SIZE;
	ctx->desc_template =
		default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);

	/* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not
	 * present or not in use we still need a small bias as ring wraparound
	 * at offset 0 sometimes hangs. No idea why.
	 */
	if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading)
		ctx->ggtt_offset_bias = GUC_WOPCM_TOP;
	else
		ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;

	ctx->jump_whitelist = NULL;
	ctx->jump_whitelist_cmds = 0;

	return ctx;

err_pid:
	put_pid(ctx->pid);
	idr_remove(&file_priv->context_idr, ctx->user_handle);
err_lut:
	context_close(ctx);
	return ERR_PTR(ret);
}

static void __destroy_hw_context(struct i915_gem_context *ctx,
				 struct drm_i915_file_private *file_priv)
{
	idr_remove(&file_priv->context_idr, ctx->user_handle);
	context_close(ctx);
}

/*
 * The default context needs to exist per ring that uses contexts. It stores
 * the context state of the GPU for applications that don't utilize HW
 * contexts, as well as an idle case.
 */
static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *dev_priv,
			struct drm_i915_file_private *file_priv)
{
	struct i915_gem_context *ctx;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	/* Reap the most stale context */
	contexts_free_first(dev_priv);

	ctx = __create_hw_context(dev_priv, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (USES_FULL_PPGTT(dev_priv)) {
		struct i915_hw_ppgtt *ppgtt;

		ppgtt = i915_ppgtt_create(dev_priv, file_priv, ctx->name);
		if (IS_ERR(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			__destroy_hw_context(ctx, file_priv);
			return ERR_CAST(ppgtt);
		}

		ctx->ppgtt = ppgtt;
		ctx->desc_template = default_desc_template(dev_priv, ppgtt);
	}

	trace_i915_context_create(ctx);

	return ctx;
}

/**
 * i915_gem_context_create_gvt - create a GVT GEM context
 * @dev: drm device *
 *
 * This function is used to create a GVT specific GEM context.
 *
 * Returns:
 * pointer to i915_gem_context on success, error pointer if failed
 *
 */
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev)
{
	struct i915_gem_context *ctx;
	int ret;

	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
		return ERR_PTR(-ENODEV);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	ctx = __create_hw_context(to_i915(dev), NULL);
	if (IS_ERR(ctx))
		goto out;

	ctx->file_priv = ERR_PTR(-EBADF);
	i915_gem_context_set_closed(ctx); /* not user accessible */
	i915_gem_context_clear_bannable(ctx);
	i915_gem_context_set_force_single_submission(ctx);
	if (!i915_modparams.enable_guc_submission)
		ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */

	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
out:
	mutex_unlock(&dev->struct_mutex);
	return ctx;
}

static struct i915_gem_context *
create_kernel_context(struct drm_i915_private *i915, int prio)
{
	struct i915_gem_context *ctx;

	ctx = i915_gem_create_context(i915, NULL);
	if (IS_ERR(ctx))
		return ctx;

	i915_gem_context_clear_bannable(ctx);
	ctx->priority = prio;
	ctx->ring_size = PAGE_SIZE;

	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));

	return ctx;
}

static void
destroy_kernel_context(struct i915_gem_context **ctxp)
{
	struct i915_gem_context *ctx;

	/* Keep the context ref so that we can free it immediately ourselves */
	ctx = i915_gem_context_get(fetch_and_zero(ctxp));
	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));

	context_close(ctx);
	i915_gem_context_free(ctx);
}

int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
{
	struct i915_gem_context *ctx;
	int err;

	GEM_BUG_ON(dev_priv->kernel_context);

	INIT_LIST_HEAD(&dev_priv->contexts.list);
	INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
	init_llist_head(&dev_priv->contexts.free_list);

	if (intel_vgpu_active(dev_priv) &&
	    HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
		if (!i915_modparams.enable_execlists) {
			DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
			return -EINVAL;
		}
	}

	/* Using the simple ida interface, the max is limited by sizeof(int) */
	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
	ida_init(&dev_priv->contexts.hw_ida);

	/* lowest priority; idle task */
	ctx = create_kernel_context(dev_priv, I915_PRIORITY_MIN);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context\n");
		err = PTR_ERR(ctx);
		goto err;
	}
	/*
	 * For easy recognisability, we want the kernel context to be 0 and
	 * then all user contexts will have non-zero hw_id.
	 */
	GEM_BUG_ON(ctx->hw_id);
	dev_priv->kernel_context = ctx;

	/* highest priority; preempting task */
	ctx = create_kernel_context(dev_priv, INT_MAX);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default preempt context\n");
		err = PTR_ERR(ctx);
		goto err_kernel_context;
	}
	dev_priv->preempt_context = ctx;

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			 dev_priv->engine[RCS]->context_size ? "logical" :
			 "fake");
	return 0;

err_kernel_context:
	destroy_kernel_context(&dev_priv->kernel_context);
err:
	return err;
}

void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	for_each_engine(engine, dev_priv, id) {
		engine->legacy_active_context = NULL;

		if (!engine->last_retired_context)
			continue;

		engine->context_unpin(engine, engine->last_retired_context);
		engine->last_retired_context = NULL;
	}

	/* Force the GPU state to be restored on enabling */
	if (!i915_modparams.enable_execlists) {
		struct i915_gem_context *ctx;

		list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
			if (!i915_gem_context_is_default(ctx))
				continue;

			for_each_engine(engine, dev_priv, id)
				ctx->engine[engine->id].initialised = false;

			ctx->remap_slice = ALL_L3_SLICES(dev_priv);
		}

		for_each_engine(engine, dev_priv, id) {
			struct intel_context *kce =
				&dev_priv->kernel_context->engine[engine->id];

			kce->initialised = true;
		}
	}
}

void i915_gem_contexts_fini(struct drm_i915_private *i915)
{
	lockdep_assert_held(&i915->drm.struct_mutex);

	destroy_kernel_context(&i915->preempt_context);
	destroy_kernel_context(&i915->kernel_context);

	/* Must free all deferred contexts (via flush_workqueue) first */
	ida_destroy(&i915->contexts.hw_ida);
}

static int context_idr_cleanup(int id, void *p, void *data)
{
	struct i915_gem_context *ctx = p;

	context_close(ctx);
	return 0;
}

int i915_gem_context_open(struct drm_i915_private *i915,
			  struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&i915->drm.struct_mutex);
	ctx = i915_gem_create_context(i915, file_priv);
	mutex_unlock(&i915->drm.struct_mutex);
	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));

	return 0;
}

void i915_gem_context_close(struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}

static int
mi_set_context(struct drm_i915_gem_request *req, u32 flags)
{
	struct drm_i915_private *dev_priv = req->i915;
	struct intel_engine_cs *engine = req->engine;
	enum intel_engine_id id;
	const int num_rings =
		/* Use an extended w/a on gen7 if signalling from other rings */
		(i915_modparams.semaphores && INTEL_GEN(dev_priv) == 7) ?
		INTEL_INFO(dev_priv)->num_rings - 1 :
		0;
	int len;
	u32 *cs;

	flags |= MI_MM_SPACE_GTT;
	if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
		/* These flags are for resource streamer on HSW+ */
		flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
	else
		flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;

	len = 4;
	if (INTEL_GEN(dev_priv) >= 7)
		len += 2 + (num_rings ? 4*num_rings + 6 : 0);

	cs = intel_ring_begin(req, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (INTEL_GEN(dev_priv) >= 7) {
		*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
		if (num_rings) {
			struct intel_engine_cs *signaller;

			*cs++ = MI_LOAD_REGISTER_IMM(num_rings);
			for_each_engine(signaller, dev_priv, id) {
				if (signaller == engine)
					continue;

				*cs++ = i915_mmio_reg_offset(
					   RING_PSMI_CTL(signaller->mmio_base));
				*cs++ = _MASKED_BIT_ENABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}
		}
	}

	*cs++ = MI_NOOP;
	*cs++ = MI_SET_CONTEXT;
	*cs++ = i915_ggtt_offset(req->ctx->engine[RCS].state) | flags;
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	*cs++ = MI_NOOP;

	if (INTEL_GEN(dev_priv) >= 7) {
		if (num_rings) {
			struct intel_engine_cs *signaller;
			i915_reg_t last_reg = {}; /* keep gcc quiet */

			*cs++ = MI_LOAD_REGISTER_IMM(num_rings);
			for_each_engine(signaller, dev_priv, id) {
				if (signaller == engine)
					continue;

				last_reg = RING_PSMI_CTL(signaller->mmio_base);
				*cs++ = i915_mmio_reg_offset(last_reg);
				*cs++ = _MASKED_BIT_DISABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}

			/* Insert a delay before the next switch! */
			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
			*cs++ = i915_mmio_reg_offset(last_reg);
			*cs++ = i915_ggtt_offset(engine->scratch);
			*cs++ = MI_NOOP;
		}
		*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	}

	intel_ring_advance(req, cs);

	return 0;
}
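
/*
 * Worked example of the ring-space accounting above: on gen7 with semaphore
 * signalling and num_rings == 2, len = 4 + 2 + (4*2 + 6) = 20 dwords. The
 * emitted stream is then, in order: MI_ARB_ON_OFF | MI_ARB_DISABLE, an
 * MI_LOAD_REGISTER_IMM(2) block masking GEN6_PSMI_SLEEP_MSG_DISABLE on the
 * other rings, MI_NOOP, MI_SET_CONTEXT, the context image's ggtt offset |
 * flags, the workaround MI_NOOP, a matching LRI block re-enabling the sleep
 * message, a 4-dword MI_STORE_REGISTER_MEM delay, and finally
 * MI_ARB_ON_OFF | MI_ARB_ENABLE.
 */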

static int remap_l3(struct drm_i915_gem_request *req, int slice)
{
	u32 *cs, *remap_info = req->i915->l3_parity.remap_info[slice];
	int i;

	if (!remap_info)
		return 0;

	cs = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
		*cs++ = remap_info[i];
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}
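
/*
 * Ring-space arithmetic for the above: one LRI header, GEN7_L3LOG_SIZE/4
 * register/value pairs (two dwords each) and a trailing MI_NOOP, i.e.
 * GEN7_L3LOG_SIZE/4 * 2 + 2 dwords, matching the intel_ring_begin() request.
 */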

static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
				   struct intel_engine_cs *engine,
				   struct i915_gem_context *to)
{
	if (to->remap_slice)
		return false;

	if (!to->engine[RCS].initialised)
		return false;

	if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
		return false;

	return to == engine->legacy_active_context;
}

static bool
needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt, struct intel_engine_cs *engine)
{
	struct i915_gem_context *from = engine->legacy_active_context;

	if (!ppgtt)
		return false;

	/* Always load the ppgtt on first use */
	if (!from)
		return true;

	/* Same context without new entries, skip */
	if ((!from->ppgtt || from->ppgtt == ppgtt) &&
	    !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
		return false;

	if (engine->id != RCS)
		return true;

	if (INTEL_GEN(engine->i915) < 8)
		return true;

	return false;
}

static bool
needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
		   struct i915_gem_context *to,
		   u32 hw_flags)
{
	if (!ppgtt)
		return false;

	if (!IS_GEN8(to->i915))
		return false;

	if (hw_flags & MI_RESTORE_INHIBIT)
		return true;

	return false;
}

static int do_rcs_switch(struct drm_i915_gem_request *req)
{
	struct i915_gem_context *to = req->ctx;
	struct intel_engine_cs *engine = req->engine;
	struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
	struct i915_gem_context *from = engine->legacy_active_context;
	u32 hw_flags;
	int ret, i;

	GEM_BUG_ON(engine->id != RCS);

	if (skip_rcs_switch(ppgtt, engine, to))
		return 0;

	if (needs_pd_load_pre(ppgtt, engine)) {
		/* Older GENs and non render rings still want the load first,
		 * "PP_DCLV followed by PP_DIR_BASE register through Load
		 * Register Immediate commands in Ring Buffer before submitting
		 * a batch buffer". */
		trace_switch_mm(engine, to);
		ret = ppgtt->switch_mm(ppgtt, req);
		if (ret)
			return ret;
	}

	if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
		/* NB: If we inhibit the restore, the context is not allowed to
		 * die because future work may end up depending on valid address
		 * space. This means we must enforce that a page table load
		 * occur when this occurs. */
		hw_flags = MI_RESTORE_INHIBIT;
	else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
		hw_flags = MI_FORCE_RESTORE;
	else
		hw_flags = 0;

	if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
		ret = mi_set_context(req, hw_flags);
		if (ret)
			return ret;

		engine->legacy_active_context = to;
	}

	/* GEN8 does *not* require an explicit reload if the PDPs have been
	 * setup, and we do not wish to move them.
	 */
	if (needs_pd_load_post(ppgtt, to, hw_flags)) {
		trace_switch_mm(engine, to);
		ret = ppgtt->switch_mm(ppgtt, req);
		/* The hardware context switch is emitted, but we haven't
		 * actually changed the state - so it's probably safe to bail
		 * here. Still, let the user know something dangerous has
		 * happened.
		 */
		if (ret)
			return ret;
	}

	if (ppgtt)
		ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1<<i)))
			continue;

		ret = remap_l3(req, i);
		if (ret)
			return ret;

		to->remap_slice &= ~(1<<i);
	}

	if (!to->engine[RCS].initialised) {
		if (engine->init_context) {
			ret = engine->init_context(req);
			if (ret)
				return ret;
		}
		to->engine[RCS].initialised = true;
	}

	return 0;
}

/**
 * i915_switch_context() - perform a GPU context switch.
 * @req: request for which we'll execute the context switch
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 *
 * This function should not be used in execlists mode. Instead the context is
 * switched by writing to the ELSP and requests keep a reference to their
 * context.
 */
int i915_switch_context(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;

	lockdep_assert_held(&req->i915->drm.struct_mutex);
	if (i915_modparams.enable_execlists)
		return 0;

	if (!req->ctx->engine[engine->id].state) {
		struct i915_gem_context *to = req->ctx;
		struct i915_hw_ppgtt *ppgtt =
			to->ppgtt ?: req->i915->mm.aliasing_ppgtt;

		if (needs_pd_load_pre(ppgtt, engine)) {
			int ret;

			trace_switch_mm(engine, to);
			ret = ppgtt->switch_mm(ppgtt, req);
			if (ret)
				return ret;

			ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
		}

		engine->legacy_active_context = to;
		return 0;
	}

	return do_rcs_switch(req);
}

static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
{
	struct i915_gem_timeline *timeline;

	list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
		struct intel_timeline *tl;

		if (timeline == &engine->i915->gt.global_timeline)
			continue;

		tl = &timeline->engine[engine->id];
		if (i915_gem_active_peek(&tl->last_request,
					 &engine->i915->drm.struct_mutex))
			return false;
	}

	return intel_engine_has_kernel_context(engine);
}

int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	struct i915_gem_timeline *timeline;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	i915_gem_retire_requests(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		struct drm_i915_gem_request *req;
		int ret;

		if (engine_has_idle_kernel_context(engine))
			continue;

		req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
		if (IS_ERR(req))
			return PTR_ERR(req);

		/* Queue this switch after all other activity */
		list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
			struct drm_i915_gem_request *prev;
			struct intel_timeline *tl;

			tl = &timeline->engine[engine->id];
			prev = i915_gem_active_raw(&tl->last_request,
						   &dev_priv->drm.struct_mutex);
			if (prev)
				i915_sw_fence_await_sw_fence_gfp(&req->submit,
								 &prev->submit,
								 GFP_KERNEL);
		}

		ret = i915_switch_context(req);
		i915_add_request(req);
		if (ret)
			return ret;
	}

	return 0;
}

static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
	return atomic_read(&file_priv->context_bans) > I915_MAX_CLIENT_CONTEXT_BANS;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int ret;

	if (!dev_priv->engine[RCS]->context_size)
		return -ENODEV;

	if (args->pad != 0)
		return -EINVAL;

	if (client_is_banned(file_priv)) {
		DRM_DEBUG("client %s[%d] banned from creating ctx\n",
			  current->comm,
			  pid_nr(get_task_pid(current, PIDTYPE_PID)));

		return -EIO;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev_priv, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));

	args->ctx_id = ctx->user_handle;
	DRM_DEBUG("HW context %d created\n", args->ctx_id);

	return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int ret;

	if (args->pad != 0)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOENT;
	}

	__destroy_hw_context(ctx, file_priv);
	mutex_unlock(&dev->struct_mutex);

	i915_gem_context_put(ctx);
	return 0;
}

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret = 0;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	args->size = 0;
	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		ret = -EINVAL;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
		break;
	case I915_CONTEXT_PARAM_GTT_SIZE:
		if (ctx->ppgtt)
			args->value = ctx->ppgtt->base.total;
		else if (to_i915(dev)->mm.aliasing_ppgtt)
			args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
		else
			args->value = to_i915(dev)->ggtt.base.total;
		break;
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->value = i915_gem_context_no_error_capture(ctx);
		break;
	case I915_CONTEXT_PARAM_BANNABLE:
		args->value = i915_gem_context_is_bannable(ctx);
		break;
	case I915_CONTEXT_PARAM_PRIORITY:
		args->value = ctx->priority;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	i915_gem_context_put(ctx);
	return ret;
}
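
/*
 * Hedged userspace sketch (not part of this file): querying the GTT size
 * for the default context through this ioctl, using the definitions from
 * include/uapi/drm/i915_drm.h:
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = 0,	// DEFAULT_CONTEXT_HANDLE
 *		.param = I915_CONTEXT_PARAM_GTT_SIZE,
 *	};
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p) == 0)
 *		printf("GTT size: %llu\n", (unsigned long long)p.value);
 */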

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		ret = -EINVAL;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		if (args->size) {
			ret = -EINVAL;
		} else {
			ctx->flags &= ~CONTEXT_NO_ZEROMAP;
			ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
		}
		break;
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			i915_gem_context_set_no_error_capture(ctx);
		else
			i915_gem_context_clear_no_error_capture(ctx);
		break;
	case I915_CONTEXT_PARAM_BANNABLE:
		if (args->size)
			ret = -EINVAL;
		else if (!capable(CAP_SYS_ADMIN) && !args->value)
			ret = -EPERM;
		else if (args->value)
			i915_gem_context_set_bannable(ctx);
		else
			i915_gem_context_clear_bannable(ctx);
		break;
	case I915_CONTEXT_PARAM_PRIORITY:
		{
			int priority = args->value;

			if (args->size)
				ret = -EINVAL;
			else if (!to_i915(dev)->engine[RCS]->schedule)
				ret = -ENODEV;
			else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
				 priority < I915_CONTEXT_MIN_USER_PRIORITY)
				ret = -EINVAL;
			else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
				 !capable(CAP_SYS_NICE))
				ret = -EPERM;
			else
				ctx->priority = priority;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	mutex_unlock(&dev->struct_mutex);

out:
	i915_gem_context_put(ctx);
	return ret;
}
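
/*
 * Hedged userspace sketch (not part of this file): raising a context's
 * priority above the default requires CAP_SYS_NICE, as enforced above:
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_PRIORITY,
 *		.value = 512,	// above the default of 0, needs CAP_SYS_NICE
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */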

int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_gem_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	ret = -ENOENT;
	rcu_read_lock();
	ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
	if (!ctx)
		goto out;

	/*
	 * We opt for unserialised reads here. This may result in tearing
	 * in the extremely unlikely event of a GPU hang on this context
	 * as we are querying them. If we need that extra layer of protection,
	 * we should wrap the hangstats with a seqlock.
	 */

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = atomic_read(&ctx->guilty_count);
	args->batch_pending = atomic_read(&ctx->active_count);

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}
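
/*
 * Hedged userspace sketch (not part of this file): querying per-context
 * hang statistics; reset_count is only reported to privileged callers:
 *
 *	struct drm_i915_reset_stats stats = { .ctx_id = ctx_id };
 *	if (ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats) == 0)
 *		printf("active %u pending %u\n",
 *		       stats.batch_active, stats.batch_pending);
 */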

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif