/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current to invoke a save of the context we actually care about. In fact, the
 * code could likely be constructed, albeit in a more complicated fashion, to
 * never use the default context, though that limits the driver's ability to
 * swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware, and object
 * tracking works. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and is
 *  on the active list waiting for the next context switch to occur. Until this
 *  happens, the object must remain at the same gtt offset. It is therefore
 *  possible to destroy a context, but it is still active.
 *
 */

#include <linux/log2.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"

#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)

/* Initial size (as log2) to preallocate the handle->object hashtable */
#define VMA_HT_BITS 2u /* 4 x 2 pointers, 64 bytes minimum */

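/*
 * Worker that rehashes the per-context handle->vma lookup table. It picks a
 * new table size from the current entry count (aiming to keep the hash
 * chains short), moves every vma across by rehashing its ctx_handle, and
 * finally publishes the new size, clearing the I915_CTX_RESIZE_IN_PROGRESS
 * flag that whoever queued this work left set in ht_size.
 */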
static void resize_vma_ht(struct work_struct *work)
{
	struct i915_gem_context_vma_lut *lut =
		container_of(work, typeof(*lut), resize);
	unsigned int bits, new_bits, size, i;
	struct hlist_head *new_ht;

	GEM_BUG_ON(!(lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS));

	bits = 1 + ilog2(4*lut->ht_count/3 + 1);
	new_bits = min_t(unsigned int,
			 max(bits, VMA_HT_BITS),
			 sizeof(unsigned int) * BITS_PER_BYTE - 1);
	if (new_bits == lut->ht_bits)
		goto out;

	new_ht = kzalloc(sizeof(*new_ht)<<new_bits, GFP_KERNEL | __GFP_NOWARN);
	if (!new_ht)
		new_ht = vzalloc(sizeof(*new_ht)<<new_bits);
	if (!new_ht)
		/* Pretend resize succeeded and stop calling us for a bit! */
		goto out;

	size = BIT(lut->ht_bits);
	for (i = 0; i < size; i++) {
		struct i915_vma *vma;
		struct hlist_node *tmp;

		hlist_for_each_entry_safe(vma, tmp, &lut->ht[i], ctx_node)
			hlist_add_head(&vma->ctx_node,
				       &new_ht[hash_32(vma->ctx_handle,
						       new_bits)]);
	}
	kvfree(lut->ht);
	lut->ht = new_ht;
	lut->ht_bits = new_bits;
out:
	smp_store_release(&lut->ht_size, BIT(bits));
	GEM_BUG_ON(lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS);
}

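/*
 * Tear down the handle->vma lookup table when the context is freed: cancel
 * any pending resize, unlink every vma from the context and drop the
 * reference the table held on it, then release the table itself.
 */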
static void vma_lut_free(struct i915_gem_context *ctx)
{
	struct i915_gem_context_vma_lut *lut = &ctx->vma_lut;
	unsigned int i, size;

	if (lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS)
		cancel_work_sync(&lut->resize);

	size = BIT(lut->ht_bits);
	for (i = 0; i < size; i++) {
		struct i915_vma *vma;

		hlist_for_each_entry(vma, &lut->ht[i], ctx_node) {
			vma->obj->vma_hashed = NULL;
			vma->ctx = NULL;
			i915_vma_put(vma);
		}
	}
	kvfree(lut->ht);
}

void i915_gem_context_free(struct kref *ctx_ref)
{
	struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
	int i;

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
	trace_i915_context_free(ctx);
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));

	vma_lut_free(ctx);
	i915_ppgtt_put(ctx->ppgtt);

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct intel_context *ce = &ctx->engine[i];

		if (!ce->state)
			continue;

		WARN_ON(ce->pin_count);
		if (ce->ring)
			intel_ring_free(ce->ring);

		__i915_gem_object_release_unless_active(ce->state->obj);
	}

	kfree(ctx->name);
	put_pid(ctx->pid);

	list_del(&ctx->link);

	ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
	kfree(ctx);
}

static void context_close(struct i915_gem_context *ctx)
{
	i915_gem_context_set_closed(ctx);
	if (ctx->ppgtt)
		i915_ppgtt_close(&ctx->ppgtt->base);
	ctx->file_priv = ERR_PTR(-EBADF);
	i915_gem_context_put(ctx);
}

static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
{
	int ret;

	ret = ida_simple_get(&dev_priv->context_hw_ida,
			     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
	if (ret < 0) {
		/* Contexts are only released when no longer active.
		 * Flush any pending retires to hopefully release some
		 * stale contexts and try again.
		 */
		i915_gem_retire_requests(dev_priv);
		ret = ida_simple_get(&dev_priv->context_hw_ida,
				     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	*out = ret;
	return 0;
}

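/*
 * Build the default GEN8+ context descriptor template for a context: mark it
 * valid and privileged, pick 32b vs 48b legacy addressing from the ppgtt,
 * and add the L3LLC coherency bit needed on gen8. The remaining per-context
 * bits are filled in later where the full descriptor is assembled.
 */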
static u32 default_desc_template(const struct drm_i915_private *i915,
				 const struct i915_hw_ppgtt *ppgtt)
{
	u32 address_mode;
	u32 desc;

	desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;

	address_mode = INTEL_LEGACY_32B_CONTEXT;
	if (ppgtt && i915_vm_is_48bit(&ppgtt->base))
		address_mode = INTEL_LEGACY_64B_CONTEXT;
	desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;

	if (IS_GEN8(i915))
		desc |= GEN8_CTX_L3LLC_COHERENT;

	/* TODO: WaDisableLiteRestore when we start using semaphore
	 * signalling between Command Streamers
	 * ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
	 */

	return desc;
}

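/*
 * Allocate and initialise the software state for a new context: hw_id, vma
 * lookup table, per-file handle (unless this is a kernel-internal context),
 * name and pid for debug, L3 remap mask, ring size and descriptor template.
 * The ppgtt, if any, is attached by the caller.
 */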
static struct i915_gem_context *
__create_hw_context(struct drm_i915_private *dev_priv,
		    struct drm_i915_file_private *file_priv)
{
	struct i915_gem_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	ret = assign_hw_id(dev_priv, &ctx->hw_id);
	if (ret) {
		kfree(ctx);
		return ERR_PTR(ret);
	}

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->context_list);
	ctx->i915 = dev_priv;
	ctx->priority = I915_PRIORITY_NORMAL;

	ctx->vma_lut.ht_bits = VMA_HT_BITS;
	ctx->vma_lut.ht_size = BIT(VMA_HT_BITS);
	BUILD_BUG_ON(BIT(VMA_HT_BITS) == I915_CTX_RESIZE_IN_PROGRESS);
	ctx->vma_lut.ht = kcalloc(ctx->vma_lut.ht_size,
				  sizeof(*ctx->vma_lut.ht),
				  GFP_KERNEL);
	if (!ctx->vma_lut.ht)
		goto err_out;

	INIT_WORK(&ctx->vma_lut.resize, resize_vma_ht);

	/* Default context will never have a file_priv */
	ret = DEFAULT_CONTEXT_HANDLE;
	if (file_priv) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_lut;
	}
	ctx->user_handle = ret;

	ctx->file_priv = file_priv;
	if (file_priv) {
		ctx->pid = get_task_pid(current, PIDTYPE_PID);
		ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x",
				      current->comm,
				      pid_nr(ctx->pid),
				      ctx->user_handle);
		if (!ctx->name) {
			ret = -ENOMEM;
			goto err_pid;
		}
	}

	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = ALL_L3_SLICES(dev_priv);

	i915_gem_context_set_bannable(ctx);
	ctx->ring_size = 4 * PAGE_SIZE;
	ctx->desc_template =
		default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);

	/* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not
	 * present or not in use we still need a small bias as ring wraparound
	 * at offset 0 sometimes hangs. No idea why.
	 */
	if (HAS_GUC(dev_priv) && i915.enable_guc_loading)
		ctx->ggtt_offset_bias = GUC_WOPCM_TOP;
	else
		ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;

	return ctx;

err_pid:
	put_pid(ctx->pid);
	idr_remove(&file_priv->context_idr, ctx->user_handle);
err_lut:
	kvfree(ctx->vma_lut.ht);
err_out:
	context_close(ctx);
	return ERR_PTR(ret);
}

static void __destroy_hw_context(struct i915_gem_context *ctx,
				 struct drm_i915_file_private *file_priv)
{
	idr_remove(&file_priv->context_idr, ctx->user_handle);
	context_close(ctx);
}

/**
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as an idle case.
 */
static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *dev_priv,
			struct drm_i915_file_private *file_priv)
{
	struct i915_gem_context *ctx;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	ctx = __create_hw_context(dev_priv, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (USES_FULL_PPGTT(dev_priv)) {
		struct i915_hw_ppgtt *ppgtt;

		ppgtt = i915_ppgtt_create(dev_priv, file_priv, ctx->name);
		if (IS_ERR(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			__destroy_hw_context(ctx, file_priv);
			return ERR_CAST(ppgtt);
		}

		ctx->ppgtt = ppgtt;
		ctx->desc_template = default_desc_template(dev_priv, ppgtt);
	}

	trace_i915_context_create(ctx);

	return ctx;
}

/**
 * i915_gem_context_create_gvt - create a GVT GEM context
 * @dev: drm device *
 *
 * This function is used to create a GVT specific GEM context.
 *
 * Returns:
 * pointer to i915_gem_context on success, error pointer if failed
 *
 */
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev)
{
	struct i915_gem_context *ctx;
	int ret;

	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
		return ERR_PTR(-ENODEV);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	ctx = __create_hw_context(to_i915(dev), NULL);
	if (IS_ERR(ctx))
		goto out;

	ctx->file_priv = ERR_PTR(-EBADF);
	i915_gem_context_set_closed(ctx); /* not user accessible */
	i915_gem_context_clear_bannable(ctx);
	i915_gem_context_set_force_single_submission(ctx);
	if (!i915.enable_guc_submission)
		ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */

	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
out:
	mutex_unlock(&dev->struct_mutex);
	return ctx;
}

int i915_gem_context_init(struct drm_i915_private *dev_priv)
{
	struct i915_gem_context *ctx;

	/* Init should only be called once per module load. Eventually the
	 * restriction on the context_disabled check can be loosened. */
	if (WARN_ON(dev_priv->kernel_context))
		return 0;

	if (intel_vgpu_active(dev_priv) &&
	    HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
		if (!i915.enable_execlists) {
			DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
			return -EINVAL;
		}
	}

	/* Using the simple ida interface, the max is limited by sizeof(int) */
	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
	ida_init(&dev_priv->context_hw_ida);

	ctx = i915_gem_create_context(dev_priv, NULL);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context (error %ld)\n",
			  PTR_ERR(ctx));
		return PTR_ERR(ctx);
	}

	/* For easy recognisability, we want the kernel context to be 0 and then
	 * all user contexts will have non-zero hw_id.
	 */
	GEM_BUG_ON(ctx->hw_id);

	i915_gem_context_clear_bannable(ctx);
	ctx->priority = I915_PRIORITY_MIN; /* lowest priority; idle task */
	dev_priv->kernel_context = ctx;

	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			 dev_priv->engine[RCS]->context_size ? "logical" :
			 "fake");
	return 0;
}

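/*
 * Called when the hardware has lost all context state (e.g. across a GPU
 * reset or suspend): forget which context each engine last had loaded and
 * drop the pin on each engine's last retired context. On legacy ringbuffer
 * submission, also mark the default contexts as needing full state and L3
 * remap restoration, and treat the kernel context as already initialised.
 */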
void i915_gem_context_lost(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	for_each_engine(engine, dev_priv, id) {
		engine->legacy_active_context = NULL;

		if (!engine->last_retired_context)
			continue;

		engine->context_unpin(engine, engine->last_retired_context);
		engine->last_retired_context = NULL;
	}

	/* Force the GPU state to be restored on enabling */
	if (!i915.enable_execlists) {
		struct i915_gem_context *ctx;

		list_for_each_entry(ctx, &dev_priv->context_list, link) {
			if (!i915_gem_context_is_default(ctx))
				continue;

			for_each_engine(engine, dev_priv, id)
				ctx->engine[engine->id].initialised = false;

			ctx->remap_slice = ALL_L3_SLICES(dev_priv);
		}

		for_each_engine(engine, dev_priv, id) {
			struct intel_context *kce =
				&dev_priv->kernel_context->engine[engine->id];

			kce->initialised = true;
		}
	}
}

void i915_gem_context_fini(struct drm_i915_private *dev_priv)
{
	struct i915_gem_context *dctx = dev_priv->kernel_context;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	GEM_BUG_ON(!i915_gem_context_is_kernel(dctx));

	context_close(dctx);
	dev_priv->kernel_context = NULL;

	ida_destroy(&dev_priv->context_hw_ida);
}

static int context_idr_cleanup(int id, void *p, void *data)
{
	struct i915_gem_context *ctx = p;

	context_close(ctx);
	return 0;
}

int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&dev->struct_mutex);
	ctx = i915_gem_create_context(to_i915(dev), file_priv);
	mutex_unlock(&dev->struct_mutex);

	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));

	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	return 0;
}

void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	lockdep_assert_held(&dev->struct_mutex);

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}

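/*
 * Emit the MI_SET_CONTEXT command that asks the render ring to save the
 * current context image and load the new one. On gen7 with semaphore
 * signalling enabled this is bracketed by an extended workaround that keeps
 * the other rings from sleeping (re-enabled afterwards, with an SRM used as
 * a delay before the next switch), plus the MI_ARB and trailing MI_NOOP
 * workarounds required around MI_SET_CONTEXT itself.
 */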
static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 flags)
{
	struct drm_i915_private *dev_priv = req->i915;
	struct intel_engine_cs *engine = req->engine;
	enum intel_engine_id id;
	const int num_rings =
		/* Use an extended w/a on gen7 if signalling from other rings */
		(i915.semaphores && INTEL_GEN(dev_priv) == 7) ?
		INTEL_INFO(dev_priv)->num_rings - 1 :
		0;
	int len;
	u32 *cs;

	flags |= MI_MM_SPACE_GTT;
	if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
		/* These flags are for resource streamer on HSW+ */
		flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
	else
		flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;

	len = 4;
	if (INTEL_GEN(dev_priv) >= 7)
		len += 2 + (num_rings ? 4*num_rings + 6 : 0);

	cs = intel_ring_begin(req, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (INTEL_GEN(dev_priv) >= 7) {
		*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
		if (num_rings) {
			struct intel_engine_cs *signaller;

			*cs++ = MI_LOAD_REGISTER_IMM(num_rings);
			for_each_engine(signaller, dev_priv, id) {
				if (signaller == engine)
					continue;

				*cs++ = i915_mmio_reg_offset(
					   RING_PSMI_CTL(signaller->mmio_base));
				*cs++ = _MASKED_BIT_ENABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}
		}
	}

	*cs++ = MI_NOOP;
	*cs++ = MI_SET_CONTEXT;
	*cs++ = i915_ggtt_offset(req->ctx->engine[RCS].state) | flags;
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	*cs++ = MI_NOOP;

	if (INTEL_GEN(dev_priv) >= 7) {
		if (num_rings) {
			struct intel_engine_cs *signaller;
			i915_reg_t last_reg = {}; /* keep gcc quiet */

			*cs++ = MI_LOAD_REGISTER_IMM(num_rings);
			for_each_engine(signaller, dev_priv, id) {
				if (signaller == engine)
					continue;

				last_reg = RING_PSMI_CTL(signaller->mmio_base);
				*cs++ = i915_mmio_reg_offset(last_reg);
				*cs++ = _MASKED_BIT_DISABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}

			/* Insert a delay before the next switch! */
			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
			*cs++ = i915_mmio_reg_offset(last_reg);
			*cs++ = i915_ggtt_offset(engine->scratch);
			*cs++ = MI_NOOP;
		}
		*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	}

	intel_ring_advance(req, cs);

	return 0;
}

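/*
 * Re-emit the saved L3 remapping registers for one slice so that a freshly
 * restored context sees the same L3 error remap configuration that was
 * programmed at initialization time. A no-op if there is no remap info for
 * the slice.
 */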
static int remap_l3(struct drm_i915_gem_request *req, int slice)
{
	u32 *cs, *remap_info = req->i915->l3_parity.remap_info[slice];
	int i;

	if (!remap_info)
		return 0;

	cs = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
		*cs++ = remap_info[i];
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

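/*
 * Helpers deciding how much work a legacy context switch needs: whether the
 * MI_SET_CONTEXT can be skipped entirely (same context, nothing left to
 * remap, no dirty page directories), and whether the ppgtt page directories
 * must be reloaded before and/or after the context image is switched.
 */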
static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
				   struct intel_engine_cs *engine,
				   struct i915_gem_context *to)
{
	if (to->remap_slice)
		return false;

	if (!to->engine[RCS].initialised)
		return false;

	if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
		return false;

	return to == engine->legacy_active_context;
}

static bool
needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt, struct intel_engine_cs *engine)
{
	struct i915_gem_context *from = engine->legacy_active_context;

	if (!ppgtt)
		return false;

	/* Always load the ppgtt on first use */
	if (!from)
		return true;

	/* Same context without new entries, skip */
	if ((!from->ppgtt || from->ppgtt == ppgtt) &&
	    !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
		return false;

	if (engine->id != RCS)
		return true;

	if (INTEL_GEN(engine->i915) < 8)
		return true;

	return false;
}

static bool
needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
		   struct i915_gem_context *to,
		   u32 hw_flags)
{
	if (!ppgtt)
		return false;

	if (!IS_GEN8(to->i915))
		return false;

	if (hw_flags & MI_RESTORE_INHIBIT)
		return true;

	return false;
}

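/*
 * Perform a full legacy context switch on the render ring: reload the ppgtt
 * page directories if required, emit MI_SET_CONTEXT with the appropriate
 * save/restore flags, re-emit any pending L3 slice remaps and, for a brand
 * new context, run the engine's init_context once.
 */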
static int do_rcs_switch(struct drm_i915_gem_request *req)
{
	struct i915_gem_context *to = req->ctx;
	struct intel_engine_cs *engine = req->engine;
	struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
	struct i915_gem_context *from = engine->legacy_active_context;
	u32 hw_flags;
	int ret, i;

	GEM_BUG_ON(engine->id != RCS);

	if (skip_rcs_switch(ppgtt, engine, to))
		return 0;

	if (needs_pd_load_pre(ppgtt, engine)) {
		/* Older GENs and non render rings still want the load first,
		 * "PP_DCLV followed by PP_DIR_BASE register through Load
		 * Register Immediate commands in Ring Buffer before submitting
		 * a context."*/
		trace_switch_mm(engine, to);
		ret = ppgtt->switch_mm(ppgtt, req);
		if (ret)
			return ret;
	}

	if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
		/* NB: If we inhibit the restore, the context is not allowed to
		 * die because future work may end up depending on valid address
		 * space. This means we must enforce that a page table load
		 * occur when this occurs. */
		hw_flags = MI_RESTORE_INHIBIT;
	else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
		hw_flags = MI_FORCE_RESTORE;
	else
		hw_flags = 0;

	if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
		ret = mi_set_context(req, hw_flags);
		if (ret)
			return ret;

		engine->legacy_active_context = to;
	}

	/* GEN8 does *not* require an explicit reload if the PDPs have been
	 * setup, and we do not wish to move them.
	 */
	if (needs_pd_load_post(ppgtt, to, hw_flags)) {
		trace_switch_mm(engine, to);
		ret = ppgtt->switch_mm(ppgtt, req);
		/* The hardware context switch is emitted, but we haven't
		 * actually changed the state - so it's probably safe to bail
		 * here. Still, let the user know something dangerous has
		 * happened.
		 */
		if (ret)
			return ret;
	}

	if (ppgtt)
		ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1<<i)))
			continue;

		ret = remap_l3(req, i);
		if (ret)
			return ret;

		to->remap_slice &= ~(1<<i);
	}

	if (!to->engine[RCS].initialised) {
		if (engine->init_context) {
			ret = engine->init_context(req);
			if (ret)
				return ret;
		}
		to->engine[RCS].initialised = true;
	}

	return 0;
}

819 | * i915_switch_context() - perform a GPU context switch. | |
ba01cc93 | 820 | * @req: request for which we'll execute the context switch |
e0556841 BW |
821 | * |
822 | * The context life cycle is simple. The context refcount is incremented and | |
823 | * decremented by 1 and create and destroy. If the context is in use by the GPU, | |
ecdb5fd8 | 824 | * it will have a refcount > 1. This allows us to destroy the context abstract |
e0556841 | 825 | * object while letting the normal object tracking destroy the backing BO. |
ecdb5fd8 TD |
826 | * |
827 | * This function should not be used in execlists mode. Instead the context is | |
828 | * switched by writing to the ELSP and requests keep a reference to their | |
829 | * context. | |
e0556841 | 830 | */ |
ba01cc93 | 831 | int i915_switch_context(struct drm_i915_gem_request *req) |
e0556841 | 832 | { |
4a570db5 | 833 | struct intel_engine_cs *engine = req->engine; |
e0556841 | 834 | |
91c8a326 | 835 | lockdep_assert_held(&req->i915->drm.struct_mutex); |
5b043f4e CW |
836 | if (i915.enable_execlists) |
837 | return 0; | |
0eea67eb | 838 | |
bca44d80 | 839 | if (!req->ctx->engine[engine->id].state) { |
e2efd130 | 840 | struct i915_gem_context *to = req->ctx; |
f9326be5 CW |
841 | struct i915_hw_ppgtt *ppgtt = |
842 | to->ppgtt ?: req->i915->mm.aliasing_ppgtt; | |
e1a8daa2 | 843 | |
430ffaf4 | 844 | if (needs_pd_load_pre(ppgtt, engine)) { |
e1a8daa2 CW |
845 | int ret; |
846 | ||
847 | trace_switch_mm(engine, to); | |
f9326be5 | 848 | ret = ppgtt->switch_mm(ppgtt, req); |
e1a8daa2 CW |
849 | if (ret) |
850 | return ret; | |
851 | ||
f9326be5 | 852 | ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); |
e1a8daa2 CW |
853 | } |
854 | ||
430ffaf4 | 855 | engine->legacy_active_context = to; |
c482972a | 856 | return 0; |
a95f6a00 | 857 | } |
c482972a | 858 | |
e1a8daa2 | 859 | return do_rcs_switch(req); |
e0556841 | 860 | } |
84624813 | 861 | |
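/*
 * Returns true if the engine is already idle on (or heading towards) the
 * kernel context: no other timeline has an outstanding request on this
 * engine and the last retired context was the kernel context (or nothing
 * has run yet).
 */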
static bool engine_has_kernel_context(struct intel_engine_cs *engine)
{
	struct i915_gem_timeline *timeline;

	list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
		struct intel_timeline *tl;

		if (timeline == &engine->i915->gt.global_timeline)
			continue;

		tl = &timeline->engine[engine->id];
		if (i915_gem_active_peek(&tl->last_request,
					 &engine->i915->drm.struct_mutex))
			return false;
	}

	return (!engine->last_retired_context ||
		i915_gem_context_is_kernel(engine->last_retired_context));
}

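/*
 * Queue a switch to the kernel context on every engine that is not already
 * idling in it, ordered after all currently queued requests, so that when
 * the GPU is idled the last context the hardware references is always the
 * kernel context.
 */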
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	struct i915_gem_timeline *timeline;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	i915_gem_retire_requests(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		struct drm_i915_gem_request *req;
		int ret;

		if (engine_has_kernel_context(engine))
			continue;

		req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
		if (IS_ERR(req))
			return PTR_ERR(req);

		/* Queue this switch after all other activity */
		list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
			struct drm_i915_gem_request *prev;
			struct intel_timeline *tl;

			tl = &timeline->engine[engine->id];
			prev = i915_gem_active_raw(&tl->last_request,
						   &dev_priv->drm.struct_mutex);
			if (prev)
				i915_sw_fence_await_sw_fence_gfp(&req->submit,
								 &prev->submit,
								 GFP_KERNEL);
		}

		ret = i915_switch_context(req);
		i915_add_request(req);
		if (ret)
			return ret;
	}

	return 0;
}

static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
	return file_priv->context_bans > I915_MAX_CLIENT_CONTEXT_BANS;
}

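/*
 * Userspace reaches the context ioctls below through the uapi in
 * i915_drm.h. Roughly (illustrative sketch only, using libdrm's drmIoctl()
 * and an already-open device fd, which are assumptions of this example):
 *
 *	struct drm_i915_gem_context_create create = { 0 };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create) == 0)
 *		// use create.ctx_id in subsequent execbuffer calls
 *
 * The returned ctx_id is the per-file handle stored in ctx->user_handle.
 */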
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int ret;

	if (!dev_priv->engine[RCS]->context_size)
		return -ENODEV;

	if (args->pad != 0)
		return -EINVAL;

	if (client_is_banned(file_priv)) {
		DRM_DEBUG("client %s[%d] banned from creating ctx\n",
			  current->comm,
			  pid_nr(get_task_pid(current, PIDTYPE_PID)));

		return -EIO;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev_priv, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));

	args->ctx_id = ctx->user_handle;
	DRM_DEBUG("HW context %d created\n", args->ctx_id);

	return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int ret;

	if (args->pad != 0)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	__destroy_hw_context(ctx, file_priv);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG("HW context %d destroyed\n", args->ctx_id);
	return 0;
}

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	args->size = 0;
	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		ret = -EINVAL;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
		break;
	case I915_CONTEXT_PARAM_GTT_SIZE:
		if (ctx->ppgtt)
			args->value = ctx->ppgtt->base.total;
		else if (to_i915(dev)->mm.aliasing_ppgtt)
			args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
		else
			args->value = to_i915(dev)->ggtt.base.total;
		break;
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->value = i915_gem_context_no_error_capture(ctx);
		break;
	case I915_CONTEXT_PARAM_BANNABLE:
		args->value = i915_gem_context_is_bannable(ctx);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		ret = -EINVAL;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		if (args->size) {
			ret = -EINVAL;
		} else {
			ctx->flags &= ~CONTEXT_NO_ZEROMAP;
			ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
		}
		break;
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			i915_gem_context_set_no_error_capture(ctx);
		else
			i915_gem_context_clear_no_error_capture(ctx);
		break;
	case I915_CONTEXT_PARAM_BANNABLE:
		if (args->size)
			ret = -EINVAL;
		else if (!capable(CAP_SYS_ADMIN) && !args->value)
			ret = -EPERM;
		else if (args->value)
			i915_gem_context_set_bannable(ctx);
		else
			i915_gem_context_clear_bannable(ctx);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_gem_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = ctx->guilty_count;
	args->batch_pending = ctx->active_count;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif