/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "../i915_selftest.h"

#include "mock_drm.h"
#include "huge_gem_object.h"

#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))

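/*
 * Build a batch buffer that writes @value into one dword of each of @count
 * pages, starting at @offset within @vma, using the MI_STORE_DWORD_IMM
 * encoding appropriate for the hardware generation.
 */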
static struct i915_vma *
gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
{
	struct drm_i915_gem_object *obj;
	const int gen = INTEL_GEN(vma->vm->i915);
	unsigned long n, size;
	u32 *cmd;
	int err;

	size = (4 * count + 1) * sizeof(u32);
	size = round_up(size, PAGE_SIZE);
	obj = i915_gem_object_create_internal(vma->vm->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
	offset += vma->node.start;

	for (n = 0; n < count; n++) {
		if (gen >= 8) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4;
			*cmd++ = lower_32_bits(offset);
			*cmd++ = upper_32_bits(offset);
			*cmd++ = value;
		} else if (gen >= 4) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
				(gen < 6 ? 1 << 22 : 0);
			*cmd++ = 0;
			*cmd++ = offset;
			*cmd++ = value;
		} else {
			*cmd++ = MI_STORE_DWORD_IMM | 1 << 22;
			*cmd++ = offset;
			*cmd++ = value;
		}
		offset += PAGE_SIZE;
	}
	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_object_unpin_map(obj);

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		goto err;

	vma = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

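/*
 * huge_gem_object() backs a large virtual object with a much smaller set of
 * physical pages: real_page_count() is the number of physical pages actually
 * allocated, fake_page_count() the number of pages the object claims to span.
 */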
static unsigned long real_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
}

static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
}

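/*
 * Submit a request on @engine in @ctx that dispatches a gpu_fill_dw() batch,
 * writing the pass index @dw into one dword of every physical page of @obj.
 */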
static int gpu_fill(struct drm_i915_gem_object *obj,
		    struct i915_gem_context *ctx,
		    struct intel_engine_cs *engine,
		    unsigned int dw)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
	struct drm_i915_gem_request *rq;
	struct i915_vma *vma;
	struct i915_vma *batch;
	unsigned int flags;
	int err;

	GEM_BUG_ON(obj->base.size > vm->total);
	GEM_BUG_ON(!intel_engine_can_store_dword(engine));

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		return err;

	err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
	if (err)
		return err;

	/* Within the GTT the huge object maps every page onto
	 * its 1024 real pages (using phys_pfn = dma_pfn % 1024).
	 * We set the nth dword within the page using the nth
	 * mapping via the GTT - this should exercise the GTT mapping
	 * whilst checking that each context provides a unique view
	 * into the object.
	 */
	batch = gpu_fill_dw(vma,
			    (dw * real_page_count(obj)) << PAGE_SHIFT |
			    (dw * sizeof(u32)),
			    real_page_count(obj),
			    dw);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_vma;
	}

	rq = i915_gem_request_alloc(engine, ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_batch;
	}

	err = engine->emit_flush(rq, EMIT_INVALIDATE);
	if (err)
		goto err_request;

	err = i915_switch_context(rq);
	if (err)
		goto err_request;

	flags = 0;
	if (INTEL_GEN(vm->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;

	err = engine->emit_bb_start(rq,
				    batch->node.start, batch->node.size,
				    flags);
	if (err)
		goto err_request;

	i915_vma_move_to_active(batch, rq, 0);
	i915_gem_object_set_active_reference(batch->obj);
	i915_vma_unpin(batch);
	i915_vma_close(batch);

	i915_vma_move_to_active(vma, rq, 0);
	i915_vma_unpin(vma);

	reservation_object_lock(obj->resv, NULL);
	reservation_object_add_excl_fence(obj->resv, &rq->fence);
	reservation_object_unlock(obj->resv);

	__i915_add_request(rq, true);

	return 0;

err_request:
	__i915_add_request(rq, false);
err_batch:
	i915_vma_unpin(batch);
err_vma:
	i915_vma_unpin(vma);
	return err;
}

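/*
 * Fill every physical page of @obj with @value from the CPU, flushing the
 * writes out of the cache when the platform has no LLC.
 */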
static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
{
	const bool has_llc = HAS_LLC(to_i915(obj->base.dev));
	unsigned int n, m, need_flush;
	int err;

	err = i915_gem_obj_prepare_shmem_write(obj, &need_flush);
	if (err)
		return err;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		for (m = 0; m < DW_PER_PAGE; m++)
			map[m] = value;
		if (!has_llc)
			drm_clflush_virt_range(map, PAGE_SIZE);
		kunmap_atomic(map);
	}

	i915_gem_obj_finish_shmem_access(obj);
	obj->base.read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
	obj->base.write_domain = 0;
	return 0;
}

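/*
 * Check that the first @max dwords of each physical page hold their index
 * (0, 1, 2, ...) and that the remainder still hold the 0xdeadbeef pattern
 * written by cpu_fill().
 */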
static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
{
	unsigned int n, m, needs_flush;
	int err;

	err = i915_gem_obj_prepare_shmem_read(obj, &needs_flush);
	if (err)
		return err;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		if (needs_flush & CLFLUSH_BEFORE)
			drm_clflush_virt_range(map, PAGE_SIZE);

		for (m = 0; m < max; m++) {
			if (map[m] != m) {
				pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
				       n, m, map[m], m);
				err = -EINVAL;
				goto out_unmap;
			}
		}

		for (; m < DW_PER_PAGE; m++) {
			if (map[m] != 0xdeadbeef) {
				pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
				       n, m, map[m], 0xdeadbeef);
				err = -EINVAL;
				goto out_unmap;
			}
		}

out_unmap:
		kunmap_atomic(map);
		if (err)
			break;
	}

	i915_gem_obj_finish_shmem_access(obj);
	return err;
}

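/*
 * Create a huge object sized to the context's address space, tie its handle
 * to @file so it is reaped along with the client, and prefill the backing
 * pages with 0xdeadbeef.
 */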
static struct drm_i915_gem_object *
create_test_object(struct i915_gem_context *ctx,
		   struct drm_file *file,
		   struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
	u64 size;
	u32 handle;
	int err;

	size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
	size = round_down(size, DW_PER_PAGE * PAGE_SIZE);

	obj = huge_gem_object(ctx->i915, DW_PER_PAGE * PAGE_SIZE, size);
	if (IS_ERR(obj))
		return obj;

	/* tie the handle to the drm_file for easy reaping */
	err = drm_gem_handle_create(file, &obj->base, &handle);
	i915_gem_object_put(obj);
	if (err)
		return ERR_PTR(err);

	err = cpu_fill(obj, 0xdeadbeef);
	if (err) {
		pr_err("Failed to fill object with cpu, err=%d\n", err);
		return ERR_PTR(err);
	}

	list_add_tail(&obj->st_link, objects);
	return obj;
}

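/*
 * With the sizing used by create_test_object(), each physical page appears
 * fake_page_count() / real_page_count() times in the GTT, so the object can
 * absorb that many distinct per-page dword writes, one per gpu_fill() pass.
 */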
static unsigned long max_dwords(struct drm_i915_gem_object *obj)
{
	unsigned long npages = fake_page_count(obj);

	GEM_BUG_ON(!IS_ALIGNED(npages, DW_PER_PAGE));
	return npages / DW_PER_PAGE;
}

static int igt_ctx_exec(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj = NULL;
	struct drm_file *file;
	IGT_TIMEOUT(end_time);
	LIST_HEAD(objects);
	unsigned long ncontexts, ndwords, dw;
	bool first_shared_gtt = true;
	int err;

	/* Create a few different contexts (with different mm) and write
	 * through each ctx/mm using the GPU making sure those writes end
	 * up in the expected pages of our obj.
	 */

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);

	ncontexts = 0;
	ndwords = 0;
	dw = 0;
	while (!time_after(jiffies, end_time)) {
		struct intel_engine_cs *engine;
		struct i915_gem_context *ctx;
		unsigned int id;

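		/*
		 * Exercise one context on the shared GTT (no private ppgtt),
		 * then create contexts normally, each with its own ppgtt
		 * where the hardware supports it.
		 */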
		if (first_shared_gtt) {
			ctx = __create_hw_context(i915, file->driver_priv);
			first_shared_gtt = false;
		} else {
			ctx = i915_gem_create_context(i915, file->driver_priv);
		}
		if (IS_ERR(ctx)) {
			err = PTR_ERR(ctx);
			goto out_unlock;
		}

		for_each_engine(engine, i915, id) {
			if (!intel_engine_can_store_dword(engine))
				continue;

			if (!obj) {
				obj = create_test_object(ctx, file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					goto out_unlock;
				}
			}

			err = gpu_fill(obj, ctx, engine, dw);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       engine->name, ctx->hw_id,
				       yesno(!!ctx->ppgtt), err);
				goto out_unlock;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}
			ndwords++;
		}
		ncontexts++;
	}
	pr_info("Submitted %lu contexts (across %u engines), filling %lu dwords\n",
		ncontexts, INTEL_INFO(i915)->num_rings, ndwords);

	dw = 0;
	list_for_each_entry(obj, &objects, st_link) {
		unsigned int rem =
			min_t(unsigned int, ndwords - dw, max_dwords(obj));

		err = cpu_check(obj, rem);
		if (err)
			break;

		dw += rem;
	}

out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);

	mock_file_free(i915, file);
	return err;
}

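/*
 * Install an aliasing ppgtt for the selftest and forget any existing local
 * (ppgtt) binding on objects already bound in the GGTT, so that subsequent
 * PIN_USER binds go through the new aliasing ppgtt.
 */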
static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	int err;

	err = i915_gem_init_aliasing_ppgtt(i915);
	if (err)
		return err;

	list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma))
			continue;

		vma->flags &= ~I915_VMA_LOCAL_BIND;
	}

	return 0;
}

static void fake_aliasing_ppgtt_disable(struct drm_i915_private *i915)
{
	i915_gem_fini_aliasing_ppgtt(i915);
}

int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_ctx_exec),
	};
	bool fake_alias = false;
	int err;

	/* Install a fake aliasing gtt for exercise */
	if (USES_PPGTT(dev_priv) && !dev_priv->mm.aliasing_ppgtt) {
		mutex_lock(&dev_priv->drm.struct_mutex);
		err = fake_aliasing_ppgtt_enable(dev_priv);
		mutex_unlock(&dev_priv->drm.struct_mutex);
		if (err)
			return err;

		GEM_BUG_ON(!dev_priv->mm.aliasing_ppgtt);
		fake_alias = true;
	}

	err = i915_subtests(tests, dev_priv);

	if (fake_alias) {
		mutex_lock(&dev_priv->drm.struct_mutex);
		fake_aliasing_ppgtt_disable(dev_priv);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	return err;
}