]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blob - drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
57c14d3340cde50d456b3d123ca7392556a797ff
[mirror_ubuntu-jammy-kernel.git] / drivers / gpu / drm / i915 / gem / selftests / i915_gem_execbuffer.c
1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2020 Intel Corporation
4 */
5
6 #include "i915_selftest.h"
7
8 #include "gt/intel_engine_pm.h"
9 #include "selftests/igt_flush_test.h"
10
11 static u64 read_reloc(const u32 *map, int x, const u64 mask)
12 {
13 u64 reloc;
14
15 memcpy(&reloc, &map[x], sizeof(reloc));
16 return reloc & mask;
17 }
18
/*
 * Exercise the GPU relocation path against a single scratch object:
 * emit relocations at an 8-byte-aligned offset, a non-aligned offset,
 * and (after padding the command page) one that forces batch chaining,
 * then verify each landed in the object with the expected value.
 *
 * Returns 0 on success or a negative errno; wedges the GT if the
 * relocation request fails to complete within the wait.
 */
static int __igt_gpu_reloc(struct i915_execbuffer *eb,
			   struct drm_i915_gem_object *obj)
{
	/* dword offsets into the scratch page; order matches the three cases below */
	const unsigned int offsets[] = { 8, 3, 0 };
	/* 32- or 64-bit relocation width depending on the platform */
	const u64 mask =
		GENMASK_ULL(eb->reloc_cache.use_64bit_reloc ? 63 : 31, 0);
	const u32 *map = page_mask_bits(obj->mm.mapping);
	struct i915_request *rq;
	struct i915_vma *vma;
	int err;
	int i;

	vma = i915_vma_instance(obj, eb->context->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
	if (err)
		return err;

	/* 8-Byte aligned */
	err = __reloc_entry_gpu(eb, vma, offsets[0] * sizeof(u32), 0);
	if (err)
		goto unpin_vma;

	/* !8-Byte aligned */
	err = __reloc_entry_gpu(eb, vma, offsets[1] * sizeof(u32), 1);
	if (err)
		goto unpin_vma;

	/* Skip to the end of the cmd page */
	i = PAGE_SIZE / sizeof(u32) - RELOC_TAIL - 1;
	i -= eb->reloc_cache.rq_size;
	/* pad the remainder of the command buffer with MI_NOOP ... */
	memset32(eb->reloc_cache.rq_cmd + eb->reloc_cache.rq_size,
		 MI_NOOP, i);
	eb->reloc_cache.rq_size += i;

	/*
	 * Force batch chaining: the cache is now (nearly) full, so this
	 * relocation presumably spills into a chained batch — NOTE(review):
	 * relies on __reloc_entry_gpu/RELOC_TAIL internals not visible here.
	 */
	err = __reloc_entry_gpu(eb, vma, offsets[2] * sizeof(u32), 2);
	if (err)
		goto unpin_vma;

	/* Hold our own reference before the flush drops the cache's rq */
	GEM_BUG_ON(!eb->reloc_cache.rq);
	rq = i915_request_get(eb->reloc_cache.rq);
	err = reloc_gpu_flush(&eb->reloc_cache);
	if (err)
		goto put_rq;
	GEM_BUG_ON(eb->reloc_cache.rq);

	/* The flush submitted the request; wait for the writes to land */
	err = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE, HZ / 2);
	if (err) {
		/* Stuck request — wedge the GT to force recovery */
		intel_gt_set_wedged(eb->engine->gt);
		goto put_rq;
	}

	if (!i915_request_completed(rq)) {
		pr_err("%s: did not wait for relocations!\n", eb->engine->name);
		err = -EINVAL;
		goto put_rq;
	}

	/* Each reloc i was emitted with value i; verify via the CPU mapping */
	for (i = 0; i < ARRAY_SIZE(offsets); i++) {
		u64 reloc = read_reloc(map, offsets[i], mask);

		if (reloc != i) {
			pr_err("%s[%d]: map[%d] %llx != %x\n",
			       eb->engine->name, i, offsets[i], reloc, i);
			err = -EINVAL;
		}
	}
	if (err)
		igt_hexdump(map, 4096);

put_rq:
	i915_request_put(rq);
unpin_vma:
	i915_vma_unpin(vma);
	return err;
}
98
/*
 * Selftest entry: run __igt_gpu_reloc() against every uabi engine,
 * giving each engine a fresh reloc cache and a freshly poisoned
 * scratch page so stale writes from a previous engine cannot
 * masquerade as success.
 *
 * @arg is the struct drm_i915_private * supplied by the selftest
 * framework. Returns 0 on success or a negative errno.
 */
static int igt_gpu_reloc(void *arg)
{
	struct i915_execbuffer eb;
	struct drm_i915_gem_object *scratch;
	int err = 0;
	u32 *map;

	eb.i915 = arg;

	/* One page of internal (shmem-less) memory as the reloc target */
	scratch = i915_gem_object_create_internal(eb.i915, 4096);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	/* WC mapping so CPU reads observe the GPU writes without clflush */
	map = i915_gem_object_pin_map(scratch, I915_MAP_WC);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto err_scratch;
	}

	for_each_uabi_engine(eb.engine, eb.i915) {
		reloc_cache_init(&eb.reloc_cache, eb.i915);
		/* Poison the page so unwritten relocs are detected */
		memset(map, POISON_INUSE, 4096);

		/* Keep the engine awake for the duration of the subtest */
		intel_engine_pm_get(eb.engine);
		eb.context = intel_context_create(eb.engine);
		if (IS_ERR(eb.context)) {
			err = PTR_ERR(eb.context);
			goto err_pm;
		}

		err = intel_context_pin(eb.context);
		if (err)
			goto err_put;

		err = __igt_gpu_reloc(&eb, scratch);

		intel_context_unpin(eb.context);
/* unwind labels live inside the loop: each iteration cleans its own state */
err_put:
		intel_context_put(eb.context);
err_pm:
		intel_engine_pm_put(eb.engine);
		if (err)
			break;
	}

	/* Flush outstanding work; report -EIO if anything was left hanging */
	if (igt_flush_test(eb.i915))
		err = -EIO;

err_scratch:
	i915_gem_object_put(scratch);
	return err;
}
151
152 int i915_gem_execbuffer_live_selftests(struct drm_i915_private *i915)
153 {
154 static const struct i915_subtest tests[] = {
155 SUBTEST(igt_gpu_reloc),
156 };
157
158 if (intel_gt_is_wedged(&i915->gt))
159 return 0;
160
161 return i915_live_subtests(tests, i915);
162 }