/* drivers/gpu/drm/i915/selftests/intel_memory_region.c */
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2019 Intel Corporation
4 */
5
6#include <linux/prime_numbers.h>
ba12993c 7#include <linux/sort.h>
232a6eba
MA
8
9#include "../i915_selftest.h"
10
11#include "mock_drm.h"
12#include "mock_gem_device.h"
13#include "mock_region.h"
14
340be48f 15#include "gem/i915_gem_context.h"
b908be54 16#include "gem/i915_gem_lmem.h"
232a6eba 17#include "gem/i915_gem_region.h"
01377a0d 18#include "gem/i915_gem_object_blt.h"
340be48f 19#include "gem/selftests/igt_gem_utils.h"
232a6eba 20#include "gem/selftests/mock_context.h"
6804da20 21#include "gt/intel_engine_user.h"
b908be54 22#include "gt/intel_gt.h"
ba12993c 23#include "i915_memcpy.h"
01377a0d 24#include "selftests/igt_flush_test.h"
2f0b97ca 25#include "selftests/i915_random.h"
232a6eba
MA
26
/*
 * Release every object tracked on @objects: unpin pages if pinned, release
 * the backing pages immediately, drop the list link and the final reference,
 * then drain the deferred-free worker so the region is really empty again.
 */
static void close_objects(struct intel_memory_region *mem,
			  struct list_head *objects)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		if (i915_gem_object_has_pinned_pages(obj))
			i915_gem_object_unpin_pages(obj);
		/* No polluting the memory region between tests */
		__i915_gem_object_put_pages(obj);
		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}

	cond_resched();

	/* Object freeing is deferred; flush it so the space is returned now. */
	i915_gem_drain_freed_objects(i915);
}
46
/*
 * Fill the mock region with objects sized as prime multiples of the minimum
 * chunk size until allocation fails, then verify that exhaustion (-ENXIO)
 * is only reported once the failed request genuinely could not have fitted
 * in the space still remaining.
 */
static int igt_mock_fill(void *arg)
{
	struct intel_memory_region *mem = arg;
	resource_size_t total = resource_size(&mem->region);
	resource_size_t page_size;
	resource_size_t rem;
	unsigned long max_pages;
	unsigned long page_num;
	LIST_HEAD(objects);
	int err = 0;

	page_size = mem->mm.chunk_size;
	max_pages = div64_u64(total, page_size);
	rem = total;

	for_each_prime_number_from(page_num, 1, max_pages) {
		resource_size_t size = page_num * page_size;
		struct drm_i915_gem_object *obj;

		obj = i915_gem_object_create_region(mem, size, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			break;
		}

		/* Keep the object (and its pages) alive on our list. */
		list_add(&obj->st_link, &objects);
		rem -= size;
	}

	/* Plain memory pressure ending the loop is fine. */
	if (err == -ENOMEM)
		err = 0;
	/*
	 * -ENXIO means the region itself ran out of space; that is only a
	 * legitimate outcome if the failed request was bigger than what is
	 * left, otherwise the allocator lost track of free space.
	 */
	if (err == -ENXIO) {
		if (page_num * page_size <= rem) {
			pr_err("%s failed, space still left in region\n",
			       __func__);
			err = -EINVAL;
		} else {
			err = 0;
		}
	}

	close_objects(mem, &objects);

	return err;
}
98
/*
 * Create a region object of @size with @flags and pin its pages, tracking
 * the object on @objects so close_objects() can clean it up later.
 *
 * Returns the object on success or an ERR_PTR; on error nothing is left
 * on @objects.
 */
static struct drm_i915_gem_object *
igt_object_create(struct intel_memory_region *mem,
		  struct list_head *objects,
		  u64 size,
		  unsigned int flags)
{
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_create_region(mem, size, flags);
	if (IS_ERR(obj))
		return obj;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto put;

	list_add(&obj->st_link, objects);
	return obj;

put:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}
123
/*
 * Undo a successful igt_object_create(): unpin, release the backing pages,
 * unlink from the tracking list and drop the reference.
 */
static void igt_object_release(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
	__i915_gem_object_put_pages(obj);
	list_del(&obj->st_link);
	i915_gem_object_put(obj);
}
131
/*
 * Exercise I915_BO_ALLOC_CONTIGUOUS on a mock region: minimum, maximum and
 * randomly sized objects must each occupy exactly one sg entry. Then
 * fragment the region so half the space is free but the largest contiguous
 * hole is SZ_64K, and check that bigger contiguous requests fail while
 * non-contiguous ones still succeed.
 */
static int igt_mock_contiguous(void *arg)
{
	struct intel_memory_region *mem = arg;
	struct drm_i915_gem_object *obj;
	unsigned long n_objects;
	LIST_HEAD(objects);
	LIST_HEAD(holes);
	I915_RND_STATE(prng);
	resource_size_t total;
	resource_size_t min;
	u64 target;
	int err = 0;

	total = resource_size(&mem->region);

	/* Min size */
	obj = igt_object_create(mem, &objects, mem->mm.chunk_size,
				I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (obj->mm.pages->nents != 1) {
		pr_err("%s min object spans multiple sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/* Max size */
	obj = igt_object_create(mem, &objects, total, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (obj->mm.pages->nents != 1) {
		pr_err("%s max object spans multiple sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/* Internal fragmentation should not bleed into the object size */
	target = i915_prandom_u64_state(&prng);
	div64_u64_rem(target, total, &target);	/* random target < total */
	target = round_up(target, PAGE_SIZE);
	target = max_t(u64, PAGE_SIZE, target);

	obj = igt_object_create(mem, &objects, target,
				I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (obj->base.size != target) {
		pr_err("%s obj->base.size(%zx) != target(%llx)\n", __func__,
		       obj->base.size, target);
		err = -EINVAL;
		goto err_close_objects;
	}

	if (obj->mm.pages->nents != 1) {
		pr_err("%s object spans multiple sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/*
	 * Try to fragment the address space, such that half of it is free, but
	 * the max contiguous block size is SZ_64K.
	 */

	target = SZ_64K;
	n_objects = div64_u64(total, target);

	/* Alternate SZ_64K allocations between the two tracking lists. */
	while (n_objects--) {
		struct list_head *list;

		if (n_objects % 2)
			list = &holes;
		else
			list = &objects;

		obj = igt_object_create(mem, list, target,
					I915_BO_ALLOC_CONTIGUOUS);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto err_close_objects;
		}
	}

	/* Free every other chunk, leaving only SZ_64K-sized holes. */
	close_objects(mem, &holes);

	min = target;
	target = total >> 1;

	/* Make sure we can still allocate all the fragmented space */
	obj = igt_object_create(mem, &objects, target, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto err_close_objects;
	}

	igt_object_release(obj);

	/*
	 * Even though we have enough free space, we don't have a big enough
	 * contiguous block. Make sure that holds true.
	 */

	do {
		bool should_fail = target > min;

		obj = igt_object_create(mem, &objects, target,
					I915_BO_ALLOC_CONTIGUOUS);
		if (should_fail != IS_ERR(obj)) {
			pr_err("%s target allocation(%llx) mismatch\n",
			       __func__, target);
			err = -EINVAL;
			goto err_close_objects;
		}

		target >>= 1;
	} while (target >= mem->mm.chunk_size);

err_close_objects:
	list_splice_tail(&holes, &objects);
	close_objects(mem, &objects);
	return err;
}
263
/*
 * Check allocation behaviour when the region size is not a power-of-two:
 * the full size must still be allocatable without flags, while
 * I915_BO_ALLOC_CONTIGUOUS is capped at rounddown_pow_of_two(size) by the
 * buddy allocator's max_order.
 */
static int igt_mock_splintered_region(void *arg)
{
	struct intel_memory_region *mem = arg;
	struct drm_i915_private *i915 = mem->i915;
	struct drm_i915_gem_object *obj;
	unsigned int expected_order;
	LIST_HEAD(objects);
	u64 size;
	int err = 0;

	/*
	 * Sanity check we can still allocate everything even if the
	 * mm.max_order != mm.size. i.e our starting address space size is not a
	 * power-of-two.
	 */

	size = (SZ_4G - 1) & PAGE_MASK;
	mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	if (mem->mm.size != size) {
		pr_err("%s size mismatch(%llu != %llu)\n",
		       __func__, mem->mm.size, size);
		err = -EINVAL;
		goto out_put;
	}

	expected_order = get_order(rounddown_pow_of_two(size));
	if (mem->mm.max_order != expected_order) {
		pr_err("%s order mismatch(%u != %u)\n",
		       __func__, mem->mm.max_order, expected_order);
		err = -EINVAL;
		goto out_put;
	}

	obj = igt_object_create(mem, &objects, size, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_close;
	}

	close_objects(mem, &objects);

	/*
	 * While we should be able allocate everything without any flag
	 * restrictions, if we consider I915_BO_ALLOC_CONTIGUOUS then we are
	 * actually limited to the largest power-of-two for the region size i.e
	 * max_order, due to the inner workings of the buddy allocator. So make
	 * sure that does indeed hold true.
	 */

	obj = igt_object_create(mem, &objects, size, I915_BO_ALLOC_CONTIGUOUS);
	if (!IS_ERR(obj)) {
		pr_err("%s too large contiguous allocation was not rejected\n",
		       __func__);
		err = -EINVAL;
		goto out_close;
	}

	obj = igt_object_create(mem, &objects, rounddown_pow_of_two(size),
				I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj)) {
		pr_err("%s largest possible contiguous allocation failed\n",
		       __func__);
		err = PTR_ERR(obj);
		goto out_close;
	}

out_close:
	close_objects(mem, &objects);
out_put:
	intel_memory_region_put(mem);
	return err;
}
339
340be48f
MA
340static int igt_gpu_write_dw(struct intel_context *ce,
341 struct i915_vma *vma,
342 u32 dword,
343 u32 value)
344{
345 return igt_gpu_fill_dw(ce, vma, dword * sizeof(u32),
346 vma->size >> PAGE_SHIFT, value);
347}
348
/*
 * CPU readback check: wait for outstanding work on @obj, then verify that
 * dword index @dword of every page equals @val via a WC mapping.
 */
static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned long n = obj->base.size >> PAGE_SHIFT;
	u32 *ptr;
	int err;

	err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ptr += dword;
	while (n--) {
		if (*ptr != val) {
			pr_err("base[%u]=%08x, val=%08x\n",
			       dword, *ptr, val);
			err = -EINVAL;
			break;
		}

		/* Step one whole page (in u32 units) to the same dword slot. */
		ptr += PAGE_SIZE / sizeof(*ptr);
	}

	i915_gem_object_unpin_map(obj);
	return err;
}
378
/*
 * Until the IGT timeout expires, pick engines of @ctx in pseudo-random
 * order, have each store a random value at a random page-offset dword of
 * @obj, and verify every write from the CPU.
 */
static int igt_gpu_write(struct i915_gem_context *ctx,
			 struct drm_i915_gem_object *obj)
{
	struct i915_gem_engines *engines;
	struct i915_gem_engines_iter it;
	struct i915_address_space *vm;
	struct intel_context *ce;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	unsigned int count;
	struct i915_vma *vma;
	int *order;
	int i, n;
	int err = 0;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	/*
	 * Count all engines, and how many can store a dword; remember a vm
	 * from a capable engine so we can bind the vma below. Bail if none
	 * can store a dword.
	 */
	n = 0;
	count = 0;
	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		count++;
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		vm = ce->vm;
		n++;
	}
	i915_gem_context_unlock_engines(ctx);
	if (!n)
		return 0;

	order = i915_random_order(count * count, &prng);
	if (!order)
		return -ENOMEM;

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_free;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_free;

	i = 0;
	engines = i915_gem_context_lock_engines(ctx);
	do {
		u32 rng = prandom_u32_state(&prng);
		u32 dword = offset_in_page(rng) / 4;

		ce = engines->engines[order[i] % engines->num_engines];
		i = (i + 1) % (count * count);
		if (!ce || !intel_engine_can_store_dword(ce->engine))
			continue;

		err = igt_gpu_write_dw(ce, vma, dword, rng);
		if (err)
			break;

		err = igt_cpu_check(obj, dword, rng);
		if (err)
			break;
	} while (!__igt_timeout(end_time, NULL));
	i915_gem_context_unlock_engines(ctx);

out_free:
	kfree(order);

	/* Memory pressure is not a test failure. */
	if (err == -ENOMEM)
		err = 0;

	return err;
}
453
b908be54
MA
454static int igt_lmem_create(void *arg)
455{
456 struct drm_i915_private *i915 = arg;
457 struct drm_i915_gem_object *obj;
458 int err = 0;
459
460 obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0);
461 if (IS_ERR(obj))
462 return PTR_ERR(obj);
463
464 err = i915_gem_object_pin_pages(obj);
465 if (err)
466 goto out_put;
467
468 i915_gem_object_unpin_pages(obj);
469out_put:
470 i915_gem_object_put(obj);
471
472 return err;
473}
474
/*
 * Create a randomly sized (< SZ_32M) LMEM object and drive igt_gpu_write()
 * at it through a fresh context on a mock file.
 */
static int igt_lmem_write_gpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct i915_gem_context *ctx;
	struct file *file;
	I915_RND_STATE(prng);
	u32 sz;
	int err;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	/* Random object size, page aligned, up to SZ_32M. */
	sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);

	obj = i915_gem_object_create_lmem(i915, sz, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_file;
	}

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out_put;

	err = igt_gpu_write(ctx, obj);
	if (err)
		pr_err("igt_gpu_write failed(%d)\n", err);

	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);
out_file:
	fput(file);
	return err;
}
518
/*
 * Pick a uniformly random user engine of @class, or NULL if the class has
 * no engines registered.
 */
static struct intel_engine_cs *
random_engine_class(struct drm_i915_private *i915,
		    unsigned int class,
		    struct rnd_state *prng)
{
	struct intel_engine_cs *engine;
	unsigned int count;

	/* Walk the uabi rb-tree to count how many engines this class has. */
	count = 0;
	for (engine = intel_engine_lookup_user(i915, class, 0);
	     engine && engine->uabi_class == class;
	     engine = rb_entry_safe(rb_next(&engine->uabi_node),
				    typeof(*engine), uabi_node))
		count++;

	/* Then re-look-up a random instance in [0, count). */
	count = i915_prandom_u32_max_state(count, prng);
	return intel_engine_lookup_user(i915, class, count);
}
537
/*
 * CPU writes to LMEM through a WC mapping: initialise a contiguous LMEM
 * object from the GPU (blitter fill), then repeatedly memset32 random
 * spans of random size/alignment and spot-check one dword of each span.
 */
static int igt_lmem_write_cpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	/* Interesting write sizes/alignments around dword/cacheline/page. */
	u32 bytes[] = {
		0, /* rng placeholder */
		sizeof(u32),
		sizeof(u64),
		64, /* cl */
		PAGE_SIZE,
		PAGE_SIZE - sizeof(u32),
		PAGE_SIZE - sizeof(u64),
		PAGE_SIZE - 64,
	};
	struct intel_engine_cs *engine;
	u32 *vaddr;
	u32 sz;
	u32 i;
	int *order;
	int count;
	int err;

	engine = random_engine_class(i915, I915_ENGINE_CLASS_COPY, &prng);
	if (!engine)
		return 0;	/* no copy engine -> nothing to test */

	pr_info("%s: using %s\n", __func__, engine->name);

	/* Random object size, page aligned, at least 2 pages, < SZ_32M. */
	sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
	sz = max_t(u32, 2 * PAGE_SIZE, sz);

	obj = i915_gem_object_create_lmem(i915, sz, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	/* Put the pages into a known state -- from the gpu for added fun */
	intel_engine_pm_get(engine);
	err = i915_gem_object_fill_blt(obj, engine->kernel_context, 0xdeadbeaf);
	intel_engine_pm_put(engine);
	if (err)
		goto out_unpin;

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_set_to_wc_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err)
		goto out_unpin;

	count = ARRAY_SIZE(bytes);
	order = i915_random_order(count * count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto out_unpin;
	}

	/* A random multiple of u32, picked between [64, PAGE_SIZE - 64] */
	bytes[0] = igt_random_offset(&prng, 64, PAGE_SIZE - 64, 0, sizeof(u32));
	GEM_BUG_ON(!IS_ALIGNED(bytes[0], sizeof(u32)));

	i = 0;
	do {
		u32 offset;
		u32 align;
		u32 dword;
		u32 size;
		u32 val;

		size = bytes[order[i] % count];
		i = (i + 1) % (count * count);

		align = bytes[order[i] % count];
		i = (i + 1) % (count * count);

		align = max_t(u32, sizeof(u32), rounddown_pow_of_two(align));

		offset = igt_random_offset(&prng, 0, obj->base.size,
					   size, align);

		val = prandom_u32_state(&prng);
		memset32(vaddr + offset / sizeof(u32), val ^ 0xdeadbeaf,
			 size / sizeof(u32));

		/*
		 * Sample random dw -- don't waste precious time reading every
		 * single dw.
		 */
		dword = igt_random_offset(&prng, offset,
					  offset + size,
					  sizeof(u32), sizeof(u32));
		dword /= sizeof(u32);
		if (vaddr[dword] != (val ^ 0xdeadbeaf)) {
			pr_err("%s vaddr[%u]=%u, val=%u, size=%u, align=%u, offset=%u\n",
			       __func__, dword, vaddr[dword], val ^ 0xdeadbeaf,
			       size, align, offset);
			err = -EINVAL;
			break;
		}
	} while (!__igt_timeout(end_time, NULL));

out_unpin:
	i915_gem_object_unpin_map(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}
652
ba12993c
MA
653static const char *repr_type(u32 type)
654{
655 switch (type) {
656 case I915_MAP_WB:
657 return "WB";
658 case I915_MAP_WC:
659 return "WC";
660 }
661
662 return "";
663}
664
/*
 * Create an object of @size in @mr and pin a CPU mapping of @type,
 * returned via @out_addr. -ENOSPC (exhausted stolen memory) and -ENXIO
 * (mapping type unsupported for the region) are translated to -ENODEV so
 * callers can treat the combination as "skip".
 */
static struct drm_i915_gem_object *
create_region_for_mapping(struct intel_memory_region *mr, u64 size, u32 type,
			  void **out_addr)
{
	struct drm_i915_gem_object *obj;
	void *addr;

	obj = i915_gem_object_create_region(mr, size, 0);
	if (IS_ERR(obj)) {
		if (PTR_ERR(obj) == -ENOSPC) /* Stolen memory */
			return ERR_PTR(-ENODEV);
		return obj;
	}

	addr = i915_gem_object_pin_map(obj, type);
	if (IS_ERR(addr)) {
		i915_gem_object_put(obj);
		if (PTR_ERR(addr) == -ENXIO)
			return ERR_PTR(-ENODEV);
		return addr;
	}

	*out_addr = addr;
	return obj;
}
690
691static int wrap_ktime_compare(const void *A, const void *B)
692{
693 const ktime_t *a = A, *b = B;
694
695 return ktime_compare(*a, *b);
696}
697
/*
 * Copy @size bytes one machine word at a time; any tail smaller than
 * sizeof(unsigned long) is not copied.
 */
static void igt_memcpy_long(void *dst, const void *src, size_t size)
{
	unsigned long *d = dst;
	const unsigned long *s = src;
	size_t n;

	for (n = size / sizeof(unsigned long); n; n--)
		*d++ = *s++;
}
707
/* Baseline copy via plain memcpy(), to match the perf-table signature. */
static inline void igt_memcpy(void *dst, const void *src, size_t size)
{
	memcpy(dst, src, size);
}
712
/* Copy via the accelerated WC-read path (return value deliberately ignored). */
static inline void igt_memcpy_from_wc(void *dst, const void *src, size_t size)
{
	i915_memcpy_from_wc(dst, src, size);
}
717
/*
 * Time copies of @size bytes from a @src_type mapping of @src_mr to a
 * @dst_type mapping of @dst_mr with each copy routine, and report a
 * throughput estimate. Unsupported region/mapping combinations (-ENODEV
 * from create_region_for_mapping()) are skipped, returning 0.
 */
static int _perf_memcpy(struct intel_memory_region *src_mr,
			struct intel_memory_region *dst_mr,
			u64 size, u32 src_type, u32 dst_type)
{
	struct drm_i915_private *i915 = src_mr->i915;
	/* Copy routines under test; .skip disables unavailable ones. */
	const struct {
		const char *name;
		void (*copy)(void *dst, const void *src, size_t size);
		bool skip;
	} tests[] = {
		{
			"memcpy",
			igt_memcpy,
		},
		{
			"memcpy_long",
			igt_memcpy_long,
		},
		{
			"memcpy_from_wc",
			igt_memcpy_from_wc,
			!i915_has_memcpy_from_wc(),
		},
	};
	struct drm_i915_gem_object *src, *dst;
	void *src_addr, *dst_addr;
	int ret = 0;
	int i;

	src = create_region_for_mapping(src_mr, size, src_type, &src_addr);
	if (IS_ERR(src)) {
		ret = PTR_ERR(src);
		goto out;
	}

	dst = create_region_for_mapping(dst_mr, size, dst_type, &dst_addr);
	if (IS_ERR(dst)) {
		ret = PTR_ERR(dst);
		goto out_unpin_src;
	}

	for (i = 0; i < ARRAY_SIZE(tests); ++i) {
		ktime_t t[5];
		int pass;

		if (tests[i].skip)
			continue;

		/* Take 5 timing samples per routine. */
		for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
			ktime_t t0, t1;

			t0 = ktime_get();

			tests[i].copy(dst_addr, src_addr, size);

			t1 = ktime_get();
			t[pass] = ktime_sub(t1, t0);
		}

		/*
		 * Sort the samples and discard the fastest/slowest outliers;
		 * throughput = 4*size bytes over the 1:2:1 weighted middle
		 * three samples, scaled from ns to s and bytes to MiB.
		 */
		sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
		pr_info("%s src(%s, %s) -> dst(%s, %s) %14s %4llu KiB copy: %5lld MiB/s\n",
			__func__,
			src_mr->name,
			repr_type(src_type),
			dst_mr->name,
			repr_type(dst_type),
			tests[i].name,
			size >> 10,
			div64_u64(mul_u32_u32(4 * size,
					      1000 * 1000 * 1000),
				  t[1] + 2 * t[2] + t[3]) >> 20);

		cond_resched();
	}

	i915_gem_object_unpin_map(dst);
	i915_gem_object_put(dst);
out_unpin_src:
	i915_gem_object_unpin_map(src);
	i915_gem_object_put(src);

	i915_gem_drain_freed_objects(i915);
out:
	/* -ENODEV means "combination unsupported", not failure. */
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}
806
/*
 * Run _perf_memcpy() over the full matrix of (src region, dst region,
 * size, src mapping type, dst mapping type) combinations.
 */
static int perf_memcpy(void *arg)
{
	struct drm_i915_private *i915 = arg;
	static const u32 types[] = {
		I915_MAP_WB,
		I915_MAP_WC,
	};
	static const u32 sizes[] = {
		SZ_4K,
		SZ_64K,
		SZ_4M,
	};
	struct intel_memory_region *src_mr, *dst_mr;
	int src_id, dst_id;
	int i, j, k;
	int ret;

	for_each_memory_region(src_mr, i915, src_id) {
		for_each_memory_region(dst_mr, i915, dst_id) {
			for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
				for (j = 0; j < ARRAY_SIZE(types); ++j) {
					for (k = 0; k < ARRAY_SIZE(types); ++k) {
						ret = _perf_memcpy(src_mr,
								   dst_mr,
								   sizes[i],
								   types[j],
								   types[k]);
						if (ret)
							return ret;
					}
				}
			}
		}
	}

	return 0;
}
844
/*
 * Mock (no hardware) memory region selftests, run against a 2G mock
 * region with 4K chunks on a mock device.
 */
int intel_memory_region_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_fill),
		SUBTEST(igt_mock_contiguous),
		SUBTEST(igt_mock_splintered_region),
	};
	struct intel_memory_region *mem;
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
	if (IS_ERR(mem)) {
		pr_err("failed to create memory region\n");
		err = PTR_ERR(mem);
		goto out_unref;
	}

	err = i915_subtests(tests, mem);

	intel_memory_region_put(mem);
out_unref:
	mock_destroy_device(i915);
	return err;
}
/*
 * Live memory region selftests; skipped on devices without LMEM or with
 * a wedged GT.
 */
int intel_memory_region_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_lmem_create),
		SUBTEST(igt_lmem_write_cpu),
		SUBTEST(igt_lmem_write_gpu),
	};

	if (!HAS_LMEM(i915)) {
		pr_info("device lacks LMEM support, skipping\n");
		return 0;
	}

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_live_subtests(tests, i915);
}
/* Memory region performance selftests; skipped when the GT is wedged. */
int intel_memory_region_perf_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(perf_memcpy),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_live_subtests(tests, i915);
}