/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/list_sort.h>
#include <linux/prime_numbers.h>

#include "../i915_selftest.h"
#include "i915_random.h"

#include "mock_context.h"
#include "mock_drm.h"
#include "mock_gem_device.h"

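/*
 * Selftests for GTT/PPGTT virtual address space management. The helpers
 * below fabricate objects with fake backing storage so that very large
 * address ranges can be exercised without allocating real memory.
 */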
static void fake_free_pages(struct drm_i915_gem_object *obj,
			    struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}

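/*
 * Provide "backing storage" without allocating any real pages: every
 * scatterlist entry points at the same bias'd pfn, carved into chunks of
 * at most 2GiB (BIT(31)). Marking the object DONTNEED lets the shrinker
 * discard it again without writeback.
 */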
static int fake_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
#define PFN_BIAS 0x1000
	struct sg_table *pages;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	typeof(obj->base.size) rem;

	pages = kmalloc(sizeof(*pages), GFP);
	if (!pages)
		return -ENOMEM;

	rem = round_up(obj->base.size, BIT(31)) >> 31;
	if (sg_alloc_table(pages, rem, GFP)) {
		kfree(pages);
		return -ENOMEM;
	}

	sg_page_sizes = 0;
	rem = obj->base.size;
	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
		unsigned long len = min_t(typeof(rem), rem, BIT(31));

		GEM_BUG_ON(!len);
		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
		sg_dma_address(sg) = page_to_phys(sg_page(sg));
		sg_dma_len(sg) = len;
		sg_page_sizes |= len;

		rem -= len;
	}
	GEM_BUG_ON(rem);

	obj->mm.madv = I915_MADV_DONTNEED;

	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);

	return 0;
#undef GFP
}

static void fake_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	fake_free_pages(obj, pages);
	obj->mm.dirty = false;
	obj->mm.madv = I915_MADV_WILLNEED;
}

static const struct drm_i915_gem_object_ops fake_ops = {
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_pages,
	.put_pages = fake_put_pages,
};

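/*
 * Create a GEM object of the requested size backed by the fake page
 * machinery above. The pages are pinned once (and released again) so
 * that the "backing storage" is preallocated before the test starts.
 */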
static struct drm_i915_gem_object *
fake_dma_object(struct drm_i915_private *i915, u64 size)
{
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc(i915);
	if (!obj)
		goto err;

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &fake_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	/* Preallocate the "backing storage" */
	if (i915_gem_object_pin_pages(obj))
		goto err_obj;

	i915_gem_object_unpin_pages(obj);
	return obj;

err_obj:
	i915_gem_object_put(obj);
err:
	return ERR_PTR(-ENOMEM);
}

static int igt_ppgtt_alloc(void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct i915_hw_ppgtt *ppgtt;
	u64 size, last;
	int err;

	/* Allocate a ppgtt and try to fill the entire range */

	if (!USES_PPGTT(dev_priv))
		return 0;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return -ENOMEM;

	mutex_lock(&dev_priv->drm.struct_mutex);
	err = __hw_ppgtt_init(ppgtt, dev_priv);
	if (err)
		goto err_ppgtt;

	if (!ppgtt->base.allocate_va_range)
		goto err_ppgtt_cleanup;

	/* Check we can allocate the entire range */
	for (size = 4096;
	     size <= ppgtt->base.total;
	     size <<= 2) {
		err = ppgtt->base.allocate_va_range(&ppgtt->base, 0, size);
		if (err) {
			if (err == -ENOMEM) {
				pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
					size, ilog2(size));
				err = 0; /* virtual space too large! */
			}
			goto err_ppgtt_cleanup;
		}

		ppgtt->base.clear_range(&ppgtt->base, 0, size);
	}

	/* Check we can incrementally allocate the entire range */
	for (last = 0, size = 4096;
	     size <= ppgtt->base.total;
	     last = size, size <<= 2) {
		err = ppgtt->base.allocate_va_range(&ppgtt->base,
						    last, size - last);
		if (err) {
			if (err == -ENOMEM) {
				pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
					last, size - last, ilog2(size));
				err = 0; /* virtual space too large! */
			}
			goto err_ppgtt_cleanup;
		}
	}

err_ppgtt_cleanup:
	ppgtt->base.cleanup(&ppgtt->base);
err_ppgtt:
	mutex_unlock(&dev_priv->drm.struct_mutex);
	kfree(ppgtt);
	return err;
}

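/*
 * Exercise the low-level vm hooks directly: allocate_va_range,
 * insert_entries and clear_range are driven with a mock vma at randomly
 * ordered offsets within the hole, bypassing the usual VMA API.
 */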
static int lowlevel_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
{
	I915_RND_STATE(seed_prng);
	unsigned int size;
	struct i915_vma mock_vma;

	memset(&mock_vma, 0, sizeof(struct i915_vma));

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		I915_RND_SUBSTATE(prng, seed_prng);
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		u64 hole_size;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size;
		do {
			count >>= 1;
			order = i915_random_order(count, &prng);
		} while (!order && count);
		if (!order)
			break;

		GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
		GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */

		obj = fake_dma_object(i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
			break;
		}

		GEM_BUG_ON(obj->base.size != BIT_ULL(size));

		if (i915_gem_object_pin_pages(obj)) {
			i915_gem_object_put(obj);
			kfree(order);
			break;
		}

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);

			if (igt_timeout(end_time,
					"%s timed out before %d/%d\n",
					__func__, n, count)) {
				hole_end = hole_start; /* quit */
				break;
			}

			if (vm->allocate_va_range &&
			    vm->allocate_va_range(vm, addr, BIT_ULL(size)))
				break;

			mock_vma.pages = obj->mm.pages;
			mock_vma.node.size = BIT_ULL(size);
			mock_vma.node.start = addr;

			vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
		}
		count = n;

		i915_random_reorder(order, count, &prng);
		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
			vm->clear_range(vm, addr, BIT_ULL(size));
		}

		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);

		kfree(order);
	}

	return 0;
}

static void close_object_list(struct list_head *objects,
			      struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj, *on;
	int ignored;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, vm, NULL);
		if (!IS_ERR(vma))
			ignored = i915_vma_unbind(vma);
		/* Only ppgtt vma may be closed before the object is freed */
		if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
			i915_vma_close(vma);

		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}
}

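/*
 * Fill the hole with differently sized objects packed against either end,
 * in four passes per phase: pin and verify placement walking the list
 * forwards, verify and unbind forwards, then repeat both in reverse.
 */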
static int fill_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
{
	const u64 hole_size = hole_end - hole_start;
	struct drm_i915_gem_object *obj;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
	unsigned long npages, prime, flags;
	struct i915_vma *vma;
	LIST_HEAD(objects);
	int err;

	/* Try binding many VMA working inwards from either edge */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	for_each_prime_number_from(prime, 2, max_step) {
		for (npages = 1; npages <= max_pages; npages *= prime) {
			const u64 full_size = npages << PAGE_SHIFT;
			const struct {
				const char *name;
				u64 offset;
				int step;
			} phases[] = {
				{ "top-down", hole_end, -1, },
				{ "bottom-up", hole_start, 1, },
				{ }
			}, *p;

			obj = fake_dma_object(i915, full_size);
			if (IS_ERR(obj))
				break;

			list_add(&obj->st_link, &objects);

			/* Align differing sized objects against the edges, and
			 * check we don't walk off into the void when binding
			 * them into the GTT.
			 */
			for (p = phases; p->name; p++) {
				u64 offset;

				offset = p->offset;
				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					err = i915_vma_pin(vma, 0, 0, offset | flags);
					if (err) {
						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);
						goto err;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					i915_vma_unpin(vma);

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       offset);
						err = -EINVAL;
						goto err;
					}

					err = i915_vma_unbind(vma);
					if (err) {
						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       err);
						goto err;
					}

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					err = i915_vma_pin(vma, 0, 0, offset | flags);
					if (err) {
						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);
						goto err;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					i915_vma_unpin(vma);

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					err = i915_vma_unbind(vma);
					if (err) {
						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       err);
						goto err;
					}

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}
			}

			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
					__func__, npages, prime)) {
				err = -EINTR;
				goto err;
			}
		}

		close_object_list(&objects, vm);
	}

	return 0;

err:
	close_object_list(&objects, vm);
	return err;
}

static int walk_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
{
	const u64 hole_size = hole_end - hole_start;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
	unsigned long flags;
	u64 size;

	/* Try binding a single VMA in different positions within the hole */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	for_each_prime_number_from(size, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;
		u64 addr;
		int err = 0;

		obj = fake_dma_object(i915, size << PAGE_SHIFT);
		if (IS_ERR(obj))
			break;

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_put;
		}

		for (addr = hole_start;
		     addr + obj->base.size < hole_end;
		     addr += obj->base.size) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
				       __func__, addr, vma->size,
				       hole_start, hole_end, err);
				goto err_close;
			}
			i915_vma_unpin(vma);

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				err = -EINVAL;
				goto err_close;
			}

			err = i915_vma_unbind(vma);
			if (err) {
				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
				       __func__, addr, vma->size, err);
				goto err_close;
			}

			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

			if (igt_timeout(end_time,
					"%s timed out at %llx\n",
					__func__, addr)) {
				err = -EINTR;
				goto err_close;
			}
		}

err_close:
		if (!i915_vma_is_ggtt(vma))
			i915_vma_close(vma);
err_put:
		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

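/*
 * Walk every power-of-two boundary within the hole and check that a pair
 * of pages can be inserted straddling it, i.e. that we handle alignment
 * around each pot step without walking off either end of the hole.
 */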
static int pot_hole(struct drm_i915_private *i915,
		    struct i915_address_space *vm,
		    u64 hole_start, u64 hole_end,
		    unsigned long end_time)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned long flags;
	unsigned int pot;
	int err = 0;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	/* Insert a pair of pages across every pot boundary within the hole */
	for (pot = fls64(hole_end - 1) - 1;
	     pot > ilog2(2 * I915_GTT_PAGE_SIZE);
	     pot--) {
		u64 step = BIT_ULL(pot);
		u64 addr;

		for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
		     addr <= round_down(hole_end - 2 * I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
		     addr += step) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
				       __func__,
				       addr,
				       hole_start, hole_end,
				       err);
				goto err;
			}

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				i915_vma_unpin(vma);
				err = i915_vma_unbind(vma);
				err = -EINVAL;
				goto err;
			}

			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			GEM_BUG_ON(err);
		}

		if (igt_timeout(end_time,
				"%s timed out after %d/%d\n",
				__func__, pot, fls64(hole_end - 1) - 1)) {
			err = -EINTR;
			goto err;
		}
	}

err:
	if (!i915_vma_is_ggtt(vma))
		i915_vma_close(vma);
err_obj:
	i915_gem_object_put(obj);
	return err;
}

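/*
 * Like lowlevel_hole, but using the full VMA API: bind an object at
 * randomly ordered multiples of its size throughout the hole, verifying
 * placement after each pin and unbinding again before the next step.
 */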
static int drunk_hole(struct drm_i915_private *i915,
		      struct i915_address_space *vm,
		      u64 hole_start, u64 hole_end,
		      unsigned long end_time)
{
	I915_RND_STATE(prng);
	unsigned int size;
	unsigned long flags;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		struct i915_vma *vma;
		u64 hole_size;
		int err;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size;
		do {
			count >>= 1;
			order = i915_random_order(count, &prng);
		} while (!order && count);
		if (!order)
			break;

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */

		obj = fake_dma_object(i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
			break;
		}

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_obj;
		}

		GEM_BUG_ON(vma->size != BIT_ULL(size));

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
				       __func__,
				       addr, BIT_ULL(size),
				       hole_start, hole_end,
				       err);
				goto err;
			}

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, BIT_ULL(size));
				i915_vma_unpin(vma);
				err = i915_vma_unbind(vma);
				err = -EINVAL;
				goto err;
			}

			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			GEM_BUG_ON(err);

			if (igt_timeout(end_time,
					"%s timed out after %d/%d\n",
					__func__, n, count)) {
				err = -EINTR;
				goto err;
			}
		}

err:
		if (!i915_vma_is_ggtt(vma))
			i915_vma_close(vma);
err_obj:
		i915_gem_object_put(obj);
		kfree(order);
		if (err)
			return err;
	}

	return 0;
}

static int __shrink_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
{
	struct drm_i915_gem_object *obj;
	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
	unsigned int order = 12;
	LIST_HEAD(objects);
	int err = 0;
	u64 addr;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (addr = hole_start; addr < hole_end; ) {
		struct i915_vma *vma;
		u64 size = BIT_ULL(order++);

		size = min(size, hole_end - addr);
		obj = fake_dma_object(i915, size);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			break;
		}

		GEM_BUG_ON(vma->size != size);

		err = i915_vma_pin(vma, 0, 0, addr | flags);
		if (err) {
			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
			       __func__, addr, size, hole_start, hole_end, err);
			break;
		}

		if (!drm_mm_node_allocated(&vma->node) ||
		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
			pr_err("%s incorrect at %llx + %llx\n",
			       __func__, addr, size);
			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			err = -EINVAL;
			break;
		}

		i915_vma_unpin(vma);
		addr += size;

		if (igt_timeout(end_time,
				"%s timed out at offset %llx [%llx - %llx]\n",
				__func__, addr, hole_start, hole_end)) {
			err = -EINTR;
			break;
		}
	}

	close_object_list(&objects, vm);
	return err;
}

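/*
 * Rerun __shrink_hole with fault injection enabled on the address space,
 * varying the failure interval over the primes, to exercise the error
 * and unwind paths of the page-table allocators.
 */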
static int shrink_hole(struct drm_i915_private *i915,
		       struct i915_address_space *vm,
		       u64 hole_start, u64 hole_end,
		       unsigned long end_time)
{
	unsigned long prime;
	int err;

	vm->fault_attr.probability = 999;
	atomic_set(&vm->fault_attr.times, -1);

	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
		vm->fault_attr.interval = prime;
		err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
		if (err)
			break;
	}

	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));

	return err;
}

static int exercise_ppgtt(struct drm_i915_private *dev_priv,
			  int (*func)(struct drm_i915_private *i915,
				      struct i915_address_space *vm,
				      u64 hole_start, u64 hole_end,
				      unsigned long end_time))
{
	struct drm_file *file;
	struct i915_hw_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);
	int err;

	if (!USES_FULL_PPGTT(dev_priv))
		return 0;

	file = mock_file(dev_priv);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&dev_priv->drm.struct_mutex);
	ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv, "mock");
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_unlock;
	}
	GEM_BUG_ON(offset_in_page(ppgtt->base.total));
	GEM_BUG_ON(ppgtt->base.closed);

	err = func(dev_priv, &ppgtt->base, 0, ppgtt->base.total, end_time);

	i915_ppgtt_close(&ppgtt->base);
	i915_ppgtt_put(ppgtt);
out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	mock_file_free(dev_priv, file);
	return err;
}

static int igt_ppgtt_fill(void *arg)
{
	return exercise_ppgtt(arg, fill_hole);
}

static int igt_ppgtt_walk(void *arg)
{
	return exercise_ppgtt(arg, walk_hole);
}

static int igt_ppgtt_pot(void *arg)
{
	return exercise_ppgtt(arg, pot_hole);
}

static int igt_ppgtt_drunk(void *arg)
{
	return exercise_ppgtt(arg, drunk_hole);
}

static int igt_ppgtt_lowlevel(void *arg)
{
	return exercise_ppgtt(arg, lowlevel_hole);
}

static int igt_ppgtt_shrink(void *arg)
{
	return exercise_ppgtt(arg, shrink_hole);
}

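/*
 * list_sort() comparator used to keep the GGTT hole list ordered by start
 * address, so that exercise_ggtt() below can scan the holes bottom-up and
 * restart safely after each test mutates the drm_mm.
 */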
static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
{
	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);

	if (a->start < b->start)
		return -1;
	else
		return 1;
}

static int exercise_ggtt(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	u64 hole_start, hole_end, last = 0;
	struct drm_mm_node *node;
	IGT_TIMEOUT(end_time);
	int err = 0;

	mutex_lock(&i915->drm.struct_mutex);
restart:
	list_sort(NULL, &ggtt->base.mm.hole_stack, sort_holes);
	drm_mm_for_each_hole(node, &ggtt->base.mm, hole_start, hole_end) {
		if (hole_start < last)
			continue;

		if (ggtt->base.mm.color_adjust)
			ggtt->base.mm.color_adjust(node, 0,
						   &hole_start, &hole_end);
		if (hole_start >= hole_end)
			continue;

		err = func(i915, &ggtt->base, hole_start, hole_end, end_time);
		if (err)
			break;

		/* As we have manipulated the drm_mm, the list may be corrupt */
		last = hole_end;
		goto restart;
	}
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}

static int igt_ggtt_fill(void *arg)
{
	return exercise_ggtt(arg, fill_hole);
}

static int igt_ggtt_walk(void *arg)
{
	return exercise_ggtt(arg, walk_hole);
}

static int igt_ggtt_pot(void *arg)
{
	return exercise_ggtt(arg, pot_hole);
}

static int igt_ggtt_drunk(void *arg)
{
	return exercise_ggtt(arg, drunk_hole);
}

static int igt_ggtt_lowlevel(void *arg)
{
	return exercise_ggtt(arg, lowlevel_hole);
}

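/*
 * Check insert_page: map the same dma page at 1024 page offsets within
 * the mappable aperture in random order, write a distinct value through
 * each mapping, then revisit every offset and read the value back.
 */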
static int igt_ggtt_page(void *arg)
{
	const unsigned int count = PAGE_SIZE/sizeof(u32);
	I915_RND_STATE(prng);
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node tmp;
	unsigned int *order, n;
	int err;

	mutex_lock(&i915->drm.struct_mutex);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_unlock;
	}

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out_free;

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp,
					  1024 * PAGE_SIZE, 0,
					  I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);
	if (err)
		goto out_unpin;

	order = i915_random_order(count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto out_remove;
	}

	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;

		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, 0),
				       offset, I915_CACHE_NONE, 0);

		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
		iowrite32(n, vaddr + n);
		io_mapping_unmap_atomic(vaddr);

		wmb();
		ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);
	}

	i915_random_reorder(order, count, &prng);
	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;
		u32 val;

		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, 0),
				       offset, I915_CACHE_NONE, 0);

		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
		val = ioread32(vaddr + n);
		io_mapping_unmap_atomic(vaddr);

		ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);

		if (val != n) {
			pr_err("insert page failed: found %d, expected %d\n",
			       val, n);
			err = -EINVAL;
			break;
		}
	}

	kfree(order);
out_remove:
	drm_mm_remove_node(&tmp);
out_unpin:
	i915_gem_object_unpin_pages(obj);
out_free:
	i915_gem_object_put(obj);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

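/*
 * Mimic the bookkeeping of a real bind (bind_count, pinned pages and the
 * inactive list) for nodes that were inserted directly with the low-level
 * GTT allocators, so that eviction can treat them as ordinary vmas.
 */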
static void track_vma_bind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	obj->bind_count++; /* track for eviction later */
	__i915_gem_object_pin_pages(obj);

	vma->pages = obj->mm.pages;
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
}

static int exercise_mock(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	struct i915_gem_context *ctx;
	struct i915_hw_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);
	int err;

	ctx = mock_context(i915, "mock");
	if (!ctx)
		return -ENOMEM;

	ppgtt = ctx->ppgtt;
	GEM_BUG_ON(!ppgtt);

	err = func(i915, &ppgtt->base, 0, ppgtt->base.total, end_time);

	mock_context_close(ctx);
	return err;
}

static int igt_mock_fill(void *arg)
{
	return exercise_mock(arg, fill_hole);
}

static int igt_mock_walk(void *arg)
{
	return exercise_mock(arg, walk_hole);
}

static int igt_mock_pot(void *arg)
{
	return exercise_mock(arg, pot_hole);
}

static int igt_mock_drunk(void *arg)
{
	return exercise_mock(arg, drunk_hole);
}

static int igt_gtt_reserve(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *on;
	LIST_HEAD(objects);
	u64 total;
	int err;

	/* i915_gem_gtt_reserve() tries to reserve the precise range
	 * for the node, and evicts if it has to. So our test checks that
	 * it can give us the requested space and prevent overlaps.
	 */

	/* Start by filling the GGTT */
	for (total = 0;
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
					   obj->base.size,
					   total,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

	/* Now we start forcing evictions */
	for (total = I915_GTT_PAGE_SIZE;
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
					   obj->base.size,
					   total,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

	/* And then try at random */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;
		u64 offset;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_vma_unbind(vma);
		if (err) {
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
			goto out;
		}

		offset = random_offset(0, i915->ggtt.base.total,
				       2*I915_GTT_PAGE_SIZE,
				       I915_GTT_MIN_ALIGNMENT);

		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
					   obj->base.size,
					   offset,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       offset, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

out:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);
	}
	return err;
}

static int igt_gtt_insert(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *on;
	struct drm_mm_node tmp = {};
	const struct invalid_insert {
		u64 size;
		u64 alignment;
		u64 start, end;
	} invalid_insert[] = {
		{
			i915->ggtt.base.total + I915_GTT_PAGE_SIZE, 0,
			0, i915->ggtt.base.total,
		},
		{
			2*I915_GTT_PAGE_SIZE, 0,
			0, I915_GTT_PAGE_SIZE,
		},
		{
			-(u64)I915_GTT_PAGE_SIZE, 0,
			0, 4*I915_GTT_PAGE_SIZE,
		},
		{
			-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
			0, 4*I915_GTT_PAGE_SIZE,
		},
		{
			I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
			I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
		},
		{}
	}, *ii;
	LIST_HEAD(objects);
	u64 total;
	int err;

	/* i915_gem_gtt_insert() tries to allocate some free space in the GTT
	 * for the node, evicting if required.
	 */

	/* Check a couple of obviously invalid requests */
	for (ii = invalid_insert; ii->size; ii++) {
		err = i915_gem_gtt_insert(&i915->ggtt.base, &tmp,
					  ii->size, ii->alignment,
					  I915_COLOR_UNEVICTABLE,
					  ii->start, ii->end,
					  0);
		if (err != -ENOSPC) {
			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
			       ii->size, ii->alignment, ii->start, ii->end,
			       err);
			return -EINVAL;
		}
	}

	/* Start by filling the GGTT */
	for (total = 0;
	     total + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.base.total,
					  0);
		if (err == -ENOSPC) {
			/* maxed out the GGTT space */
			i915_gem_object_put(obj);
			break;
		}
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);
		__i915_vma_pin(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	}

	list_for_each_entry(obj, &objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		if (!drm_mm_node_allocated(&vma->node)) {
			pr_err("VMA was unexpectedly evicted!\n");
			err = -EINVAL;
			goto out;
		}

		__i915_vma_unpin(vma);
	}

	/* If we then reinsert, we should find the same hole */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;
		u64 offset;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		offset = vma->node.start;

		err = i915_vma_unbind(vma);
		if (err) {
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
			goto out;
		}

		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.base.total,
					  0);
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset) {
			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
			       offset, vma->node.start);
			err = -EINVAL;
			goto out;
		}
	}

	/* And then force evictions */
	for (total = 0;
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.base.total,
					  0);
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	}

out:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);
	}
	return err;
}

int i915_gem_gtt_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_drunk),
		SUBTEST(igt_mock_walk),
		SUBTEST(igt_mock_pot),
		SUBTEST(igt_mock_fill),
		SUBTEST(igt_gtt_reserve),
		SUBTEST(igt_gtt_insert),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_subtests(tests, i915);
	mutex_unlock(&i915->drm.struct_mutex);

	drm_dev_unref(&i915->drm);
	return err;
}

int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_ppgtt_alloc),
		SUBTEST(igt_ppgtt_lowlevel),
		SUBTEST(igt_ppgtt_drunk),
		SUBTEST(igt_ppgtt_walk),
		SUBTEST(igt_ppgtt_pot),
		SUBTEST(igt_ppgtt_fill),
		SUBTEST(igt_ppgtt_shrink),
		SUBTEST(igt_ggtt_lowlevel),
		SUBTEST(igt_ggtt_drunk),
		SUBTEST(igt_ggtt_walk),
		SUBTEST(igt_ggtt_pot),
		SUBTEST(igt_ggtt_fill),
		SUBTEST(igt_ggtt_page),
	};

	GEM_BUG_ON(offset_in_page(i915->ggtt.base.total));

	return i915_subtests(tests, i915);
}