/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/list_sort.h>
#include <linux/prime_numbers.h>

#include "../i915_selftest.h"
#include "i915_random.h"

#include "mock_context.h"
#include "mock_drm.h"
#include "mock_gem_device.h"

static void fake_free_pages(struct drm_i915_gem_object *obj,
			    struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}

static struct sg_table *
fake_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
#define PFN_BIAS 0x1000
	struct sg_table *pages;
	struct scatterlist *sg;
	typeof(obj->base.size) rem;

	pages = kmalloc(sizeof(*pages), GFP);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	rem = round_up(obj->base.size, BIT(31)) >> 31;
	if (sg_alloc_table(pages, rem, GFP)) {
		kfree(pages);
		return ERR_PTR(-ENOMEM);
	}

	rem = obj->base.size;
	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
		unsigned long len = min_t(typeof(rem), rem, BIT(31));

		GEM_BUG_ON(!len);
		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
		sg_dma_address(sg) = page_to_phys(sg_page(sg));
		sg_dma_len(sg) = len;

		rem -= len;
	}
	GEM_BUG_ON(rem);

	obj->mm.madv = I915_MADV_DONTNEED;
	return pages;
#undef GFP
}

static void fake_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	fake_free_pages(obj, pages);
	obj->mm.dirty = false;
	obj->mm.madv = I915_MADV_WILLNEED;
}

static const struct drm_i915_gem_object_ops fake_ops = {
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_pages,
	.put_pages = fake_put_pages,
};

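/*
 * Note on the "fake" object ops above: fake_get_pages() never touches real
 * memory. It builds a scatterlist whose entries all point at a single bogus
 * PFN (PFN_BIAS) and fills in the DMA addresses directly, so an object of
 * many GiB can be "backed" instantly, and marking it I915_MADV_DONTNEED lets
 * the shrinker discard it again. This is what allows the hole-filling tests
 * below to exercise the full GTT range without allocating system memory.
 */
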
static struct drm_i915_gem_object *
fake_dma_object(struct drm_i915_private *i915, u64 size)
{
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc(i915);
	if (!obj)
		goto err;

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &fake_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	/* Preallocate the "backing storage" */
	if (i915_gem_object_pin_pages(obj))
		goto err_obj;

	i915_gem_object_unpin_pages(obj);
	return obj;

err_obj:
	i915_gem_object_put(obj);
err:
	return ERR_PTR(-ENOMEM);
}

static int igt_ppgtt_alloc(void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct i915_hw_ppgtt *ppgtt;
	u64 size, last;
	int err;

	/* Allocate a ppgtt and try to fill the entire range */

	if (!USES_PPGTT(dev_priv))
		return 0;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return -ENOMEM;

	mutex_lock(&dev_priv->drm.struct_mutex);
	err = __hw_ppgtt_init(ppgtt, dev_priv);
	if (err)
		goto err_ppgtt;

	if (!ppgtt->base.allocate_va_range)
		goto err_ppgtt_cleanup;

	/* Check we can allocate the entire range */
	for (size = 4096;
	     size <= ppgtt->base.total;
	     size <<= 2) {
		err = ppgtt->base.allocate_va_range(&ppgtt->base, 0, size);
		if (err) {
			if (err == -ENOMEM) {
				pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
					size, ilog2(size));
				err = 0; /* virtual space too large! */
			}
			goto err_ppgtt_cleanup;
		}

		ppgtt->base.clear_range(&ppgtt->base, 0, size);
	}

	/* Check we can incrementally allocate the entire range */
	for (last = 0, size = 4096;
	     size <= ppgtt->base.total;
	     last = size, size <<= 2) {
		err = ppgtt->base.allocate_va_range(&ppgtt->base,
						    last, size - last);
		if (err) {
			if (err == -ENOMEM) {
				pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
					last, size - last, ilog2(size));
				err = 0; /* virtual space too large! */
			}
			goto err_ppgtt_cleanup;
		}
	}

err_ppgtt_cleanup:
	ppgtt->base.cleanup(&ppgtt->base);
err_ppgtt:
	mutex_unlock(&dev_priv->drm.struct_mutex);
	kfree(ppgtt);
	return err;
}

static int lowlevel_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
{
	I915_RND_STATE(seed_prng);
	unsigned int size;
	struct i915_vma mock_vma;

	memset(&mock_vma, 0, sizeof(struct i915_vma));

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		I915_RND_SUBSTATE(prng, seed_prng);
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		u64 hole_size;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size;
		do {
			count >>= 1;
			order = i915_random_order(count, &prng);
		} while (!order && count);
		if (!order)
			break;

		GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
		GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */
		obj = fake_dma_object(i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
			break;
		}

		GEM_BUG_ON(obj->base.size != BIT_ULL(size));

		if (i915_gem_object_pin_pages(obj)) {
			i915_gem_object_put(obj);
			kfree(order);
			break;
		}

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);

			if (igt_timeout(end_time,
					"%s timed out before %d/%d\n",
					__func__, n, count)) {
				hole_end = hole_start; /* quit */
				break;
			}

			if (vm->allocate_va_range &&
			    vm->allocate_va_range(vm, addr, BIT_ULL(size)))
				break;

			mock_vma.pages = obj->mm.pages;
			mock_vma.node.size = BIT_ULL(size);
			mock_vma.node.start = addr;

			vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
		}
		count = n;

		i915_random_reorder(order, count, &prng);
		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
			vm->clear_range(vm, addr, BIT_ULL(size));
		}

		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);

		kfree(order);
	}

	return 0;
}

static void close_object_list(struct list_head *objects,
			      struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj, *on;
	int ignored;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, vm, NULL);
		if (!IS_ERR(vma))
			ignored = i915_vma_unbind(vma);
		/* Only ppgtt vma may be closed before the object is freed */
		if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
			i915_vma_close(vma);

		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}
}

static int fill_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
{
	const u64 hole_size = hole_end - hole_start;
	struct drm_i915_gem_object *obj;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
	unsigned long npages, prime, flags;
	struct i915_vma *vma;
	LIST_HEAD(objects);
	int err;

	/* Try binding many VMA working inwards from either edge */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	for_each_prime_number_from(prime, 2, max_step) {
		for (npages = 1; npages <= max_pages; npages *= prime) {
			const u64 full_size = npages << PAGE_SHIFT;
			const struct {
				const char *name;
				u64 offset;
				int step;
			} phases[] = {
				{ "top-down", hole_end, -1, },
				{ "bottom-up", hole_start, 1, },
				{ }
			}, *p;

			obj = fake_dma_object(i915, full_size);
			if (IS_ERR(obj))
				break;

			list_add(&obj->st_link, &objects);

			/* Align differing sized objects against the edges, and
			 * check we don't walk off into the void when binding
			 * them into the GTT.
			 */
			for (p = phases; p->name; p++) {
				u64 offset;

				offset = p->offset;
				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					err = i915_vma_pin(vma, 0, 0, offset | flags);
					if (err) {
						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);
						goto err;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					i915_vma_unpin(vma);

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       offset);
						err = -EINVAL;
						goto err;
					}

					err = i915_vma_unbind(vma);
					if (err) {
						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       err);
						goto err;
					}

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					err = i915_vma_pin(vma, 0, 0, offset | flags);
					if (err) {
						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);
						goto err;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					i915_vma_unpin(vma);

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					err = i915_vma_unbind(vma);
					if (err) {
						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       err);
						goto err;
					}

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}
			}

			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
					__func__, npages, prime)) {
				err = -EINTR;
				goto err;
			}
		}

		close_object_list(&objects, vm);
	}

	return 0;

err:
	close_object_list(&objects, vm);
	return err;
}

static int walk_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
{
	const u64 hole_size = hole_end - hole_start;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
	unsigned long flags;
	u64 size;

	/* Try binding a single VMA in different positions within the hole */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	for_each_prime_number_from(size, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;
		u64 addr;
		int err = 0;

		obj = fake_dma_object(i915, size << PAGE_SHIFT);
		if (IS_ERR(obj))
			break;

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_put;
		}

		for (addr = hole_start;
		     addr + obj->base.size < hole_end;
		     addr += obj->base.size) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s bind failed at %llx + %llx [hole %llx- %llx] with err=%d\n",
				       __func__, addr, vma->size,
				       hole_start, hole_end, err);
				goto err_close;
			}
			i915_vma_unpin(vma);

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				err = -EINVAL;
				goto err_close;
			}

			err = i915_vma_unbind(vma);
			if (err) {
				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
				       __func__, addr, vma->size, err);
				goto err_close;
			}

			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

			if (igt_timeout(end_time,
					"%s timed out at %llx\n",
					addr)) {
				err = -EINTR;
				goto err_close;
			}
		}

err_close:
		if (!i915_vma_is_ggtt(vma))
			i915_vma_close(vma);
err_put:
		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

static int pot_hole(struct drm_i915_private *i915,
		    struct i915_address_space *vm,
		    u64 hole_start, u64 hole_end,
		    unsigned long end_time)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned long flags;
	unsigned int pot;
	int err = 0;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	/* Insert a pair of pages across every pot boundary within the hole */
	for (pot = fls64(hole_end - 1) - 1;
	     pot > ilog2(2 * I915_GTT_PAGE_SIZE);
	     pot--) {
		u64 step = BIT_ULL(pot);
		u64 addr;

		for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
		     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
		     addr += step) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
				       __func__,
				       addr,
				       hole_start, hole_end,
				       err);
				goto err;
			}

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				i915_vma_unpin(vma);
				err = i915_vma_unbind(vma);
				err = -EINVAL;
				goto err;
			}

			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			GEM_BUG_ON(err);
		}

		if (igt_timeout(end_time,
				"%s timed out after %d/%d\n",
				__func__, pot, fls64(hole_end - 1) - 1)) {
			err = -EINTR;
			goto err;
		}
	}

err:
	if (!i915_vma_is_ggtt(vma))
		i915_vma_close(vma);
err_obj:
	i915_gem_object_put(obj);
	return err;
}

static int drunk_hole(struct drm_i915_private *i915,
		      struct i915_address_space *vm,
		      u64 hole_start, u64 hole_end,
		      unsigned long end_time)
{
	I915_RND_STATE(prng);
	unsigned int size;
	unsigned long flags;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		struct i915_vma *vma;
		u64 hole_size;
		int err = 0;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size;
		do {
			count >>= 1;
			order = i915_random_order(count, &prng);
		} while (!order && count);
		if (!order)
			break;

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */
		obj = fake_dma_object(i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
			break;
		}

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_obj;
		}

		GEM_BUG_ON(vma->size != BIT_ULL(size));

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
				       __func__,
				       addr, BIT_ULL(size),
				       hole_start, hole_end,
				       err);
				goto err;
			}

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, BIT_ULL(size));
				i915_vma_unpin(vma);
				err = i915_vma_unbind(vma);
				err = -EINVAL;
				goto err;
			}

			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			GEM_BUG_ON(err);

			if (igt_timeout(end_time,
					"%s timed out after %d/%d\n",
					__func__, n, count)) {
				err = -EINTR;
				goto err;
			}
		}

err:
		if (!i915_vma_is_ggtt(vma))
			i915_vma_close(vma);
err_obj:
		i915_gem_object_put(obj);
		kfree(order);
		if (err)
			return err;
	}

	return 0;
}

static int __shrink_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
{
	struct drm_i915_gem_object *obj;
	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
	unsigned int order = 12;
	LIST_HEAD(objects);
	int err = 0;
	u64 addr;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (addr = hole_start; addr < hole_end; ) {
		struct i915_vma *vma;
		u64 size = BIT_ULL(order++);

		size = min(size, hole_end - addr);
		obj = fake_dma_object(i915, size);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			break;
		}

		GEM_BUG_ON(vma->size != size);

		err = i915_vma_pin(vma, 0, 0, addr | flags);
		if (err) {
			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
			       __func__, addr, size, hole_start, hole_end, err);
			break;
		}

		if (!drm_mm_node_allocated(&vma->node) ||
		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
			pr_err("%s incorrect at %llx + %llx\n",
			       __func__, addr, size);
			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			err = -EINVAL;
			break;
		}

		i915_vma_unpin(vma);
		addr += size;

		if (igt_timeout(end_time,
				"%s timed out at offset %llx [%llx - %llx]\n",
				__func__, addr, hole_start, hole_end)) {
			err = -EINTR;
			break;
		}
	}

	close_object_list(&objects, vm);
	return err;
}

static int shrink_hole(struct drm_i915_private *i915,
		       struct i915_address_space *vm,
		       u64 hole_start, u64 hole_end,
		       unsigned long end_time)
{
	unsigned long prime;
	int err;

	vm->fault_attr.probability = 999;
	atomic_set(&vm->fault_attr.times, -1);

	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
		vm->fault_attr.interval = prime;
		err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
		if (err)
			break;
	}

	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));

	return err;
}

static int exercise_ppgtt(struct drm_i915_private *dev_priv,
			  int (*func)(struct drm_i915_private *i915,
				      struct i915_address_space *vm,
				      u64 hole_start, u64 hole_end,
				      unsigned long end_time))
{
	struct drm_file *file;
	struct i915_hw_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);
	int err;

	if (!USES_FULL_PPGTT(dev_priv))
		return 0;

	file = mock_file(dev_priv);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&dev_priv->drm.struct_mutex);
	ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv, "mock");
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_unlock;
	}
	GEM_BUG_ON(offset_in_page(ppgtt->base.total));
	GEM_BUG_ON(ppgtt->base.closed);

	err = func(dev_priv, &ppgtt->base, 0, ppgtt->base.total, end_time);

	i915_ppgtt_close(&ppgtt->base);
	i915_ppgtt_put(ppgtt);
out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	mock_file_free(dev_priv, file);
	return err;
}

static int igt_ppgtt_fill(void *arg)
{
	return exercise_ppgtt(arg, fill_hole);
}

static int igt_ppgtt_walk(void *arg)
{
	return exercise_ppgtt(arg, walk_hole);
}

static int igt_ppgtt_pot(void *arg)
{
	return exercise_ppgtt(arg, pot_hole);
}

static int igt_ppgtt_drunk(void *arg)
{
	return exercise_ppgtt(arg, drunk_hole);
}

static int igt_ppgtt_lowlevel(void *arg)
{
	return exercise_ppgtt(arg, lowlevel_hole);
}

static int igt_ppgtt_shrink(void *arg)
{
	return exercise_ppgtt(arg, shrink_hole);
}

static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
{
	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);

	if (a->start < b->start)
		return -1;
	else
		return 1;
}

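/*
 * exercise_ggtt() walks the live GGTT rather than a freshly created ppGTT,
 * so it has to discover the free space itself: the drm_mm hole list is
 * sorted by address (sort_holes) and re-walked from the last offset after
 * every exercise, since binding and unbinding inside func() rearranges the
 * hole list underneath us.
 */
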
static int exercise_ggtt(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	u64 hole_start, hole_end, last = 0;
	struct drm_mm_node *node;
	IGT_TIMEOUT(end_time);
	int err = 0;

	mutex_lock(&i915->drm.struct_mutex);
restart:
	list_sort(NULL, &ggtt->base.mm.hole_stack, sort_holes);
	drm_mm_for_each_hole(node, &ggtt->base.mm, hole_start, hole_end) {
		if (hole_start < last)
			continue;

		if (ggtt->base.mm.color_adjust)
			ggtt->base.mm.color_adjust(node, 0,
						   &hole_start, &hole_end);
		if (hole_start >= hole_end)
			continue;

		err = func(i915, &ggtt->base, hole_start, hole_end, end_time);
		if (err)
			break;

		/* As we have manipulated the drm_mm, the list may be corrupt */
		last = hole_end;
		goto restart;
	}
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}

static int igt_ggtt_fill(void *arg)
{
	return exercise_ggtt(arg, fill_hole);
}

static int igt_ggtt_walk(void *arg)
{
	return exercise_ggtt(arg, walk_hole);
}

static int igt_ggtt_pot(void *arg)
{
	return exercise_ggtt(arg, pot_hole);
}

static int igt_ggtt_drunk(void *arg)
{
	return exercise_ggtt(arg, drunk_hole);
}

static int igt_ggtt_lowlevel(void *arg)
{
	return exercise_ggtt(arg, lowlevel_hole);
}

static int igt_ggtt_page(void *arg)
{
	const unsigned int count = PAGE_SIZE/sizeof(u32);
	I915_RND_STATE(prng);
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node tmp;
	unsigned int *order, n;
	int err;

	mutex_lock(&i915->drm.struct_mutex);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_unlock;
	}

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out_free;

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp,
					  1024 * PAGE_SIZE, 0,
					  I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);
	if (err)
		goto out_unpin;

	order = i915_random_order(count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto out_remove;
	}

	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;

		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, 0),
				       offset, I915_CACHE_NONE, 0);

		vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
		iowrite32(n, vaddr + n);
		io_mapping_unmap_atomic(vaddr);

		ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);
	}

	i915_random_reorder(order, count, &prng);
	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;
		u32 val;

		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, 0),
				       offset, I915_CACHE_NONE, 0);

		vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
		val = ioread32(vaddr + n);
		io_mapping_unmap_atomic(vaddr);

		ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);

		if (val != n) {
			pr_err("insert page failed: found %d, expected %d\n",
			       val, n);
			err = -EINVAL;
			break;
		}
	}

	kfree(order);
out_remove:
	drm_mm_remove_node(&tmp);
out_unpin:
	i915_gem_object_unpin_pages(obj);
out_free:
	i915_gem_object_put(obj);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static void track_vma_bind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	obj->bind_count++; /* track for eviction later */
	__i915_gem_object_pin_pages(obj);

	vma->pages = obj->mm.pages;
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
}

static int exercise_mock(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	struct i915_gem_context *ctx;
	struct i915_hw_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);
	int err;

	ctx = mock_context(i915, "mock");
	if (!ctx)
		return -ENOMEM;

	ppgtt = ctx->ppgtt;
	GEM_BUG_ON(!ppgtt);

	err = func(i915, &ppgtt->base, 0, ppgtt->base.total, end_time);

	mock_context_close(ctx);
	return err;
}

static int igt_mock_fill(void *arg)
{
	return exercise_mock(arg, fill_hole);
}

static int igt_mock_walk(void *arg)
{
	return exercise_mock(arg, walk_hole);
}

static int igt_mock_pot(void *arg)
{
	return exercise_mock(arg, pot_hole);
}

static int igt_mock_drunk(void *arg)
{
	return exercise_mock(arg, drunk_hole);
}

static int igt_gtt_reserve(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *on;
	LIST_HEAD(objects);
	u64 total;
	int err;

	/* i915_gem_gtt_reserve() tries to reserve the precise range
	 * for the node, and evicts if it has to. So our test checks that
	 * it can give us the requested space and prevent overlaps.
	 */

	/* Start by filling the GGTT */
	for (total = 0;
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
					   obj->base.size,
					   total,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

	/* Now we start forcing evictions */
	for (total = I915_GTT_PAGE_SIZE;
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
					   obj->base.size,
					   total,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

	/* And then try at random */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;
		u64 offset;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_vma_unbind(vma);
		if (err) {
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
			goto out;
		}

		offset = random_offset(0, i915->ggtt.base.total,
				       2*I915_GTT_PAGE_SIZE,
				       I915_GTT_MIN_ALIGNMENT);

		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
					   obj->base.size,
					   offset,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       offset, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

out:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);
	}
	return err;
}

static int igt_gtt_insert(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *on;
	struct drm_mm_node tmp = {};
	const struct invalid_insert {
		u64 size;
		u64 alignment;
		u64 start, end;
	} invalid_insert[] = {
		{
			i915->ggtt.base.total + I915_GTT_PAGE_SIZE, 0,
			0, i915->ggtt.base.total,
		},
		{
			2*I915_GTT_PAGE_SIZE, 0,
			0, I915_GTT_PAGE_SIZE,
		},
		{
			-(u64)I915_GTT_PAGE_SIZE, 0,
			0, 4*I915_GTT_PAGE_SIZE,
		},
		{
			-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
			0, 4*I915_GTT_PAGE_SIZE,
		},
		{
			I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
			I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
		},
		{}
	}, *ii;
	LIST_HEAD(objects);
	u64 total;
	int err;

	/* i915_gem_gtt_insert() tries to allocate some free space in the GTT
	 * for the node, evicting if required.
	 */

	/* Check a couple of obviously invalid requests */
	for (ii = invalid_insert; ii->size; ii++) {
		err = i915_gem_gtt_insert(&i915->ggtt.base, &tmp,
					  ii->size, ii->alignment,
					  I915_COLOR_UNEVICTABLE,
					  ii->start, ii->end,
					  0);
		if (err != -ENOSPC) {
			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
			       ii->size, ii->alignment, ii->start, ii->end,
			       err);
			return -EINVAL;
		}
	}

	/* Start by filling the GGTT */
	for (total = 0;
	     total + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.base.total,
					  0);
		if (err == -ENOSPC) {
			/* maxed out the GGTT space */
			i915_gem_object_put(obj);
			break;
		}
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);
		__i915_vma_pin(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	}

	list_for_each_entry(obj, &objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		if (!drm_mm_node_allocated(&vma->node)) {
			pr_err("VMA was unexpectedly evicted!\n");
			err = -EINVAL;
			goto out;
		}

		__i915_vma_unpin(vma);
	}

	/* If we then reinsert, we should find the same hole */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;
		u64 offset;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		offset = vma->node.start;

		err = i915_vma_unbind(vma);
		if (err) {
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
			goto out;
		}

		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.base.total,
					  0);
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset) {
			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
			       offset, vma->node.start);
			err = -EINVAL;
			goto out;
		}
	}

	/* And then force evictions */
	for (total = 0;
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.base.total,
					  0);
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	}

out:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);
	}
	return err;
}

int i915_gem_gtt_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_drunk),
		SUBTEST(igt_mock_walk),
		SUBTEST(igt_mock_pot),
		SUBTEST(igt_mock_fill),
		SUBTEST(igt_gtt_reserve),
		SUBTEST(igt_gtt_insert),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_subtests(tests, i915);
	mutex_unlock(&i915->drm.struct_mutex);

	drm_dev_unref(&i915->drm);
	return err;
}

int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_ppgtt_alloc),
		SUBTEST(igt_ppgtt_lowlevel),
		SUBTEST(igt_ppgtt_drunk),
		SUBTEST(igt_ppgtt_walk),
		SUBTEST(igt_ppgtt_pot),
		SUBTEST(igt_ppgtt_fill),
		SUBTEST(igt_ppgtt_shrink),
		SUBTEST(igt_ggtt_lowlevel),
		SUBTEST(igt_ggtt_drunk),
		SUBTEST(igt_ggtt_walk),
		SUBTEST(igt_ggtt_pot),
		SUBTEST(igt_ggtt_fill),
		SUBTEST(igt_ggtt_page),
	};

	GEM_BUG_ON(offset_in_page(i915->ggtt.base.total));

	return i915_subtests(tests, i915);
}