/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_vma.h"

#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"

#include <drm/drm_gem.h>

static void
i915_vma_retire(struct i915_gem_active *active,
		struct drm_i915_gem_request *rq)
{
	const unsigned int idx = rq->engine->id;
	struct i915_vma *vma =
		container_of(active, struct i915_vma, last_read[idx]);
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));

	i915_vma_clear_active(vma, idx);
	if (i915_vma_is_active(vma))
		return;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
		WARN_ON(i915_vma_unbind(vma));

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Prune the shared fence arrays iff completely idle (inc. external) */
	if (reservation_object_trylock(obj->resv)) {
		if (reservation_object_test_signaled_rcu(obj->resv, true))
			reservation_object_add_excl_fence(obj->resv, NULL);
		reservation_object_unlock(obj->resv);
	}

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to ofc!)
	 */
	spin_lock(&rq->i915->mm.obj_lock);
	if (obj->bind_count)
		list_move_tail(&obj->mm.link, &rq->i915->mm.bound_list);
	spin_unlock(&rq->i915->mm.obj_lock);

	obj->mm.dirty = true; /* be paranoid */

	if (i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_clear_active_reference(obj);
		i915_gem_object_put(obj);
	}
}

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;
	int i;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);

	vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
		init_request_active(&vma->last_read[i], i915_vma_retire);
	init_request_active(&vma->last_fence, NULL);
	vma->vm = vm;
	vma->obj = obj;
	vma->resv = obj->resv;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size >= obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_vma;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_vma;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		vma->flags |= I915_VMA_GGTT;
		list_add(&vma->obj_link, &obj->vma_list);
	} else {
		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
		list_add_tail(&vma->obj_link, &obj->vma_list);
	}

	rb = NULL;
	p = &obj->vma_tree.rb_node;
	while (*p) {
		struct i915_vma *pos;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);
		if (i915_vma_compare(pos, vm, view) < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma_tree);
	list_add(&vma->vm_link, &vm->unbound_list);

	return vma;

err_vma:
	kmem_cache_free(vm->i915->vmas, vma);
	return ERR_PTR(-E2BIG);
}

static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma_tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Must be called with struct_mutex held.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);
	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(vm->closed);

	vma = vma_lookup(obj, vm, view);
	if (!vma)
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_is_closed(vma));
	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	GEM_BUG_ON(!IS_ERR(vma) && vma_lookup(obj, vm, view) != vma);
	return vma;
}

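/*
 * Usage sketch (illustrative only, not called from here): a caller that
 * already holds struct_mutex might look up the default GGTT vma of an
 * object and pin it in one go. The dev_priv/ggtt names and the PIN_*
 * flags below are assumptions for the example, not requirements of
 * i915_vma_instance() itself.
 *
 *	vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE);
 *	if (err)
 *		return err;
 */
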
/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_WARN_ON(range_overflows(vma->node.start,
					vma->node.size,
					vma->vm->total)))
		return -ENODEV;

	if (GEM_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = 0;
	if (flags & PIN_GLOBAL)
		bind_flags |= I915_VMA_GLOBAL_BIND;
	if (flags & PIN_USER)
		bind_flags |= I915_VMA_LOCAL_BIND;

	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	GEM_BUG_ON(!vma->pages);

	trace_i915_vma_bind(vma, bind_flags);
	ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
	if (ret)
		return ret;

	vma->flags |= bind_flags;
	return 0;
}

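/*
 * Example (sketch): rebinding an already-inserted vma into the per-process
 * address space only, reusing the object's current cache level. PIN_USER
 * translates to I915_VMA_LOCAL_BIND above; this mirrors what
 * __i915_vma_do_pin() passes down and is shown purely for illustration.
 *
 *	err = i915_vma_bind(vma, vma->obj->cache_level, PIN_USER);
 *	if (err)
 *		return err;
 */
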
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	/* Access through the GTT requires the device to be awake. */
	assert_rpm_wakelock_held(vma->vm->i915);

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
		err = -ENODEV;
		goto err;
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

	ptr = vma->iomap;
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		vma->iomap = ptr;
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}

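/*
 * Typical pairing (sketch): map the vma through the GTT aperture, write to
 * it, then drop the mapping. The vma must already be bound in the GGTT and
 * the device awake; "offset" and "value" are hypothetical names used only
 * for this example.
 *
 *	ptr = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *
 *	writel(value, ptr + offset);
 *	i915_vma_unpin_iomap(vma);
 */
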
void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->obj->base.dev->struct_mutex);

	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}

void i915_vma_unpin_and_release(struct i915_vma **p_vma)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	__i915_gem_object_release_unless_active(obj);
}

bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	/*
	 * Explicitly disable for rotated VMA since the display does not
	 * need the fence and the VMA is not accessible to other users.
	 */
	if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
		return;

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		vma->flags |= I915_VMA_CAN_FENCE;
	else
		vma->flags &= ~I915_VMA_CAN_FENCE;
}

static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
	return node->allocated && node->color != color;
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
		return false;

	return true;
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma to insert
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *dev_priv = vma->vm->i915;
	struct drm_i915_gem_object *obj = vma->obj;
	u64 start, end;
	int ret;

	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
			  size, obj->base.size,
			  flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	GEM_BUG_ON(vma->pages);

	ret = vma->vm->set_pages(vma);
	if (ret)
		goto err_unpin;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;

		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end)) {
			ret = -EINVAL;
			goto err_clear;
		}

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, obj->cache_level,
					   flags);
		if (ret)
			goto err_clear;
	} else {
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume that GGTT are limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, obj->cache_level,
					  start, end, flags);
		if (ret)
			goto err_clear;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));

	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);

	spin_lock(&dev_priv->mm.obj_lock);
	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
	obj->bind_count++;
	spin_unlock(&dev_priv->mm.obj_lock);

	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);

	return 0;

err_clear:
	vma->vm->clear_pages(vma);
err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

static void
i915_vma_remove(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	vma->vm->clear_pages(vma);

	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

	/* Since the unbound list is global, only move to that list if
	 * no more VMAs exist.
	 */
	spin_lock(&i915->mm.obj_lock);
	if (--obj->bind_count == 0)
		list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
	spin_unlock(&i915->mm.obj_lock);

	/* And finally now the object is completely decoupled from this vma,
	 * we can drop its hold on the backing storage and allow it to be
	 * reaped by the shrinker.
	 */
	i915_gem_object_unpin_pages(obj);
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
}

int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
{
	const unsigned int bound = vma->flags;
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
		goto err_unpin;
	}

	if ((bound & I915_VMA_BIND_MASK) == 0) {
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
			goto err_unpin;
	}

	ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
	if (ret)
		goto err_remove;

	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
		__i915_vma_set_map_and_fenceable(vma);

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
	return 0;

err_remove:
	if ((bound & I915_VMA_BIND_MASK) == 0) {
		i915_vma_remove(vma);
		GEM_BUG_ON(vma->pages);
	}
err_unpin:
	__i915_vma_unpin(vma);
	return ret;
}

static void i915_vma_destroy(struct i915_vma *vma)
{
	int i;

	GEM_BUG_ON(vma->node.allocated);
	GEM_BUG_ON(i915_vma_is_active(vma));
	GEM_BUG_ON(!i915_vma_is_closed(vma));
	GEM_BUG_ON(vma->fence);

	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
		GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i]));
	GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));

	list_del(&vma->vm_link);
	if (!i915_vma_is_ggtt(vma))
		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));

	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}

void i915_vma_close(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_closed(vma));
	vma->flags |= I915_VMA_CLOSED;

	list_del(&vma->obj_link);
	rb_erase(&vma->obj_node, &vma->obj->vma_tree);

	if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
		WARN_ON(i915_vma_unbind(vma));
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

void i915_vma_revoke_mmap(struct i915_vma *vma)
{
	struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
	u64 vma_offset;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (!i915_vma_has_userfault(vma))
		return;

	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	GEM_BUG_ON(!vma->obj->userfault_count);

	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
			    drm_vma_node_offset_addr(node) + vma_offset,
			    vma->size,
			    1);

	i915_vma_unset_userfault(vma);
	if (!--vma->obj->userfault_count)
		list_del(&vma->obj->userfault_link);
}

int i915_vma_unbind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	unsigned long active;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 */
	active = i915_vma_get_active(vma);
	if (active) {
		int idx;

		/* When a closed VMA is retired, it is unbound - eek.
		 * In order to prevent it from being recursively closed,
		 * take a pin on the vma so that the second unbind is
		 * aborted.
		 *
		 * Even more scary is that the retire callback may free
		 * the object (last active vma). To prevent the explosion
		 * we defer the actual object free to a worker that can
		 * only proceed once it acquires the struct_mutex (which
		 * we currently hold, therefore it cannot free this object
		 * before we are finished).
		 */
		__i915_vma_pin(vma);

		for_each_active(active, idx) {
			ret = i915_gem_active_retire(&vma->last_read[idx],
						     &vma->vm->i915->drm.struct_mutex);
			if (ret)
				break;
		}

		if (!ret)
			ret = i915_gem_active_retire(&vma->last_fence,
						     &vma->vm->i915->drm.struct_mutex);

		__i915_vma_unpin(vma);
		if (ret)
			return ret;
	}
	GEM_BUG_ON(i915_vma_is_active(vma));

	if (i915_vma_is_pinned(vma))
		return -EBUSY;

	if (!drm_mm_node_allocated(&vma->node))
		goto destroy;

	GEM_BUG_ON(obj->bind_count == 0);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* release the fence reg _after_ flushing */
		ret = i915_vma_put_fence(vma);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		__i915_vma_iounmap(vma);
		vma->flags &= ~I915_VMA_CAN_FENCE;
	}
	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	if (likely(!vma->vm->closed)) {
		trace_i915_vma_unbind(vma);
		vma->vm->unbind_vma(vma);
	}
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

	i915_vma_remove(vma);

destroy:
	if (unlikely(i915_vma_is_closed(vma)))
		i915_vma_destroy(vma);

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif