/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_vma.h"

#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"

#include <drm/drm_gem.h>

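/*
 * Retirement callback for the vma's per-engine last_read trackers. When the
 * last request using this vma on an engine retires, the vma moves back to
 * its address space's inactive list; if the vma has already been closed and
 * is unpinned, it is unbound right away.
 */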
static void
i915_vma_retire(struct i915_gem_active *active,
		struct drm_i915_gem_request *rq)
{
	const unsigned int idx = rq->engine->id;
	struct i915_vma *vma =
		container_of(active, struct i915_vma, last_read[idx]);
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));

	i915_vma_clear_active(vma, idx);
	if (i915_vma_is_active(vma))
		return;

	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
		WARN_ON(i915_vma_unbind(vma));

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to ofc!)
	 */
	if (obj->bind_count)
		list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);

	obj->mm.dirty = true; /* be paranoid */

	if (i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_clear_active_reference(obj);
		i915_gem_object_put(obj);
	}
}

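/*
 * Allocate and initialise a new vma for (obj, vm, view): set up its request
 * trackers and size, link it into the object's vma list and rb-tree, and
 * place it on the vm's unbound list. Callers are expected to have verified
 * that no matching vma already exists.
 */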
static struct i915_vma *
__i915_vma_create(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;
	int i;

	GEM_BUG_ON(vm->closed);

	vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&vma->exec_list);
	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
		init_request_active(&vma->last_read[i], i915_vma_retire);
	init_request_active(&vma->last_fence, NULL);
	list_add(&vma->vm_link, &vm->unbound_list);
	vma->vm = vm;
	vma->obj = obj;
	vma->size = obj->base.size;

	if (view) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			vma->size = view->params.partial.size;
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size =
				intel_rotation_info_size(&view->params.rotated);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (i915_is_ggtt(vm)) {
		vma->flags |= I915_VMA_GGTT;
		list_add(&vma->obj_link, &obj->vma_list);
	} else {
		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
		list_add_tail(&vma->obj_link, &obj->vma_list);
	}

	rb = NULL;
	p = &obj->vma_tree.rb_node;
	while (*p) {
		struct i915_vma *pos;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);
		if (i915_vma_compare(pos, vm, view) < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma_tree);

	return vma;
}

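/*
 * i915_vma_create() is the locked entry point: it asserts that struct_mutex
 * is held and that no vma already exists for this (obj, vm, view) tuple
 * before handing off to __i915_vma_create().
 */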
struct i915_vma *
i915_vma_create(struct drm_i915_gem_object *obj,
		struct i915_address_space *vm,
		const struct i915_ggtt_view *view)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(i915_gem_obj_to_vma(obj, vm, view));

	return __i915_vma_create(obj, vm, view);
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	if (WARN_ON(flags == 0))
		return -EINVAL;

	bind_flags = 0;
	if (flags & PIN_GLOBAL)
		bind_flags |= I915_VMA_GLOBAL_BIND;
	if (flags & PIN_USER)
		bind_flags |= I915_VMA_LOCAL_BIND;

	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	if (vma_flags == 0 && vma->vm->allocate_va_range) {
		trace_i915_va_alloc(vma);
		ret = vma->vm->allocate_va_range(vma->vm,
						 vma->node.start,
						 vma->node.size);
		if (ret)
			return ret;
	}

	ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
	if (ret)
		return ret;

	vma->flags |= bind_flags;
	return 0;
}

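/*
 * Map the vma through the GGTT aperture as a write-combined CPU mapping.
 * The caller must hold struct_mutex and a runtime-pm wakeref, and the vma
 * must already carry a global (map-and-fenceable) binding. An illustrative
 * usage sketch (error handling abbreviated), paired with the matching
 * i915_vma_unpin_iomap():
 *
 *	void __iomem *ptr = i915_vma_pin_iomap(vma);
 *	if (!IS_ERR(ptr)) {
 *		memcpy_toio(ptr, src, len);
 *		i915_vma_unpin_iomap(vma);
 *	}
 */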
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;

	/* Access through the GTT requires the device to be awake. */
	assert_rpm_wakelock_held(vma->vm->i915);

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
		return IO_ERR_PTR(-ENODEV);

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

	ptr = vma->iomap;
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL)
			return IO_ERR_PTR(-ENOMEM);

		vma->iomap = ptr;
	}

	__i915_vma_pin(vma);
	return ptr;
}

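/*
 * Drop the caller's pin and close the vma referenced by *p_vma (clearing the
 * pointer), then release the object unless it is still active on the GPU.
 */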
void i915_vma_unpin_and_release(struct i915_vma **p_vma)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	__i915_gem_object_release_unless_active(obj);
}

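/*
 * Report whether an already-bound vma fails to satisfy the requested size,
 * alignment or PIN_* placement constraints and therefore needs to be
 * rebound elsewhere.
 */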
bool
i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (vma->node.size < size)
		return true;

	if (alignment && vma->node.start & (alignment - 1))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

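/*
 * Recompute whether this GGTT vma both lies entirely within the mappable
 * aperture and meets the size/alignment requirements of a fence register,
 * updating the I915_VMA_CAN_FENCE flag accordingly.
 */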
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	bool mappable, fenceable;
	u32 fence_size, fence_alignment;

	fence_size = i915_gem_get_ggtt_size(dev_priv,
					    vma->size,
					    i915_gem_object_get_tiling(obj));
	fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
						      vma->size,
						      i915_gem_object_get_tiling(obj),
						      true);

	fenceable = (vma->node.size == fence_size &&
		     (vma->node.start & (fence_alignment - 1)) == 0);

	mappable = (vma->node.start + fence_size <=
		    dev_priv->ggtt.mappable_end);

	/*
	 * Explicitly disable for rotated VMA since the display does not
	 * need the fence and the VMA is not accessible to other users.
	 */
	if (mappable && fenceable &&
	    vma->ggtt_view.type != I915_GGTT_VIEW_ROTATED)
		vma->flags |= I915_VMA_CAN_FENCE;
	else
		vma->flags &= ~I915_VMA_CAN_FENCE;
}

static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
	return node->allocated && node->color != color;
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (color_differs(other, cache_level) && !other->hole_follows)
		return false;

	other = list_next_entry(node, node_list);
	if (color_differs(other, cache_level) && !node->hole_follows)
		return false;

	return true;
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *dev_priv = vma->vm->i915;
	struct drm_i915_gem_object *obj = vma->obj;
	u64 start, end;
	int ret;

	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	if (flags & PIN_MAPPABLE)
		size = i915_gem_get_ggtt_size(dev_priv, size,
					      i915_gem_object_get_tiling(obj));

	alignment = max(max(alignment, vma->display_alignment),
			i915_gem_get_ggtt_alignment(dev_priv, size,
						    i915_gem_object_get_tiling(obj),
						    flags & PIN_MAPPABLE));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
			  size, obj->base.size,
			  flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -E2BIG;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (offset & (alignment - 1) || offset > end - size) {
			ret = -EINVAL;
			goto err_unpin;
		}

		vma->node.start = offset;
		vma->node.size = size;
		vma->node.color = obj->cache_level;
		ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
		if (ret) {
			ret = i915_gem_evict_for_vma(vma, flags);
			if (ret == 0)
				ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
		}
		if (ret)
			goto err_unpin;
	} else {
		u32 search_flag, alloc_flag;

		if (flags & PIN_HIGH) {
			search_flag = DRM_MM_SEARCH_BELOW;
			alloc_flag = DRM_MM_CREATE_TOP;
		} else {
			search_flag = DRM_MM_SEARCH_DEFAULT;
			alloc_flag = DRM_MM_CREATE_DEFAULT;
		}

		/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
		 * so we know that we always have a minimum alignment of 4096.
		 * The drm_mm range manager is optimised to return results
		 * with zero alignment, so where possible use the optimal
		 * path.
		 */
		if (alignment <= 4096)
			alignment = 0;

search_free:
		ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
							  &vma->node,
							  size, alignment,
							  obj->cache_level,
							  start, end,
							  search_flag,
							  alloc_flag);
		if (ret) {
			ret = i915_gem_evict_something(vma->vm, size, alignment,
						       obj->cache_level,
						       start, end,
						       flags);
			if (ret == 0)
				goto search_free;

			goto err_unpin;
		}

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));

	list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	obj->bind_count++;
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);

	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

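/*
 * Slow path behind i915_vma_pin(): reserve a GTT node for the vma if it has
 * no binding yet (i915_vma_insert) and then write the PTEs (i915_vma_bind).
 * The inline i915_vma_pin() wrapper is assumed to have taken the pin
 * reference before calling in, hence the PIN_OVERFLOW check on entry.
 */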
int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
{
	unsigned int bound = vma->flags;
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
		goto err;
	}

	if ((bound & I915_VMA_BIND_MASK) == 0) {
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
			goto err;
	}

	ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
	if (ret)
		goto err;

	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
		__i915_vma_set_map_and_fenceable(vma);

	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
	return 0;

err:
	__i915_vma_unpin(vma);
	return ret;
}

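/*
 * Final teardown of a closed, idle and unbound vma: unlink it from its
 * address space, drop the ppgtt reference it holds (if any) and return it
 * to the vma slab cache.
 */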
void i915_vma_destroy(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->node.allocated);
	GEM_BUG_ON(i915_vma_is_active(vma));
	GEM_BUG_ON(!i915_vma_is_closed(vma));
	GEM_BUG_ON(vma->fence);

	list_del(&vma->vm_link);
	if (!i915_vma_is_ggtt(vma))
		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));

	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}

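/*
 * Mark the vma as closed and unlink it from its object. If it is neither
 * active nor pinned it is unbound immediately; otherwise the unbind is
 * deferred until the vma retires (see i915_vma_retire).
 */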
void i915_vma_close(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_closed(vma));
	vma->flags |= I915_VMA_CLOSED;

	list_del(&vma->obj_link);
	rb_erase(&vma->obj_node, &vma->obj->vma_tree);

	if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
		WARN_ON(i915_vma_unbind(vma));
}

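/* Tear down the CPU mapping created by i915_vma_pin_iomap(), if any. */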
static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

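/*
 * Unbind the vma from its address space: wait for any outstanding GPU
 * activity, drop the fence and CPU mapping if it was map-and-fenceable,
 * clear the PTEs, release the GTT node and, if the vma was already closed,
 * destroy it.
 */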
int i915_vma_unbind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	unsigned long active;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 */
	active = i915_vma_get_active(vma);
	if (active) {
		int idx;

		/* When a closed VMA is retired, it is unbound - eek.
		 * In order to prevent it from being recursively closed,
		 * take a pin on the vma so that the second unbind is
		 * aborted.
		 *
		 * Even more scary is that the retire callback may free
		 * the object (last active vma). To prevent the explosion
		 * we defer the actual object free to a worker that can
		 * only proceed once it acquires the struct_mutex (which
		 * we currently hold, therefore it cannot free this object
		 * before we are finished).
		 */
		__i915_vma_pin(vma);

		for_each_active(active, idx) {
			ret = i915_gem_active_retire(&vma->last_read[idx],
						     &vma->vm->i915->drm.struct_mutex);
			if (ret)
				break;
		}

		__i915_vma_unpin(vma);
		if (ret)
			return ret;

		GEM_BUG_ON(i915_vma_is_active(vma));
	}

	if (i915_vma_is_pinned(vma))
		return -EBUSY;

	if (!drm_mm_node_allocated(&vma->node))
		goto destroy;

	GEM_BUG_ON(obj->bind_count == 0);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* release the fence reg _after_ flushing */
		ret = i915_vma_put_fence(vma);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		i915_gem_release_mmap(obj);

		__i915_vma_iounmap(vma);
		vma->flags &= ~I915_VMA_CAN_FENCE;
	}

	if (likely(!vma->vm->closed)) {
		trace_i915_vma_unbind(vma);
		vma->vm->unbind_vma(vma);
	}
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

	if (vma->pages != obj->mm.pages) {
		GEM_BUG_ON(!vma->pages);
		sg_free_table(vma->pages);
		kfree(vma->pages);
	}
	vma->pages = NULL;

	/* Since the unbound list is global, only move to that list if
	 * no more VMAs exist. */
	if (--obj->bind_count == 0)
		list_move_tail(&obj->global_link,
			       &to_i915(obj->base.dev)->mm.unbound_list);

	/* And finally now the object is completely decoupled from this vma,
	 * we can drop its hold on the backing storage and allow it to be
	 * reaped by the shrinker.
	 */
	i915_gem_object_unpin_pages(obj);

destroy:
	if (unlikely(i915_vma_is_closed(vma)))
		i915_vma_destroy(vma);

	return 0;
}