/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_vma.h"

#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"

#include <drm/drm_gem.h>

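/*
 * i915_vma_retire() is the i915_gem_active callback run when the last
 * request using this VMA on a given engine is retired. Once the VMA is
 * idle on all engines it moves to the VM's inactive list, a closed and
 * unpinned VMA is unbound, and, when the object itself goes idle, its
 * place on the bound list is bumped and any deferred active reference on
 * it is released.
 */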
static void
i915_vma_retire(struct i915_gem_active *active,
		struct drm_i915_gem_request *rq)
{
	const unsigned int idx = rq->engine->id;
	struct i915_vma *vma =
		container_of(active, struct i915_vma, last_read[idx]);
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));

	i915_vma_clear_active(vma, idx);
	if (i915_vma_is_active(vma))
		return;

	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
		WARN_ON(i915_vma_unbind(vma));

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to, of course!)
	 */
	if (obj->bind_count)
		list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);

	obj->mm.dirty = true; /* be paranoid */

	if (i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_clear_active_reference(obj);
		i915_gem_object_put(obj);
	}
}

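/*
 * __i915_vma_create() allocates a zeroed VMA from the dedicated slab,
 * derives its size from the GGTT view (partial and rotated views differ
 * from the object's size), takes a ppGTT reference for non-GGTT address
 * spaces, and links the VMA into the object's vma_list and rb-tree, the
 * latter ordered by i915_vma_compare() over the (vm, view) key.
 */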
static struct i915_vma *
__i915_vma_create(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;
	int i;

	GEM_BUG_ON(vm->closed);

	vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&vma->exec_list);
	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
		init_request_active(&vma->last_read[i], i915_vma_retire);
	init_request_active(&vma->last_fence, NULL);
	list_add(&vma->vm_link, &vm->unbound_list);
	vma->vm = vm;
	vma->obj = obj;
	vma->size = obj->base.size;

	if (view) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			vma->size = view->params.partial.size;
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size =
				intel_rotation_info_size(&view->params.rotated);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (i915_is_ggtt(vm)) {
		vma->flags |= I915_VMA_GGTT;
		list_add(&vma->obj_link, &obj->vma_list);
	} else {
		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
		list_add_tail(&vma->obj_link, &obj->vma_list);
	}

	rb = NULL;
	p = &obj->vma_tree.rb_node;
	while (*p) {
		struct i915_vma *pos;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);
		if (i915_vma_compare(pos, vm, view) < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma_tree);

	return vma;
}

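/*
 * i915_vma_create() is the locked entry point for instantiating a new VMA.
 * Views only make sense in the global GTT, and the caller must have
 * verified that no VMA already exists for this (obj, vm, view) triplet,
 * e.g. (illustrative sketch):
 *
 *	vma = i915_gem_obj_to_vma(obj, vm, view);
 *	if (!vma)
 *		vma = i915_vma_create(obj, vm, view);
 */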
struct i915_vma *
i915_vma_create(struct drm_i915_gem_object *obj,
		struct i915_address_space *vm,
		const struct i915_ggtt_view *view)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(i915_gem_obj_to_vma(obj, vm, view));

	return __i915_vma_create(obj, vm, view);
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	if (WARN_ON(flags == 0))
		return -EINVAL;

	bind_flags = 0;
	if (flags & PIN_GLOBAL)
		bind_flags |= I915_VMA_GLOBAL_BIND;
	if (flags & PIN_USER)
		bind_flags |= I915_VMA_LOCAL_BIND;

	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	if (GEM_WARN_ON(vma->node.start + vma->node.size < vma->node.start))
		return -ENODEV;

	if (GEM_WARN_ON(vma->node.start + vma->node.size > vma->vm->total))
		return -ENODEV;

	if (vma_flags == 0 && vma->vm->allocate_va_range) {
		trace_i915_va_alloc(vma);
		ret = vma->vm->allocate_va_range(vma->vm,
						 vma->node.start,
						 vma->node.size);
		if (ret)
			return ret;
	}

	ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
	if (ret)
		return ret;

	vma->flags |= bind_flags;
	return 0;
}

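/*
 * i915_vma_pin_iomap() returns a write-combining mapping of the VMA's GGTT
 * range and pins the VMA so the mapping remains valid; the mapping itself
 * is cached in vma->iomap and only released at unbind time. A typical
 * usage pattern (illustrative sketch; the matching unpin helper lives in
 * i915_vma.h):
 *
 *	void __iomem *ptr = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	...write through ptr...
 *	i915_vma_unpin_iomap(vma);
 */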
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;

	/* Access through the GTT requires the device to be awake. */
	assert_rpm_wakelock_held(vma->vm->i915);

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
		return IO_ERR_PTR(-ENODEV);

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

	ptr = vma->iomap;
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL)
			return IO_ERR_PTR(-ENOMEM);

		vma->iomap = ptr;
	}

	__i915_vma_pin(vma);
	return ptr;
}

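/*
 * i915_vma_unpin_and_release() consumes *@p_vma: it zeroes the caller's
 * pointer via fetch_and_zero(), drops the caller's pin, closes the VMA and
 * then releases the reference on the backing object (deferred while the
 * object is still active).
 */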
void i915_vma_unpin_and_release(struct i915_vma **p_vma)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	__i915_gem_object_release_unless_active(obj);
}

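/*
 * i915_vma_misplaced() checks the VMA's current node against the size,
 * alignment and PIN_MAPPABLE/PIN_OFFSET_* constraints of a new pin
 * request; a VMA that no longer fits must be unbound and rebound
 * elsewhere before the pin can succeed.
 */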
bool
i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (vma->node.size < size)
		return true;

	if (alignment && vma->node.start & (alignment - 1))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

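/*
 * __i915_vma_set_map_and_fenceable() recomputes I915_VMA_CAN_FENCE after
 * (re)binding: a fence may only be used if the node matches the exact
 * fence size and alignment for the object's tiling and lies entirely
 * within the mappable aperture; rotated views never take a fence.
 */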
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	bool mappable, fenceable;
	u32 fence_size, fence_alignment;

	fence_size = i915_gem_get_ggtt_size(dev_priv,
					    vma->size,
					    i915_gem_object_get_tiling(obj));
	fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
						      vma->size,
						      i915_gem_object_get_tiling(obj),
						      true);

	fenceable = (vma->node.size == fence_size &&
		     (vma->node.start & (fence_alignment - 1)) == 0);

	mappable = (vma->node.start + fence_size <=
		    dev_priv->ggtt.mappable_end);

	/*
	 * Explicitly disable for rotated VMA since the display does not
	 * need the fence and the VMA is not accessible to other users.
	 */
	if (mappable && fenceable &&
	    vma->ggtt_view.type != I915_GGTT_VIEW_ROTATED)
		vma->flags |= I915_VMA_CAN_FENCE;
	else
		vma->flags &= ~I915_VMA_CAN_FENCE;
}

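/*
 * When a vm sets drm_mm.color_adjust, each node is "coloured" with its
 * cache level; color_differs() spots neighbouring nodes whose cache level
 * conflicts with ours.
 */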
static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
	return node->allocated && node->color != color;
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (color_differs(other, cache_level) && !other->hole_follows)
		return false;

	other = list_next_entry(node, node_list);
	if (color_differs(other, cache_level) && !node->hole_follows)
		return false;

	return true;
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *dev_priv = vma->vm->i915;
	struct drm_i915_gem_object *obj = vma->obj;
	u64 start, end;
	int ret;

	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	if (flags & PIN_MAPPABLE)
		size = i915_gem_get_ggtt_size(dev_priv, size,
					      i915_gem_object_get_tiling(obj));

	alignment = max(max(alignment, vma->display_alignment),
			i915_gem_get_ggtt_alignment(dev_priv, size,
						    i915_gem_object_get_tiling(obj),
						    flags & PIN_MAPPABLE));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
			  size, obj->base.size,
			  flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -E2BIG;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;

		if (offset & (alignment - 1) || offset > end - size) {
			ret = -EINVAL;
			goto err_unpin;
		}

		vma->node.start = offset;
		vma->node.size = size;
		vma->node.color = obj->cache_level;
		ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
		if (ret) {
			ret = i915_gem_evict_for_vma(vma, flags);
			if (ret == 0)
				ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
			if (ret)
				goto err_unpin;
		}
	} else {
		u32 search_flag, alloc_flag;

		if (flags & PIN_HIGH) {
			search_flag = DRM_MM_SEARCH_BELOW;
			alloc_flag = DRM_MM_CREATE_TOP;
		} else {
			search_flag = DRM_MM_SEARCH_DEFAULT;
			alloc_flag = DRM_MM_CREATE_DEFAULT;
		}

		/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
		 * so we know that we always have a minimum alignment of 4096.
		 * The drm_mm range manager is optimised to return results
		 * with zero alignment, so where possible use the optimal
		 * path.
		 */
		if (alignment <= 4096)
			alignment = 0;

search_free:
		ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
							  &vma->node,
							  size, alignment,
							  obj->cache_level,
							  start, end,
							  search_flag,
							  alloc_flag);
		if (ret) {
			ret = i915_gem_evict_something(vma->vm, size, alignment,
						       obj->cache_level,
						       start, end,
						       flags);
			if (ret == 0)
				goto search_free;

			goto err_unpin;
		}

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));

	list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	obj->bind_count++;
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);

	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

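/*
 * __i915_vma_do_pin() is the slow path behind the i915_vma_pin() inline:
 * the caller has already incremented the pin count, so the error path must
 * drop it again with __i915_vma_unpin(). On first bind the node is
 * inserted into the address space, then the requested global/local
 * mappings are (re)bound, and the map-and-fenceable flag is refreshed
 * whenever the global binding changes.
 */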
int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
{
	unsigned int bound = vma->flags;
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
		goto err;
	}

	if ((bound & I915_VMA_BIND_MASK) == 0) {
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
			goto err;
	}

	ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
	if (ret)
		goto err;

	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
		__i915_vma_set_map_and_fenceable(vma);

	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
	return 0;

err:
	__i915_vma_unpin(vma);
	return ret;
}

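/*
 * i915_vma_destroy() frees a VMA that is idle, unbound and closed,
 * dropping the ppGTT reference taken at creation for non-GGTT VMAs.
 */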
void i915_vma_destroy(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->node.allocated);
	GEM_BUG_ON(i915_vma_is_active(vma));
	GEM_BUG_ON(!i915_vma_is_closed(vma));
	GEM_BUG_ON(vma->fence);

	list_del(&vma->vm_link);
	if (!i915_vma_is_ggtt(vma))
		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));

	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}

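/*
 * i915_vma_close() detaches the VMA from its object's vma_list and rb-tree
 * and marks it closed; if the VMA is idle and unpinned it is unbound (and
 * thus destroyed) immediately, otherwise i915_vma_retire() finishes the
 * job once the last request completes.
 */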
void i915_vma_close(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_closed(vma));
	vma->flags |= I915_VMA_CLOSED;

	list_del(&vma->obj_link);
	rb_erase(&vma->obj_node, &vma->obj->vma_tree);

	if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
		WARN_ON(i915_vma_unbind(vma));
}

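/*
 * Tear down the cached WC mapping created by i915_vma_pin_iomap(), if any;
 * only legal once the VMA is completely unpinned.
 */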
static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

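/*
 * i915_vma_unbind() first waits for the VMA to idle (pinning it so a
 * recursive unbind from the retire callback is aborted), then releases the
 * fence and WC mapping, removes the PTEs and drm_mm node, frees any
 * view-specific sg_table, and drops the page pin taken at insert time.
 * Returns -EBUSY if the VMA is still pinned by someone else.
 */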
int i915_vma_unbind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	unsigned long active;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 */
	active = i915_vma_get_active(vma);
	if (active) {
		int idx;

		/* When a closed VMA is retired, it is unbound - eek.
		 * In order to prevent it from being recursively closed,
		 * take a pin on the vma so that the second unbind is
		 * aborted.
		 *
		 * Even more scary is that the retire callback may free
		 * the object (last active vma). To prevent the explosion
		 * we defer the actual object free to a worker that can
		 * only proceed once it acquires the struct_mutex (which
		 * we currently hold, therefore it cannot free this object
		 * before we are finished).
		 */
		__i915_vma_pin(vma);

		for_each_active(active, idx) {
			ret = i915_gem_active_retire(&vma->last_read[idx],
						     &vma->vm->i915->drm.struct_mutex);
			if (ret)
				break;
		}

		__i915_vma_unpin(vma);
		if (ret)
			return ret;

		GEM_BUG_ON(i915_vma_is_active(vma));
	}

	if (i915_vma_is_pinned(vma))
		return -EBUSY;

	if (!drm_mm_node_allocated(&vma->node))
		goto destroy;

	GEM_BUG_ON(obj->bind_count == 0);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* release the fence reg _after_ flushing */
		ret = i915_vma_put_fence(vma);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		i915_gem_release_mmap(obj);

		__i915_vma_iounmap(vma);
		vma->flags &= ~I915_VMA_CAN_FENCE;
	}

	if (likely(!vma->vm->closed)) {
		trace_i915_vma_unbind(vma);
		vma->vm->unbind_vma(vma);
	}
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

	if (vma->pages != obj->mm.pages) {
		GEM_BUG_ON(!vma->pages);
		sg_free_table(vma->pages);
		kfree(vma->pages);
	}
	vma->pages = NULL;

	/* Since the unbound list is global, only move to that list if
	 * no more VMAs exist.
	 */
	if (--obj->bind_count == 0)
		list_move_tail(&obj->global_link,
			       &to_i915(obj->base.dev)->mm.unbound_list);

	/* And finally now the object is completely decoupled from this vma,
	 * we can drop its hold on the backing storage and allow it to be
	 * reaped by the shrinker.
	 */
	i915_gem_object_unpin_pages(obj);

destroy:
	if (unlikely(i915_vma_is_closed(vma)))
		i915_vma_destroy(vma);

	return 0;
}
646 |