/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_vma.h"

#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"

#include <drm/drm_gem.h>

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
        unsigned long entries[12];
        struct stack_trace trace = {
                .entries = entries,
                .max_entries = ARRAY_SIZE(entries),
        };
        char buf[512];

        if (!vma->node.stack) {
                DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
                                 vma->node.start, vma->node.size, reason);
                return;
        }

        depot_fetch_stack(vma->node.stack, &trace);
        snprint_stack_trace(buf, sizeof(buf), &trace, 0);
        DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
                         vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

struct i915_vma_active {
        struct i915_gem_active base;
        struct i915_vma *vma;
        struct rb_node node;
        u64 timeline;
};

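/*
 * Called as each request tracked in vma->active (or vma->last_active) is
 * retired. When the last outstanding request is retired, the vma drops off
 * the VM's active list; once the backing object itself is idle, its
 * reservation object is pruned and any deferred active reference released.
 */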
static void
__i915_vma_retire(struct i915_vma *vma, struct i915_request *rq)
{
        struct drm_i915_gem_object *obj = vma->obj;

        GEM_BUG_ON(!i915_vma_is_active(vma));
        if (--vma->active_count)
                return;

        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        list_move_tail(&vma->vm_link, &vma->vm->inactive_list);

        GEM_BUG_ON(!i915_gem_object_is_active(obj));
        if (--obj->active_count)
                return;

        /* Prune the shared fence arrays iff completely idle (inc. external) */
        if (reservation_object_trylock(obj->resv)) {
                if (reservation_object_test_signaled_rcu(obj->resv, true))
                        reservation_object_add_excl_fence(obj->resv, NULL);
                reservation_object_unlock(obj->resv);
        }

        /* Bump our place on the bound list to keep it roughly in LRU order
         * so that we don't steal from recently used but inactive objects
         * (unless we are forced to ofc!)
         */
        spin_lock(&rq->i915->mm.obj_lock);
        if (obj->bind_count)
                list_move_tail(&obj->mm.link, &rq->i915->mm.bound_list);
        spin_unlock(&rq->i915->mm.obj_lock);

        obj->mm.dirty = true; /* be paranoid */

        if (i915_gem_object_has_active_reference(obj)) {
                i915_gem_object_clear_active_reference(obj);
                i915_gem_object_put(obj);
        }
}

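/*
 * Thunks connecting the i915_gem_active retirement callbacks back to
 * __i915_vma_retire(): one for the per-timeline nodes in vma->active,
 * and one for the last_active cache slot embedded in the vma itself.
 */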
static void
i915_vma_retire(struct i915_gem_active *base, struct i915_request *rq)
{
        struct i915_vma_active *active =
                container_of(base, typeof(*active), base);

        __i915_vma_retire(active->vma, rq);
}

static void
i915_vma_last_retire(struct i915_gem_active *base, struct i915_request *rq)
{
        __i915_vma_retire(container_of(base, struct i915_vma, last_active), rq);
}

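/*
 * Allocate a new vma for (obj, vm, view), compute its size and fence
 * requirements, and link it into the object's vma list and rbtree. The new
 * vma starts life on the VM's unbound list.
 */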
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
           struct i915_address_space *vm,
           const struct i915_ggtt_view *view)
{
        struct i915_vma *vma;
        struct rb_node *rb, **p;

        /* The aliasing_ppgtt should never be used directly! */
        GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);

        vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
        if (vma == NULL)
                return ERR_PTR(-ENOMEM);

        vma->active = RB_ROOT;

        init_request_active(&vma->last_active, i915_vma_last_retire);
        init_request_active(&vma->last_fence, NULL);
        vma->vm = vm;
        vma->ops = &vm->vma_ops;
        vma->obj = obj;
        vma->resv = obj->resv;
        vma->size = obj->base.size;
        vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

        if (view && view->type != I915_GGTT_VIEW_NORMAL) {
                vma->ggtt_view = *view;
                if (view->type == I915_GGTT_VIEW_PARTIAL) {
                        GEM_BUG_ON(range_overflows_t(u64,
                                                     view->partial.offset,
                                                     view->partial.size,
                                                     obj->base.size >> PAGE_SHIFT));
                        vma->size = view->partial.size;
                        vma->size <<= PAGE_SHIFT;
                        GEM_BUG_ON(vma->size > obj->base.size);
                } else if (view->type == I915_GGTT_VIEW_ROTATED) {
                        vma->size = intel_rotation_info_size(&view->rotated);
                        vma->size <<= PAGE_SHIFT;
                }
        }

        if (unlikely(vma->size > vm->total))
                goto err_vma;

        GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

        if (i915_is_ggtt(vm)) {
                if (unlikely(overflows_type(vma->size, u32)))
                        goto err_vma;

                vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
                                                      i915_gem_object_get_tiling(obj),
                                                      i915_gem_object_get_stride(obj));
                if (unlikely(vma->fence_size < vma->size || /* overflow */
                             vma->fence_size > vm->total))
                        goto err_vma;

                GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

                vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
                                                                i915_gem_object_get_tiling(obj),
                                                                i915_gem_object_get_stride(obj));
                GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

                /*
                 * We put the GGTT vma at the start of the vma-list, followed
                 * by the ppGTT vma. This allows us to break early when
                 * iterating over only the GGTT vma for an object, see
                 * for_each_ggtt_vma()
                 */
                vma->flags |= I915_VMA_GGTT;
                list_add(&vma->obj_link, &obj->vma_list);
        } else {
                list_add_tail(&vma->obj_link, &obj->vma_list);
        }

        rb = NULL;
        p = &obj->vma_tree.rb_node;
        while (*p) {
                struct i915_vma *pos;

                rb = *p;
                pos = rb_entry(rb, struct i915_vma, obj_node);
                if (i915_vma_compare(pos, vm, view) < 0)
                        p = &rb->rb_right;
                else
                        p = &rb->rb_left;
        }
        rb_link_node(&vma->obj_node, rb, p);
        rb_insert_color(&vma->obj_node, &obj->vma_tree);
        list_add(&vma->vm_link, &vm->unbound_list);

        return vma;

err_vma:
        kmem_cache_free(vm->i915->vmas, vma);
        return ERR_PTR(-E2BIG);
}

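/*
 * Find an existing vma for (vm, view) in the object's rbtree, which is
 * sorted by i915_vma_compare(). Returns NULL if no match exists.
 */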
static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
           struct i915_address_space *vm,
           const struct i915_ggtt_view *view)
{
        struct rb_node *rb;

        rb = obj->vma_tree.rb_node;
        while (rb) {
                struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
                long cmp;

                cmp = i915_vma_compare(vma, vm, view);
                if (cmp == 0)
                        return vma;

                if (cmp < 0)
                        rb = rb->rb_right;
                else
                        rb = rb->rb_left;
        }

        return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Must be called with struct_mutex held.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
                  struct i915_address_space *vm,
                  const struct i915_ggtt_view *view)
{
        struct i915_vma *vma;

        lockdep_assert_held(&obj->base.dev->struct_mutex);
        GEM_BUG_ON(view && !i915_is_ggtt(vm));
        GEM_BUG_ON(vm->closed);

        vma = vma_lookup(obj, vm, view);
        if (!vma)
                vma = vma_create(obj, vm, view);

        GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
        GEM_BUG_ON(!IS_ERR(vma) && vma_lookup(obj, vm, view) != vma);
        return vma;
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
                  u32 flags)
{
        u32 bind_flags;
        u32 vma_flags;
        int ret;

        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(vma->size > vma->node.size);

        if (GEM_WARN_ON(range_overflows(vma->node.start,
                                        vma->node.size,
                                        vma->vm->total)))
                return -ENODEV;

        if (GEM_WARN_ON(!flags))
                return -EINVAL;

        bind_flags = 0;
        if (flags & PIN_GLOBAL)
                bind_flags |= I915_VMA_GLOBAL_BIND;
        if (flags & PIN_USER)
                bind_flags |= I915_VMA_LOCAL_BIND;

        vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
        if (flags & PIN_UPDATE)
                bind_flags |= vma_flags;
        else
                bind_flags &= ~vma_flags;
        if (bind_flags == 0)
                return 0;

        GEM_BUG_ON(!vma->pages);

        trace_i915_vma_bind(vma, bind_flags);
        ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
        if (ret)
                return ret;

        vma->flags |= bind_flags;
        return 0;
}

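/**
 * i915_vma_pin_iomap - map the GGTT vma through the mappable aperture
 * @vma: VMA to iomap
 *
 * The vma must already be bound into the global GTT mappable region. An
 * extra pin (and a fence) is taken for the returned mapping; the caller
 * must release it with i915_vma_unpin_iomap().
 *
 * Returns a valid iomapped pointer or IO_ERR_PTR(-errno).
 */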
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
        void __iomem *ptr;
        int err;

        /* Access through the GTT requires the device to be awake. */
        assert_rpm_wakelock_held(vma->vm->i915);

        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
        if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
                err = -ENODEV;
                goto err;
        }

        GEM_BUG_ON(!i915_vma_is_ggtt(vma));
        GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

        ptr = vma->iomap;
        if (ptr == NULL) {
                ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
                                        vma->node.start,
                                        vma->node.size);
                if (ptr == NULL) {
                        err = -ENOMEM;
                        goto err;
                }

                vma->iomap = ptr;
        }

        __i915_vma_pin(vma);

        err = i915_vma_pin_fence(vma);
        if (err)
                goto err_unpin;

        i915_vma_set_ggtt_write(vma);
        return ptr;

err_unpin:
        __i915_vma_unpin(vma);
err:
        return IO_ERR_PTR(err);
}

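/*
 * Flush any pending writes made through a GGTT (write-combined) mapping of
 * this vma, clearing the GGTT-write tracking flag once done.
 */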
void i915_vma_flush_writes(struct i915_vma *vma)
{
        if (!i915_vma_has_ggtt_write(vma))
                return;

        i915_gem_flush_ggtt_writes(vma->vm->i915);

        i915_vma_unset_ggtt_write(vma);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

        GEM_BUG_ON(vma->iomap == NULL);

        i915_vma_flush_writes(vma);

        i915_vma_unpin_fence(vma);
        i915_vma_unpin(vma);
}

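/*
 * Unpin and close the vma pointed to by *p_vma (zeroing the pointer), then
 * drop the hold on the backing object unless it is still active on the GPU.
 */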
void i915_vma_unpin_and_release(struct i915_vma **p_vma)
{
        struct i915_vma *vma;
        struct drm_i915_gem_object *obj;

        vma = fetch_and_zero(p_vma);
        if (!vma)
                return;

        obj = vma->obj;
        GEM_BUG_ON(!obj);

        i915_vma_unpin(vma);
        i915_vma_close(vma);

        __i915_gem_object_release_unless_active(obj);
}

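/*
 * Check whether the vma's current binding satisfies the given size,
 * alignment and placement (PIN_*) constraints; if not, the caller must
 * unbind and rebind it in a suitable location.
 */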
bool i915_vma_misplaced(const struct i915_vma *vma,
                        u64 size, u64 alignment, u64 flags)
{
        if (!drm_mm_node_allocated(&vma->node))
                return false;

        if (vma->node.size < size)
                return true;

        GEM_BUG_ON(alignment && !is_power_of_2(alignment));
        if (alignment && !IS_ALIGNED(vma->node.start, alignment))
                return true;

        if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
                return true;

        if (flags & PIN_OFFSET_BIAS &&
            vma->node.start < (flags & PIN_OFFSET_MASK))
                return true;

        if (flags & PIN_OFFSET_FIXED &&
            vma->node.start != (flags & PIN_OFFSET_MASK))
                return true;

        return false;
}

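/*
 * Recompute whether this GGTT vma lies entirely within the mappable
 * aperture and meets the size/alignment requirements of a fence register,
 * updating I915_VMA_CAN_FENCE accordingly.
 */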
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
        bool mappable, fenceable;

        GEM_BUG_ON(!i915_vma_is_ggtt(vma));
        GEM_BUG_ON(!vma->fence_size);

        /*
         * Explicitly disable for rotated VMA since the display does not
         * need the fence and the VMA is not accessible to other users.
         */
        if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
                return;

        fenceable = (vma->node.size >= vma->fence_size &&
                     IS_ALIGNED(vma->node.start, vma->fence_alignment));

        mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

        if (mappable && fenceable)
                vma->flags |= I915_VMA_CAN_FENCE;
        else
                vma->flags &= ~I915_VMA_CAN_FENCE;
}

static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
        return node->allocated && node->color != color;
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
        struct drm_mm_node *node = &vma->node;
        struct drm_mm_node *other;

        /*
         * On some machines we have to be careful when putting differing types
         * of snoopable memory together to avoid the prefetcher crossing memory
         * domains and dying. During vm initialisation, we decide whether or not
         * these constraints apply and set the drm_mm.color_adjust
         * appropriately.
         */
        if (vma->vm->mm.color_adjust == NULL)
                return true;

        /* Only valid to be called on an already inserted vma */
        GEM_BUG_ON(!drm_mm_node_allocated(node));
        GEM_BUG_ON(list_empty(&node->node_list));

        other = list_prev_entry(node, node_list);
        if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
                return false;

        other = list_next_entry(node, node_list);
        if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
                return false;

        return true;
}

static void assert_bind_count(const struct drm_i915_gem_object *obj)
{
        /*
         * Combine the assertion that the object is bound and that we have
         * pinned its pages. But we should never have bound the object
         * more than we have pinned its pages. (For complete accuracy, we
         * assume that no one else is pinning the pages, but as a rough
         * assertion that we will not run into problems later, this will do!)
         */
        GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
        struct drm_i915_private *dev_priv = vma->vm->i915;
        unsigned int cache_level;
        u64 start, end;
        int ret;

        GEM_BUG_ON(i915_vma_is_closed(vma));
        GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
        GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

        size = max(size, vma->size);
        alignment = max(alignment, vma->display_alignment);
        if (flags & PIN_MAPPABLE) {
                size = max_t(typeof(size), size, vma->fence_size);
                alignment = max_t(typeof(alignment),
                                  alignment, vma->fence_alignment);
        }

        GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
        GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
        GEM_BUG_ON(!is_power_of_2(alignment));

        start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
        GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

        end = vma->vm->total;
        if (flags & PIN_MAPPABLE)
                end = min_t(u64, end, dev_priv->ggtt.mappable_end);
        if (flags & PIN_ZONE_4G)
                end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
        GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

        /* If binding the object/GGTT view requires more space than the entire
         * aperture has, reject it early before evicting everything in a vain
         * attempt to find space.
         */
        if (size > end) {
                DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
                          size, flags & PIN_MAPPABLE ? "mappable" : "total",
                          end);
                return -ENOSPC;
        }

        if (vma->obj) {
                ret = i915_gem_object_pin_pages(vma->obj);
                if (ret)
                        return ret;

                cache_level = vma->obj->cache_level;
        } else {
                cache_level = 0;
        }

        GEM_BUG_ON(vma->pages);

        ret = vma->ops->set_pages(vma);
        if (ret)
                goto err_unpin;

        if (flags & PIN_OFFSET_FIXED) {
                u64 offset = flags & PIN_OFFSET_MASK;

                if (!IS_ALIGNED(offset, alignment) ||
                    range_overflows(offset, size, end)) {
                        ret = -EINVAL;
                        goto err_clear;
                }

                ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
                                           size, offset, cache_level,
                                           flags);
                if (ret)
                        goto err_clear;
        } else {
                /*
                 * We only support huge gtt pages through the 48b PPGTT,
                 * however we also don't want to force any alignment for
                 * objects which need to be tightly packed into the low 32bits.
                 *
                 * Note that we assume that GGTT are limited to 4GiB for the
                 * foreseeable future. See also i915_ggtt_offset().
                 */
                if (upper_32_bits(end - 1) &&
                    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
                        /*
                         * We can't mix 64K and 4K PTEs in the same page-table
                         * (2M block), and so to avoid the ugliness and
                         * complexity of coloring we opt for just aligning 64K
                         * objects to 2M.
                         */
                        u64 page_alignment =
                                rounddown_pow_of_two(vma->page_sizes.sg |
                                                     I915_GTT_PAGE_SIZE_2M);

                        /*
                         * Check we don't expand for the limited Global GTT
                         * (mappable aperture is even more precious!). This
                         * also checks that we exclude the aliasing-ppgtt.
                         */
                        GEM_BUG_ON(i915_vma_is_ggtt(vma));

                        alignment = max(alignment, page_alignment);

                        if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
                                size = round_up(size, I915_GTT_PAGE_SIZE_2M);
                }

                ret = i915_gem_gtt_insert(vma->vm, &vma->node,
                                          size, alignment, cache_level,
                                          start, end, flags);
                if (ret)
                        goto err_clear;

                GEM_BUG_ON(vma->node.start < start);
                GEM_BUG_ON(vma->node.start + vma->node.size > end);
        }
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));

        list_move_tail(&vma->vm_link, &vma->vm->inactive_list);

        if (vma->obj) {
                struct drm_i915_gem_object *obj = vma->obj;

                spin_lock(&dev_priv->mm.obj_lock);
                list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
                obj->bind_count++;
                spin_unlock(&dev_priv->mm.obj_lock);

                assert_bind_count(obj);
        }

        return 0;

err_clear:
        vma->ops->clear_pages(vma);
err_unpin:
        if (vma->obj)
                i915_gem_object_unpin_pages(vma->obj);
        return ret;
}

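/*
 * Tear down the vma's binding: release its pages, remove its drm_mm node
 * and return it to the VM's unbound list, dropping the object's bind count
 * (and page pin) in the process.
 */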
static void
i915_vma_remove(struct i915_vma *vma)
{
        struct drm_i915_private *i915 = vma->vm->i915;

        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

        vma->ops->clear_pages(vma);

        drm_mm_remove_node(&vma->node);
        list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

        /*
         * Since the unbound list is global, only move to that list if
         * no more VMAs exist.
         */
        if (vma->obj) {
                struct drm_i915_gem_object *obj = vma->obj;

                spin_lock(&i915->mm.obj_lock);
                if (--obj->bind_count == 0)
                        list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
                spin_unlock(&i915->mm.obj_lock);

                /*
                 * And finally now the object is completely decoupled from this
                 * vma, we can drop its hold on the backing storage and allow
                 * it to be reaped by the shrinker.
                 */
                i915_gem_object_unpin_pages(obj);
                assert_bind_count(obj);
        }
}

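/*
 * The out-of-line part of i915_vma_pin(): insert the vma into its address
 * space if it has no node yet, then bind it with the requested PIN_* flags.
 * On failure the vma is unwound back to its unbound state.
 */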
int __i915_vma_do_pin(struct i915_vma *vma,
                      u64 size, u64 alignment, u64 flags)
{
        const unsigned int bound = vma->flags;
        int ret;

        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
        GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
        GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

        if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
                ret = -EBUSY;
                goto err_unpin;
        }

        if ((bound & I915_VMA_BIND_MASK) == 0) {
                ret = i915_vma_insert(vma, size, alignment, flags);
                if (ret)
                        goto err_unpin;
        }
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

        ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
        if (ret)
                goto err_remove;

        GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);

        if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
                __i915_vma_set_map_and_fenceable(vma);

        GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
        return 0;

err_remove:
        if ((bound & I915_VMA_BIND_MASK) == 0) {
                i915_vma_remove(vma);
                GEM_BUG_ON(vma->pages);
                GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
        }
err_unpin:
        __i915_vma_unpin(vma);
        return ret;
}

void i915_vma_close(struct i915_vma *vma)
{
        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

        GEM_BUG_ON(i915_vma_is_closed(vma));
        vma->flags |= I915_VMA_CLOSED;

        /*
         * We defer actually closing, unbinding and destroying the VMA until
         * the next idle point, or if the object is freed in the meantime. By
         * postponing the unbind, we allow for it to be resurrected by the
         * client, avoiding the work required to rebind the VMA. This is
         * advantageous for DRI, where the client/server pass objects
         * between themselves, temporarily opening a local VMA to the
         * object, and then closing it again. The same object is then reused
         * on the next frame (or two, depending on the depth of the swap queue)
         * causing us to rebind the VMA once more. This ends up being a lot
         * of wasted work for the steady state.
         */
        list_add_tail(&vma->closed_link, &vma->vm->i915->gt.closed_vma);
}

void i915_vma_reopen(struct i915_vma *vma)
{
        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

        if (vma->flags & I915_VMA_CLOSED) {
                vma->flags &= ~I915_VMA_CLOSED;
                list_del(&vma->closed_link);
        }
}

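/*
 * Final destruction of the vma: unlink it from the object and VM, free any
 * per-timeline active-tracking nodes, and return it to the slab.
 */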
static void __i915_vma_destroy(struct i915_vma *vma)
{
        struct drm_i915_private *i915 = vma->vm->i915;
        struct i915_vma_active *iter, *n;

        GEM_BUG_ON(vma->node.allocated);
        GEM_BUG_ON(vma->fence);

        GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));

        list_del(&vma->obj_link);
        list_del(&vma->vm_link);
        if (vma->obj)
                rb_erase(&vma->obj_node, &vma->obj->vma_tree);

        rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) {
                GEM_BUG_ON(i915_gem_active_isset(&iter->base));
                kfree(iter);
        }

        kmem_cache_free(i915->vmas, vma);
}

void i915_vma_destroy(struct i915_vma *vma)
{
        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

        GEM_BUG_ON(i915_vma_is_active(vma));
        GEM_BUG_ON(i915_vma_is_pinned(vma));

        if (i915_vma_is_closed(vma))
                list_del(&vma->closed_link);

        WARN_ON(i915_vma_unbind(vma));
        __i915_vma_destroy(vma);
}

void i915_vma_parked(struct drm_i915_private *i915)
{
        struct i915_vma *vma, *next;

        list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) {
                GEM_BUG_ON(!i915_vma_is_closed(vma));
                i915_vma_destroy(vma);
        }

        GEM_BUG_ON(!list_empty(&i915->gt.closed_vma));
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
        GEM_BUG_ON(i915_vma_is_pinned(vma));

        if (vma->iomap == NULL)
                return;

        io_mapping_unmap(vma->iomap);
        vma->iomap = NULL;
}

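/*
 * Zap any user mmaps of this vma through the GTT so that the next user
 * access faults, allowing the fault handler to re-establish domain tracking
 * and fencing before use.
 */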
void i915_vma_revoke_mmap(struct i915_vma *vma)
{
        struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
        u64 vma_offset;

        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

        if (!i915_vma_has_userfault(vma))
                return;

        GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
        GEM_BUG_ON(!vma->obj->userfault_count);

        vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
        unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
                            drm_vma_node_offset_addr(node) + vma_offset,
                            vma->size,
                            1);

        i915_vma_unset_userfault(vma);
        if (!--vma->obj->userfault_count)
                list_del(&vma->obj->userfault_link);
}

static void export_fence(struct i915_vma *vma,
                         struct i915_request *rq,
                         unsigned int flags)
{
        struct reservation_object *resv = vma->resv;

        /*
         * Ignore errors from failing to allocate the new fence, we can't
         * handle an error right now. Worst case should be missed
         * synchronisation leading to rendering corruption.
         */
        reservation_object_lock(resv, NULL);
        if (flags & EXEC_OBJECT_WRITE)
                reservation_object_add_excl_fence(resv, &rq->fence);
        else if (reservation_object_reserve_shared(resv) == 0)
                reservation_object_add_shared_fence(resv, &rq->fence);
        reservation_object_unlock(resv);
}

static struct i915_gem_active *active_instance(struct i915_vma *vma, u64 idx)
{
        struct i915_vma_active *active;
        struct rb_node **p, *parent;
        struct i915_request *old;

        /*
         * We track the most recently used timeline to skip a rbtree search
         * for the common case, under typical loads we never need the rbtree
         * at all. We can reuse the last_active slot if it is empty, that is
         * after the previous activity has been retired, or if the active
         * matches the current timeline.
         *
         * Note that we allow the timeline to be active simultaneously in
         * the rbtree and the last_active cache. We do this to avoid having
         * to search and replace the rbtree element for a new timeline, with
         * the cost being that we must be aware that the vma may be retired
         * twice for the same timeline (as the older rbtree element will be
         * retired before the new request added to last_active).
         */
        old = i915_gem_active_raw(&vma->last_active,
                                  &vma->vm->i915->drm.struct_mutex);
        if (!old || old->fence.context == idx)
                goto out;

        /* Move the currently active fence into the rbtree */
        idx = old->fence.context;

        parent = NULL;
        p = &vma->active.rb_node;
        while (*p) {
                parent = *p;

                active = rb_entry(parent, struct i915_vma_active, node);
                if (active->timeline == idx)
                        goto replace;

                if (active->timeline < idx)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }

        active = kmalloc(sizeof(*active), GFP_KERNEL);

        /* kmalloc may retire the vma->last_active request (thanks shrinker)! */
        if (unlikely(!i915_gem_active_raw(&vma->last_active,
                                          &vma->vm->i915->drm.struct_mutex))) {
                kfree(active);
                goto out;
        }

        if (unlikely(!active))
                return ERR_PTR(-ENOMEM);

        init_request_active(&active->base, i915_vma_retire);
        active->vma = vma;
        active->timeline = idx;

        rb_link_node(&active->node, parent, p);
        rb_insert_color(&active->node, &vma->active);

replace:
        /*
         * Overwrite the previous active slot in the rbtree with last_active,
         * leaving last_active zeroed. If the previous slot is still active,
         * we must be careful as we now only expect to receive one retire
         * callback not two, and so must undo the active counting for the
         * overwritten slot.
         */
        if (i915_gem_active_isset(&active->base)) {
                /* Retire ourselves from the old rq->active_list */
                __list_del_entry(&active->base.link);
                vma->active_count--;
                GEM_BUG_ON(!vma->active_count);
        }
        GEM_BUG_ON(list_empty(&vma->last_active.link));
        list_replace_init(&vma->last_active.link, &active->base.link);
        active->base.request = fetch_and_zero(&vma->last_active.request);

out:
        return &vma->last_active;
}

int i915_vma_move_to_active(struct i915_vma *vma,
                            struct i915_request *rq,
                            unsigned int flags)
{
        struct drm_i915_gem_object *obj = vma->obj;
        struct i915_gem_active *active;

        lockdep_assert_held(&rq->i915->drm.struct_mutex);
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

        active = active_instance(vma, rq->fence.context);
        if (IS_ERR(active))
                return PTR_ERR(active);

        /*
         * Add a reference if we're newly entering the active list.
         * The order in which we add operations to the retirement queue is
         * vital here: mark_active adds to the start of the callback list,
         * such that subsequent callbacks are called first. Therefore we
         * add the active reference first and queue for it to be dropped
         * *last*.
         */
        if (!i915_gem_active_isset(active) && !vma->active_count++) {
                list_move_tail(&vma->vm_link, &vma->vm->active_list);
                obj->active_count++;
        }
        i915_gem_active_set(active, rq);
        GEM_BUG_ON(!i915_vma_is_active(vma));
        GEM_BUG_ON(!obj->active_count);

        obj->write_domain = 0;
        if (flags & EXEC_OBJECT_WRITE) {
                obj->write_domain = I915_GEM_DOMAIN_RENDER;

                if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
                        i915_gem_active_set(&obj->frontbuffer_write, rq);

                obj->read_domains = 0;
        }
        obj->read_domains |= I915_GEM_GPU_DOMAINS;

        if (flags & EXEC_OBJECT_NEEDS_FENCE)
                i915_gem_active_set(&vma->last_fence, rq);

        export_fence(vma, rq, flags);
        return 0;
}

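/*
 * Unbind the vma from its address space: wait for any outstanding GPU
 * activity, flush and release the GGTT mapping and fence if present, revoke
 * user mmaps, and finally remove the vma's node from the VM. Fails with
 * -EBUSY if the vma is still pinned.
 */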
int i915_vma_unbind(struct i915_vma *vma)
{
        int ret;

        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

        /*
         * First wait upon any activity as retiring the request may
         * have side-effects such as unpinning or even unbinding this vma.
         */
        might_sleep();
        if (i915_vma_is_active(vma)) {
                struct i915_vma_active *active, *n;

                /*
                 * When a closed VMA is retired, it is unbound - eek.
                 * In order to prevent it from being recursively closed,
                 * take a pin on the vma so that the second unbind is
                 * aborted.
                 *
                 * Even more scary is that the retire callback may free
                 * the object (last active vma). To prevent the explosion
                 * we defer the actual object free to a worker that can
                 * only proceed once it acquires the struct_mutex (which
                 * we currently hold, therefore it cannot free this object
                 * before we are finished).
                 */
                __i915_vma_pin(vma);

                ret = i915_gem_active_retire(&vma->last_active,
                                             &vma->vm->i915->drm.struct_mutex);
                if (ret)
                        goto unpin;

                rbtree_postorder_for_each_entry_safe(active, n,
                                                     &vma->active, node) {
                        ret = i915_gem_active_retire(&active->base,
                                                     &vma->vm->i915->drm.struct_mutex);
                        if (ret)
                                goto unpin;
                }

                ret = i915_gem_active_retire(&vma->last_fence,
                                             &vma->vm->i915->drm.struct_mutex);
unpin:
                __i915_vma_unpin(vma);
                if (ret)
                        return ret;
        }
        GEM_BUG_ON(i915_vma_is_active(vma));

        if (i915_vma_is_pinned(vma)) {
                vma_print_allocator(vma, "is pinned");
                return -EBUSY;
        }

        if (!drm_mm_node_allocated(&vma->node))
                return 0;

        if (i915_vma_is_map_and_fenceable(vma)) {
                /*
                 * Check that we have flushed all writes through the GGTT
                 * before the unbind; otherwise, due to the non-strict nature
                 * of those indirect writes, they may end up referencing the
                 * GGTT PTE after the unbind.
                 */
                i915_vma_flush_writes(vma);
                GEM_BUG_ON(i915_vma_has_ggtt_write(vma));

                /* release the fence reg _after_ flushing */
                ret = i915_vma_put_fence(vma);
                if (ret)
                        return ret;

                /* Force a pagefault for domain tracking on next user access */
                i915_vma_revoke_mmap(vma);

                __i915_vma_iounmap(vma);
                vma->flags &= ~I915_VMA_CAN_FENCE;
        }
        GEM_BUG_ON(vma->fence);
        GEM_BUG_ON(i915_vma_has_userfault(vma));

        if (likely(!vma->vm->closed)) {
                trace_i915_vma_unbind(vma);
                vma->ops->unbind_vma(vma);
        }
        vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

        i915_vma_remove(vma);

        return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif