/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"
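
/*
 * Check whether any engine still has a request outstanding on the global
 * GTT timeline; if one does, the GGTT is still in use and eviction may
 * have to stall.
 */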
static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id) {
		struct intel_timeline *tl;

		tl = &ggtt->base.timeline.engine[engine->id];
		if (i915_gem_active_isset(&tl->last_request))
			return false;
	}

	return true;
}

static int ggtt_flush(struct drm_i915_private *i915)
{
	int err;

	/* Not everything in the GGTT is tracked via vma (otherwise we
	 * could evict as required with minimal stalling) so we are forced
	 * to idle the GPU and explicitly retire outstanding requests in
	 * the hopes that we can then remove contexts and the like only
	 * bound by their active reference.
	 */
	err = i915_gem_switch_to_kernel_context(i915);
	if (err)
		return err;

	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_INTERRUPTIBLE |
				     I915_WAIT_LOCKED);
	if (err)
		return err;

	return 0;
}

static bool
mark_free(struct drm_mm_scan *scan,
	  struct i915_vma *vma,
	  unsigned int flags,
	  struct list_head *unwind)
{
	if (i915_vma_is_pinned(vma))
		return false;

	if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma))
		return false;

	list_add(&vma->evict_link, unwind);
	return drm_mm_scan_add_block(scan, &vma->node);
}
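
/*
 * For reference, a condensed sketch of the drm_mm scan protocol that
 * mark_free() and i915_gem_evict_something() follow (illustrative only;
 * see drm_mm.c for the authoritative description):
 *
 *	drm_mm_scan_init_with_range(&scan, ...);
 *	list_for_each_entry(vma, lists, vm_link)
 *		drm_mm_scan_add_block(&scan, &vma->node);    // grow the hole
 *	// every block added MUST be removed again, in any order,
 *	// before any other drm_mm operation on this address space:
 *	list_for_each_entry_safe(vma, next, ...)
 *		drm_mm_scan_remove_block(&scan, &vma->node); // true => evict
 *
 * Only the blocks for which drm_mm_scan_remove_block() returns true need
 * to be unbound to create the requested hole.
 */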

/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @cache_level: cache_level for the desired space
 * @start: start (inclusive) of the range from which to evict objects
 * @end: end (exclusive) of the range from which to evict objects
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole exists
 * already before calling this function.
 *
 * This function is used by the object/vma binding code.
 *
 * Since this function is only used to free up virtual address space it only
 * ignores pinned vmas, and not objects where the backing storage itself is
 * pinned. Hence obj->pages_pin_count does not protect against eviction.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int
i915_gem_evict_something(struct i915_address_space *vm,
			 u64 min_size, u64 alignment,
			 unsigned cache_level,
			 u64 start, u64 end,
			 unsigned flags)
{
	struct drm_i915_private *dev_priv = vm->i915;
	struct drm_mm_scan scan;
	struct list_head eviction_list;
	struct list_head *phases[] = {
		&vm->inactive_list,
		&vm->active_list,
		NULL,
	}, **phase;
	struct i915_vma *vma, *next;
	struct drm_mm_node *node;
	enum drm_mm_insert_mode mode;
	int ret;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	trace_i915_gem_evict(vm, min_size, alignment, flags);

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those in flight,
	 * on the active list, again in retirement order.
	 *
	 * The retirement sequence is thus:
	 *   1. Inactive objects (already retired)
	 *   2. Active objects (will stall on unbinding)
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */
	mode = DRM_MM_INSERT_BEST;
	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGH;
	if (flags & PIN_MAPPABLE)
		mode = DRM_MM_INSERT_LOW;
	drm_mm_scan_init_with_range(&scan, &vm->mm,
				    min_size, alignment, cache_level,
				    start, end, mode);

	/* Retire before we search the active list. Although we have
	 * reasonable accuracy in our retirement lists, we may have
	 * a stray pin (preventing eviction) that can only be resolved by
	 * retiring.
	 */
	if (!(flags & PIN_NONBLOCK))
		i915_gem_retire_requests(dev_priv);
	else
		phases[1] = NULL;

search_again:
	INIT_LIST_HEAD(&eviction_list);
	phase = phases;
	do {
		list_for_each_entry(vma, *phase, vm_link)
			if (mark_free(&scan, vma, flags, &eviction_list))
				goto found;
	} while (*++phase);

	/* Nothing found, clean up and bail out! */
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		ret = drm_mm_scan_remove_block(&scan, &vma->node);
		BUG_ON(ret);
	}

	/* Can we unpin some objects such as idle hw contexts,
	 * or pending flips? But since only the GGTT has global entries
	 * such as scanouts, ringbuffers and contexts, we can skip the
	 * purge when inspecting per-process local address spaces.
	 */
	if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
		return -ENOSPC;

	if (ggtt_is_idle(dev_priv)) {
		/* If we still have pending pageflip completions, drop
		 * back to userspace to give our workqueues time to
		 * acquire our locks and unpin the old scanouts.
		 */
		return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
	}

	ret = ggtt_flush(dev_priv);
	if (ret)
		return ret;

	goto search_again;

found:
	/* drm_mm doesn't allow any other operations while
	 * scanning, therefore store to-be-evicted objects on a
	 * temporary list and take a reference for all before
	 * calling unbind (which may remove the active reference
	 * of any of our objects, thus corrupting the list).
	 */
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		if (drm_mm_scan_remove_block(&scan, &vma->node))
			__i915_vma_pin(vma);
		else
			list_del(&vma->evict_link);
	}

	/* Unbinding will emit any required flushes */
	ret = 0;
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}

	while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
		vma = container_of(node, struct i915_vma, node);
		ret = i915_vma_unbind(vma);
	}

	return ret;
}
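
/*
 * Illustrative caller pattern (a sketch based on i915_gem_gtt_insert(),
 * not part of this file): attempt the insertion first and fall back to
 * eviction only on -ENOSPC, then retry:
 *
 *	err = drm_mm_insert_node_in_range(&vm->mm, node, size, alignment,
 *					  cache_level, start, end, mode);
 *	if (err != -ENOSPC)
 *		return err;
 *
 *	err = i915_gem_evict_something(vm, size, alignment, cache_level,
 *				       start, end, flags);
 *	if (err)
 *		return err;
 *
 *	return drm_mm_insert_node_in_range(&vm->mm, node, size, alignment,
 *					   cache_level, start, end, mode);
 */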

/**
 * i915_gem_evict_for_node - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @target: range (and color) to evict for
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas that overlap the target node.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_for_node(struct i915_address_space *vm,
			    struct drm_mm_node *target,
			    unsigned int flags)
{
	LIST_HEAD(eviction_list);
	struct drm_mm_node *node;
	u64 start = target->start;
	u64 end = start + target->size;
	struct i915_vma *vma, *next;
	bool check_color;
	int ret = 0;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	trace_i915_gem_evict_node(vm, target, flags);

	/* Retire before we search the active list. Although we have
	 * reasonable accuracy in our retirement lists, we may have
	 * a stray pin (preventing eviction) that can only be resolved by
	 * retiring.
	 */
	if (!(flags & PIN_NONBLOCK))
		i915_gem_retire_requests(vm->i915);

	check_color = vm->mm.color_adjust;
	if (check_color) {
		/* Expand search to cover neighbouring guard pages (or lack!) */
		if (start)
			start -= I915_GTT_PAGE_SIZE;

		/* Always look at the page afterwards to avoid the end-of-GTT */
		end += I915_GTT_PAGE_SIZE;
	}
	GEM_BUG_ON(start >= end);

	drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
		/* If we find any non-objects (!vma), we cannot evict them */
		if (node->color == I915_COLOR_UNEVICTABLE) {
			ret = -ENOSPC;
			break;
		}

		GEM_BUG_ON(!node->allocated);
		vma = container_of(node, typeof(*vma), node);

		/* If we are using coloring to insert guard pages between
		 * different cache domains within the address space, we have
		 * to check whether the objects on either side of our range
		 * abut and conflict. If they are in conflict, then we evict
		 * those as well to make room for our guard pages.
		 */
		if (check_color) {
			if (node->start + node->size == target->start) {
				if (node->color == target->color)
					continue;
			}
			if (node->start == target->start + target->size) {
				if (node->color == target->color)
					continue;
			}
		}

		if (flags & PIN_NONBLOCK &&
		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) {
			ret = -ENOSPC;
			break;
		}

		if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma)) {
			ret = -ENOSPC;
			break;
		}

		/* Overlap of objects in the same batch? */
		if (i915_vma_is_pinned(vma)) {
			ret = -ENOSPC;
			if (vma->exec_flags &&
			    *vma->exec_flags & EXEC_OBJECT_PINNED)
				ret = -EINVAL;
			break;
		}

		/* Never show fear in the face of dragons!
		 *
		 * We cannot directly remove this node from within this
		 * iterator and as with i915_gem_evict_something() we employ
		 * the vma pin_count in order to prevent the action of
		 * unbinding one vma from freeing (by dropping its active
		 * reference) another in our eviction list.
		 */
		__i915_vma_pin(vma);
		list_add(&vma->evict_link, &eviction_list);
	}

	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}

	return ret;
}
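
/*
 * Illustrative caller pattern (a sketch based on i915_gem_gtt_reserve(),
 * not part of this file): reserve a node at a fixed offset and evict the
 * overlapping range on -ENOSPC, then retry:
 *
 *	node->start = offset;
 *	node->size = size;
 *	node->color = color;
 *
 *	err = drm_mm_reserve_node(&vm->mm, node);
 *	if (err != -ENOSPC)
 *		return err;
 *
 *	err = i915_gem_evict_for_node(vm, node, flags);
 *	if (err == 0)
 *		err = drm_mm_reserve_node(&vm->mm, node);
 */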

/**
 * i915_gem_evict_vm - Evict all idle vmas from a vm
 * @vm: Address space to cleanse
 *
 * This function evicts all vmas from a vm, ignoring any that are currently
 * pinned.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * address space.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_vm(struct i915_address_space *vm)
{
	struct list_head *phases[] = {
		&vm->inactive_list,
		&vm->active_list,
		NULL
	}, **phase;
	struct list_head eviction_list;
	struct i915_vma *vma, *next;
	int ret;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	trace_i915_gem_evict_vm(vm);

	/* Switch back to the default context in order to unpin
	 * the existing context objects. However, such objects only
	 * pin themselves inside the global GTT and performing the
	 * switch otherwise is ineffective.
	 */
	if (i915_is_ggtt(vm)) {
		ret = ggtt_flush(vm->i915);
		if (ret)
			return ret;
	}

	INIT_LIST_HEAD(&eviction_list);
	phase = phases;
	do {
		list_for_each_entry(vma, *phase, vm_link) {
			if (i915_vma_is_pinned(vma))
				continue;

			__i915_vma_pin(vma);
			list_add(&vma->evict_link, &eviction_list);
		}
	} while (*++phase);

	ret = 0;
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}
	return ret;
}
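
/*
 * Illustrative usage (a sketch of the execbuf fallback; see eb_reserve()
 * in i915_gem_execbuffer.c for the real thing): when per-object binding
 * keeps failing with -ENOSPC, the whole address space is cleansed and the
 * binding pass restarted from scratch:
 *
 *	err = i915_gem_evict_vm(eb->vm);
 *	if (err)
 *		return err;
 *	// ...retry binding every execobject...
 */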

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_evict.c"
#endif