/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"

static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
	if (!mutex_is_locked(mutex))
		return false;

#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
	return mutex->owner == task;
#else
	/* Since UP may be pre-empted, we cannot assume that we own the lock */
	return false;
#endif
}

static bool any_vma_pinned(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link)
		if (i915_vma_is_pinned(vma))
			return true;

	return false;
}

static bool swap_available(void)
{
	return get_nr_swap_pages() > 0;
}

static bool can_release_pages(struct drm_i915_gem_object *obj)
{
	/* Only shmemfs objects are backed by swap */
	if (!obj->base.filp)
		return false;

	/* Only report true if by unbinding the object and putting its pages
	 * we can actually make forward progress towards freeing physical
	 * pages.
	 *
	 * If the pages are pinned for any other reason than being bound
	 * to the GPU, simply unbinding from the GPU is not going to succeed
	 * in releasing our pin count on the pages themselves.
	 */
	if (obj->pages_pin_count > obj->bind_count)
		return false;

	if (any_vma_pinned(obj))
		return false;

	/* We can only return physical pages to the system if we can either
	 * discard the contents (because the user has marked them as being
	 * purgeable) or if we can move their contents out to swap.
	 */
	return swap_available() || obj->madv == I915_MADV_DONTNEED;
}
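
/*
 * Illustrative example of the pin accounting above (values are made up):
 * an object bound into a single VM (bind_count == 1) whose pages carry one
 * extra pin, e.g. from a kernel mapping of its pages (pages_pin_count == 2),
 * cannot be shrunk, because unbinding would only drop the pin count to 1,
 * never to 0:
 *
 *	obj->pages_pin_count = 2;
 *	obj->bind_count = 1;
 *	can_release_pages(obj);		-> false, since 2 > 1
 */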
93 | ||
94 | /** | |
95 | * i915_gem_shrink - Shrink buffer object caches | |
96 | * @dev_priv: i915 device | |
97 | * @target: amount of memory to make available, in pages | |
98 | * @flags: control flags for selecting cache types | |
99 | * | |
100 | * This function is the main interface to the shrinker. It will try to release | |
101 | * up to @target pages of main memory backing storage from buffer objects. | |
102 | * Selection of the specific caches can be done with @flags. This is e.g. useful | |
103 | * when purgeable objects should be removed from caches preferentially. | |
104 | * | |
105 | * Note that it's not guaranteed that released amount is actually available as | |
106 | * free system memory - the pages might still be in-used to due to other reasons | |
107 | * (like cpu mmaps) or the mm core has reused them before we could grab them. | |
108 | * Therefore code that needs to explicitly shrink buffer objects caches (e.g. to | |
109 | * avoid deadlocks in memory reclaim) must fall back to i915_gem_shrink_all(). | |
110 | * | |
111 | * Also note that any kind of pinning (both per-vma address space pins and | |
112 | * backing storage pins at the buffer object level) result in the shrinker code | |
113 | * having to skip the object. | |
114 | * | |
115 | * Returns: | |
116 | * The number of pages of backing storage actually released. | |
117 | */ | |
unsigned long
i915_gem_shrink(struct drm_i915_private *dev_priv,
		unsigned long target, unsigned flags)
{
	const struct {
		struct list_head *list;
		unsigned int bit;
	} phases[] = {
		{ &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
		{ &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
		{ NULL, 0 },
	}, *phase;
	unsigned long count = 0;

	trace_i915_gem_shrink(dev_priv, target, flags);
	i915_gem_retire_requests(dev_priv);

	/*
	 * Unbinding of objects will require HW access; let us not wake the
	 * device just to recover a little memory. If absolutely necessary,
	 * we will force the wake during oom-notifier.
	 */
	if ((flags & I915_SHRINK_BOUND) &&
	    !intel_runtime_pm_get_if_in_use(dev_priv))
		flags &= ~I915_SHRINK_BOUND;

	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at a time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: The final object
	 * unreferencing and the bound_list are both protected by the
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a reference count of 0.
	 */
	for (phase = phases; phase->list; phase++) {
		struct list_head still_in_list;
		struct drm_i915_gem_object *obj;

		if ((flags & phase->bit) == 0)
			continue;

		INIT_LIST_HEAD(&still_in_list);
		while (count < target &&
		       (obj = list_first_entry_or_null(phase->list,
						       typeof(*obj),
						       global_list))) {
			list_move_tail(&obj->global_list, &still_in_list);

			if (flags & I915_SHRINK_PURGEABLE &&
			    obj->madv != I915_MADV_DONTNEED)
				continue;

			if (flags & I915_SHRINK_VMAPS &&
			    !is_vmalloc_addr(obj->mapping))
				continue;

			if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
				continue;

			if (!can_release_pages(obj))
				continue;

			i915_gem_object_get(obj);

			/* For the unbound phase, this should be a no-op! */
			i915_gem_object_unbind(obj);
			if (i915_gem_object_put_pages(obj) == 0)
				count += obj->base.size >> PAGE_SHIFT;

			i915_gem_object_put(obj);
		}
		list_splice(&still_in_list, phase->list);
	}

	if (flags & I915_SHRINK_BOUND)
		intel_runtime_pm_put(dev_priv);

	i915_gem_retire_requests(dev_priv);

	return count;
}
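
/*
 * Example (illustrative; not a call site in this driver): a caller that
 * wants to release up to 128 pages, touching only purgeable objects that
 * are not currently bound to the GPU, might use:
 *
 *	unsigned long freed;
 *
 *	freed = i915_gem_shrink(dev_priv, 128,
 *				I915_SHRINK_UNBOUND |
 *				I915_SHRINK_PURGEABLE);
 *
 * The target of 128 pages is an arbitrary example value.
 */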
210 | ||
211 | /** | |
212 | * i915_gem_shrink_all - Shrink buffer object caches completely | |
213 | * @dev_priv: i915 device | |
214 | * | |
215 | * This is a simple wraper around i915_gem_shrink() to aggressively shrink all | |
216 | * caches completely. It also first waits for and retires all outstanding | |
217 | * requests to also be able to release backing storage for active objects. | |
218 | * | |
219 | * This should only be used in code to intentionally quiescent the gpu or as a | |
220 | * last-ditch effort when memory seems to have run out. | |
221 | * | |
222 | * Returns: | |
223 | * The number of pages of backing storage actually released. | |
224 | */ | |
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
	return i915_gem_shrink(dev_priv, -1UL,
			       I915_SHRINK_BOUND |
			       I915_SHRINK_UNBOUND |
			       I915_SHRINK_ACTIVE);
}
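
/*
 * Example: the oom notifier below is one such last-ditch caller. It takes
 * a runtime pm reference first, since unbinding bound objects needs the
 * device awake:
 *
 *	intel_runtime_pm_get(dev_priv);
 *	freed_pages = i915_gem_shrink_all(dev_priv);
 *	intel_runtime_pm_put(dev_priv);
 */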
232 | ||
233 | static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock) | |
234 | { | |
235 | if (!mutex_trylock(&dev->struct_mutex)) { | |
236 | if (!mutex_is_locked_by(&dev->struct_mutex, current)) | |
237 | return false; | |
238 | ||
239 | if (to_i915(dev)->mm.shrinker_no_lock_stealing) | |
240 | return false; | |
241 | ||
242 | *unlock = false; | |
243 | } else | |
244 | *unlock = true; | |
245 | ||
246 | return true; | |
247 | } | |
248 | ||
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	unsigned long count;
	bool unlock;

	if (!i915_gem_shrinker_lock(dev, &unlock))
		return 0;

	i915_gem_retire_requests(dev_priv);

	count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
		if (can_release_pages(obj))
			count += obj->base.size >> PAGE_SHIFT;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!obj->active && can_release_pages(obj))
			count += obj->base.size >> PAGE_SHIFT;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return count;
}

static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_device *dev = &dev_priv->drm;
	unsigned long freed;
	bool unlock;

	if (!i915_gem_shrinker_lock(dev, &unlock))
		return SHRINK_STOP;

	freed = i915_gem_shrink(dev_priv,
				sc->nr_to_scan,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_PURGEABLE);
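
	/*
	 * Purgeable objects are the cheapest to drop, so try those first.
	 * Only if that fails to satisfy the request do we pay the cost of
	 * swapping out the remaining shrinkable objects.
	 */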
	if (freed < sc->nr_to_scan)
		freed += i915_gem_shrink(dev_priv,
					 sc->nr_to_scan - freed,
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);
	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return freed;
}

struct shrinker_lock_uninterruptible {
	bool was_interruptible;
	bool unlock;
};

static bool
i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
				       struct shrinker_lock_uninterruptible *slu,
				       int timeout_ms)
{
	unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1;

	while (!i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock)) {
		schedule_timeout_killable(1);
		if (fatal_signal_pending(current))
			return false;
		if (--timeout == 0) {
			pr_err("Unable to lock GPU to purge memory.\n");
			return false;
		}
	}

	slu->was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;
	return true;
}

static void
i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
					 struct shrinker_lock_uninterruptible *slu)
{
	dev_priv->mm.interruptible = slu->was_interruptible;
	if (slu->unlock)
		mutex_unlock(&dev_priv->drm.struct_mutex);
}
343 | ||
344 | static int | |
345 | i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) | |
346 | { | |
347 | struct drm_i915_private *dev_priv = | |
348 | container_of(nb, struct drm_i915_private, mm.oom_notifier); | |
349 | struct shrinker_lock_uninterruptible slu; | |
350 | struct drm_i915_gem_object *obj; | |
351 | unsigned long unevictable, bound, unbound, freed_pages; | |
352 | ||
353 | if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000)) | |
354 | return NOTIFY_DONE; | |
355 | ||
356 | intel_runtime_pm_get(dev_priv); | |
357 | freed_pages = i915_gem_shrink_all(dev_priv); | |
358 | intel_runtime_pm_put(dev_priv); | |
359 | ||
360 | /* Because we may be allocating inside our own driver, we cannot | |
361 | * assert that there are no objects with pinned pages that are not | |
362 | * being pointed to by hardware. | |
363 | */ | |
364 | unbound = bound = unevictable = 0; | |
365 | list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { | |
366 | if (!can_release_pages(obj)) | |
367 | unevictable += obj->base.size >> PAGE_SHIFT; | |
368 | else | |
369 | unbound += obj->base.size >> PAGE_SHIFT; | |
370 | } | |
371 | list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { | |
372 | if (!can_release_pages(obj)) | |
373 | unevictable += obj->base.size >> PAGE_SHIFT; | |
374 | else | |
375 | bound += obj->base.size >> PAGE_SHIFT; | |
376 | } | |
377 | ||
378 | i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu); | |
379 | ||
380 | if (freed_pages || unbound || bound) | |
381 | pr_info("Purging GPU memory, %lu pages freed, " | |
382 | "%lu pages still pinned.\n", | |
383 | freed_pages, unevictable); | |
384 | if (unbound || bound) | |
385 | pr_err("%lu and %lu pages still available in the " | |
386 | "bound and unbound GPU page lists.\n", | |
387 | bound, unbound); | |
388 | ||
389 | *(unsigned long *)ptr += freed_pages; | |
390 | return NOTIFY_DONE; | |
391 | } | |
392 | ||
393 | static int | |
394 | i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr) | |
395 | { | |
396 | struct drm_i915_private *dev_priv = | |
397 | container_of(nb, struct drm_i915_private, mm.vmap_notifier); | |
398 | struct shrinker_lock_uninterruptible slu; | |
399 | struct i915_vma *vma, *next; | |
400 | unsigned long freed_pages = 0; | |
401 | int ret; | |
402 | ||
403 | if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000)) | |
404 | return NOTIFY_DONE; | |
405 | ||
406 | /* Force everything onto the inactive lists */ | |
407 | ret = i915_gem_wait_for_idle(dev_priv); | |
408 | if (ret) | |
409 | goto out; | |
410 | ||
411 | intel_runtime_pm_get(dev_priv); | |
412 | freed_pages += i915_gem_shrink(dev_priv, -1UL, | |
413 | I915_SHRINK_BOUND | | |
414 | I915_SHRINK_UNBOUND | | |
415 | I915_SHRINK_ACTIVE | | |
416 | I915_SHRINK_VMAPS); | |
417 | intel_runtime_pm_put(dev_priv); | |
418 | ||
419 | /* We also want to clear any cached iomaps as they wrap vmap */ | |
420 | list_for_each_entry_safe(vma, next, | |
421 | &dev_priv->ggtt.base.inactive_list, vm_link) { | |
422 | unsigned long count = vma->node.size >> PAGE_SHIFT; | |
423 | if (vma->iomap && i915_vma_unbind(vma) == 0) | |
424 | freed_pages += count; | |
425 | } | |
426 | ||
427 | out: | |
428 | i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu); | |
429 | ||
430 | *(unsigned long *)ptr += freed_pages; | |
431 | return NOTIFY_DONE; | |
432 | } | |
433 | ||
434 | /** | |
435 | * i915_gem_shrinker_init - Initialize i915 shrinker | |
436 | * @dev_priv: i915 device | |
437 | * | |
438 | * This function registers and sets up the i915 shrinker and OOM handler. | |
439 | */ | |
440 | void i915_gem_shrinker_init(struct drm_i915_private *dev_priv) | |
441 | { | |
442 | dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan; | |
443 | dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count; | |
444 | dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS; | |
445 | WARN_ON(register_shrinker(&dev_priv->mm.shrinker)); | |
446 | ||
447 | dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom; | |
448 | WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier)); | |
449 | ||
450 | dev_priv->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap; | |
451 | WARN_ON(register_vmap_purge_notifier(&dev_priv->mm.vmap_notifier)); | |
452 | } | |
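
/*
 * Sketch of the expected pairing (the actual call sites live elsewhere in
 * the driver, around device load and unload):
 *
 *	i915_gem_shrinker_init(dev_priv);
 *	...
 *	i915_gem_shrinker_cleanup(dev_priv);
 */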
453 | ||
454 | /** | |
455 | * i915_gem_shrinker_cleanup - Clean up i915 shrinker | |
456 | * @dev_priv: i915 device | |
457 | * | |
458 | * This function unregisters the i915 shrinker and OOM handler. | |
459 | */ | |
460 | void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv) | |
461 | { | |
462 | WARN_ON(unregister_vmap_purge_notifier(&dev_priv->mm.vmap_notifier)); | |
463 | WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier)); | |
464 | unregister_shrinker(&dev_priv->mm.shrinker); | |
465 | } |