/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"

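/* Attempt to take dev->struct_mutex for the shrinker. On success, *unlock
 * reports whether we took the lock here (and so must drop it later) or
 * whether this task already held it and the caller should leave it locked.
 * Returns false only if another task holds the mutex.
 */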
static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
	switch (mutex_trylock_recursive(&dev->struct_mutex)) {
	case MUTEX_TRYLOCK_FAILED:
		return false;

	case MUTEX_TRYLOCK_SUCCESS:
		*unlock = true;
		return true;

	case MUTEX_TRYLOCK_RECURSIVE:
		*unlock = false;
		return true;
	}

	BUG();
}

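/* Drop dev->struct_mutex if i915_gem_shrinker_lock() took it, and expedite
 * the RCU grace period so that request slabs freed under the lock are
 * returned to the system promptly.
 */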
static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock)
{
	if (!unlock)
		return;

	mutex_unlock(&dev->struct_mutex);

	/* expedite the RCU grace period to free some request slabs */
	synchronize_rcu_expedited();
}

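/* Check whether any of the object's vmas are pinned and so cannot be
 * unbound to release their backing pages.
 */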
static bool any_vma_pinned(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link)
		if (i915_vma_is_pinned(vma))
			return true;

	return false;
}

static bool swap_available(void)
{
	return get_nr_swap_pages() > 0;
}

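/* Decide whether shrinking this object can actually return pages to the
 * system: it must have pages, be marked shrinkable, not be pinned beyond
 * its GPU bindings, and either be purgeable or swappable.
 */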
static bool can_release_pages(struct drm_i915_gem_object *obj)
{
	if (!obj->mm.pages)
		return false;

	/* Consider only shrinkable objects. */
	if (!i915_gem_object_is_shrinkable(obj))
		return false;

	/* Only report true if by unbinding the object and putting its pages
	 * we can actually make forward progress towards freeing physical
	 * pages.
	 *
	 * If the pages are pinned for any other reason than being bound
	 * to the GPU, simply unbinding from the GPU is not going to succeed
	 * in releasing our pin count on the pages themselves.
	 */
	if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
		return false;

	if (any_vma_pinned(obj))
		return false;

	/* We can only return physical pages to the system if we can either
	 * discard the contents (because the user has marked them as being
	 * purgeable) or if we can move their contents out to swap.
	 */
	return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}

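/* Unbind the object and, if that drops the last pin on its pages, release
 * the pages as well. Returns true if the pages appear to be gone; the check
 * is made without holding obj->mm.lock, so callers must recheck under that
 * lock before acting on the result.
 */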
static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_unbind(obj) == 0)
		__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
	return !READ_ONCE(obj->mm.pages);
}

/**
 * i915_gem_shrink - Shrink buffer object caches
 * @dev_priv: i915 device
 * @target: amount of memory to make available, in pages
 * @flags: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @flags. This is e.g. useful
 * when purgeable objects should be removed from caches preferentially.
 *
 * Note that it's not guaranteed that released amount is actually available as
 * free system memory - the pages might still be in use due to other reasons
 * (like cpu mmaps) or the mm core has reused them before we could grab them.
 * Therefore code that needs to explicitly shrink buffer object caches (e.g. to
 * avoid deadlocks in memory reclaim) must fall back to i915_gem_shrink_all().
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker code
 * having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long
i915_gem_shrink(struct drm_i915_private *dev_priv,
		unsigned long target, unsigned flags)
{
	const struct {
		struct list_head *list;
		unsigned int bit;
	} phases[] = {
		{ &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
		{ &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
		{ NULL, 0 },
	}, *phase;
	unsigned long count = 0;
	bool unlock;

	if (!i915_gem_shrinker_lock(&dev_priv->drm, &unlock))
		return 0;

	trace_i915_gem_shrink(dev_priv, target, flags);
	i915_gem_retire_requests(dev_priv);

	/*
	 * Unbinding of objects will require HW access; Let us not wake the
	 * device just to recover a little memory. If absolutely necessary,
	 * we will force the wake during oom-notifier.
	 */
	if ((flags & I915_SHRINK_BOUND) &&
	    !intel_runtime_pm_get_if_in_use(dev_priv))
		flags &= ~I915_SHRINK_BOUND;

	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at the time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: The final object
	 * unreferencing and the bound_list are both protected by the
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a reference count of 0.
	 */
	for (phase = phases; phase->list; phase++) {
		struct list_head still_in_list;
		struct drm_i915_gem_object *obj;

		if ((flags & phase->bit) == 0)
			continue;

		INIT_LIST_HEAD(&still_in_list);
		while (count < target &&
		       (obj = list_first_entry_or_null(phase->list,
						       typeof(*obj),
						       global_link))) {
			list_move_tail(&obj->global_link, &still_in_list);
			if (!obj->mm.pages) {
				list_del_init(&obj->global_link);
				continue;
			}

			if (flags & I915_SHRINK_PURGEABLE &&
			    obj->mm.madv != I915_MADV_DONTNEED)
				continue;

			if (flags & I915_SHRINK_VMAPS &&
			    !is_vmalloc_addr(obj->mm.mapping))
				continue;

			if (!(flags & I915_SHRINK_ACTIVE) &&
			    (i915_gem_object_is_active(obj) ||
			     obj->framebuffer_references))
				continue;

			if (!can_release_pages(obj))
				continue;

			if (unsafe_drop_pages(obj)) {
				/* May arrive from get_pages on another bo */
				mutex_lock_nested(&obj->mm.lock,
						  I915_MM_SHRINKER);
				if (!obj->mm.pages) {
					__i915_gem_object_invalidate(obj);
					list_del_init(&obj->global_link);
					count += obj->base.size >> PAGE_SHIFT;
				}
				mutex_unlock(&obj->mm.lock);
			}
		}
		list_splice_tail(&still_in_list, phase->list);
	}

	if (flags & I915_SHRINK_BOUND)
		intel_runtime_pm_put(dev_priv);

	i915_gem_retire_requests(dev_priv);

	i915_gem_shrinker_unlock(&dev_priv->drm, unlock);

	return count;
}

/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @dev_priv: i915 device
 *
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
 * caches completely. It also first waits for and retires all outstanding
 * requests to also be able to release backing storage for active objects.
 *
 * This should only be used in code that intentionally quiesces the GPU or as a
 * last-ditch effort when memory seems to have run out.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
	unsigned long freed;

	freed = i915_gem_shrink(dev_priv, -1UL,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_ACTIVE);
	synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */

	return freed;
}

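/* shrinker->count_objects callback: report how many pages we could plausibly
 * release, i.e. the size of every unbound object whose pages can be released
 * plus every bound but idle object.
 */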
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	unsigned long count;
	bool unlock;

	if (!i915_gem_shrinker_lock(dev, &unlock))
		return 0;

	i915_gem_retire_requests(dev_priv);

	count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link)
		if (can_release_pages(obj))
			count += obj->base.size >> PAGE_SHIFT;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
			count += obj->base.size >> PAGE_SHIFT;
	}

	i915_gem_shrinker_unlock(dev, unlock);

	return count;
}

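/* shrinker->scan_objects callback: try to free sc->nr_to_scan pages,
 * preferring purgeable objects first and making a second pass over all
 * bound and unbound objects if that was not enough.
 */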
static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_device *dev = &dev_priv->drm;
	unsigned long freed;
	bool unlock;

	if (!i915_gem_shrinker_lock(dev, &unlock))
		return SHRINK_STOP;

	freed = i915_gem_shrink(dev_priv,
				sc->nr_to_scan,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_PURGEABLE);
	if (freed < sc->nr_to_scan)
		freed += i915_gem_shrink(dev_priv,
					 sc->nr_to_scan - freed,
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);

	i915_gem_shrinker_unlock(dev, unlock);

	return freed;
}

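/* The oom and vmap notifiers run outside the normal shrinker paths, so they
 * must take struct_mutex themselves (retrying until a timeout expires) and
 * switch the driver to non-interruptible waits while purging memory. The
 * state needed to undo that is carried in struct shrinker_lock_uninterruptible.
 */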
struct shrinker_lock_uninterruptible {
	bool was_interruptible;
	bool unlock;
};

static bool
i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
				       struct shrinker_lock_uninterruptible *slu,
				       int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);

	do {
		if (i915_gem_wait_for_idle(dev_priv, 0) == 0 &&
		    i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock))
			break;

		schedule_timeout_killable(1);
		if (fatal_signal_pending(current))
			return false;

		if (time_after(jiffies, timeout)) {
			pr_err("Unable to lock GPU to purge memory.\n");
			return false;
		}
	} while (1);

	slu->was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;
	return true;
}

static void
i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
					 struct shrinker_lock_uninterruptible *slu)
{
	dev_priv->mm.interruptible = slu->was_interruptible;
	i915_gem_shrinker_unlock(&dev_priv->drm, slu->unlock);
}

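/* OOM notifier: called when the system is about to kill a process for lack
 * of memory. Purges everything it can via i915_gem_shrink_all(), reports the
 * number of freed pages back through *ptr and logs what remains pinned.
 */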
static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct shrinker_lock_uninterruptible slu;
	struct drm_i915_gem_object *obj;
	unsigned long unevictable, bound, unbound, freed_pages;

	if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
		return NOTIFY_DONE;

	intel_runtime_pm_get(dev_priv);
	freed_pages = i915_gem_shrink_all(dev_priv);
	intel_runtime_pm_put(dev_priv);

	/* Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not
	 * being pointed to by hardware.
	 */
	unbound = bound = unevictable = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
		if (!obj->mm.pages)
			continue;

		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			unbound += obj->base.size >> PAGE_SHIFT;
	}
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		if (!obj->mm.pages)
			continue;

		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			bound += obj->base.size >> PAGE_SHIFT;
	}

	i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);

	if (freed_pages || unbound || bound)
		pr_info("Purging GPU memory, %lu pages freed, "
			"%lu pages still pinned.\n",
			freed_pages, unevictable);
	if (unbound || bound)
		pr_err("%lu and %lu pages still available in the "
		       "bound and unbound GPU page lists.\n",
		       bound, unbound);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

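/* vmap purge notifier: called when the kernel runs low on vmap address
 * space. Idles the GPU, shrinks everything (including vmapped objects via
 * I915_SHRINK_VMAPS) and unbinds inactive GGTT vmas that hold cached iomaps.
 */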
static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
	struct shrinker_lock_uninterruptible slu;
	struct i915_vma *vma, *next;
	unsigned long freed_pages = 0;
	int ret;

	if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
		return NOTIFY_DONE;

	/* Force everything onto the inactive lists */
	ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
	if (ret)
		goto out;

	intel_runtime_pm_get(dev_priv);
	freed_pages += i915_gem_shrink(dev_priv, -1UL,
				       I915_SHRINK_BOUND |
				       I915_SHRINK_UNBOUND |
				       I915_SHRINK_ACTIVE |
				       I915_SHRINK_VMAPS);
	intel_runtime_pm_put(dev_priv);

	/* We also want to clear any cached iomaps as they wrap vmap */
	list_for_each_entry_safe(vma, next,
				 &dev_priv->ggtt.base.inactive_list, vm_link) {
		unsigned long count = vma->node.size >> PAGE_SHIFT;
		if (vma->iomap && i915_vma_unbind(vma) == 0)
			freed_pages += count;
	}

out:
	i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

/**
 * i915_gem_shrinker_init - Initialize i915 shrinker
 * @dev_priv: i915 device
 *
 * This function registers and sets up the i915 shrinker and OOM handler.
 */
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
{
	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&dev_priv->mm.shrinker));

	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
	WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier));

	dev_priv->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
}

/**
 * i915_gem_shrinker_cleanup - Clean up i915 shrinker
 * @dev_priv: i915 device
 *
 * This function unregisters the i915 shrinker and OOM handler.
 */
void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv)
{
	WARN_ON(unregister_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);
}