/* drivers/gpu/drm/i915/i915_gem_shrinker.c */
/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"

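/*
 * Try to take struct_mutex on behalf of the shrinker. If this thread already
 * holds the lock (mutex_trylock_recursive() reports a recursive acquisition)
 * we proceed without needing to unlock later; otherwise we spin briefly on
 * mutex_trylock() until we either get the lock or the scheduler wants the CPU
 * back. *unlock tells the caller whether shrinker_unlock() must drop the
 * mutex afterwards.
 */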
static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
{
	switch (mutex_trylock_recursive(&dev_priv->drm.struct_mutex)) {
	case MUTEX_TRYLOCK_RECURSIVE:
		*unlock = false;
		return true;

	case MUTEX_TRYLOCK_FAILED:
		do {
			cpu_relax();
			if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
	case MUTEX_TRYLOCK_SUCCESS:
				*unlock = true;
				return true;
			}
		} while (!need_resched());

		return false;
	}

	BUG();
}

static void shrinker_unlock(struct drm_i915_private *dev_priv, bool unlock)
{
	if (!unlock)
		return;

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static bool any_vma_pinned(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		/* Only GGTT vma may be permanently pinned, and are always
		 * at the start of the list. We can stop hunting as soon
		 * as we see a ppGTT vma.
		 */
		if (!i915_vma_is_ggtt(vma))
			break;

		if (i915_vma_is_pinned(vma))
			return true;
	}

	return false;
}

static bool swap_available(void)
{
	return get_nr_swap_pages() > 0;
}

static bool can_release_pages(struct drm_i915_gem_object *obj)
{
	if (!obj->mm.pages)
		return false;

	/* Consider only shrinkable objects. */
	if (!i915_gem_object_is_shrinkable(obj))
		return false;

	/* Only report true if by unbinding the object and putting its pages
	 * we can actually make forward progress towards freeing physical
	 * pages.
	 *
	 * If the pages are pinned for any other reason than being bound
	 * to the GPU, simply unbinding from the GPU is not going to succeed
	 * in releasing our pin count on the pages themselves.
	 */
	if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
		return false;

	if (any_vma_pinned(obj))
		return false;

	/* We can only return physical pages to the system if we can either
	 * discard the contents (because the user has marked them as being
	 * purgeable) or if we can move their contents out to swap.
	 */
	return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}

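/*
 * Attempt to unbind the object and release its backing pages. Returns true
 * if the pages appear to be gone afterwards; the caller re-checks
 * obj->mm.pages under obj->mm.lock before actually counting the object as
 * reclaimed, since another path may have grabbed the pages again in the
 * meantime.
 */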
static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_unbind(obj) == 0)
		__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
	return !READ_ONCE(obj->mm.pages);
}

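/*
 * Illustrative only (not called from here): a caller that merely wants to
 * trim up to 128 pages of purgeable, unbound objects could invoke the
 * shrinker as
 *
 *	i915_gem_shrink(dev_priv, 128,
 *			I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE);
 *
 * while i915_gem_shrink_all() below shows the most aggressive combination
 * of flags.
 */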
/**
 * i915_gem_shrink - Shrink buffer object caches
 * @dev_priv: i915 device
 * @target: amount of memory to make available, in pages
 * @flags: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @flags. This is e.g. useful
 * when purgeable objects should be removed from caches preferentially.
 *
 * Note that it's not guaranteed that the released amount is actually available
 * as free system memory - the pages might still be in use due to other reasons
 * (like cpu mmaps) or the mm core might have reused them before we could grab
 * them. Therefore code that needs to explicitly shrink buffer object caches
 * (e.g. to avoid deadlocks in memory reclaim) must fall back to
 * i915_gem_shrink_all().
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker
 * code having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long
i915_gem_shrink(struct drm_i915_private *dev_priv,
		unsigned long target, unsigned flags)
{
	const struct {
		struct list_head *list;
		unsigned int bit;
	} phases[] = {
		{ &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
		{ &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
		{ NULL, 0 },
	}, *phase;
	unsigned long count = 0;
	bool unlock;

	if (!shrinker_lock(dev_priv, &unlock))
		return 0;

	trace_i915_gem_shrink(dev_priv, target, flags);
	i915_gem_retire_requests(dev_priv);

	/*
	 * Unbinding of objects will require HW access; Let us not wake the
	 * device just to recover a little memory. If absolutely necessary,
	 * we will force the wake during oom-notifier.
	 */
	if ((flags & I915_SHRINK_BOUND) &&
	    !intel_runtime_pm_get_if_in_use(dev_priv))
		flags &= ~I915_SHRINK_BOUND;

	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at a time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: The final object
	 * unreferencing and the bound_list are both protected by the
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a reference count of 0.
	 */
	for (phase = phases; phase->list; phase++) {
		struct list_head still_in_list;
		struct drm_i915_gem_object *obj;

		if ((flags & phase->bit) == 0)
			continue;

		INIT_LIST_HEAD(&still_in_list);
		while (count < target &&
		       (obj = list_first_entry_or_null(phase->list,
						       typeof(*obj),
						       global_link))) {
			list_move_tail(&obj->global_link, &still_in_list);
			if (!obj->mm.pages) {
				list_del_init(&obj->global_link);
				continue;
			}

			if (flags & I915_SHRINK_PURGEABLE &&
			    obj->mm.madv != I915_MADV_DONTNEED)
				continue;

			if (flags & I915_SHRINK_VMAPS &&
			    !is_vmalloc_addr(obj->mm.mapping))
				continue;

			if (!(flags & I915_SHRINK_ACTIVE) &&
			    (i915_gem_object_is_active(obj) ||
			     i915_gem_object_is_framebuffer(obj)))
				continue;

			if (!can_release_pages(obj))
				continue;

			if (unsafe_drop_pages(obj)) {
				/* May arrive from get_pages on another bo */
				mutex_lock_nested(&obj->mm.lock,
						  I915_MM_SHRINKER);
				if (!obj->mm.pages) {
					__i915_gem_object_invalidate(obj);
					list_del_init(&obj->global_link);
					count += obj->base.size >> PAGE_SHIFT;
				}
				mutex_unlock(&obj->mm.lock);
			}
		}
		list_splice_tail(&still_in_list, phase->list);
	}

	if (flags & I915_SHRINK_BOUND)
		intel_runtime_pm_put(dev_priv);

	i915_gem_retire_requests(dev_priv);

	shrinker_unlock(dev_priv, unlock);

	return count;
}

/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @dev_priv: i915 device
 *
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
 * caches completely. It also first waits for and retires all outstanding
 * requests to also be able to release backing storage for active objects.
 *
 * This should only be used in code to intentionally quiesce the GPU or as a
 * last-ditch effort when memory seems to have run out.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
	unsigned long freed;

	intel_runtime_pm_get(dev_priv);
	freed = i915_gem_shrink(dev_priv, -1UL,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_ACTIVE);
	intel_runtime_pm_put(dev_priv);

	return freed;
}

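/*
 * shrinker->count_objects callback: report how many pages we could plausibly
 * release, i.e. the total size of all unbound objects plus all bound but
 * idle objects whose pages are not otherwise pinned. The core MM uses this
 * estimate when deciding how much to ask i915_gem_shrinker_scan() for.
 */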
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_i915_gem_object *obj;
	unsigned long count;
	bool unlock;

	if (!shrinker_lock(dev_priv, &unlock))
		return 0;

	i915_gem_retire_requests(dev_priv);

	count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link)
		if (can_release_pages(obj))
			count += obj->base.size >> PAGE_SHIFT;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
			count += obj->base.size >> PAGE_SHIFT;
	}

	shrinker_unlock(dev_priv, unlock);

	return count;
}

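/*
 * shrinker->scan_objects callback: try to free sc->nr_to_scan pages,
 * escalating in three passes - purgeable objects first, then any unpinned
 * bound/unbound objects, and finally (only for kswapd, where we are willing
 * to pay the cost of waking the device) active objects as well.
 */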
static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	unsigned long freed;
	bool unlock;

	if (!shrinker_lock(dev_priv, &unlock))
		return SHRINK_STOP;

	freed = i915_gem_shrink(dev_priv,
				sc->nr_to_scan,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_PURGEABLE);
	if (freed < sc->nr_to_scan)
		freed += i915_gem_shrink(dev_priv,
					 sc->nr_to_scan - freed,
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);
	if (freed < sc->nr_to_scan && current_is_kswapd()) {
		intel_runtime_pm_get(dev_priv);
		freed += i915_gem_shrink(dev_priv,
					 sc->nr_to_scan - freed,
					 I915_SHRINK_ACTIVE |
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);
		intel_runtime_pm_put(dev_priv);
	}

	shrinker_unlock(dev_priv, unlock);

	return freed;
}

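/*
 * Wait (up to timeout_ms) for the GPU to go idle and for struct_mutex to
 * become available, retrying with a short killable sleep in between. Used by
 * the oom and vmap notifiers below, which try much harder to make progress
 * than the regular shrinker callbacks do.
 */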
static bool
shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv, bool *unlock,
			      int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);

	do {
		if (i915_gem_wait_for_idle(dev_priv, 0) == 0 &&
		    shrinker_lock(dev_priv, unlock))
			break;

		schedule_timeout_killable(1);
		if (fatal_signal_pending(current))
			return false;

		if (time_after(jiffies, timeout)) {
			pr_err("Unable to lock GPU to purge memory.\n");
			return false;
		}
	} while (1);

	return true;
}

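/*
 * oom_notifier callback: as a last resort before the OOM killer fires, purge
 * everything we can and report the number of pages freed back through @ptr.
 * Anything still pinned is logged so the OOM report shows where the GPU
 * memory went.
 */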
static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct drm_i915_gem_object *obj;
	unsigned long unevictable, bound, unbound, freed_pages;
	bool unlock;

	if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000))
		return NOTIFY_DONE;

	freed_pages = i915_gem_shrink_all(dev_priv);

	/* Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not
	 * being pointed to by hardware.
	 */
	unbound = bound = unevictable = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
		if (!obj->mm.pages)
			continue;

		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			unbound += obj->base.size >> PAGE_SHIFT;
	}
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		if (!obj->mm.pages)
			continue;

		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			bound += obj->base.size >> PAGE_SHIFT;
	}

	shrinker_unlock(dev_priv, unlock);

	if (freed_pages || unbound || bound)
		pr_info("Purging GPU memory, %lu pages freed, "
			"%lu pages still pinned.\n",
			freed_pages, unevictable);
	if (unbound || bound)
		pr_err("%lu and %lu pages still available in the "
		       "bound and unbound GPU page lists.\n",
		       bound, unbound);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

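/*
 * vmap_purge notifier callback: called when the kernel needs to reclaim vmap
 * address space. Drop everything we can (including active objects and their
 * vmaps) and also unbind any inactive GGTT vma with a cached iomap, since
 * those wrap vmap space as well.
 */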
static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
	struct i915_vma *vma, *next;
	unsigned long freed_pages = 0;
	bool unlock;
	int ret;

	if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000))
		return NOTIFY_DONE;

	/* Force everything onto the inactive lists */
	ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
	if (ret)
		goto out;

	intel_runtime_pm_get(dev_priv);
	freed_pages += i915_gem_shrink(dev_priv, -1UL,
				       I915_SHRINK_BOUND |
				       I915_SHRINK_UNBOUND |
				       I915_SHRINK_ACTIVE |
				       I915_SHRINK_VMAPS);
	intel_runtime_pm_put(dev_priv);

	/* We also want to clear any cached iomaps as they wrap vmap */
	list_for_each_entry_safe(vma, next,
				 &dev_priv->ggtt.base.inactive_list, vm_link) {
		unsigned long count = vma->node.size >> PAGE_SHIFT;
		if (vma->iomap && i915_vma_unbind(vma) == 0)
			freed_pages += count;
	}

out:
	shrinker_unlock(dev_priv, unlock);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

/**
 * i915_gem_shrinker_init - Initialize i915 shrinker
 * @dev_priv: i915 device
 *
 * This function registers and sets up the i915 shrinker and OOM handler.
 */
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
{
	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&dev_priv->mm.shrinker));

	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
	WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier));

	dev_priv->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
}

/**
 * i915_gem_shrinker_cleanup - Clean up i915 shrinker
 * @dev_priv: i915 device
 *
 * This function unregisters the i915 shrinker and OOM handler.
 */
void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv)
{
	WARN_ON(unregister_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);
}