/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
	switch (mutex_trylock_recursive(&dev->struct_mutex)) {
	case MUTEX_TRYLOCK_FAILED:
		return false;

	case MUTEX_TRYLOCK_SUCCESS:
		*unlock = true;
		return true;

	case MUTEX_TRYLOCK_RECURSIVE:
		*unlock = false;
		return true;
	}

	BUG();
}
static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock)
{
	if (!unlock)
		return;

	mutex_unlock(&dev->struct_mutex);
}
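/*
 * A minimal usage sketch (this mirrors i915_gem_shrink() below): @unlock
 * records whether we actually acquired struct_mutex, or merely observed it
 * already held by this thread (the recursive case), so that only a genuine
 * acquisition is dropped again:
 *
 *	bool unlock;
 *
 *	if (!i915_gem_shrinker_lock(&dev_priv->drm, &unlock))
 *		return 0;
 *	... reclaim while struct_mutex is held ...
 *	i915_gem_shrinker_unlock(&dev_priv->drm, unlock);
 */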
static bool any_vma_pinned(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link)
		if (i915_vma_is_pinned(vma))
			return true;

	return false;
}
static bool swap_available(void)
{
	return get_nr_swap_pages() > 0;
}
static bool can_release_pages(struct drm_i915_gem_object *obj)
{
	if (!obj->mm.pages)
		return false;

	/* Consider only shrinkable objects. */
	if (!i915_gem_object_is_shrinkable(obj))
		return false;

	/* Only report true if by unbinding the object and putting its pages
	 * we can actually make forward progress towards freeing physical
	 * pages.
	 *
	 * If the pages are pinned for any other reason than being bound
	 * to the GPU, simply unbinding from the GPU is not going to succeed
	 * in releasing our pin count on the pages themselves.
	 */
	if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
		return false;

	if (any_vma_pinned(obj))
		return false;

	/* We can only return physical pages to the system if we can either
	 * discard the contents (because the user has marked them as being
	 * purgeable) or if we can move their contents out to swap.
	 */
	return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}
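/*
 * Worked example with hypothetical numbers: an object with
 * mm.pages_pin_count == 2 but bind_count == 1 holds one pin that does not
 * come from a GPU binding (e.g. a kernel mapping still in use), so
 * unbinding alone can never release its pages; reporting it as releasable
 * would only make the shrinker busy-work on it.
 */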
static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_unbind(obj) == 0)
		__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
	return !READ_ONCE(obj->mm.pages);
}
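/*
 * The "unsafe" in the name is deliberate: the object may be repopulated
 * concurrently, so callers must re-check obj->mm.pages under obj->mm.lock
 * before accounting the pages as freed, as i915_gem_shrink() does:
 *
 *	if (unsafe_drop_pages(obj)) {
 *		mutex_lock_nested(&obj->mm.lock, I915_MM_SHRINKER);
 *		if (!obj->mm.pages) {
 *			... count the object as freed ...
 *		}
 *		mutex_unlock(&obj->mm.lock);
 *	}
 */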
/**
 * i915_gem_shrink - Shrink buffer object caches
 * @dev_priv: i915 device
 * @target: amount of memory to make available, in pages
 * @flags: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @flags. This is e.g. useful
 * when purgeable objects should be removed from caches preferentially.
 *
 * Note that it's not guaranteed that the released amount is actually available
 * as free system memory - the pages might still be in use due to other reasons
 * (like cpu mmaps) or the mm core might have reused them before we could grab
 * them. Therefore code that needs to explicitly shrink buffer object caches
 * (e.g. to avoid deadlocks in memory reclaim) must fall back to
 * i915_gem_shrink_all().
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker
 * code having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long
i915_gem_shrink(struct drm_i915_private *dev_priv,
		unsigned long target, unsigned flags)
{
	const struct {
		struct list_head *list;
		unsigned int bit;
	} phases[] = {
		{ &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
		{ &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
		{ NULL, 0 },
	}, *phase;
	unsigned long count = 0;
	bool unlock;

	if (!i915_gem_shrinker_lock(&dev_priv->drm, &unlock))
		return 0;

	trace_i915_gem_shrink(dev_priv, target, flags);
	i915_gem_retire_requests(dev_priv);

	/*
	 * Unbinding of objects will require HW access; let us not wake the
	 * device just to recover a little memory. If absolutely necessary,
	 * we will force the wake during oom-notifier.
	 */
	if ((flags & I915_SHRINK_BOUND) &&
	    !intel_runtime_pm_get_if_in_use(dev_priv))
		flags &= ~I915_SHRINK_BOUND;

	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at a time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: The final object
	 * unreferencing and the bound_list are both protected by the
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a reference count of 0.
	 */
	for (phase = phases; phase->list; phase++) {
		struct list_head still_in_list;
		struct drm_i915_gem_object *obj;

		if ((flags & phase->bit) == 0)
			continue;

		INIT_LIST_HEAD(&still_in_list);
		while (count < target &&
		       (obj = list_first_entry_or_null(phase->list,
						       typeof(*obj),
						       global_link))) {
			list_move_tail(&obj->global_link, &still_in_list);
			if (!obj->mm.pages) {
				list_del_init(&obj->global_link);
				continue;
			}

			if (flags & I915_SHRINK_PURGEABLE &&
			    obj->mm.madv != I915_MADV_DONTNEED)
				continue;

			if (flags & I915_SHRINK_VMAPS &&
			    !is_vmalloc_addr(obj->mm.mapping))
				continue;

			if (!(flags & I915_SHRINK_ACTIVE) &&
			    (i915_gem_object_is_active(obj) ||
			     i915_gem_object_is_framebuffer(obj)))
				continue;

			if (!can_release_pages(obj))
				continue;

			if (unsafe_drop_pages(obj)) {
				/* May arrive from get_pages on another bo */
				mutex_lock_nested(&obj->mm.lock,
						  I915_MM_SHRINKER);
				if (!obj->mm.pages) {
					__i915_gem_object_invalidate(obj);
					list_del_init(&obj->global_link);
					count += obj->base.size >> PAGE_SHIFT;
				}
				mutex_unlock(&obj->mm.lock);
			}
		}
		list_splice_tail(&still_in_list, phase->list);
	}

	if (flags & I915_SHRINK_BOUND)
		intel_runtime_pm_put(dev_priv);

	i915_gem_retire_requests(dev_priv);

	i915_gem_shrinker_unlock(&dev_priv->drm, unlock);

	return count;
}
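/*
 * Usage sketch (hypothetical caller): drop cheap, already-purgeable objects
 * first, and only reach for everything else if that did not satisfy the
 * target:
 *
 *	freed = i915_gem_shrink(dev_priv, target,
 *				I915_SHRINK_BOUND |
 *				I915_SHRINK_UNBOUND |
 *				I915_SHRINK_PURGEABLE);
 *	if (freed < target)
 *		freed += i915_gem_shrink(dev_priv, target - freed,
 *					 I915_SHRINK_BOUND |
 *					 I915_SHRINK_UNBOUND);
 *
 * This is the same two-pass policy i915_gem_shrinker_scan() applies below
 * on behalf of the core VM.
 */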
/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @dev_priv: i915 device
 *
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
 * caches completely. It also first waits for and retires all outstanding
 * requests to also be able to release backing storage for active objects.
 *
 * This should only be used in code to intentionally quiesce the GPU or as a
 * last-ditch effort when memory seems to have run out.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
	unsigned long freed;

	intel_runtime_pm_get(dev_priv);
	freed = i915_gem_shrink(dev_priv, -1UL,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_ACTIVE);
	intel_runtime_pm_put(dev_priv);

	return freed;
}
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	unsigned long count;
	bool unlock;

	if (!i915_gem_shrinker_lock(dev, &unlock))
		return 0;

	i915_gem_retire_requests(dev_priv);

	count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link)
		if (can_release_pages(obj))
			count += obj->base.size >> PAGE_SHIFT;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
			count += obj->base.size >> PAGE_SHIFT;
	}

	i915_gem_shrinker_unlock(dev, unlock);

	return count;
}
static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_device *dev = &dev_priv->drm;
	unsigned long freed;
	bool unlock;

	if (!i915_gem_shrinker_lock(dev, &unlock))
		return SHRINK_STOP;

	freed = i915_gem_shrink(dev_priv,
				sc->nr_to_scan,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_PURGEABLE);
	if (freed < sc->nr_to_scan)
		freed += i915_gem_shrink(dev_priv,
					 sc->nr_to_scan - freed,
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);

	i915_gem_shrinker_unlock(dev, unlock);

	return freed;
}
struct shrinker_lock_uninterruptible {
	bool was_interruptible;
	bool unlock;
};
static bool
i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
				       struct shrinker_lock_uninterruptible *slu,
				       int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);

	do {
		if (i915_gem_wait_for_idle(dev_priv, 0) == 0 &&
		    i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock))
			break;

		schedule_timeout_killable(1);
		if (fatal_signal_pending(current))
			return false;

		if (time_after(jiffies, timeout)) {
			pr_err("Unable to lock GPU to purge memory.\n");
			return false;
		}
	} while (1);

	slu->was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;
	return true;
}
static void
i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
					 struct shrinker_lock_uninterruptible *slu)
{
	dev_priv->mm.interruptible = slu->was_interruptible;
	i915_gem_shrinker_unlock(&dev_priv->drm, slu->unlock);
}
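/*
 * The pair is used by the notifiers below roughly as follows; the timeout
 * (5000ms there) bounds how long an OOM or vmap purge may stall waiting to
 * idle the GPU and take the lock:
 *
 *	struct shrinker_lock_uninterruptible slu;
 *
 *	if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
 *		return NOTIFY_DONE;
 *	... reclaim with dev_priv->mm.interruptible forced off ...
 *	i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
 */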
static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct shrinker_lock_uninterruptible slu;
	struct drm_i915_gem_object *obj;
	unsigned long unevictable, bound, unbound, freed_pages;

	if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
		return NOTIFY_DONE;

	freed_pages = i915_gem_shrink_all(dev_priv);

	/* Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not
	 * being pointed to by hardware.
	 */
	unbound = bound = unevictable = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
		if (!obj->mm.pages)
			continue;

		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			unbound += obj->base.size >> PAGE_SHIFT;
	}
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		if (!obj->mm.pages)
			continue;

		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			bound += obj->base.size >> PAGE_SHIFT;
	}

	i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);

	if (freed_pages || unbound || bound)
		pr_info("Purging GPU memory, %lu pages freed, %lu pages still pinned.\n",
			freed_pages, unevictable);
	if (unbound || bound)
		pr_err("%lu and %lu pages still available in the bound and unbound GPU page lists.\n",
		       bound, unbound);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}
static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
	struct shrinker_lock_uninterruptible slu;
	struct i915_vma *vma, *next;
	unsigned long freed_pages = 0;
	int ret;

	if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
		return NOTIFY_DONE;

	/* Force everything onto the inactive lists */
	ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
	if (ret)
		goto out;

	intel_runtime_pm_get(dev_priv);
	freed_pages += i915_gem_shrink(dev_priv, -1UL,
				       I915_SHRINK_BOUND |
				       I915_SHRINK_UNBOUND |
				       I915_SHRINK_ACTIVE |
				       I915_SHRINK_VMAPS);
	intel_runtime_pm_put(dev_priv);

	/* We also want to clear any cached iomaps as they wrap vmap */
	list_for_each_entry_safe(vma, next,
				 &dev_priv->ggtt.base.inactive_list, vm_link) {
		unsigned long count = vma->node.size >> PAGE_SHIFT;

		if (vma->iomap && i915_vma_unbind(vma) == 0)
			freed_pages += count;
	}

out:
	i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}
/**
 * i915_gem_shrinker_init - Initialize i915 shrinker
 * @dev_priv: i915 device
 *
 * This function registers and sets up the i915 shrinker and OOM handler.
 */
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
{
	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&dev_priv->mm.shrinker));

	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
	WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier));

	dev_priv->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
}
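/*
 * Expected pairing, as a sketch only - the real callers live in the driver
 * load/unload paths outside this file:
 *
 *	i915_gem_shrinker_init(dev_priv);	// on driver load
 *	...
 *	i915_gem_shrinker_cleanup(dev_priv);	// on driver unload
 */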
/**
 * i915_gem_shrinker_cleanup - Clean up i915 shrinker
 * @dev_priv: i915 device
 *
 * This function unregisters the i915 shrinker and OOM handler.
 */
void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv)
{
	WARN_ON(unregister_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);
}