drm/i915: Move dev_priv->mm.[un]bound_list to its own lock
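In short: the bound/unbound object lists move from struct_mutex to a dedicated spinlock, dev_priv->mm.obj_lock, and the list linkage changes from obj->global_link to obj->mm.link. A spinlock cannot be held across anything that may sleep, so the shrink loop below only holds obj_lock while it manipulates the lists: each visited object is parked on a local still_in_list, the lock is dropped around unsafe_drop_pages() and the obj->mm.lock mutex, and the parked objects are spliced back when the walk ends. What follows is a minimal user-space sketch of that walk pattern only, not the kernel code: the pthread spinlock, struct obj, drop_pages() and npages are stand-ins made up for the example, while obj_lock, unbound_list and still_in_list mirror the names used in the diff.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
        struct obj *next;
        size_t npages;
};

static pthread_spinlock_t obj_lock;      /* plays the role of mm.obj_lock     */
static struct obj *unbound_list;         /* plays the role of mm.unbound_list */

/* Work that may sleep; must never run under the spinlock. */
static size_t drop_pages(struct obj *o)
{
        size_t freed = o->npages;

        o->npages = 0;
        return freed;
}

static size_t shrink(size_t target)
{
        struct obj *still_in_list = NULL, *o;
        size_t count = 0;

        pthread_spin_lock(&obj_lock);
        while (count < target && (o = unbound_list) != NULL) {
                /* Park the entry on the local list before dropping the lock. */
                unbound_list = o->next;
                o->next = still_in_list;
                still_in_list = o;

                pthread_spin_unlock(&obj_lock);
                count += drop_pages(o);  /* sleeping work, lock dropped */
                pthread_spin_lock(&obj_lock);
        }
        /* Splice everything we visited back onto the main list. */
        while ((o = still_in_list) != NULL) {
                still_in_list = o->next;
                o->next = unbound_list;
                unbound_list = o;
        }
        pthread_spin_unlock(&obj_lock);

        return count;
}

int main(void)
{
        pthread_spin_init(&obj_lock, PTHREAD_PROCESS_PRIVATE);

        for (int i = 0; i < 4; i++) {
                struct obj *o = calloc(1, sizeof(*o));

                o->npages = 16;
                o->next = unbound_list;
                unbound_list = o;
        }

        printf("freed %zu pages\n", shrink(32));
        return 0;
}

Keeping the visited entries on a private list means nothing is re-scanned after the lock has been dropped, and concurrent list users always observe a consistent list whenever obj_lock is held.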
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 74002b2d1b6f82640c7e1cf54b8791f01a049327..065d026b5092d14f85ac27444dc3e5a2d3fd16cd 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -71,25 +71,6 @@ static void shrinker_unlock(struct drm_i915_private *dev_priv, bool unlock)
        mutex_unlock(&dev_priv->drm.struct_mutex);
 }
 
-static bool any_vma_pinned(struct drm_i915_gem_object *obj)
-{
-       struct i915_vma *vma;
-
-       list_for_each_entry(vma, &obj->vma_list, obj_link) {
-               /* Only GGTT vma may be permanently pinned, and are always
-                * at the start of the list. We can stop hunting as soon
-                * as we see a ppGTT vma.
-                */
-               if (!i915_vma_is_ggtt(vma))
-                       break;
-
-               if (i915_vma_is_pinned(vma))
-                       return true;
-       }
-
-       return false;
-}
-
 static bool swap_available(void)
 {
        return get_nr_swap_pages() > 0;
@@ -97,9 +78,6 @@ static bool swap_available(void)
 
 static bool can_release_pages(struct drm_i915_gem_object *obj)
 {
-       if (!obj->mm.pages)
-               return false;
-
        /* Consider only shrinkable objects. */
        if (!i915_gem_object_is_shrinkable(obj))
                return false;
@@ -115,7 +93,13 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
        if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
                return false;
 
-       if (any_vma_pinned(obj))
+       /* If any vma are "permanently" pinned, it will prevent us from
+        * reclaiming the obj->mm.pages. We only allow scanout objects to claim
+        * a permanent pin, along with a few others like the context objects.
+        * To simplify the scan, and to avoid walking the list of vma under the
+        * object, we just check the count of its permanent pins.
+        */
+       if (READ_ONCE(obj->pin_global))
                return false;
 
        /* We can only return physical pages to the system if we can either
@@ -129,7 +113,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
 {
        if (i915_gem_object_unbind(obj) == 0)
                __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
-       return !READ_ONCE(obj->mm.pages);
+       return !i915_gem_object_has_pages(obj);
 }
 
 /**
@@ -217,15 +201,20 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
                        continue;
 
                INIT_LIST_HEAD(&still_in_list);
+
+               /*
+                * We serialize our access to unreferenced objects through
+                * the use of the struct_mutex. While the objects are not
+                * yet freed (due to RCU then a workqueue) we still want
+                * to be able to shrink their pages, so they remain on
+                * the unbound/bound list until actually freed.
+                */
+               spin_lock(&dev_priv->mm.obj_lock);
                while (count < target &&
                       (obj = list_first_entry_or_null(phase->list,
                                                       typeof(*obj),
-                                                      global_link))) {
-                       list_move_tail(&obj->global_link, &still_in_list);
-                       if (!obj->mm.pages) {
-                               list_del_init(&obj->global_link);
-                               continue;
-                       }
+                                                      mm.link))) {
+                       list_move_tail(&obj->mm.link, &still_in_list);
 
                        if (flags & I915_SHRINK_PURGEABLE &&
                            obj->mm.madv != I915_MADV_DONTNEED)
@@ -243,20 +232,24 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
                        if (!can_release_pages(obj))
                                continue;
 
+                       spin_unlock(&dev_priv->mm.obj_lock);
+
                        if (unsafe_drop_pages(obj)) {
                                /* May arrive from get_pages on another bo */
                                mutex_lock_nested(&obj->mm.lock,
                                                  I915_MM_SHRINKER);
-                               if (!obj->mm.pages) {
+                               if (!i915_gem_object_has_pages(obj)) {
                                        __i915_gem_object_invalidate(obj);
-                                       list_del_init(&obj->global_link);
                                        count += obj->base.size >> PAGE_SHIFT;
                                }
                                mutex_unlock(&obj->mm.lock);
                                scanned += obj->base.size >> PAGE_SHIFT;
                        }
+
+                       spin_lock(&dev_priv->mm.obj_lock);
                }
                list_splice_tail(&still_in_list, phase->list);
+               spin_unlock(&dev_priv->mm.obj_lock);
        }
 
        if (flags & I915_SHRINK_BOUND)
@@ -305,25 +298,17 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
        struct drm_i915_private *dev_priv =
                container_of(shrinker, struct drm_i915_private, mm.shrinker);
        struct drm_i915_gem_object *obj;
-       unsigned long count;
-       bool unlock;
-
-       if (!shrinker_lock(dev_priv, &unlock))
-               return 0;
-
-       i915_gem_retire_requests(dev_priv);
+       unsigned long count = 0;
 
-       count = 0;
-       list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link)
+       spin_lock(&dev_priv->mm.obj_lock);
+       list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link)
                if (can_release_pages(obj))
                        count += obj->base.size >> PAGE_SHIFT;
 
-       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
+       list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link)
                if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
                        count += obj->base.size >> PAGE_SHIFT;
-       }
-
-       shrinker_unlock(dev_priv, unlock);
+       spin_unlock(&dev_priv->mm.obj_lock);
 
        return count;
 }
@@ -400,10 +385,6 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
                container_of(nb, struct drm_i915_private, mm.oom_notifier);
        struct drm_i915_gem_object *obj;
        unsigned long unevictable, bound, unbound, freed_pages;
-       bool unlock;
-
-       if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000))
-               return NOTIFY_DONE;
 
        freed_pages = i915_gem_shrink_all(dev_priv);
 
@@ -412,26 +393,20 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
         * being pointed to by hardware.
         */
        unbound = bound = unevictable = 0;
-       list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
-               if (!obj->mm.pages)
-                       continue;
-
+       spin_lock(&dev_priv->mm.obj_lock);
+       list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
                if (!can_release_pages(obj))
                        unevictable += obj->base.size >> PAGE_SHIFT;
                else
                        unbound += obj->base.size >> PAGE_SHIFT;
        }
-       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
-               if (!obj->mm.pages)
-                       continue;
-
+       list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
                if (!can_release_pages(obj))
                        unevictable += obj->base.size >> PAGE_SHIFT;
                else
                        bound += obj->base.size >> PAGE_SHIFT;
        }
-
-       shrinker_unlock(dev_priv, unlock);
+       spin_unlock(&dev_priv->mm.obj_lock);
 
        if (freed_pages || unbound || bound)
                pr_info("Purging GPU memory, %lu pages freed, "