drm/i915: Extract reserving space in the GTT to a helper
author		Chris Wilson <chris@chris-wilson.co.uk>
		Wed, 11 Jan 2017 11:23:11 +0000 (11:23 +0000)
committer	Chris Wilson <chris@chris-wilson.co.uk>
		Wed, 11 Jan 2017 12:28:13 +0000 (12:28 +0000)
Extract drm_mm_reserve_node + calling i915_gem_evict_for_node into its
own routine so that it can be shared rather than duplicated.

v2: Kerneldoc

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: igvt-g-dev@lists.01.org
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170111112312.31493-2-chris@chris-wilson.co.uk
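
For orientation, this is the caller-side change the new helper enables,
excerpted from the i915_vma.c hunk at the end of this diff (error
handling trimmed):

    /* Before: each caller open-coded reserve, then evict-and-retry. */
    vma->node.start = offset;
    vma->node.size = size;
    vma->node.color = obj->cache_level;
    ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
    if (ret) {
            ret = i915_gem_evict_for_vma(vma, flags);
            if (ret == 0)
                    ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
    }

    /* After: the same sequence lives in one shared helper. */
    ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
                               size, offset, obj->cache_level, flags);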
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_gtt.h
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_trace.h
drivers/gpu/drm/i915/i915_vgpu.c
drivers/gpu/drm/i915/i915_vma.c

index 2827dab61edfe04bb23fb4185017376455cd706a..e9b4ece689d018243023390e3503895ed0df308a 100644 (file)
@@ -3469,8 +3469,9 @@ int __must_check i915_gem_evict_something(struct i915_address_space *vm,
                                          unsigned cache_level,
                                          u64 start, u64 end,
                                          unsigned flags);
-int __must_check i915_gem_evict_for_vma(struct i915_vma *vma,
-                                       unsigned int flags);
+int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
+                                        struct drm_mm_node *node,
+                                        unsigned int flags);
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 
 /* belongs in i915_gem_gtt.h */
index 6a5415e31acf18b7570da5177944fcfb9acb9d58..50b4645bf627a9fdca37efab9bfde1e55f892286 100644 (file)
@@ -231,7 +231,8 @@ found:
 
 /**
- * i915_gem_evict_for_vma - Evict vmas to make room for binding a new one
- * @target: address space and range to evict for
+ * i915_gem_evict_for_node - Evict vmas to make room for binding a new one
+ * @vm: address space to evict from
+ * @target: range (and color) to evict for
  * @flags: additional flags to control the eviction algorithm
  *
  * This function will try to evict vmas that overlap the target node.
@@ -239,18 +240,20 @@ found:
  * To clarify: This is for freeing up virtual address space, not for freeing
  * memory in e.g. the shrinker.
  */
-int i915_gem_evict_for_vma(struct i915_vma *target, unsigned int flags)
+int i915_gem_evict_for_node(struct i915_address_space *vm,
+                           struct drm_mm_node *target,
+                           unsigned int flags)
 {
        LIST_HEAD(eviction_list);
        struct drm_mm_node *node;
-       u64 start = target->node.start;
-       u64 end = start + target->node.size;
+       u64 start = target->start;
+       u64 end = start + target->size;
        struct i915_vma *vma, *next;
        bool check_color;
        int ret = 0;
 
-       lockdep_assert_held(&target->vm->i915->drm.struct_mutex);
-       trace_i915_gem_evict_vma(target, flags);
+       lockdep_assert_held(&vm->i915->drm.struct_mutex);
+       trace_i915_gem_evict_node(vm, target, flags);
 
        /* Retire before we search the active list. Although we have
         * reasonable accuracy in our retirement lists, we may have
@@ -258,18 +261,18 @@ int i915_gem_evict_for_vma(struct i915_vma *target, unsigned int flags)
         * retiring.
         */
        if (!(flags & PIN_NONBLOCK))
-               i915_gem_retire_requests(target->vm->i915);
+               i915_gem_retire_requests(vm->i915);
 
-       check_color = target->vm->mm.color_adjust;
+       check_color = vm->mm.color_adjust;
        if (check_color) {
                /* Expand search to cover neighbouring guard pages (or lack!) */
-               if (start > target->vm->start)
+               if (start > vm->start)
                        start -= I915_GTT_PAGE_SIZE;
-               if (end < target->vm->start + target->vm->total)
+               if (end < vm->start + vm->total)
                        end += I915_GTT_PAGE_SIZE;
        }
 
-       drm_mm_for_each_node_in_range(node, &target->vm->mm, start, end) {
+       drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
                /* If we find any non-objects (!vma), we cannot evict them */
                if (node->color == I915_COLOR_UNEVICTABLE) {
                        ret = -ENOSPC;
@@ -285,12 +288,12 @@ int i915_gem_evict_for_vma(struct i915_vma *target, unsigned int flags)
                 * those as well to make room for our guard pages.
                 */
                if (check_color) {
-                       if (vma->node.start + vma->node.size == target->node.start) {
-                               if (vma->node.color == target->node.color)
+                       if (vma->node.start + vma->node.size == target->start) {
+                               if (vma->node.color == target->color)
                                        continue;
                        }
-                       if (vma->node.start == target->node.start + target->node.size) {
-                               if (vma->node.color == target->node.color)
+                       if (vma->node.start == target->start + target->size) {
+                               if (vma->node.color == target->color)
                                        continue;
                        }
                }
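
The colour check above is subtle: a node that merely abuts the target
range (caught only because the search was expanded by a guard page on
each side) is spared when its colour matches the target's, since no
guard page is needed between same-coloured neighbours. A self-contained
toy model of that decision (plain compilable C, not i915 code; all
names are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_node { unsigned long start, size, color; };

    static bool toy_needs_eviction(const struct toy_node *neighbour,
                                   const struct toy_node *target)
    {
            /* Left neighbour: ends exactly where the target begins. */
            if (neighbour->start + neighbour->size == target->start)
                    return neighbour->color != target->color;
            /* Right neighbour: begins exactly where the target ends. */
            if (neighbour->start == target->start + target->size)
                    return neighbour->color != target->color;
            /* Anything else in the expanded range overlaps the target. */
            return true;
    }

    int main(void)
    {
            struct toy_node target     = { 0x10000, 0x10000, 1 };
            struct toy_node left_same  = { 0x0f000, 0x01000, 1 };
            struct toy_node right_diff = { 0x20000, 0x01000, 2 };

            printf("same-colour left neighbour evicted?  %d\n",
                   toy_needs_eviction(&left_same, &target));  /* 0: spared */
            printf("diff-colour right neighbour evicted? %d\n",
                   toy_needs_eviction(&right_diff, &target)); /* 1: evicted */
            return 0;
    }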
index f0f17b2b72157332d7d0ce479c03238ac9330ccb..09a5eca494b35a822d71ba20bd74982a8a3ddd3e 100644 (file)
@@ -3554,6 +3554,58 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
        return ret;
 }
 
+/**
+ * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
+ * @vm - the &struct i915_address_space
+ * @node - the &struct drm_mm_node (typically i915_vma.node)
+ * @size - how much space to allocate inside the GTT,
+ *         must be #I915_GTT_PAGE_SIZE aligned
+ * @offset - where to insert inside the GTT,
+ *           must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
+ *           (@offset + @size) must fit within the address space
+ * @color - color to apply to node, if this node is not from a VMA,
+ *          color must be #I915_COLOR_UNEVICTABLE
+ * @flags - control search and eviction behaviour
+ *
+ * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
+ * the address space (using @size and @color). If the @node does not fit, it
+ * tries to evict any overlapping nodes from the GTT, including any
+ * neighbouring nodes if the colors do not match (to ensure guard pages between
+ * differing domains). See i915_gem_evict_for_node() for the gory details
+ * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
+ * evicting active overlapping objects, and any overlapping node that is pinned
+ * or marked as unevictable will also result in failure.
+ *
+ * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
+ * asked to wait for eviction and interrupted.
+ */
+int i915_gem_gtt_reserve(struct i915_address_space *vm,
+                        struct drm_mm_node *node,
+                        u64 size, u64 offset, unsigned long color,
+                        unsigned int flags)
+{
+       int err;
+
+       GEM_BUG_ON(!size);
+       GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+       GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
+       GEM_BUG_ON(range_overflows(offset, size, vm->total));
+
+       node->size = size;
+       node->start = offset;
+       node->color = color;
+
+       err = drm_mm_reserve_node(&vm->mm, node);
+       if (err != -ENOSPC)
+               return err;
+
+       err = i915_gem_evict_for_node(vm, node, flags);
+       if (err == 0)
+               err = drm_mm_reserve_node(&vm->mm, node);
+
+       return err;
+}
+
 /**
  * i915_gem_gtt_insert - insert a node into an address_space (GTT)
  * @vm - the &struct i915_address_space
index 79198352a491ca549b072ffb468ff29e622fd4bd..3e031a057f78cce381e5563fec63b5d1c10a9d7c 100644 (file)
@@ -532,6 +532,11 @@ int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
 void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
                               struct sg_table *pages);
 
+int i915_gem_gtt_reserve(struct i915_address_space *vm,
+                        struct drm_mm_node *node,
+                        u64 size, u64 offset, unsigned long color,
+                        unsigned int flags);
+
 int i915_gem_gtt_insert(struct i915_address_space *vm,
                        struct drm_mm_node *node,
                        u64 size, u64 alignment, unsigned long color,
index e5be8e04bf3b25e36dcd6fc0711f18bccbad9965..52dbb9bab2687cfe2c94407555b9c7f9629e874b 100644 (file)
@@ -694,10 +694,9 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
         * setting up the GTT space. The actual reservation will occur
         * later.
         */
-       vma->node.start = gtt_offset;
-       vma->node.size = size;
-
-       ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
+       ret = i915_gem_gtt_reserve(&ggtt->base, &vma->node,
+                                  size, gtt_offset, obj->cache_level,
+                                  0);
        if (ret) {
                DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
                goto err_pages;
index 18ae37c411fd4226b13bb2192323543d77b76c65..4461df5a94feac71e65e8c4d1cc6d326492019a6 100644 (file)
@@ -450,9 +450,9 @@ TRACE_EVENT(i915_gem_evict_vm,
            TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
 );
 
-TRACE_EVENT(i915_gem_evict_vma,
-           TP_PROTO(struct i915_vma *vma, unsigned int flags),
-           TP_ARGS(vma, flags),
+TRACE_EVENT(i915_gem_evict_node,
+           TP_PROTO(struct i915_address_space *vm, struct drm_mm_node *node, unsigned int flags),
+           TP_ARGS(vm, node, flags),
 
            TP_STRUCT__entry(
                             __field(u32, dev)
@@ -464,11 +464,11 @@ TRACE_EVENT(i915_gem_evict_vma,
                            ),
 
            TP_fast_assign(
-                          __entry->dev = vma->vm->i915->drm.primary->index;
-                          __entry->vm = vma->vm;
-                          __entry->start = vma->node.start;
-                          __entry->size = vma->node.size;
-                          __entry->color = vma->node.color;
+                          __entry->dev = vm->i915->drm.primary->index;
+                          __entry->vm = vm;
+                          __entry->start = node->start;
+                          __entry->size = node->size;
+                          __entry->color = node->color;
                           __entry->flags = flags;
                          ),
 
index dae340cfc6c76f617795e248b74550fcc54bb0ae..f1ad4fbb5ba7226d36b4667cf4e8543d3b161709 100644 (file)
@@ -116,22 +116,20 @@ void intel_vgt_deballoon(struct drm_i915_private *dev_priv)
        memset(&bl_info, 0, sizeof(bl_info));
 }
 
-static int vgt_balloon_space(struct drm_mm *mm,
+static int vgt_balloon_space(struct i915_ggtt *ggtt,
                             struct drm_mm_node *node,
                             unsigned long start, unsigned long end)
 {
        unsigned long size = end - start;
 
-       if (start == end)
+       if (start >= end)
                return -EINVAL;
 
        DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
                 start, end, size / 1024);
-
-       node->start = start;
-       node->size = size;
-
-       return drm_mm_reserve_node(mm, node);
+       return i915_gem_gtt_reserve(&ggtt->base, node,
+                                   size, start, I915_COLOR_UNEVICTABLE,
+                                   0);
 }
 
 /**
@@ -214,10 +212,8 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
 
        /* Unmappable graphic memory ballooning */
        if (unmappable_base > ggtt->mappable_end) {
-               ret = vgt_balloon_space(&ggtt->base.mm,
-                                       &bl_info.space[2],
-                                       ggtt->mappable_end,
-                                       unmappable_base);
+               ret = vgt_balloon_space(ggtt, &bl_info.space[2],
+                                       ggtt->mappable_end, unmappable_base);
 
                if (ret)
                        goto err;
@@ -228,18 +224,15 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
         * because it is reserved to the guard page.
         */
        if (unmappable_end < ggtt_end - PAGE_SIZE) {
-               ret = vgt_balloon_space(&ggtt->base.mm,
-                                       &bl_info.space[3],
-                                       unmappable_end,
-                                       ggtt_end - PAGE_SIZE);
+               ret = vgt_balloon_space(ggtt, &bl_info.space[3],
+                                       unmappable_end, ggtt_end - PAGE_SIZE);
                if (ret)
                        goto err;
        }
 
        /* Mappable graphic memory ballooning */
        if (mappable_base > ggtt->base.start) {
-               ret = vgt_balloon_space(&ggtt->base.mm,
-                                       &bl_info.space[0],
+               ret = vgt_balloon_space(ggtt, &bl_info.space[0],
                                        ggtt->base.start, mappable_base);
 
                if (ret)
@@ -247,10 +240,8 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
        }
 
        if (mappable_end < ggtt->mappable_end) {
-               ret = vgt_balloon_space(&ggtt->base.mm,
-                                       &bl_info.space[1],
-                                       mappable_end,
-                                       ggtt->mappable_end);
+               ret = vgt_balloon_space(ggtt, &bl_info.space[1],
+                                       mappable_end, ggtt->mappable_end);
 
                if (ret)
                        goto err;
index df3750d4c907cb4f911e49c2dbc3cf77fdea17b6..b74eeb73ae413cc386616d6cb976aef5a5c8de51 100644 (file)
@@ -419,17 +419,11 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
                        goto err_unpin;
                }
 
-               vma->node.start = offset;
-               vma->node.size = size;
-               vma->node.color = obj->cache_level;
-               ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
-               if (ret) {
-                       ret = i915_gem_evict_for_vma(vma, flags);
-                       if (ret == 0)
-                               ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
-                       if (ret)
-                               goto err_unpin;
-               }
+               ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
+                                          size, offset, obj->cache_level,
+                                          flags);
+               if (ret)
+                       goto err_unpin;
        } else {
                ret = i915_gem_gtt_insert(vma->vm, &vma->node,
                                          size, alignment, obj->cache_level,
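
Finally, a hedged sketch of how a caller might use the new helper to
pin a node at a fixed offset. my_reserve_fixed() and its debug message
are hypothetical; only the i915_gem_gtt_reserve() signature,
PIN_NONBLOCK and I915_COLOR_UNEVICTABLE come from the patch above:

    /* Hypothetical caller, assuming struct_mutex is held as the helper
     * requires. PIN_NONBLOCK makes the fallback eviction fail on, rather
     * than wait for, any active overlapping vma. */
    static int my_reserve_fixed(struct i915_address_space *vm,
                                struct drm_mm_node *node,
                                u64 size, u64 offset)
    {
            int err;

            err = i915_gem_gtt_reserve(vm, node, size, offset,
                                       I915_COLOR_UNEVICTABLE, /* no colouring */
                                       PIN_NONBLOCK);
            if (err) /* e.g. -ENOSPC: range occupied and not evictable */
                    DRM_DEBUG_DRIVER("reservation at %llx failed: %d\n",
                                     offset, err);
            return err;
    }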