drm/i915: Track user GTT faulting per-vma
author     Chris Wilson <chris@chris-wilson.co.uk>
           Mon, 9 Oct 2017 08:43:57 +0000 (09:43 +0100)
committer  Chris Wilson <chris@chris-wilson.co.uk>
           Mon, 9 Oct 2017 16:07:29 +0000 (17:07 +0100)
We don't wish to refault the entire object (i.e. its other vmas) when
unbinding one partial vma. To do this, track which vmas have been faulted
into the user's address space.

v2: Use a local vma_offset to tidy up a multiline unmap_mapping_range().

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171009084401.29090-3-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
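
The bookkeeping the message describes -- a per-vma USERFAULT bit plus a
per-object userfault_count that gates membership on
dev_priv->mm.userfault_list -- can be modelled in a few lines of plain C.
This is a toy sketch only: struct object, struct vma and the
on_userfault_list flag below are simplified stand-ins, not the driver's
real structures.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for drm_i915_gem_object / i915_vma. */
struct object {
    unsigned int userfault_count; /* vmas currently faulted into userspace */
    bool on_userfault_list;       /* stands in for mm.userfault_list membership */
};

struct vma {
    struct object *obj;
    unsigned long flags;
};
#define VMA_USERFAULT (1UL << 11)

/* Fault path: the first fault on this vma may be the object's first overall. */
static void fault(struct vma *vma)
{
    bool was_set = vma->flags & VMA_USERFAULT;

    vma->flags |= VMA_USERFAULT;
    if (!was_set && !vma->obj->userfault_count++)
        vma->obj->on_userfault_list = true;
}

/* Revoke path: only the last faulted vma drops the object from the list. */
static void revoke(struct vma *vma)
{
    if (!(vma->flags & VMA_USERFAULT))
        return;

    vma->flags &= ~VMA_USERFAULT;
    if (!--vma->obj->userfault_count)
        vma->obj->on_userfault_list = false;
}

int main(void)
{
    struct object obj = {0};
    struct vma a = { .obj = &obj }, b = { .obj = &obj }; /* two partial vmas */

    fault(&a);
    fault(&b);
    revoke(&a); /* b keeps the object on the userfault list */
    printf("count=%u on_list=%d\n", obj.userfault_count, obj.on_userfault_list);
    return 0;
}

Revoking one partial vma leaves the count at 1, so the object stays on the
list and the sibling vma's mapping survives -- exactly the refault
avoidance the patch is after.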
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_fence_reg.c
drivers/gpu/drm/i915/i915_gem_object.h
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/i915_vma.h

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9ec2bcd9a695a0bbd3a16e6afeef9e40b4999318..5b58d2b897c7f8d6ac2229cf00f8a6eb5418e8c4 100644
@@ -98,7 +98,7 @@ static char get_tiling_flag(struct drm_i915_gem_object *obj)
 
 static char get_global_flag(struct drm_i915_gem_object *obj)
 {
-       return !list_empty(&obj->userfault_link) ? 'g' : ' ';
+       return obj->userfault_count ? 'g' : ' ';
 }
 
 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 37eba9da3fcabb011746ddbd9b6981a236402de7..9cb8f85cbaadd8b4cd8f5c7c70b5d7ab392c26e4 100644
@@ -1914,18 +1914,22 @@ int i915_gem_fault(struct vm_fault *vmf)
        if (ret)
                goto err_unpin;
 
-       /* Mark as being mmapped into userspace for later revocation */
-       assert_rpm_wakelock_held(dev_priv);
-       if (list_empty(&obj->userfault_link))
-               list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
-
        /* Finally, remap it using the new GTT offset */
        ret = remap_io_mapping(area,
                               area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
                               (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
                               min_t(u64, vma->size, area->vm_end - area->vm_start),
                               &ggtt->mappable);
+       if (ret)
+               goto err_fence;
 
+       /* Mark as being mmapped into userspace for later revocation */
+       assert_rpm_wakelock_held(dev_priv);
+       if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
+               list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
+       GEM_BUG_ON(!obj->userfault_count);
+
+err_fence:
        i915_vma_unpin_fence(vma);
 err_unpin:
        __i915_vma_unpin(vma);
@@ -1978,6 +1982,25 @@ err:
        return ret;
 }
 
+static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+{
+       struct i915_vma *vma;
+
+       GEM_BUG_ON(!obj->userfault_count);
+
+       obj->userfault_count = 0;
+       list_del(&obj->userfault_link);
+       drm_vma_node_unmap(&obj->base.vma_node,
+                          obj->base.dev->anon_inode->i_mapping);
+
+       list_for_each_entry(vma, &obj->vma_list, obj_link) {
+               if (!i915_vma_is_ggtt(vma))
+                       break;
+
+               i915_vma_unset_userfault(vma);
+       }
+}
+
 /**
  * i915_gem_release_mmap - remove physical page mappings
  * @obj: obj in question
@@ -2008,12 +2031,10 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
        lockdep_assert_held(&i915->drm.struct_mutex);
        intel_runtime_pm_get(i915);
 
-       if (list_empty(&obj->userfault_link))
+       if (!obj->userfault_count)
                goto out;
 
-       list_del_init(&obj->userfault_link);
-       drm_vma_node_unmap(&obj->base.vma_node,
-                          obj->base.dev->anon_inode->i_mapping);
+       __i915_gem_object_release_mmap(obj);
 
        /* Ensure that the CPU's PTE are revoked and there are not outstanding
         * memory transactions from userspace before we return. The TLB
@@ -2041,11 +2062,8 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
         */
 
        list_for_each_entry_safe(obj, on,
-                                &dev_priv->mm.userfault_list, userfault_link) {
-               list_del_init(&obj->userfault_link);
-               drm_vma_node_unmap(&obj->base.vma_node,
-                                  obj->base.dev->anon_inode->i_mapping);
-       }
+                                &dev_priv->mm.userfault_list, userfault_link)
+               __i915_gem_object_release_mmap(obj);
 
        /* The fence will be lost when the device powers down. If any were
         * in use by hardware (i.e. they are pinned), we should not be powering
@@ -2068,7 +2086,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
                if (!reg->vma)
                        continue;
 
-               GEM_BUG_ON(!list_empty(&reg->vma->obj->userfault_link));
+               GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
                reg->dirty = true;
        }
 }
@@ -4276,7 +4294,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
        mutex_init(&obj->mm.lock);
 
        INIT_LIST_HEAD(&obj->global_link);
-       INIT_LIST_HEAD(&obj->userfault_link);
        INIT_LIST_HEAD(&obj->vma_list);
        INIT_LIST_HEAD(&obj->lut_list);
        INIT_LIST_HEAD(&obj->batch_pool_link);
@@ -4457,6 +4474,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 
        llist_for_each_entry_safe(obj, on, freed, freed) {
                GEM_BUG_ON(obj->bind_count);
+               GEM_BUG_ON(obj->userfault_count);
                GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
                GEM_BUG_ON(!list_empty(&obj->lut_list));
 
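Note the reordering in i915_gem_fault() above: before the patch, the object
was put on the userfault list ahead of remap_io_mapping(), which can still
fail; now the userfault state is published only after the PTEs are actually
in place, with the new err_fence label unwinding the failure. A toy model
of that control flow, where remap() and the published flag are hypothetical
stand-ins for remap_io_mapping() and userfault-list membership:

#include <stdio.h>

/* remap() stands in for the step that may fail. */
static int remap(int fail)
{
    return fail ? -1 : 0;
}

static int fault_tail(int fail, int *published)
{
    int ret = remap(fail);
    if (ret)
        goto err_fence;   /* nothing published, nothing to undo */

    *published = 1;       /* only now may suspend/revoke paths see us */
err_fence:
    /* the real code unpins the fence on both paths here */
    return ret;
}

int main(void)
{
    int published = 0;

    fault_tail(1, &published);
    printf("failed remap:     published=%d\n", published); /* 0 */
    fault_tail(0, &published);
    printf("successful remap: published=%d\n", published); /* 1 */
    return 0;
}
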
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 4df039ef2ce316509ecc6faa04e707d135acf507..933ee8ecfa54e01f15903f28f6ff08e182843887 100644
@@ -82,7 +82,7 @@ mark_free(struct drm_mm_scan *scan,
        if (i915_vma_is_pinned(vma))
                return false;
 
-       if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
+       if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma))
                return false;
 
        list_add(&vma->evict_link, unwind);
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index af824b8d73eac470f6b3d57e881c0352dcdf4e1e..012250f25255fa86c47fdd3aa4480cca09b7b281 100644
@@ -240,7 +240,8 @@ static int fence_update(struct drm_i915_fence_reg *fence,
                /* Ensure that all userspace CPU access is completed before
                 * stealing the fence.
                 */
-               i915_gem_release_mmap(fence->vma->obj);
+               GEM_BUG_ON(fence->vma->fence != fence);
+               i915_vma_revoke_mmap(fence->vma);
 
                fence->vma->fence = NULL;
                fence->vma = NULL;
@@ -451,7 +452,7 @@ void i915_gem_revoke_fences(struct drm_i915_private *dev_priv)
                GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
 
                if (fence->vma)
-                       i915_gem_release_mmap(fence->vma->obj);
+                       i915_vma_revoke_mmap(fence->vma);
        }
 }
 
@@ -479,7 +480,7 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
                 */
                if (vma && !i915_gem_object_is_tiled(vma->obj)) {
                        GEM_BUG_ON(!reg->dirty);
-                       GEM_BUG_ON(!list_empty(&vma->obj->userfault_link));
+                       GEM_BUG_ON(i915_vma_has_userfault(vma));
 
                        list_move(&reg->link, &dev_priv->mm.fence_list);
                        vma->fence = NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index 956c911c2cbffe120e0ae05904fc37d4d22f4fd7..d67f1cbe842d23aa3c6d318aef60b5a0467d6151 100644
@@ -123,6 +123,7 @@ struct drm_i915_gem_object {
        /**
         * Whether the object is currently in the GGTT mmap.
         */
+       unsigned int userfault_count;
        struct list_head userfault_link;
 
        struct list_head batch_pool_link;
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 2b0083c34914c72121b6fea7089749090d469d6b..4dce2e0197d9bda1b53aacdadfadf8e1f8dd23f7 100644
@@ -690,6 +690,30 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
        vma->iomap = NULL;
 }
 
+void i915_vma_revoke_mmap(struct i915_vma *vma)
+{
+       struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
+       u64 vma_offset;
+
+       lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+
+       if (!i915_vma_has_userfault(vma))
+               return;
+
+       GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
+       GEM_BUG_ON(!vma->obj->userfault_count);
+
+       vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
+       unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
+                           drm_vma_node_offset_addr(node) + vma_offset,
+                           vma->size,
+                           1);
+
+       i915_vma_unset_userfault(vma);
+       if (!--vma->obj->userfault_count)
+               list_del(&vma->obj->userfault_link);
+}
+
 int i915_vma_unbind(struct i915_vma *vma)
 {
        struct drm_i915_gem_object *obj = vma->obj;
@@ -753,11 +777,13 @@ int i915_vma_unbind(struct i915_vma *vma)
                        return ret;
 
                /* Force a pagefault for domain tracking on next user access */
-               i915_gem_release_mmap(obj);
+               i915_vma_revoke_mmap(vma);
 
                __i915_vma_iounmap(vma);
                vma->flags &= ~I915_VMA_CAN_FENCE;
        }
+       GEM_BUG_ON(vma->fence);
+       GEM_BUG_ON(i915_vma_has_userfault(vma));
 
        if (likely(!vma->vm->closed)) {
                trace_i915_vma_unbind(vma);
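
i915_vma_revoke_mmap() above zaps only this vma's window of the object's
mmap space: the hole begins at the object's mmap offset plus the partial
view's page offset, and spans vma->size bytes, so sibling partial vmas keep
their PTEs. A sketch of the arithmetic with made-up numbers (PAGE_SHIFT of
12 and all offsets below are illustrative only):

#include <stdio.h>

#define PAGE_SHIFT 12 /* illustrative; 4KiB pages */

int main(void)
{
    /* drm_vma_node_offset_addr() would return the object's start in the
     * anon inode's mmap space; this partial vma covers 16 pages starting
     * 32 pages into the object. */
    unsigned long long node_addr = 0x100000ULL << PAGE_SHIFT;
    unsigned long long partial_page_offset = 32;       /* ggtt_view.partial.offset */
    unsigned long long vma_size = 16ULL << PAGE_SHIFT; /* vma->size, in bytes */

    unsigned long long vma_offset = partial_page_offset << PAGE_SHIFT;

    /* unmap_mapping_range(mapping, start, len, 1) zaps just this window;
     * other partial vmas of the same object keep their PTEs. */
    printf("zap [%#llx, %#llx)\n",
           node_addr + vma_offset,
           node_addr + vma_offset + vma_size);
    return 0;
}
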
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 13d7ba7ee21ecfc40d165a67dc6539cc15f28dde..1e2bc9b3c3ac19a4790222eb151765050e264d49 100644
@@ -66,7 +66,7 @@ struct i915_vma {
         * that exist in the ctx->handle_vmas LUT for this vma.
         */
        unsigned int open_count;
-       unsigned int flags;
+       unsigned long flags;
        /**
         * How many users have pinned this object in GTT space. The following
         * users can each hold at most one reference: pwrite/pread, execbuffer
@@ -88,6 +88,8 @@ struct i915_vma {
 #define I915_VMA_GGTT          BIT(8)
 #define I915_VMA_CAN_FENCE     BIT(9)
 #define I915_VMA_CLOSED                BIT(10)
+#define I915_VMA_USERFAULT_BIT 11
+#define I915_VMA_USERFAULT     BIT(I915_VMA_USERFAULT_BIT)
 
        unsigned int active;
        struct i915_gem_active last_read[I915_NUM_ENGINES];
@@ -146,6 +148,22 @@ static inline bool i915_vma_is_closed(const struct i915_vma *vma)
        return vma->flags & I915_VMA_CLOSED;
 }
 
+static inline bool i915_vma_set_userfault(struct i915_vma *vma)
+{
+       GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
+       return __test_and_set_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+}
+
+static inline void i915_vma_unset_userfault(struct i915_vma *vma)
+{
+       return __clear_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+}
+
+static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
+{
+       return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+}
+
 static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
 {
        return vma->active;
@@ -244,6 +262,7 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
 bool i915_vma_misplaced(const struct i915_vma *vma,
                        u64 size, u64 alignment, u64 flags);
 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
+void i915_vma_revoke_mmap(struct i915_vma *vma);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
 void i915_vma_unlink_ctx(struct i915_vma *vma);
 void i915_vma_close(struct i915_vma *vma);
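
A side effect of the new helpers: vma->flags widens from unsigned int to
unsigned long because test_bit(), __test_and_set_bit() and __clear_bit()
from <linux/bitops.h> operate on bitmaps of unsigned long words. A rough
userspace approximation of the three accessors (non-atomic like the
__-prefixed kernel variants, which is safe here since every caller holds
struct_mutex):

#include <stdbool.h>
#include <stdio.h>

#define USERFAULT_BIT 11

/* Analogue of i915_vma_set_userfault(): returns the bit's old value. */
static bool test_and_set_userfault(unsigned long *flags)
{
    bool old = *flags & (1UL << USERFAULT_BIT);

    *flags |= 1UL << USERFAULT_BIT;
    return old;
}

/* Analogue of i915_vma_unset_userfault(). */
static void unset_userfault(unsigned long *flags)
{
    *flags &= ~(1UL << USERFAULT_BIT);
}

/* Analogue of i915_vma_has_userfault(). */
static bool has_userfault(unsigned long flags)
{
    return flags & (1UL << USERFAULT_BIT);
}

int main(void)
{
    unsigned long flags = 0;

    printf("%d", test_and_set_userfault(&flags)); /* 0: first fault */
    printf("%d", test_and_set_userfault(&flags)); /* 1: already faulted */
    unset_userfault(&flags);
    printf("%d\n", has_userfault(flags));         /* 0: revoked */
    return 0;
}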