drm/i915: Pass around sg_table to get_pages/put_pages backend
Author:     Chris Wilson <chris@chris-wilson.co.uk>
AuthorDate: Fri, 28 Oct 2016 12:58:36 +0000 (13:58 +0100)
Commit:     Chris Wilson <chris@chris-wilson.co.uk>
CommitDate: Fri, 28 Oct 2016 19:53:47 +0000 (20:53 +0100)
The plan is to move obj->pages out from under the struct_mutex and into its
own per-object lock. We need to prune any assumption of the struct_mutex from
the get_pages/put_pages backends, and to make that easier we pass the
sg_table to operate on explicitly, rather than having the backends reach it
indirectly via the obj.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161028125858.23563-13-chris@chris-wilson.co.uk
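
In outline, the backend contract changes from "int (*get_pages)(obj); void
(*put_pages)(obj);", where each backend installed and tore down obj->mm.pages
itself, to the shape sketched below. This is a simplified illustration
distilled from the diff, not verbatim kernel code; the my_backend_* names are
hypothetical:

    /* get_pages() now allocates and returns the sg_table (or an ERR_PTR);
     * the caller installs it via __i915_gem_object_set_pages(). */
    static struct sg_table *
    my_backend_get_pages(struct drm_i915_gem_object *obj)
    {
            struct sg_table *st;

            st = kmalloc(sizeof(*st), GFP_KERNEL);
            if (!st)
                    return ERR_PTR(-ENOMEM);

            if (sg_alloc_table(st, 1, GFP_KERNEL)) {
                    kfree(st);
                    return ERR_PTR(-ENOMEM);
            }

            /* ... populate st from the backing storage ... */

            return st;
    }

    /* put_pages() is handed the table the caller already detached with
     * fetch_and_zero(&obj->mm.pages), so the backend never dereferences
     * obj->mm.pages itself and needs no struct_mutex for it. */
    static void
    my_backend_put_pages(struct drm_i915_gem_object *obj,
                         struct sg_table *pages)
    {
            sg_free_table(pages);
            kfree(pages);
    }
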
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_dmabuf.c
drivers/gpu/drm/i915/i915_gem_fence.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_gtt.h
drivers/gpu/drm/i915/i915_gem_internal.c
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_gem_userptr.c

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 50781cbe74d06b9c3d5058ed5f625496af335272..db67bec15ef260e88ad3cfcd34ca15b238c4a16a 100644
@@ -2185,8 +2185,8 @@ struct drm_i915_gem_object_ops {
         * being released or under memory pressure (where we attempt to
         * reap pages for the shrinker).
         */
-       int (*get_pages)(struct drm_i915_gem_object *);
-       void (*put_pages)(struct drm_i915_gem_object *);
+       struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
+       void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
 
        int (*dmabuf_export)(struct drm_i915_gem_object *);
        void (*release)(struct drm_i915_gem_object *);
@@ -2321,8 +2321,6 @@ struct drm_i915_gem_object {
        struct i915_gem_userptr {
                uintptr_t ptr;
                unsigned read_only :1;
-               unsigned workers :4;
-#define I915_GEM_USERPTR_MAX_WORKERS 15
 
                struct i915_mm_struct *mm;
                struct i915_mmu_object *mmu_object;
@@ -2383,6 +2381,19 @@ i915_gem_object_put_unlocked(struct drm_i915_gem_object *obj)
 __deprecated
 extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
 
+static inline bool
+i915_gem_object_is_dead(const struct drm_i915_gem_object *obj)
+{
+       return atomic_read(&obj->base.refcount.refcount) == 0;
+}
+
+#if IS_ENABLED(CONFIG_LOCKDEP)
+#define lockdep_assert_held_unless(lock, cond) \
+       GEM_BUG_ON(debug_locks && !lockdep_is_held(lock) && !(cond))
+#else
+#define lockdep_assert_held_unless(lock, cond)
+#endif
+
 static inline bool
 i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
 {
@@ -3211,6 +3222,8 @@ dma_addr_t
 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
                                unsigned long n);
 
+void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+                                struct sg_table *pages);
 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 
 static inline int __must_check
@@ -3227,7 +3240,8 @@ i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 static inline void
 __i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 {
-       lockdep_assert_held(&obj->base.dev->struct_mutex);
+       lockdep_assert_held_unless(&obj->base.dev->struct_mutex,
+                                  i915_gem_object_is_dead(obj));
        GEM_BUG_ON(!obj->mm.pages);
 
        obj->mm.pages_pin_count++;
@@ -3242,7 +3256,8 @@ i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
 static inline void
 __i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 {
-       lockdep_assert_held(&obj->base.dev->struct_mutex);
+       lockdep_assert_held_unless(&obj->base.dev->struct_mutex,
+                                  i915_gem_object_is_dead(obj));
        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
        GEM_BUG_ON(!obj->mm.pages);
 
@@ -3255,7 +3270,8 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
        __i915_gem_object_unpin_pages(obj);
 }
 
-int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
+void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
 
 enum i915_map_type {
        I915_MAP_WB = 0,
@@ -3480,8 +3496,10 @@ i915_vma_unpin_fence(struct i915_vma *vma)
 void i915_gem_restore_fences(struct drm_device *dev);
 
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
-void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
-void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
+                                      struct sg_table *pages);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
+                                        struct sg_table *pages);
 
 /* i915_gem_context.c */
 int __must_check i915_gem_context_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0d702c8f7b7b0e1e45b36969476f63289af3e91e..56060aeb12e3b80ff9622d2f53c01e600f0576cf 100644
@@ -169,7 +169,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
-static int
+static struct sg_table *
 i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 {
        struct address_space *mapping = obj->base.filp->f_mapping;
@@ -179,7 +179,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
        int i;
 
        if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
-               return -EINVAL;
+               return ERR_PTR(-EINVAL);
 
        for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
                struct page *page;
@@ -187,7 +187,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 
                page = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(page))
-                       return PTR_ERR(page);
+                       return ERR_CAST(page);
 
                src = kmap_atomic(page);
                memcpy(vaddr, src, PAGE_SIZE);
@@ -202,11 +202,11 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (st == NULL)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
 
        if (sg_alloc_table(st, 1, GFP_KERNEL)) {
                kfree(st);
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
        }
 
        sg = st->sgl;
@@ -216,28 +216,30 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
        sg_dma_address(sg) = obj->phys_handle->busaddr;
        sg_dma_len(sg) = obj->base.size;
 
-       obj->mm.pages = st;
-       return 0;
+       return st;
 }
 
 static void
-i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
+__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj)
 {
-       int ret;
-
        GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
 
-       ret = i915_gem_object_set_to_cpu_domain(obj, true);
-       if (WARN_ON(ret)) {
-               /* In the event of a disaster, abandon all caches and
-                * hope for the best.
-                */
-               obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
-       }
-
        if (obj->mm.madv == I915_MADV_DONTNEED)
                obj->mm.dirty = false;
 
+       if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
+               i915_gem_clflush_object(obj, false);
+
+       obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+       obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+}
+
+static void
+i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
+                              struct sg_table *pages)
+{
+       __i915_gem_object_release_shmem(obj);
+
        if (obj->mm.dirty) {
                struct address_space *mapping = obj->base.filp->f_mapping;
                char *vaddr = obj->phys_handle->vaddr;
@@ -265,8 +267,8 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
                obj->mm.dirty = false;
        }
 
-       sg_free_table(obj->mm.pages);
-       kfree(obj->mm.pages);
+       sg_free_table(pages);
+       kfree(pages);
 }
 
 static void
@@ -518,9 +520,9 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
        if (ret)
                return ret;
 
-       ret = __i915_gem_object_put_pages(obj);
-       if (ret)
-               return ret;
+       __i915_gem_object_put_pages(obj);
+       if (obj->mm.pages)
+               return -EBUSY;
 
        /* create a new object */
        phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
@@ -536,7 +538,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 static int
 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pwrite *args,
-                    struct drm_file *file_priv)
+                    struct drm_file *file)
 {
        struct drm_device *dev = obj->base.dev;
        void *vaddr = obj->phys_handle->vaddr + args->offset;
@@ -552,7 +554,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
                                   I915_WAIT_LOCKED |
                                   I915_WAIT_ALL,
                                   MAX_SCHEDULE_TIMEOUT,
-                                  to_rps_client(file_priv));
+                                  to_rps_client(file));
        if (ret)
                return ret;
 
@@ -2263,8 +2265,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
 }
 
 /* Try to discard unwanted pages */
-static void
-i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
+void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
 {
        struct address_space *mapping;
 
@@ -2283,32 +2284,20 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
 }
 
 static void
-i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
+i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
+                             struct sg_table *pages)
 {
        struct sgt_iter sgt_iter;
        struct page *page;
-       int ret;
 
-       GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
-
-       ret = i915_gem_object_set_to_cpu_domain(obj, true);
-       if (WARN_ON(ret)) {
-               /* In the event of a disaster, abandon all caches and
-                * hope for the best.
-                */
-               i915_gem_clflush_object(obj, true);
-               obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
-       }
+       __i915_gem_object_release_shmem(obj);
 
-       i915_gem_gtt_finish_object(obj);
+       i915_gem_gtt_finish_pages(obj, pages);
 
        if (i915_gem_object_needs_bit17_swizzle(obj))
-               i915_gem_object_save_bit_17_swizzle(obj);
-
-       if (obj->mm.madv == I915_MADV_DONTNEED)
-               obj->mm.dirty = false;
+               i915_gem_object_save_bit_17_swizzle(obj, pages);
 
-       for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
+       for_each_sgt_page(page, sgt_iter, pages) {
                if (obj->mm.dirty)
                        set_page_dirty(page);
 
@@ -2319,8 +2308,8 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
        }
        obj->mm.dirty = false;
 
-       sg_free_table(obj->mm.pages);
-       kfree(obj->mm.pages);
+       sg_free_table(pages);
+       kfree(pages);
 }
 
 static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
@@ -2332,24 +2321,22 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
                radix_tree_delete(&obj->mm.get_page.radix, iter.index);
 }
 
-int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 {
-       const struct drm_i915_gem_object_ops *ops = obj->ops;
+       struct sg_table *pages;
 
        lockdep_assert_held(&obj->base.dev->struct_mutex);
 
-       if (!obj->mm.pages)
-               return 0;
-
        if (i915_gem_object_has_pinned_pages(obj))
-               return -EBUSY;
+               return;
 
        GEM_BUG_ON(obj->bind_count);
 
        /* ->put_pages might need to allocate memory for the bit17 swizzle
         * array, hence protect them from being reaped by removing them from gtt
         * lists early. */
-       list_del(&obj->global_list);
+       pages = fetch_and_zero(&obj->mm.pages);
+       GEM_BUG_ON(!pages);
 
        if (obj->mm.mapping) {
                void *ptr;
@@ -2365,12 +2352,7 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 
        __i915_gem_object_reset_page_iter(obj);
 
-       ops->put_pages(obj);
-       obj->mm.pages = NULL;
-
-       i915_gem_object_invalidate(obj);
-
-       return 0;
+       obj->ops->put_pages(obj, pages);
 }
 
 static unsigned int swiotlb_max_size(void)
@@ -2382,7 +2364,7 @@ static unsigned int swiotlb_max_size(void)
 #endif
 }
 
-static int
+static struct sg_table *
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
@@ -2401,8 +2383,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
         * wasn't in the GTT, there shouldn't be any way it could have been in
         * a GPU cache
         */
-       BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
-       BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+       GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
+       GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
 
        max_segment = swiotlb_max_size();
        if (!max_segment)
@@ -2410,12 +2392,12 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (st == NULL)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
 
        page_count = obj->base.size / PAGE_SIZE;
        if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
                kfree(st);
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
        }
 
        /* Get the list of pages out of our struct file.  They'll be pinned
@@ -2466,20 +2448,19 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        }
        if (sg) /* loop terminated early; short sg table */
                sg_mark_end(sg);
-       obj->mm.pages = st;
 
-       ret = i915_gem_gtt_prepare_object(obj);
+       ret = i915_gem_gtt_prepare_pages(obj, st);
        if (ret)
                goto err_pages;
 
        if (i915_gem_object_needs_bit17_swizzle(obj))
-               i915_gem_object_do_bit_17_swizzle(obj);
+               i915_gem_object_do_bit_17_swizzle(obj, st);
 
        if (i915_gem_object_is_tiled(obj) &&
            dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
                __i915_gem_object_pin_pages(obj);
 
-       return 0;
+       return st;
 
 err_pages:
        sg_mark_end(sg);
@@ -2499,7 +2480,35 @@ err_pages:
        if (ret == -ENOSPC)
                ret = -ENOMEM;
 
-       return ret;
+       return ERR_PTR(ret);
+}
+
+void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+                                struct sg_table *pages)
+{
+       lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+       obj->mm.get_page.sg_pos = pages->sgl;
+       obj->mm.get_page.sg_idx = 0;
+
+       obj->mm.pages = pages;
+}
+
+static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+       struct sg_table *pages;
+
+       if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
+               DRM_DEBUG("Attempting to obtain a purgeable object\n");
+               return -EFAULT;
+       }
+
+       pages = obj->ops->get_pages(obj);
+       if (unlikely(IS_ERR(pages)))
+               return PTR_ERR(pages);
+
+       __i915_gem_object_set_pages(obj, pages);
+       return 0;
 }
 
 /* Ensure that the associated pages are gathered from the backing storage
@@ -2511,33 +2520,18 @@ err_pages:
  */
 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-       const struct drm_i915_gem_object_ops *ops = obj->ops;
-       int ret;
+       int err;
 
        lockdep_assert_held(&obj->base.dev->struct_mutex);
 
        if (obj->mm.pages)
                return 0;
 
-       if (obj->mm.madv != I915_MADV_WILLNEED) {
-               DRM_DEBUG("Attempting to obtain a purgeable object\n");
-               __i915_gem_object_unpin_pages(obj);
-               return -EFAULT;
-       }
-
-       ret = ops->get_pages(obj);
-       if (ret) {
+       err = ____i915_gem_object_get_pages(obj);
+       if (err)
                __i915_gem_object_unpin_pages(obj);
-               return ret;
-       }
 
-       list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
-
-       obj->mm.get_page.sg_pos = obj->mm.pages->sgl;
-       obj->mm.get_page.sg_idx = 0;
-
-       return 0;
+       return err;
 }
 
 /* The 'mapping' part of i915_gem_object_pin_map() below */
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 10441dc72e731b4e9ba6dd2fe4e9f79620cd317b..2abd524aba141ca20c565fbdcba12ca41b8e8661 100644
@@ -289,22 +289,18 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
        return dma_buf;
 }
 
-static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
+static struct sg_table *
+i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
 {
-       struct sg_table *sg;
-
-       sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
-       if (IS_ERR(sg))
-               return PTR_ERR(sg);
-
-       obj->mm.pages = sg;
-       return 0;
+       return dma_buf_map_attachment(obj->base.import_attach,
+                                     DMA_BIDIRECTIONAL);
 }
 
-static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
+static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
+                                            struct sg_table *pages)
 {
-       dma_buf_unmap_attachment(obj->base.import_attach,
-                                obj->mm.pages, DMA_BIDIRECTIONAL);
+       dma_buf_unmap_attachment(obj->base.import_attach, pages,
+                                DMA_BIDIRECTIONAL);
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index 5aadab59f071c0a4929254e71d1902ec00d2ca14..cd59dbc6588c8a2a1aa88143b3fabf6797e868fe 100644
@@ -644,6 +644,7 @@ i915_gem_swizzle_page(struct page *page)
 /**
  * i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
  * @obj: i915 GEM buffer object
+ * @pages: the scattergather list of physical pages
  *
  * This function fixes up the swizzling in case any page frame number for this
  * object has changed in bit 17 since that state has been saved with
@@ -654,7 +655,8 @@ i915_gem_swizzle_page(struct page *page)
  * by swapping them out and back in again).
  */
 void
-i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
+i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
+                                 struct sg_table *pages)
 {
        struct sgt_iter sgt_iter;
        struct page *page;
@@ -664,10 +666,9 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
                return;
 
        i = 0;
-       for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
+       for_each_sgt_page(page, sgt_iter, pages) {
                char new_bit_17 = page_to_phys(page) >> 17;
-               if ((new_bit_17 & 0x1) !=
-                   (test_bit(i, obj->bit_17) != 0)) {
+               if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
                        i915_gem_swizzle_page(page);
                        set_page_dirty(page);
                }
@@ -678,17 +679,19 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 /**
  * i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
  * @obj: i915 GEM buffer object
+ * @pages: the scattergather list of physical pages
  *
  * This function saves the bit 17 of each page frame number so that swizzling
  * can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
  * be called before the backing storage can be unpinned.
  */
 void
-i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
+i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
+                                   struct sg_table *pages)
 {
+       const unsigned int page_count = obj->base.size >> PAGE_SHIFT;
        struct sgt_iter sgt_iter;
        struct page *page;
-       int page_count = obj->base.size >> PAGE_SHIFT;
        int i;
 
        if (obj->bit_17 == NULL) {
@@ -703,7 +706,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 
        i = 0;
 
-       for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
+       for_each_sgt_page(page, sgt_iter, pages) {
                if (page_to_phys(page) & (1 << 17))
                        __set_bit(i, obj->bit_17);
                else
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 794ccc4cffaa40afe0b638425cd5d747dcd8e1cf..1008209ca797524dd61b6ca4279a6c1c879bed78 100644
@@ -2370,14 +2370,15 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
        i915_ggtt_flush(dev_priv);
 }
 
-int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
+int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
+                              struct sg_table *pages)
 {
-       if (!dma_map_sg(&obj->base.dev->pdev->dev,
-                       obj->mm.pages->sgl, obj->mm.pages->nents,
-                       PCI_DMA_BIDIRECTIONAL))
-               return -ENOSPC;
+       if (dma_map_sg(&obj->base.dev->pdev->dev,
+                      pages->sgl, pages->nents,
+                      PCI_DMA_BIDIRECTIONAL))
+               return 0;
 
-       return 0;
+       return -ENOSPC;
 }
 
 static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
@@ -2696,7 +2697,8 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
                                         vma->node.start, size);
 }
 
-void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
+void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
+                              struct sg_table *pages)
 {
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct device *kdev = &dev_priv->drm.pdev->dev;
@@ -2710,8 +2712,7 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
                }
        }
 
-       dma_unmap_sg(kdev, obj->mm.pages->sgl, obj->mm.pages->nents,
-                    PCI_DMA_BIDIRECTIONAL);
+       dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
 }
 
 static void i915_gtt_color_adjust(struct drm_mm_node *node,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index c241d8143255546e88e3546a03d4c39df5899ad7..dbe6a6cec20d46a0be8efbf37e8676c04d0cc0ef 100644
@@ -628,8 +628,10 @@ void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
 void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 
-int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
+                                           struct sg_table *pages);
+void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
+                              struct sg_table *pages);
 
 /* Flags used by pin/bind&friends. */
 #define PIN_NONBLOCK           BIT(0)
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index 08a2576ff7d2c6dae001f6ad645009c97910fb40..1b0607a44a9ae128b07bc29dbf6b6ba01282bb9f 100644
@@ -42,7 +42,8 @@ static void internal_free_pages(struct sg_table *st)
        kfree(st);
 }
 
-static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
+static struct sg_table *
+i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        unsigned int npages = obj->base.size / PAGE_SIZE;
@@ -53,11 +54,11 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (!st)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
 
        if (sg_alloc_table(st, npages, GFP_KERNEL)) {
                kfree(st);
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
        }
 
        sg = st->sgl;
@@ -102,12 +103,9 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 
                sg = __sg_next(sg);
        } while (1);
-       obj->mm.pages = st;
 
-       if (i915_gem_gtt_prepare_object(obj)) {
-               obj->mm.pages = NULL;
+       if (i915_gem_gtt_prepare_pages(obj, st))
                goto err;
-       }
 
        /* Mark the pages as dontneed whilst they are still pinned. As soon
         * as they are unpinned they are allowed to be reaped by the shrinker,
@@ -115,18 +113,19 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
         * object are only valid whilst active and pinned.
         */
        obj->mm.madv = I915_MADV_DONTNEED;
-       return 0;
+       return st;
 
 err:
        sg_mark_end(sg);
        internal_free_pages(st);
-       return -ENOMEM;
+       return ERR_PTR(-ENOMEM);
 }
 
-static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj)
+static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
+                                              struct sg_table *pages)
 {
-       i915_gem_gtt_finish_object(obj);
-       internal_free_pages(obj->mm.pages);
+       i915_gem_gtt_finish_pages(obj, pages);
+       internal_free_pages(pages);
 
        obj->mm.dirty = false;
        obj->mm.madv = I915_MADV_WILLNEED;
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 124f69a801620bceb16ee13ac9b11f6c4615685f..f95061faeae64bd4d3c0869ab2359c48614e4d8e 100644
@@ -91,6 +91,13 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
        return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
 }
 
+static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
+{
+       if (i915_gem_object_unbind(obj) == 0)
+               __i915_gem_object_put_pages(obj);
+       return !obj->mm.pages;
+}
+
 /**
  * i915_gem_shrink - Shrink buffer object caches
  * @dev_priv: i915 device
@@ -192,9 +199,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 
                        i915_gem_object_get(obj);
 
-                       /* For the unbound phase, this should be a no-op! */
-                       i915_gem_object_unbind(obj);
-                       if (__i915_gem_object_put_pages(obj) == 0)
+                       if (unsafe_drop_pages(obj))
                                count += obj->base.size >> PAGE_SHIFT;
 
                        i915_gem_object_put(obj);
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 0acbdcbd8f3a180f1b5e566e81e126f18cb13cbd..1a63ffa4d1899f0397b3395a00d9529c7a982a27 100644
@@ -546,17 +546,20 @@ i915_pages_create_for_stolen(struct drm_device *dev,
        return st;
 }
 
-static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
+static struct sg_table *
+i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
 {
-       BUG();
-       return -EINVAL;
+       return i915_pages_create_for_stolen(obj->base.dev,
+                                           obj->stolen->start,
+                                           obj->stolen->size);
 }
 
-static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
+static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
+                                            struct sg_table *pages)
 {
        /* Should only be called during free */
-       sg_free_table(obj->mm.pages);
-       kfree(obj->mm.pages);
+       sg_free_table(pages);
+       kfree(pages);
 }
 
 static void
@@ -591,21 +594,13 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
        drm_gem_private_object_init(dev, &obj->base, stolen->size);
        i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
 
-       obj->mm.pages = i915_pages_create_for_stolen(dev,
-                                                    stolen->start,
-                                                    stolen->size);
-       if (!obj->mm.pages)
-               goto cleanup;
-
-       obj->mm.get_page.sg_pos = obj->mm.pages->sgl;
-       obj->mm.get_page.sg_idx = 0;
-
-       __i915_gem_object_pin_pages(obj);
        obj->stolen = stolen;
-
        obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
        obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
 
+       if (i915_gem_object_pin_pages(obj))
+               goto cleanup;
+
        return obj;
 
 cleanup:
@@ -700,10 +695,14 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
        if (gtt_offset == I915_GTT_OFFSET_NONE)
                return obj;
 
+       ret = i915_gem_object_pin_pages(obj);
+       if (ret)
+               goto err;
+
        vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base, NULL);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
-               goto err;
+               goto err_pages;
        }
 
        /* To simplify the initialisation sequence between KMS and GTT,
@@ -717,20 +716,20 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
        ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
        if (ret) {
                DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
-               goto err;
+               goto err_pages;
        }
 
        vma->pages = obj->mm.pages;
        vma->flags |= I915_VMA_GLOBAL_BIND;
        __i915_vma_set_map_and_fenceable(vma);
        list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
+       list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
        obj->bind_count++;
 
-       list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
-       __i915_gem_object_pin_pages(obj);
-
        return obj;
 
+err_pages:
+       i915_gem_object_unpin_pages(obj);
 err:
        i915_gem_object_put(obj);
        return NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 0cbc8f70317e24f58d992f290e3cde3983207677..a421447f1d8435653510dd00f0d2739f335f3439 100644
@@ -73,11 +73,14 @@ static void cancel_userptr(struct work_struct *work)
        /* Cancel any active worker and force us to re-evaluate gup */
        obj->userptr.work = NULL;
 
-       if (obj->mm.pages) {
-               /* We are inside a kthread context and can't be interrupted */
-               WARN_ON(i915_gem_object_unbind(obj));
-               WARN_ON(__i915_gem_object_put_pages(obj));
-       }
+       /* We are inside a kthread context and can't be interrupted */
+       if (i915_gem_object_unbind(obj) == 0)
+               __i915_gem_object_put_pages(obj);
+       WARN_ONCE(obj->mm.pages,
+                 "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_display=%d\n",
+                 obj->bind_count,
+                 obj->mm.pages_pin_count,
+                 obj->pin_display);
 
        i915_gem_object_put(obj);
        mutex_unlock(&dev->struct_mutex);
@@ -426,24 +429,25 @@ err:
        return ret;
 }
 
-static int
+static struct sg_table *
 __i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
                             struct page **pvec, int num_pages)
 {
+       struct sg_table *pages;
        int ret;
 
-       ret = st_set_pages(&obj->mm.pages, pvec, num_pages);
+       ret = st_set_pages(&pages, pvec, num_pages);
        if (ret)
-               return ret;
+               return ERR_PTR(ret);
 
-       ret = i915_gem_gtt_prepare_object(obj);
+       ret = i915_gem_gtt_prepare_pages(obj, pages);
        if (ret) {
-               sg_free_table(obj->mm.pages);
-               kfree(obj->mm.pages);
-               obj->mm.pages = NULL;
+               sg_free_table(pages);
+               kfree(pages);
+               return ERR_PTR(ret);
        }
 
-       return ret;
+       return pages;
 }
 
 static int
@@ -525,20 +529,20 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 
        mutex_lock(&dev->struct_mutex);
        if (obj->userptr.work == &work->work) {
+               struct sg_table *pages = ERR_PTR(ret);
+
                if (pinned == npages) {
-                       ret = __i915_gem_userptr_set_pages(obj, pvec, npages);
-                       if (ret == 0) {
-                               list_add_tail(&obj->global_list,
-                                             &to_i915(dev)->mm.unbound_list);
-                               obj->mm.get_page.sg_pos = obj->mm.pages->sgl;
-                               obj->mm.get_page.sg_idx = 0;
+                       pages = __i915_gem_userptr_set_pages(obj, pvec, npages);
+                       if (!IS_ERR(pages)) {
+                               __i915_gem_object_set_pages(obj, pages);
                                pinned = 0;
+                               pages = NULL;
                        }
                }
-               obj->userptr.work = ERR_PTR(ret);
+
+               obj->userptr.work = ERR_CAST(pages);
        }
 
-       obj->userptr.workers--;
        i915_gem_object_put(obj);
        mutex_unlock(&dev->struct_mutex);
 
@@ -549,7 +553,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
        kfree(work);
 }
 
-static int
+static struct sg_table *
 __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
                                      bool *active)
 {
@@ -574,15 +578,11 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
         * that error back to this function through
         * obj->userptr.work = ERR_PTR.
         */
-       if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS)
-               return -EAGAIN;
-
        work = kmalloc(sizeof(*work), GFP_KERNEL);
        if (work == NULL)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
 
        obj->userptr.work = &work->work;
-       obj->userptr.workers++;
 
        work->obj = i915_gem_object_get(obj);
 
@@ -593,14 +593,15 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
        schedule_work(&work->work);
 
        *active = true;
-       return -EAGAIN;
+       return ERR_PTR(-EAGAIN);
 }
 
-static int
+static struct sg_table *
 i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 {
        const int num_pages = obj->base.size >> PAGE_SHIFT;
        struct page **pvec;
+       struct sg_table *pages;
        int pinned, ret;
        bool active;
 
@@ -624,15 +625,15 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
        if (obj->userptr.work) {
                /* active flag should still be held for the pending work */
                if (IS_ERR(obj->userptr.work))
-                       return PTR_ERR(obj->userptr.work);
+                       return ERR_CAST(obj->userptr.work);
                else
-                       return -EAGAIN;
+                       return ERR_PTR(-EAGAIN);
        }
 
        /* Let the mmu-notifier know that we have begun and need cancellation */
        ret = __i915_gem_userptr_set_active(obj, true);
        if (ret)
-               return ret;
+               return ERR_PTR(ret);
 
        pvec = NULL;
        pinned = 0;
@@ -641,7 +642,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
                                      GFP_TEMPORARY);
                if (pvec == NULL) {
                        __i915_gem_userptr_set_active(obj, false);
-                       return -ENOMEM;
+                       return ERR_PTR(-ENOMEM);
                }
 
                pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
@@ -650,21 +651,22 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 
        active = false;
        if (pinned < 0)
-               ret = pinned, pinned = 0;
+               pages = ERR_PTR(pinned), pinned = 0;
        else if (pinned < num_pages)
-               ret = __i915_gem_userptr_get_pages_schedule(obj, &active);
+               pages = __i915_gem_userptr_get_pages_schedule(obj, &active);
        else
-               ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
-       if (ret) {
+               pages = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
+       if (IS_ERR(pages)) {
                __i915_gem_userptr_set_active(obj, active);
                release_pages(pvec, pinned, 0);
        }
        drm_free_large(pvec);
-       return ret;
+       return pages;
 }
 
 static void
-i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
+i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
+                          struct sg_table *pages)
 {
        struct sgt_iter sgt_iter;
        struct page *page;
@@ -675,9 +677,9 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
        if (obj->mm.madv != I915_MADV_WILLNEED)
                obj->mm.dirty = false;
 
-       i915_gem_gtt_finish_object(obj);
+       i915_gem_gtt_finish_pages(obj, pages);
 
-       for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
+       for_each_sgt_page(page, sgt_iter, pages) {
                if (obj->mm.dirty)
                        set_page_dirty(page);
 
@@ -686,8 +688,8 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
        }
        obj->mm.dirty = false;
 
-       sg_free_table(obj->mm.pages);
-       kfree(obj->mm.pages);
+       sg_free_table(pages);
+       kfree(pages);
 }
 
 static void