* being released or under memory pressure (where we attempt to
* reap pages for the shrinker).
*/
- int (*get_pages)(struct drm_i915_gem_object *);
- void (*put_pages)(struct drm_i915_gem_object *);
+ struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
+ void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
int (*dmabuf_export)(struct drm_i915_gem_object *);
void (*release)(struct drm_i915_gem_object *);
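
The core of the refactor is visible in the two vfuncs above: ->get_pages() now builds and returns the sg_table (or an ERR_PTR on failure) instead of writing obj->mm.pages itself, and ->put_pages() receives that same table back to release. Below is a minimal standalone sketch of that ownership hand-off; it is not i915 code, struct sg_table is a stand-in, and the ERR_PTR helpers are local copies mirroring linux/err.h.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

/* Local stand-ins mirroring linux/err.h. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical stand-in for struct sg_table. */
struct sg_table { unsigned int nents; };

/* The backend builds the table and hands ownership to the caller. */
static struct sg_table *backend_get_pages(size_t size)
{
	struct sg_table *st = malloc(sizeof(*st));

	if (!st)
		return ERR_PTR(-ENOMEM);	/* error pointer, never NULL */
	st->nents = size / 4096;
	return st;
}

/* The same table is handed back for release; no object state is touched. */
static void backend_put_pages(struct sg_table *pages)
{
	free(pages);
}

int main(void)
{
	struct sg_table *pages = backend_get_pages(16 * 4096);

	if (IS_ERR(pages))
		return (int)-PTR_ERR(pages);
	printf("mapped %u entries\n", pages->nents);
	backend_put_pages(pages);
	return 0;
}
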
struct i915_gem_userptr {
uintptr_t ptr;
unsigned read_only :1;
- unsigned workers :4;
-#define I915_GEM_USERPTR_MAX_WORKERS 15
struct i915_mm_struct *mm;
struct i915_mmu_object *mmu_object;
__deprecated
extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
+static inline bool
+i915_gem_object_is_dead(const struct drm_i915_gem_object *obj)
+{
+ return atomic_read(&obj->base.refcount.refcount) == 0;
+}
+
+#if IS_ENABLED(CONFIG_LOCKDEP)
+#define lockdep_assert_held_unless(lock, cond) \
+ GEM_BUG_ON(debug_locks && !lockdep_is_held(lock) && !(cond))
+#else
+#define lockdep_assert_held_unless(lock, cond)
+#endif
+
static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
unsigned long n);
+void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline int __must_check
static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
- lockdep_assert_held(&obj->base.dev->struct_mutex);
+ lockdep_assert_held_unless(&obj->base.dev->struct_mutex,
+ i915_gem_object_is_dead(obj));
GEM_BUG_ON(!obj->mm.pages);
obj->mm.pages_pin_count++;
static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
- lockdep_assert_held(&obj->base.dev->struct_mutex);
+ lockdep_assert_held_unless(&obj->base.dev->struct_mutex,
+ i915_gem_object_is_dead(obj));
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
GEM_BUG_ON(!obj->mm.pages);
__i915_gem_object_unpin_pages(obj);
}
-int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
+void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
enum i915_map_type {
I915_MAP_WB = 0,
void i915_gem_restore_fences(struct drm_device *dev);
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
-void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
-void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
/* i915_gem_context.c */
int __must_check i915_gem_context_init(struct drm_device *dev);
return 0;
}
-static int
+static struct sg_table *
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
struct address_space *mapping = obj->base.filp->f_mapping;
int i;
if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
struct page *page;
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page))
- return PTR_ERR(page);
+ return ERR_CAST(page);
src = kmap_atomic(page);
memcpy(vaddr, src, PAGE_SIZE);
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
if (sg_alloc_table(st, 1, GFP_KERNEL)) {
kfree(st);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
sg = st->sgl;
sg_dma_address(sg) = obj->phys_handle->busaddr;
sg_dma_len(sg) = obj->base.size;
- obj->mm.pages = st;
- return 0;
+ return st;
}
static void
-i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
+__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj)
{
- int ret;
-
GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
- ret = i915_gem_object_set_to_cpu_domain(obj, true);
- if (WARN_ON(ret)) {
- /* In the event of a disaster, abandon all caches and
- * hope for the best.
- */
- obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- }
-
if (obj->mm.madv == I915_MADV_DONTNEED)
obj->mm.dirty = false;
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
+ i915_gem_clflush_object(obj, false);
+
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+}
+
+static void
+i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ __i915_gem_object_release_shmem(obj);
+
if (obj->mm.dirty) {
struct address_space *mapping = obj->base.filp->f_mapping;
char *vaddr = obj->phys_handle->vaddr;
obj->mm.dirty = false;
}
- sg_free_table(obj->mm.pages);
- kfree(obj->mm.pages);
+ sg_free_table(pages);
+ kfree(pages);
}
static void
if (ret)
return ret;
- ret = __i915_gem_object_put_pages(obj);
- if (ret)
- return ret;
+ __i915_gem_object_put_pages(obj);
+ if (obj->mm.pages)
+ return -EBUSY;
/* create a new object */
phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_device *dev = obj->base.dev;
void *vaddr = obj->phys_handle->vaddr + args->offset;
I915_WAIT_LOCKED |
I915_WAIT_ALL,
MAX_SCHEDULE_TIMEOUT,
- to_rps_client(file_priv));
+ to_rps_client(file));
if (ret)
return ret;
}
/* Try to discard unwanted pages */
-static void
-i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
+void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
struct address_space *mapping;
}
static void
-i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
+i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
struct sgt_iter sgt_iter;
struct page *page;
- int ret;
- GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
-
- ret = i915_gem_object_set_to_cpu_domain(obj, true);
- if (WARN_ON(ret)) {
- /* In the event of a disaster, abandon all caches and
- * hope for the best.
- */
- i915_gem_clflush_object(obj, true);
- obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- }
+ __i915_gem_object_release_shmem(obj);
- i915_gem_gtt_finish_object(obj);
+ i915_gem_gtt_finish_pages(obj, pages);
if (i915_gem_object_needs_bit17_swizzle(obj))
- i915_gem_object_save_bit_17_swizzle(obj);
-
- if (obj->mm.madv == I915_MADV_DONTNEED)
- obj->mm.dirty = false;
+ i915_gem_object_save_bit_17_swizzle(obj, pages);
- for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
+ for_each_sgt_page(page, sgt_iter, pages) {
if (obj->mm.dirty)
set_page_dirty(page);
}
obj->mm.dirty = false;
- sg_free_table(obj->mm.pages);
- kfree(obj->mm.pages);
+ sg_free_table(pages);
+ kfree(pages);
}
static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
radix_tree_delete(&obj->mm.get_page.radix, iter.index);
}
-int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
- const struct drm_i915_gem_object_ops *ops = obj->ops;
+ struct sg_table *pages;
lockdep_assert_held(&obj->base.dev->struct_mutex);
- if (!obj->mm.pages)
- return 0;
-
if (i915_gem_object_has_pinned_pages(obj))
- return -EBUSY;
+ return;
GEM_BUG_ON(obj->bind_count);
/* ->put_pages might need to allocate memory for the bit17 swizzle
* array, hence protect them from being reaped by removing them from gtt
* lists early. */
- list_del(&obj->global_list);
+ pages = fetch_and_zero(&obj->mm.pages);
+ GEM_BUG_ON(!pages);
if (obj->mm.mapping) {
void *ptr;
__i915_gem_object_reset_page_iter(obj);
- ops->put_pages(obj);
- obj->mm.pages = NULL;
-
- i915_gem_object_invalidate(obj);
-
- return 0;
+ obj->ops->put_pages(obj, pages);
}
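
fetch_and_zero() used above is an i915 helper macro that reads a variable and clears it in one expression, so obj->mm.pages is detached from the object before the backend's ->put_pages() runs. A standalone sketch of the idiom follows, with a local approximation of the macro (GNU C statement expressions; the object and helpers below are hypothetical, not kernel code).

#include <assert.h>
#include <stddef.h>
#include <stdlib.h>

/* Rough stand-in for i915's fetch_and_zero(): return *ptr and reset it to 0. */
#define fetch_and_zero(ptr) ({			\
	typeof(*(ptr)) __val = *(ptr);		\
	*(ptr) = (typeof(*(ptr)))0;		\
	__val;					\
})

struct object { int *pages; };

static void put_pages(struct object *obj)
{
	int *pages = fetch_and_zero(&obj->pages);	/* detach first */

	if (!pages)
		return;					/* already released */
	free(pages);					/* then release */
}

int main(void)
{
	struct object obj = { .pages = malloc(4 * sizeof(int)) };

	put_pages(&obj);
	assert(obj.pages == NULL);	/* a second call is now a safe no-op */
	put_pages(&obj);
	return 0;
}
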
static unsigned int swiotlb_max_size(void)
#endif
}
-static int
+static struct sg_table *
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
* wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache
*/
- BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
- BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+ GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
+ GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
max_segment = swiotlb_max_size();
if (!max_segment)
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
page_count = obj->base.size / PAGE_SIZE;
if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
kfree(st);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
/* Get the list of pages out of our struct file. They'll be pinned
}
if (sg) /* loop terminated early; short sg table */
sg_mark_end(sg);
- obj->mm.pages = st;
- ret = i915_gem_gtt_prepare_object(obj);
+ ret = i915_gem_gtt_prepare_pages(obj, st);
if (ret)
goto err_pages;
if (i915_gem_object_needs_bit17_swizzle(obj))
- i915_gem_object_do_bit_17_swizzle(obj);
+ i915_gem_object_do_bit_17_swizzle(obj, st);
if (i915_gem_object_is_tiled(obj) &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
__i915_gem_object_pin_pages(obj);
- return 0;
+ return st;
err_pages:
sg_mark_end(sg);
if (ret == -ENOSPC)
ret = -ENOMEM;
- return ret;
+ return ERR_PTR(ret);
+}
+
+void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+ obj->mm.get_page.sg_pos = pages->sgl;
+ obj->mm.get_page.sg_idx = 0;
+
+ obj->mm.pages = pages;
+}
+
+static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+ struct sg_table *pages;
+
+ if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
+ DRM_DEBUG("Attempting to obtain a purgeable object\n");
+ return -EFAULT;
+ }
+
+ pages = obj->ops->get_pages(obj);
+ if (unlikely(IS_ERR(pages)))
+ return PTR_ERR(pages);
+
+ __i915_gem_object_set_pages(obj, pages);
+ return 0;
}
/* Ensure that the associated pages are gathered from the backing storage
*/
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- const struct drm_i915_gem_object_ops *ops = obj->ops;
- int ret;
+ int err;
lockdep_assert_held(&obj->base.dev->struct_mutex);
if (obj->mm.pages)
return 0;
- if (obj->mm.madv != I915_MADV_WILLNEED) {
- DRM_DEBUG("Attempting to obtain a purgeable object\n");
- __i915_gem_object_unpin_pages(obj);
- return -EFAULT;
- }
-
- ret = ops->get_pages(obj);
- if (ret) {
+ err = ____i915_gem_object_get_pages(obj);
+ if (err)
__i915_gem_object_unpin_pages(obj);
- return ret;
- }
- list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
-
- obj->mm.get_page.sg_pos = obj->mm.pages->sgl;
- obj->mm.get_page.sg_idx = 0;
-
- return 0;
+ return err;
}
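
The calling convention around __i915_gem_object_get_pages() is that the caller has already taken a speculative pin (the pin_pages wrapper bumps pages_pin_count before the first acquire), so a failed acquire must drop that pin again, as the error path above does. A standalone sketch of this pin-count plus lazy-acquire pattern, with hypothetical names and no locking:

#include <errno.h>
#include <stdlib.h>

struct object {
	int *pages;			/* lazily allocated backing store */
	unsigned int pages_pin_count;
};

static int acquire_pages(struct object *obj)
{
	obj->pages = calloc(64, sizeof(*obj->pages));
	return obj->pages ? 0 : -ENOMEM;
}

static int pin_pages(struct object *obj)
{
	int err = 0;

	if (obj->pages_pin_count++ == 0 && !obj->pages) {
		err = acquire_pages(obj);
		if (err)
			obj->pages_pin_count--;	/* undo the speculative pin */
	}
	return err;
}

static void unpin_pages(struct object *obj)
{
	obj->pages_pin_count--;		/* pages stay cached until put */
}

int main(void)
{
	struct object obj = { 0 };

	if (pin_pages(&obj))		/* first pin populates the pages */
		return 1;
	pin_pages(&obj);		/* later pins only bump the count */
	unpin_pages(&obj);
	unpin_pages(&obj);
	free(obj.pages);
	return 0;
}
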
/* The 'mapping' part of i915_gem_object_pin_map() below */
return dma_buf;
}
-static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
+static struct sg_table *
+i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
- struct sg_table *sg;
-
- sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sg))
- return PTR_ERR(sg);
-
- obj->mm.pages = sg;
- return 0;
+ return dma_buf_map_attachment(obj->base.import_attach,
+ DMA_BIDIRECTIONAL);
}
-static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
+static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
- dma_buf_unmap_attachment(obj->base.import_attach,
- obj->mm.pages, DMA_BIDIRECTIONAL);
+ dma_buf_unmap_attachment(obj->base.import_attach, pages,
+ DMA_BIDIRECTIONAL);
}
static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
/**
* i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
* @obj: i915 GEM buffer object
+ * @pages: the scattergather list of physical pages
*
* This function fixes up the swizzling in case any page frame number for this
* object has changed in bit 17 since that state has been saved with
* by swapping them out and back in again).
*/
void
-i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
+i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
struct sgt_iter sgt_iter;
struct page *page;
return;
i = 0;
- for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
+ for_each_sgt_page(page, sgt_iter, pages) {
char new_bit_17 = page_to_phys(page) >> 17;
- if ((new_bit_17 & 0x1) !=
- (test_bit(i, obj->bit_17) != 0)) {
+ if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
i915_gem_swizzle_page(page);
set_page_dirty(page);
}
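
The kerneldoc above describes the bookkeeping that i915_gem_object_do_bit_17_swizzle() and i915_gem_object_save_bit_17_swizzle() (below) implement: when swizzling depends on bit 17 of a page's physical address, that bit is recorded per page before the pages can be swapped out, and any page whose bit 17 has changed on return is re-swizzled. A standalone sketch of just the bookkeeping, with simulated physical addresses and no actual swizzling:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NPAGES 8

static bool saved_bit17[NPAGES];

static void save_bit17(const uint64_t *phys)
{
	for (int i = 0; i < NPAGES; i++)
		saved_bit17[i] = (phys[i] >> 17) & 1;
}

static void fixup_bit17(const uint64_t *phys)
{
	for (int i = 0; i < NPAGES; i++) {
		bool now = (phys[i] >> 17) & 1;

		if (now != saved_bit17[i])
			printf("page %d: bit17 flipped, re-swizzle\n", i);
	}
}

int main(void)
{
	uint64_t phys[NPAGES];

	for (int i = 0; i < NPAGES; i++)
		phys[i] = (uint64_t)i << 12;	/* 4KiB pages */
	save_bit17(phys);

	phys[3] ^= 1ull << 17;			/* page 3 moved */
	fixup_bit17(phys);			/* only page 3 is fixed up */
	return 0;
}
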
/**
* i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
* @obj: i915 GEM buffer object
+ * @pages: the scattergather list of physical pages
*
* This function saves the bit 17 of each page frame number so that swizzling
* can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
* be called before the backing storage can be unpinned.
*/
void
-i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
+i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
+ const unsigned int page_count = obj->base.size >> PAGE_SHIFT;
struct sgt_iter sgt_iter;
struct page *page;
- int page_count = obj->base.size >> PAGE_SHIFT;
int i;
if (obj->bit_17 == NULL) {
i = 0;
- for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
+ for_each_sgt_page(page, sgt_iter, pages) {
if (page_to_phys(page) & (1 << 17))
__set_bit(i, obj->bit_17);
else
i915_ggtt_flush(dev_priv);
}
-int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
+int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
- if (!dma_map_sg(&obj->base.dev->pdev->dev,
- obj->mm.pages->sgl, obj->mm.pages->nents,
- PCI_DMA_BIDIRECTIONAL))
- return -ENOSPC;
+ if (dma_map_sg(&obj->base.dev->pdev->dev,
+ pages->sgl, pages->nents,
+ PCI_DMA_BIDIRECTIONAL))
+ return 0;
- return 0;
+ return -ENOSPC;
}
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
vma->node.start, size);
}
-void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
+void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct device *kdev = &dev_priv->drm.pdev->dev;
}
}
- dma_unmap_sg(kdev, obj->mm.pages->sgl, obj->mm.pages->nents,
- PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
}
static void i915_gtt_color_adjust(struct drm_mm_node *node,
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
-int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
+void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
/* Flags used by pin/bind&friends. */
#define PIN_NONBLOCK BIT(0)
kfree(st);
}
-static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
+static struct sg_table *
+i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
unsigned int npages = obj->base.size / PAGE_SIZE;
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
if (sg_alloc_table(st, npages, GFP_KERNEL)) {
kfree(st);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
sg = st->sgl;
sg = __sg_next(sg);
} while (1);
- obj->mm.pages = st;
- if (i915_gem_gtt_prepare_object(obj)) {
- obj->mm.pages = NULL;
+ if (i915_gem_gtt_prepare_pages(obj, st))
goto err;
- }
/* Mark the pages as dontneed whilst they are still pinned. As soon
* as they are unpinned they are allowed to be reaped by the shrinker,
	 * and the contents of this object are only valid whilst active and
	 * pinned.
*/
obj->mm.madv = I915_MADV_DONTNEED;
- return 0;
+ return st;
err:
sg_mark_end(sg);
internal_free_pages(st);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
-static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj)
+static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
- i915_gem_gtt_finish_object(obj);
- internal_free_pages(obj->mm.pages);
+ i915_gem_gtt_finish_pages(obj, pages);
+ internal_free_pages(pages);
obj->mm.dirty = false;
obj->mm.madv = I915_MADV_WILLNEED;
return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}
+static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
+{
+ if (i915_gem_object_unbind(obj) == 0)
+ __i915_gem_object_put_pages(obj);
+ return !obj->mm.pages;
+}
+
/**
* i915_gem_shrink - Shrink buffer object caches
* @dev_priv: i915 device
i915_gem_object_get(obj);
- /* For the unbound phase, this should be a no-op! */
- i915_gem_object_unbind(obj);
- if (__i915_gem_object_put_pages(obj) == 0)
+ if (unsafe_drop_pages(obj))
count += obj->base.size >> PAGE_SHIFT;
i915_gem_object_put(obj);
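
unsafe_drop_pages() above changes the shrinker's accounting: rather than trusting a return code, it re-checks obj->mm.pages afterwards and only counts objects whose pages were actually released (the unbind or the put may legitimately do nothing while the object is still pinned). A standalone sketch of that check-after-the-fact accounting, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct object {
	void *pages;
	unsigned int pin_count;
	size_t size;
};

static void try_put_pages(struct object *obj)
{
	if (obj->pin_count)
		return;			/* still in use, keep the pages */
	free(obj->pages);
	obj->pages = NULL;
}

static int drop_pages(struct object *obj)
{
	try_put_pages(obj);
	return obj->pages == NULL;	/* did we really free anything? */
}

int main(void)
{
	struct object a = { malloc(64), 0, 1 << 20 };
	struct object b = { malloc(64), 1, 1 << 20 };
	size_t freed = 0;

	if (drop_pages(&a))
		freed += a.size;
	if (drop_pages(&b))
		freed += b.size;	/* pinned: not counted */
	printf("freed %zu bytes\n", freed);
	free(b.pages);
	return 0;
}
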
return st;
}
-static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
+static struct sg_table *
+i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
- BUG();
- return -EINVAL;
+ return i915_pages_create_for_stolen(obj->base.dev,
+ obj->stolen->start,
+ obj->stolen->size);
}
-static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
+static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
/* Should only be called during free */
- sg_free_table(obj->mm.pages);
- kfree(obj->mm.pages);
+ sg_free_table(pages);
+ kfree(pages);
}
static void
drm_gem_private_object_init(dev, &obj->base, stolen->size);
i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
- obj->mm.pages = i915_pages_create_for_stolen(dev,
- stolen->start,
- stolen->size);
- if (!obj->mm.pages)
- goto cleanup;
-
- obj->mm.get_page.sg_pos = obj->mm.pages->sgl;
- obj->mm.get_page.sg_idx = 0;
-
- __i915_gem_object_pin_pages(obj);
obj->stolen = stolen;
-
obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
+ if (i915_gem_object_pin_pages(obj))
+ goto cleanup;
+
return obj;
cleanup:
if (gtt_offset == I915_GTT_OFFSET_NONE)
return obj;
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ goto err;
+
vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
- goto err;
+ goto err_pages;
}
/* To simplify the initialisation sequence between KMS and GTT,
ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
- goto err;
+ goto err_pages;
}
vma->pages = obj->mm.pages;
vma->flags |= I915_VMA_GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
+ list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
obj->bind_count++;
- list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
- __i915_gem_object_pin_pages(obj);
-
return obj;
+err_pages:
+ i915_gem_object_unpin_pages(obj);
err:
i915_gem_object_put(obj);
return NULL;
/* Cancel any active worker and force us to re-evaluate gup */
obj->userptr.work = NULL;
- if (obj->mm.pages) {
- /* We are inside a kthread context and can't be interrupted */
- WARN_ON(i915_gem_object_unbind(obj));
- WARN_ON(__i915_gem_object_put_pages(obj));
- }
+ /* We are inside a kthread context and can't be interrupted */
+ if (i915_gem_object_unbind(obj) == 0)
+ __i915_gem_object_put_pages(obj);
+ WARN_ONCE(obj->mm.pages,
+ "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_display=%d\n",
+ obj->bind_count,
+ obj->mm.pages_pin_count,
+ obj->pin_display);
i915_gem_object_put(obj);
mutex_unlock(&dev->struct_mutex);
return ret;
}
-static int
+static struct sg_table *
__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
struct page **pvec, int num_pages)
{
+ struct sg_table *pages;
int ret;
- ret = st_set_pages(&obj->mm.pages, pvec, num_pages);
+ ret = st_set_pages(&pages, pvec, num_pages);
if (ret)
- return ret;
+ return ERR_PTR(ret);
- ret = i915_gem_gtt_prepare_object(obj);
+ ret = i915_gem_gtt_prepare_pages(obj, pages);
if (ret) {
- sg_free_table(obj->mm.pages);
- kfree(obj->mm.pages);
- obj->mm.pages = NULL;
+ sg_free_table(pages);
+ kfree(pages);
+ return ERR_PTR(ret);
}
- return ret;
+ return pages;
}
static int
mutex_lock(&dev->struct_mutex);
if (obj->userptr.work == &work->work) {
+ struct sg_table *pages = ERR_PTR(ret);
+
if (pinned == npages) {
- ret = __i915_gem_userptr_set_pages(obj, pvec, npages);
- if (ret == 0) {
- list_add_tail(&obj->global_list,
- &to_i915(dev)->mm.unbound_list);
- obj->mm.get_page.sg_pos = obj->mm.pages->sgl;
- obj->mm.get_page.sg_idx = 0;
+ pages = __i915_gem_userptr_set_pages(obj, pvec, npages);
+ if (!IS_ERR(pages)) {
+ __i915_gem_object_set_pages(obj, pages);
pinned = 0;
+ pages = NULL;
}
}
- obj->userptr.work = ERR_PTR(ret);
+
+ obj->userptr.work = ERR_CAST(pages);
}
- obj->userptr.workers--;
i915_gem_object_put(obj);
mutex_unlock(&dev->struct_mutex);
kfree(work);
}
-static int
+static struct sg_table *
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
bool *active)
{
* that error back to this function through
* obj->userptr.work = ERR_PTR.
*/
- if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS)
- return -EAGAIN;
-
work = kmalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
obj->userptr.work = &work->work;
- obj->userptr.workers++;
work->obj = i915_gem_object_get(obj);
schedule_work(&work->work);
*active = true;
- return -EAGAIN;
+ return ERR_PTR(-EAGAIN);
}
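
The truncated comment above refers to the userptr error plumbing: obj->userptr.work is NULL while idle, points at the pending work while get_user_pages runs in the worker, and afterwards holds either NULL (success) or an ERR_PTR that the next i915_gem_userptr_get_pages() call reports. A standalone sketch of this three-state handshake, with local ERR_PTR helpers mirroring linux/err.h and hypothetical names:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct object {
	void *work;	/* NULL: idle, valid ptr: pending, ERR_PTR: failed */
};

static int get_pages(struct object *obj, int fast_path_ok)
{
	if (obj->work) {
		if (IS_ERR(obj->work))
			return PTR_ERR(obj->work);	/* worker failed */
		return -EAGAIN;				/* still running */
	}
	if (!fast_path_ok) {
		obj->work = &obj->work;			/* "schedule" work */
		return -EAGAIN;
	}
	return 0;
}

static void worker_done(struct object *obj, long err)
{
	obj->work = err ? ERR_PTR(err) : NULL;
}

int main(void)
{
	struct object obj = { 0 };

	printf("%d\n", get_pages(&obj, 0));	/* -EAGAIN: scheduled */
	printf("%d\n", get_pages(&obj, 0));	/* -EAGAIN: still pending */
	worker_done(&obj, -EFAULT);
	printf("%d\n", get_pages(&obj, 0));	/* -EFAULT: propagated */
	return 0;
}
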
-static int
+static struct sg_table *
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
const int num_pages = obj->base.size >> PAGE_SHIFT;
struct page **pvec;
+ struct sg_table *pages;
int pinned, ret;
bool active;
if (obj->userptr.work) {
/* active flag should still be held for the pending work */
if (IS_ERR(obj->userptr.work))
- return PTR_ERR(obj->userptr.work);
+ return ERR_CAST(obj->userptr.work);
else
- return -EAGAIN;
+ return ERR_PTR(-EAGAIN);
}
/* Let the mmu-notifier know that we have begun and need cancellation */
ret = __i915_gem_userptr_set_active(obj, true);
if (ret)
- return ret;
+ return ERR_PTR(ret);
pvec = NULL;
pinned = 0;
GFP_TEMPORARY);
if (pvec == NULL) {
__i915_gem_userptr_set_active(obj, false);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
active = false;
if (pinned < 0)
- ret = pinned, pinned = 0;
+ pages = ERR_PTR(pinned), pinned = 0;
else if (pinned < num_pages)
- ret = __i915_gem_userptr_get_pages_schedule(obj, &active);
+ pages = __i915_gem_userptr_get_pages_schedule(obj, &active);
else
- ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
- if (ret) {
+ pages = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
+ if (IS_ERR(pages)) {
__i915_gem_userptr_set_active(obj, active);
release_pages(pvec, pinned, 0);
}
drm_free_large(pvec);
- return ret;
+ return pages;
}
static void
-i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
+i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
{
struct sgt_iter sgt_iter;
struct page *page;
if (obj->mm.madv != I915_MADV_WILLNEED)
obj->mm.dirty = false;
- i915_gem_gtt_finish_object(obj);
+ i915_gem_gtt_finish_pages(obj, pages);
- for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
+ for_each_sgt_page(page, sgt_iter, pages) {
if (obj->mm.dirty)
set_page_dirty(page);
}
obj->mm.dirty = false;
- sg_free_table(obj->mm.pages);
- kfree(obj->mm.pages);
+ sg_free_table(pages);
+ kfree(pages);
}
static void