]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/commitdiff
gpu: ion: Make ion_free asynchronous
authorRebecca Schultz Zavin <rebecca@android.com>
Fri, 13 Dec 2013 22:24:35 +0000 (14:24 -0800)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 14 Dec 2013 16:55:41 +0000 (08:55 -0800)
Add the ability for a heap to free buffers asynchronously.  Freed buffers
are placed on a free list and freed from a low priority background thread.
If allocations from a particular heap fail, the free list is drained.  This
patch also enables asynchronous frees from the chunk heap.

Signed-off-by: Rebecca Schultz Zavin <rebecca@android.com>
[jstultz: modified patch to apply to staging directory]
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/staging/android/ion/ion.c
drivers/staging/android/ion/ion.h
drivers/staging/android/ion/ion_chunk_heap.c
drivers/staging/android/ion/ion_priv.h
drivers/staging/android/ion/ion_system_heap.c

index ba65bef05b6c7c1d2f93ae832d4296d85798ffbc..b965f155953620b2d981d5698328b7f81f217b1d 100644 (file)
 
 #include <linux/device.h>
 #include <linux/file.h>
+#include <linux/freezer.h>
 #include <linux/fs.h>
 #include <linux/anon_inodes.h>
+#include <linux/kthread.h>
 #include <linux/list.h>
 #include <linux/memblock.h>
 #include <linux/miscdevice.h>
@@ -26,6 +28,7 @@
 #include <linux/mm.h>
 #include <linux/mm_types.h>
 #include <linux/rbtree.h>
+#include <linux/rtmutex.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/seq_file.h>
@@ -140,6 +143,7 @@ static void ion_buffer_add(struct ion_device *dev,
 
 static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);
 
+static bool ion_heap_drain_freelist(struct ion_heap *heap);
 /* this function should only be called while dev->lock is held */
 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
                                     struct ion_device *dev,
@@ -161,9 +165,16 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
        kref_init(&buffer->ref);
 
        ret = heap->ops->allocate(heap, buffer, len, align, flags);
+
        if (ret) {
-               kfree(buffer);
-               return ERR_PTR(ret);
+               if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
+                       goto err2;
+
+               ion_heap_drain_freelist(heap);
+               ret = heap->ops->allocate(heap, buffer, len, align,
+                                         flags);
+               if (ret)
+                       goto err2;
        }
 
        buffer->dev = dev;
@@ -214,27 +225,42 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 err:
        heap->ops->unmap_dma(heap, buffer);
        heap->ops->free(buffer);
+err2:
        kfree(buffer);
        return ERR_PTR(ret);
 }
 
-static void ion_buffer_destroy(struct kref *kref)
+static void _ion_buffer_destroy(struct ion_buffer *buffer)
 {
-       struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
-       struct ion_device *dev = buffer->dev;
-
        if (WARN_ON(buffer->kmap_cnt > 0))
                buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
        buffer->heap->ops->unmap_dma(buffer->heap, buffer);
        buffer->heap->ops->free(buffer);
-       mutex_lock(&dev->buffer_lock);
-       rb_erase(&buffer->node, &dev->buffers);
-       mutex_unlock(&dev->buffer_lock);
        if (buffer->flags & ION_FLAG_CACHED)
                kfree(buffer->dirty);
        kfree(buffer);
 }
 
+static void ion_buffer_destroy(struct kref *kref)
+{
+       struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
+       struct ion_heap *heap = buffer->heap;
+       struct ion_device *dev = buffer->dev;
+
+       mutex_lock(&dev->buffer_lock);
+       rb_erase(&buffer->node, &dev->buffers);
+       mutex_unlock(&dev->buffer_lock);
+
+       if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) {
+               rt_mutex_lock(&heap->lock);
+               list_add(&buffer->list, &heap->free_list);
+               rt_mutex_unlock(&heap->lock);
+               wake_up(&heap->waitqueue);
+               return;
+       }
+       _ion_buffer_destroy(buffer);
+}
+
 static void ion_buffer_get(struct ion_buffer *buffer)
 {
        kref_get(&buffer->ref);
@@ -1272,13 +1298,81 @@ static const struct file_operations debug_heap_fops = {
        .release = single_release,
 };
 
+static size_t ion_heap_free_list_is_empty(struct ion_heap *heap)
+{
+       bool is_empty;
+
+       rt_mutex_lock(&heap->lock);
+       is_empty = list_empty(&heap->free_list);
+       rt_mutex_unlock(&heap->lock);
+
+       return is_empty;
+}
+
+static int ion_heap_deferred_free(void *data)
+{
+       struct ion_heap *heap = data;
+
+       while (true) {
+               struct ion_buffer *buffer;
+
+               wait_event_freezable(heap->waitqueue,
+                                    !ion_heap_free_list_is_empty(heap));
+
+               rt_mutex_lock(&heap->lock);
+               if (list_empty(&heap->free_list)) {
+                       rt_mutex_unlock(&heap->lock);
+                       continue;
+               }
+               buffer = list_first_entry(&heap->free_list, struct ion_buffer,
+                                         list);
+               list_del(&buffer->list);
+               rt_mutex_unlock(&heap->lock);
+               _ion_buffer_destroy(buffer);
+       }
+
+       return 0;
+}
+
+static bool ion_heap_drain_freelist(struct ion_heap *heap)
+{
+       struct ion_buffer *buffer, *tmp;
+
+       if (ion_heap_free_list_is_empty(heap))
+               return false;
+       rt_mutex_lock(&heap->lock);
+       list_for_each_entry_safe(buffer, tmp, &heap->free_list, list) {
+               _ion_buffer_destroy(buffer);
+               list_del(&buffer->list);
+       }
+       BUG_ON(!list_empty(&heap->free_list));
+       rt_mutex_unlock(&heap->lock);
+
+
+       return true;
+}
+
 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
 {
+       struct sched_param param = { .sched_priority = 0 };
+
        if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
            !heap->ops->unmap_dma)
                pr_err("%s: can not add heap with invalid ops struct.\n",
                       __func__);
 
+       if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) {
+               INIT_LIST_HEAD(&heap->free_list);
+               rt_mutex_init(&heap->lock);
+               init_waitqueue_head(&heap->waitqueue);
+               heap->task = kthread_run(ion_heap_deferred_free, heap,
+                                        "%s", heap->name);
+               sched_setscheduler(heap->task, SCHED_IDLE, &param);
+               if (IS_ERR(heap->task))
+                       pr_err("%s: creating thread for deferred free failed\n",
+                              __func__);
+       }
+
        heap->dev = dev;
        down_write(&dev->lock);
        /* use negative heap->id to reverse the priority -- when traversing
index 976123b189ae6178592cbb0396d5023ec0415159..679031ceee91aa87c195958976033918c676be38 100644 (file)
@@ -46,7 +46,7 @@ enum ion_heap_type {
 #define ION_NUM_HEAP_IDS               sizeof(unsigned int) * 8
 
 /**
- * heap flags - the lower 16 bits are used by core ion, the upper 16
+ * allocation flags - the lower 16 bits are used by core ion, the upper 16
  * bits are reserved for use by the heaps themselves.
  */
 #define ION_FLAG_CACHED 1              /* mappings of this buffer should be
index 60cd91ca20711e2753c936a760783a4f1bcd3cfe..ac7cf132985b044597c58d3a3f8ddf687b2b2bd3 100644 (file)
@@ -160,7 +160,8 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
        gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
        chunk_heap->heap.ops = &chunk_heap_ops;
        chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
-       pr_info("%s: base %lu size %ld align %ld\n", __func__, chunk_heap->base,
+       chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+       pr_info("%s: base %lu size %u align %ld\n", __func__, chunk_heap->base,
                heap_data->size, heap_data->align);
 
        return &chunk_heap->heap;
index cfb4264fe4994babf7fb173fa0ba66489ec92b5d..ab1a8d956ada5693f153a55658c5a8e3fd60cc8e 100644 (file)
@@ -58,7 +58,10 @@ struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
 */
 struct ion_buffer {
        struct kref ref;
-       struct rb_node node;
+       union {
+               struct rb_node node;
+               struct list_head list;
+       };
        struct ion_device *dev;
        struct ion_heap *heap;
        unsigned long flags;
@@ -108,16 +111,26 @@ struct ion_heap_ops {
                         struct vm_area_struct *vma);
 };
 
+/**
+ * heap flags - flags between the heaps and core ion code
+ */
+#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)
+
 /**
  * struct ion_heap - represents a heap in the system
  * @node:              rb node to put the heap on the device's tree of heaps
  * @dev:               back pointer to the ion_device
  * @type:              type of heap
  * @ops:               ops struct as above
+ * @flags:             flags
  * @id:                        id of heap, also indicates priority of this heap when
  *                     allocating.  These are specified by platform data and
  *                     MUST be unique
  * @name:              used for debugging
+ * @free_list:         free list head if deferred free is used
+ * @lock:              protects the free list
+ * @waitqueue:         queue to wait on from deferred free thread
+ * @task:              task struct of deferred free thread
  * @debug_show:                called when heap debug file is read to add any
  *                     heap specific debug info to output
  *
@@ -131,8 +144,13 @@ struct ion_heap {
        struct ion_device *dev;
        enum ion_heap_type type;
        struct ion_heap_ops *ops;
+       unsigned long flags;
        unsigned int id;
        const char *name;
+       struct list_head free_list;
+       struct rt_mutex lock;
+       wait_queue_head_t waitqueue;
+       struct task_struct *task;
        int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
 };
 
index 3ca704e3ee141281fede352330c3fb3e8a24a891..6665797f5370e1ed7a71848add7a682770314c30 100644 (file)
@@ -283,6 +283,7 @@ struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
                return ERR_PTR(-ENOMEM);
        heap->heap.ops = &system_heap_ops;
        heap->heap.type = ION_HEAP_TYPE_SYSTEM;
+       heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
        heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
                              GFP_KERNEL);
        if (!heap->pools)