/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>
#include <linux/sched/task.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return (buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}
bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}
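/*
 * For buffers that fault in their user mappings, the low bit of each
 * struct page pointer stored in buffer->pages[] doubles as a per-page
 * dirty flag.  The helpers below mask the bit off, test it, set it and
 * clear it.
 */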
static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}
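/*
 * Allocate a buffer from @heap.  If the first attempt fails and the heap
 * keeps a deferred-free list, the list is drained and the allocation is
 * retried once.  For buffers that fault in their user mappings a page
 * array is built from the sg_table so individual pages can be tracked.
 */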
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	if (buffer->sg_table == NULL) {
		WARN_ONCE(1, "This heap needs to set the sgtable");
		ret = -EINVAL;
		goto err1;
	}

	table = buffer->sg_table;
	buffer->dev = dev;
	buffer->size = len;

	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err1;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}
	}

	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/*
	 * this will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here.  However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg. The implicit contract here is that
	 * memory coming from the heaps is ready for dma, ie if it has a
	 * cached mapping that mapping has been invalidated
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
	}
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}
void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	vfree(buffer->pages);
	kfree(buffer);
}
static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}
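/*
 * Buffer reference counting: buffer->ref is taken once per handle (plus
 * once per exported dma_buf) and dropping the last reference runs
 * _ion_buffer_destroy(), which either queues the buffer on the heap's
 * deferred-free list or frees it immediately.
 */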
static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}
static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}
static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}
static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}
static void ion_handle_kmap_put(struct ion_handle *);
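/*
 * Final teardown of a handle, run when its last kref is dropped: release
 * any kernel mappings still counted against the handle, remove it from
 * the client's idr and handle rbtree, then drop its buffer reference.
 */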
static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}
static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

int ion_handle_put_nolock(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}
int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = ion_handle_put_nolock(handle);
	mutex_unlock(&client->lock);

	return ret;
}
static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}
struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
					       int id)
{
	struct ion_handle *handle;

	handle = idr_find(&client->idr, id);
	if (handle)
		ion_handle_get(handle);

	return handle ? handle : ERR_PTR(-EINVAL);
}
struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
					int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = ion_handle_get_by_id_nolock(client, id);
	mutex_unlock(&client->lock);

	return handle;
}
static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return idr_find(&client->idr, handle->id) == handle;
}
static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}
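/*
 * ion_alloc() walks the device's heaps in priority order until one of the
 * heaps named in @heap_id_mask satisfies the request, then wraps the new
 * buffer in a handle owned by @client.  The extra buffer reference taken
 * by ion_buffer_create() is dropped once the handle holds its own.
 */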
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
void ion_free_nolock(struct ion_client *client,
		     struct ion_handle *handle)
{
	if (!ion_handle_validate(client, handle)) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put_nolock(handle);
}
void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	ion_free_nolock(client, handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);
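/*
 * Kernel mappings are reference counted at two levels: buffer->kmap_cnt
 * counts all users of the heap's map_kernel() mapping, while
 * handle->kmap_cnt counts the mappings taken through one handle.  The
 * buffer is only unmapped when the buffer-level count drops to zero.
 */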
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}
static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}
static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}
static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	if (!handle->kmap_cnt) {
		WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
		return;
	}
	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);
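/*
 * Debugfs support: ion_root_client points at the device's client rbtree so
 * a debug file can check whether its client still exists before dumping
 * it, and debugfs_mutex serializes those dumps against client destruction.
 */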
static struct mutex debugfs_mutex;
static struct rb_root *ion_root_client;

static int is_client_alive(struct ion_client *client)
{
	struct rb_node *node;
	struct ion_client *tmp;
	struct ion_device *dev;

	node = ion_root_client->rb_node;
	dev = container_of(ion_root_client, struct ion_device, clients);

	down_read(&dev->lock);
	while (node) {
		tmp = rb_entry(node, struct ion_client, node);
		if (client < tmp) {
			node = node->rb_left;
		} else if (client > tmp) {
			node = node->rb_right;
		} else {
			up_read(&dev->lock);
			return 1;
		}
	}

	up_read(&dev->lock);
	return 0;
}
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {NULL};
	int i;

	mutex_lock(&debugfs_mutex);
	if (!is_client_alive(client)) {
		seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n",
			   client);
		mutex_unlock(&debugfs_mutex);
		return 0;
	}

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	mutex_unlock(&debugfs_mutex);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}
static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int ion_get_client_serial(const struct rb_root *root,
				 const unsigned char *name)
{
	int serial = -1;
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct ion_client *client = rb_entry(node, struct ion_client,
						     node);

		if (strcmp(client->name, name))
			continue;
		serial = max(serial, client->display_serial);
	}
	return serial + 1;
}
struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		goto err_put_task_struct;

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->task = task;
	client->pid = pid;
	client->name = kstrdup(name, GFP_KERNEL);
	if (!client->name)
		goto err_free_client;

	down_write(&dev->lock);
	client->display_serial = ion_get_client_serial(&dev->clients, name);
	client->display_name = kasprintf(
		GFP_KERNEL, "%s-%d", name, client->display_serial);
	if (!client->display_name) {
		up_write(&dev->lock);
		goto err_free_client_name;
	}
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(client->display_name, 0664,
						 dev->clients_debug_root,
						 client, &debug_client_fops);
	if (!client->debug_root) {
		char buf[256], *path;

		path = dentry_path(dev->clients_debug_root, buf, 256);
		pr_err("Failed to create client debugfs at %s/%s\n",
		       path, client->display_name);
	}

	up_write(&dev->lock);

	return client;

err_free_client_name:
	kfree(client->name);
err_free_client:
	kfree(client);
err_put_task_struct:
	if (task)
		put_task_struct(current->group_leader);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);
void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	mutex_lock(&debugfs_mutex);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client->display_name);
	kfree(client->name);
	kfree(client);
	mutex_unlock(&debugfs_mutex);
}
EXPORT_SYMBOL(ion_client_destroy);
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}
static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}
void ion_pages_sync_for_device(struct device *dev, struct page *page,
			       size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
	 * for the targeted device, but this works on the currently targeted
	 * hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}
struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};
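/*
 * Sync path for buffers with faulted user mappings: pages that were marked
 * dirty by the fault handler are flushed for the device and their user
 * mappings zapped, so the next CPU access faults again and re-marks the
 * page dirty.
 */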
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
						  PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start);
	}
	mutex_unlock(&buffer->lock);
}
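/*
 * Fault handler for faulted user mappings: mark the faulting page dirty,
 * then insert its pfn into the faulting vma.
 */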
static int ion_vm_fault(struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vmf->vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vmf->vma, vmf->address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}
static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}
static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}
static const struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
				 VM_DONTDUMP;
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}
static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}
static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}
static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	return PTR_ERR_OR_ZERO(vaddr);
}
static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);

	return 0;
}
static const struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};
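/*
 * dma_buf export path: the handle's buffer gains one reference while the
 * dma_buf exists; ion_dma_buf_release() drops it when the last reference
 * to the exported dma_buf goes away.
 */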
struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	mutex_unlock(&client->lock);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);
int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);
struct ion_handle *ion_import_dma_buf(struct ion_client *client,
				      struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		mutex_unlock(&client->lock);
		goto end;
	}

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle)) {
		mutex_unlock(&client->lock);
		goto end;
	}

	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);
struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	handle = ion_import_dma_buf(client, dmabuf);
	dma_buf_put(dmabuf);

	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf_fd);
int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}
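/*
 * Heap enumeration for the heap-query ioctl: with a NULL query->heaps
 * pointer only the number of registered heaps is reported in query->cnt;
 * otherwise up to query->cnt ion_heap_data records are copied to the
 * user buffer.
 */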
int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query)
{
	struct ion_device *dev = client->dev;
	struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
	int ret = -EINVAL, cnt = 0, max_cnt;
	struct ion_heap *heap;
	struct ion_heap_data hdata;

	memset(&hdata, 0, sizeof(hdata));

	down_read(&dev->lock);
	if (!buffer) {
		query->cnt = dev->heap_cnt;
		ret = 0;
		goto out;
	}

	if (query->cnt <= 0)
		goto out;

	max_cnt = query->cnt;

	plist_for_each_entry(heap, &dev->heaps, node) {
		strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
		hdata.name[sizeof(hdata.name) - 1] = '\0';
		hdata.type = heap->type;
		hdata.heap_id = heap->id;

		if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
			ret = -EFAULT;
			goto out;
		}

		cnt++;
		if (cnt >= max_cnt)
			break;
	}

	query->cnt = cnt;
	ret = 0;
out:
	up_read(&dev->lock);
	return ret;
}
static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}
static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];

	pr_debug("%s: %d\n", __func__, __LINE__);
	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = ion_client_create(dev, debug_name);
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}
static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl = compat_ion_ioctl,
};
static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}
static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_puts(s, "----------------------------------------------------\n");

	mutex_lock(&debugfs_mutex);
	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	mutex_unlock(&debugfs_mutex);

	seq_puts(s, "----------------------------------------------------\n");
	seq_puts(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   kref_read(&buffer->ref));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_puts(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
			   heap->free_list_size);
	seq_puts(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}
static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
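/*
 * Per-heap <name>_shrink debugfs file: writing a value runs the heap
 * shrinker with that scan target (writing 0 scans everything currently
 * counted), reading reports how many objects the shrinker could free.
 */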
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = val;

	if (!val) {
		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
		sc.nr_to_scan = objs;
	}

	heap->shrinker.scan_objects(&heap->shrinker, &sc);
	return 0;
}
static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct dentry *debug_file;

	if (!heap->ops->allocate || !heap->ops->free)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	spin_lock_init(&heap->free_lock);
	heap->free_list_size = 0;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
		ion_heap_init_shrinker(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debug_file = debugfs_create_file(heap->name, 0664,
					 dev->heaps_debug_root, heap,
					 &debug_heap_fops);

	if (!debug_file) {
		char buf[256], *path;

		path = dentry_path(dev->heaps_debug_root, buf, 256);
		pr_err("Failed to create heap debugfs at %s/%s\n",
		       path, heap->name);
	}

	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debug_file = debugfs_create_file(
			debug_name, 0644, dev->heaps_debug_root, heap,
			&debug_shrink_fops);
		if (!debug_file) {
			char buf[256], *path;

			path = dentry_path(dev->heaps_debug_root, buf, 256);
			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
			       path, debug_name);
		}
	}

	dev->heap_cnt++;
	up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(*idev), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root) {
		pr_err("ion: failed to create debugfs root directory.\n");
		goto debugfs_done;
	}
	idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
	if (!idev->heaps_debug_root) {
		pr_err("ion: failed to create debugfs heaps directory.\n");
		goto debugfs_done;
	}
	idev->clients_debug_root = debugfs_create_dir("clients",
						      idev->debug_root);
	if (!idev->clients_debug_root)
		pr_err("ion: failed to create debugfs clients directory.\n");

debugfs_done:

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	ion_root_client = &idev->clients;
	mutex_init(&debugfs_mutex);
	return idev;
}
EXPORT_SYMBOL(ion_device_create);
void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	debugfs_remove_recursive(dev->debug_root);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}
EXPORT_SYMBOL(ion_device_destroy);