/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"
/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @user_clients:	list of all the clients created from userspace
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
			     unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
	struct dentry *heaps_debug_root;
	struct dentry *clients_debug_root;
};
/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @idr:		an idr space for allocating handle ids
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @display_name:	used for debugging (unique version of @name)
 * @display_serial:	used for debugging (to make display_name unique)
 * @task:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the handles tree
 * as well as the handles themselves, and should be held while modifying
 * either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	const char *name;
	char *display_name;
	int display_serial;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};
/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @id:			client-unique id allocated by client->idr
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return (buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}
bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}
static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}
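/*
 * Illustrative sketch (not part of the driver): the helpers above stash a
 * "dirty" flag in bit 0 of a struct page pointer, which is always zero for a
 * properly aligned pointer.  A hypothetical pages[] array would be used like
 * this, stripping the tag before the pointer is dereferenced.
 */
#if 0
static void example_mark_and_flush(struct page **pages, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		ion_buffer_page_dirty(pages + i);	/* set bit 0 */

	for (i = 0; i < nr; i++) {
		if (ion_buffer_page_is_dirty(pages[i]))
			/* untagged pointer is safe to use */
			flush_dcache_page(ion_buffer_page(pages[i]));
		ion_buffer_page_clean(pages + i);	/* clear bit 0 */
	}
}
#endif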
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(table == NULL,
		      "heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		ret = -EINVAL;
		goto err1;
	}

	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}
	}

	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/*
	 * this will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here.  However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg. The implicit contract here is that
	 * memory coming from the heaps is ready for dma, ie if it has a
	 * cached mapping that mapping has been invalidated
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
	}
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}
void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	vfree(buffer->pages);
	kfree(buffer);
}
static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}
static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}
static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}
static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}
static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}
static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);
static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}
struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}
static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}
static int ion_handle_put_nolock(struct ion_handle *handle)
{
	int ret;

	ret = kref_put(&handle->ref, ion_handle_destroy);

	return ret;
}
int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = ion_handle_put_nolock(handle);
	mutex_unlock(&client->lock);

	return ret;
}
static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}
static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
						       int id)
{
	struct ion_handle *handle;

	handle = idr_find(&client->idr, id);
	if (handle)
		ion_handle_get(handle);

	return handle ? handle : ERR_PTR(-EINVAL);
}
struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
					int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = ion_handle_get_by_id_nolock(client, id);
	mutex_unlock(&client->lock);

	return handle;
}
static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return idr_find(&client->idr, handle->id) == handle;
}
static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
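/*
 * Minimal usage sketch (illustrative only, not part of the driver): a
 * hypothetical in-kernel caller allocates one page from any system heap and
 * releases it again.  The client is assumed to come from ion_client_create().
 */
#if 0
static int example_alloc_free(struct ion_client *client)
{
	struct ion_handle *handle;

	/* allocate from any heap whose id bit is set in the mask */
	handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE,
			   ION_HEAP_SYSTEM_MASK, ION_FLAG_CACHED);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	ion_free(client, handle);
	return 0;
}
#endif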
static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put_nolock(handle);
}
void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	ion_free_nolock(client, handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);
int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
		       __func__, buffer->heap->name, buffer->heap->type);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}
static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}
static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}
static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	if (!handle->kmap_cnt) {
		WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
		return;
	}
	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);
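/*
 * Illustrative sketch (not part of the driver): CPU access to a handle
 * through a kernel mapping.  The handle is assumed to come from a prior
 * ion_alloc() call on the same client.
 */
#if 0
static int example_fill_buffer(struct ion_client *client,
			       struct ion_handle *handle, size_t len)
{
	void *vaddr = ion_map_kernel(client, handle);

	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, 0xa5, len);	/* touch the buffer from the CPU */
	ion_unmap_kernel(client, handle);
	return 0;
}
#endif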
static struct mutex debugfs_mutex;
static struct rb_root *ion_root_client;

static int is_client_alive(struct ion_client *client)
{
	struct rb_node *node;
	struct ion_client *tmp;
	struct ion_device *dev;

	node = ion_root_client->rb_node;
	dev = container_of(ion_root_client, struct ion_device, clients);

	down_read(&dev->lock);
	while (node) {
		tmp = rb_entry(node, struct ion_client, node);
		if (client < tmp) {
			node = node->rb_left;
		} else if (client > tmp) {
			node = node->rb_right;
		} else {
			up_read(&dev->lock);
			return 1;
		}
	}

	up_read(&dev->lock);
	return 0;
}
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {NULL};
	int i;

	mutex_lock(&debugfs_mutex);
	if (!is_client_alive(client)) {
		seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n",
			   client);
		mutex_unlock(&debugfs_mutex);
		return 0;
	}

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	mutex_unlock(&debugfs_mutex);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}
static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}
static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int ion_get_client_serial(const struct rb_root *root,
				 const unsigned char *name)
{
	int serial = -1;
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct ion_client *client = rb_entry(node, struct ion_client,
						     node);

		if (strcmp(client->name, name))
			continue;
		serial = max(serial, client->display_serial);
	}
	return serial + 1;
}
struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client)
		goto err_put_task_struct;

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->task = task;
	client->pid = pid;
	client->name = kstrdup(name, GFP_KERNEL);
	if (!client->name)
		goto err_free_client;

	down_write(&dev->lock);
	client->display_serial = ion_get_client_serial(&dev->clients, name);
	client->display_name = kasprintf(
		GFP_KERNEL, "%s-%d", name, client->display_serial);
	if (!client->display_name) {
		up_write(&dev->lock);
		goto err_free_client_name;
	}
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(client->display_name, 0664,
						 dev->clients_debug_root,
						 client, &debug_client_fops);
	if (!client->debug_root) {
		char buf[256], *path;

		path = dentry_path(dev->clients_debug_root, buf, 256);
		pr_err("Failed to create client debugfs at %s/%s\n",
		       path, client->display_name);
	}

	up_write(&dev->lock);

	return client;

err_free_client_name:
	kfree(client->name);
err_free_client:
	kfree(client);
err_put_task_struct:
	if (task)
		put_task_struct(current->group_leader);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);
void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	mutex_lock(&debugfs_mutex);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client->display_name);
	kfree(client->name);
	kfree(client);
	mutex_unlock(&debugfs_mutex);
}
EXPORT_SYMBOL(ion_client_destroy);
struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}
static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}
void ion_pages_sync_for_device(struct device *dev, struct page *page,
			       size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
	 * for the targeted device, but this works on the currently targeted
	 * hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}
struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
						  PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}
static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}
static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}
static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}
static const struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
							VM_DONTDUMP;
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}
static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}
static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}
static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	return PTR_ERR_OR_ZERO(vaddr);
}
static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}
static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};
struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	mutex_unlock(&client->lock);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);
int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);
struct ion_handle *ion_import_dma_buf(struct ion_client *client,
				      struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		mutex_unlock(&client->lock);
		goto end;
	}

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle)) {
		mutex_unlock(&client->lock);
		goto end;
	}

	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);
struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	handle = ion_import_dma_buf(client, dmabuf);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf_fd);
static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}
/* fix up the cases where the ioctl direction bits are incorrect */
static unsigned int ion_ioctl_dir(unsigned int cmd)
{
	switch (cmd) {
	case ION_IOC_SYNC:
	case ION_IOC_FREE:
	case ION_IOC_CUSTOM:
		return _IOC_WRITE;
	default:
		return _IOC_DIR(cmd);
	}
}
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;
	struct ion_device *dev = client->dev;
	struct ion_handle *cleanup_handle = NULL;
	int ret = 0;
	unsigned int dir;

	union {
		struct ion_fd_data fd;
		struct ion_allocation_data allocation;
		struct ion_handle_data handle;
		struct ion_custom_data custom;
	} data;

	dir = ion_ioctl_dir(cmd);

	if (_IOC_SIZE(cmd) > sizeof(data))
		return -EINVAL;

	if (dir & _IOC_WRITE)
		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_handle *handle;

		handle = ion_alloc(client, data.allocation.len,
				   data.allocation.align,
				   data.allocation.heap_id_mask,
				   data.allocation.flags);
		if (IS_ERR(handle))
			return PTR_ERR(handle);

		data.allocation.handle = handle->id;

		cleanup_handle = handle;
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle *handle;

		mutex_lock(&client->lock);
		handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
		if (IS_ERR(handle)) {
			mutex_unlock(&client->lock);
			return PTR_ERR(handle);
		}
		ion_free_nolock(client, handle);
		ion_handle_put_nolock(handle);
		mutex_unlock(&client->lock);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_handle *handle;

		handle = ion_handle_get_by_id(client, data.handle.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		data.fd.fd = ion_share_dma_buf_fd(client, handle);
		ion_handle_put(handle);
		if (data.fd.fd < 0)
			ret = -EFAULT;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_handle *handle;

		handle = ion_import_dma_buf_fd(client, data.fd.fd);
		if (IS_ERR(handle))
			ret = PTR_ERR(handle);
		else
			data.handle.handle = handle->id;
		break;
	}
	case ION_IOC_SYNC:
	{
		ret = ion_sync_for_device(client, data.fd.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		if (!dev->custom_ioctl)
			return -ENOTTY;
		ret = dev->custom_ioctl(client, data.custom.cmd,
					data.custom.arg);
		break;
	}
	default:
		return -ENOTTY;
	}

	if (dir & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
			if (cleanup_handle)
				ion_free(client, cleanup_handle);
			return -EFAULT;
		}
	}
	return ret;
}
static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}
static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];

	pr_debug("%s: %d\n", __func__, __LINE__);
	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = ion_client_create(dev, debug_name);
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}
static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl = compat_ion_ioctl,
};
static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}
static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_puts(s, "----------------------------------------------------\n");

	mutex_lock(&debugfs_mutex);
	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	mutex_unlock(&debugfs_mutex);

	seq_puts(s, "----------------------------------------------------\n");
	seq_puts(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_puts(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
			   heap->free_list_size);
	seq_puts(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}
static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}
static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = val;

	if (!val) {
		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
		sc.nr_to_scan = objs;
	}

	heap->shrinker.scan_objects(&heap->shrinker, &sc);
	return 0;
}
static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct dentry *debug_file;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	spin_lock_init(&heap->free_lock);
	heap->free_list_size = 0;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
		ion_heap_init_shrinker(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debug_file = debugfs_create_file(heap->name, 0664,
					 dev->heaps_debug_root, heap,
					 &debug_heap_fops);

	if (!debug_file) {
		char buf[256], *path;

		path = dentry_path(dev->heaps_debug_root, buf, 256);
		pr_err("Failed to create heap debugfs at %s/%s\n",
		       path, heap->name);
	}

	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debug_file = debugfs_create_file(
			debug_name, 0644, dev->heaps_debug_root, heap,
			&debug_shrink_fops);
		if (!debug_file) {
			char buf[256], *path;

			path = dentry_path(dev->heaps_debug_root, buf, 256);
			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
			       path, debug_name);
		}
	}

	up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root) {
		pr_err("ion: failed to create debugfs root directory.\n");
		goto debugfs_done;
	}
	idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
	if (!idev->heaps_debug_root) {
		pr_err("ion: failed to create debugfs heaps directory.\n");
		goto debugfs_done;
	}
	idev->clients_debug_root = debugfs_create_dir("clients",
						      idev->debug_root);
	if (!idev->clients_debug_root)
		pr_err("ion: failed to create debugfs clients directory.\n");

debugfs_done:

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	ion_root_client = &idev->clients;
	mutex_init(&debugfs_mutex);
	return idev;
}
EXPORT_SYMBOL(ion_device_create);
void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	debugfs_remove_recursive(dev->debug_root);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}
EXPORT_SYMBOL(ion_device_destroy);
void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;

		if (data->heaps[i].base == 0) {
			phys_addr_t paddr;

			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
			if (!paddr) {
				pr_err("%s: error allocating memblock for heap %d\n",
				       __func__, i);
				continue;
			}
			data->heaps[i].base = paddr;
		} else {
			int ret = memblock_reserve(data->heaps[i].base,
						   data->heaps[i].size);
			if (ret)
				pr_err("memblock reserve of %zx@%lx failed\n",
				       data->heaps[i].size,
				       data->heaps[i].base);
		}
		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
			data->heaps[i].name,
			data->heaps[i].base,
			data->heaps[i].size);
	}
}