/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @user_clients:	list of all the clients created from userspace
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
			     unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
	struct dentry *heaps_debug_root;
	struct dentry *clients_debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @idr:		an idr space for allocating handle ids
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @display_name:	used for debugging (unique version of @name)
 * @display_serial:	used for debugging (to make display_name unique)
 * @task:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both handles tree
 * as well as the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	const char *name;
	char *display_name;
	int display_serial;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @id:			client-unique id allocated by client->idr
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return (buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}
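
/*
 * For buffers whose user mappings are faulted in page by page, bit 0 of
 * each entry in buffer->pages doubles as a per-page dirty flag:
 * ion_vm_fault() sets it when a page is mapped into userspace and
 * ion_buffer_sync_for_device() tests and clears it so that only the pages
 * userspace actually touched are synced.  The helpers below encode and
 * decode that flag.
 */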

static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(table == NULL,
		      "heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		ret = -EINVAL;
		goto err1;
	}

	buffer->dev = dev;
	buffer->size = len;
	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}
	}

	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/*
	 * this will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here.  However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg. The implicit contract here is that
	 * memory coming from the heaps is ready for dma, ie if it has a
	 * cached mapping that mapping has been invalidated
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	vfree(buffer->pages);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = kref_put(&handle->ref, ion_handle_destroy);
	mutex_unlock(&client->lock);

	return ret;
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
					       int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = idr_find(&client->idr, id);
	if (handle)
		ion_handle_get(handle);
	mutex_unlock(&client->lock);

	return handle ? handle : ERR_PTR(-EINVAL);
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return idr_find(&client->idr, handle->id) == handle;
}

static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
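
/*
 * Typical in-kernel usage of the allocation API (illustrative sketch only,
 * not code from this file; the heap mask, size and flags are examples):
 *
 *	client = ion_client_create(idev, "my-driver");
 *	handle = ion_alloc(client, SZ_1M, PAGE_SIZE,
 *			   1 << ION_HEAP_TYPE_SYSTEM, ION_FLAG_CACHED);
 *	vaddr  = ion_map_kernel(client, handle);
 *	...
 *	ion_unmap_kernel(client, handle);
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */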

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);
		return;
	}
	mutex_unlock(&client->lock);
	ion_handle_put(handle);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
			__func__, buffer->heap->name, buffer->heap->type);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	if (!handle->kmap_cnt) {
		WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
		return;
	}
	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {NULL};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int ion_get_client_serial(const struct rb_root *root,
				 const unsigned char *name)
{
	int serial = -1;
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct ion_client *client = rb_entry(node, struct ion_client,
						     node);

		if (strcmp(client->name, name))
			continue;
		serial = max(serial, client->display_serial);
	}
	return serial + 1;
}

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client)
		goto err_put_task_struct;

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->task = task;
	client->pid = pid;
	client->name = kstrdup(name, GFP_KERNEL);
	if (!client->name)
		goto err_free_client;

	down_write(&dev->lock);
	client->display_serial = ion_get_client_serial(&dev->clients, name);
	client->display_name = kasprintf(
		GFP_KERNEL, "%s-%d", name, client->display_serial);
	if (!client->display_name) {
		up_write(&dev->lock);
		goto err_free_client_name;
	}
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(client->display_name, 0664,
						 dev->clients_debug_root,
						 client, &debug_client_fops);
	if (!client->debug_root) {
		char buf[256], *path;

		path = dentry_path(dev->clients_debug_root, buf, 256);
		pr_err("Failed to create client debugfs at %s/%s\n",
			path, client->display_name);
	}

	up_write(&dev->lock);

	return client;

err_free_client_name:
	kfree(client->name);
err_free_client:
	kfree(client);
err_put_task_struct:
	if (task)
		put_task_struct(current->group_leader);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client->display_name);
	kfree(client->name);
	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

void ion_pages_sync_for_device(struct device *dev, struct page *page,
		size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
	 * for the targeted device, but this works on the currently targeted
	 * hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};
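
/*
 * Sync-for-device policy for cached buffers that fault in their user
 * mappings: only the pages marked dirty by ion_vm_fault() are flushed, and
 * the user mappings are then zapped so that the next CPU access faults
 * again and re-marks its page dirty.
 */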

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
						  PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

static const struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
			__func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
							VM_DONTDUMP;
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	return PTR_ERR_OR_ZERO(vaddr);
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	mutex_unlock(&client->lock);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf))
		ion_buffer_put(buffer);

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);
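
/*
 * Sharing flow as seen from userspace (descriptive note): ION_IOC_ALLOC
 * returns a handle id, ION_IOC_SHARE/ION_IOC_MAP turn that handle into a
 * dma-buf fd which can be mmap()ed or passed to another process, and
 * ION_IOC_IMPORT turns such an fd back into a handle for the importing
 * client (see ion_import_dma_buf() below).
 */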

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);
	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		mutex_unlock(&client->lock);
		goto end;
	}

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle)) {
		mutex_unlock(&client->lock);
		goto end;
	}

	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

/* fix up the cases where the ioctl direction bits are incorrect */
static unsigned int ion_ioctl_dir(unsigned int cmd)
{
	switch (cmd) {
	case ION_IOC_SYNC:
	case ION_IOC_FREE:
	case ION_IOC_CUSTOM:
		return _IOC_WRITE;
	default:
		return _IOC_DIR(cmd);
	}
}
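
/*
 * ion_ioctl() copies the argument in when the (fixed-up) direction includes
 * _IOC_WRITE, dispatches on the command, and copies the result back for
 * _IOC_READ commands.  If the copy-out fails after ION_IOC_ALLOC, the
 * freshly created handle is freed again via cleanup_handle.
 */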

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;
	struct ion_device *dev = client->dev;
	struct ion_handle *cleanup_handle = NULL;
	int ret = 0;
	unsigned int dir;

	union {
		struct ion_fd_data fd;
		struct ion_allocation_data allocation;
		struct ion_handle_data handle;
		struct ion_custom_data custom;
	} data;

	dir = ion_ioctl_dir(cmd);

	if (_IOC_SIZE(cmd) > sizeof(data))
		return -EINVAL;

	if (dir & _IOC_WRITE)
		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_handle *handle;

		handle = ion_alloc(client, data.allocation.len,
						data.allocation.align,
						data.allocation.heap_id_mask,
						data.allocation.flags);
		if (IS_ERR(handle))
			return PTR_ERR(handle);

		data.allocation.handle = handle->id;

		cleanup_handle = handle;
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle *handle;

		handle = ion_handle_get_by_id(client, data.handle.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		ion_free(client, handle);
		ion_handle_put(handle);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_handle *handle;

		handle = ion_handle_get_by_id(client, data.handle.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		data.fd.fd = ion_share_dma_buf_fd(client, handle);
		ion_handle_put(handle);
		if (data.fd.fd < 0)
			ret = -EFAULT;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_handle *handle;

		handle = ion_import_dma_buf(client, data.fd.fd);
		if (IS_ERR(handle))
			ret = PTR_ERR(handle);
		else
			data.handle.handle = handle->id;
		break;
	}
	case ION_IOC_SYNC:
	{
		ret = ion_sync_for_device(client, data.fd.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		if (!dev->custom_ioctl)
			return -ENOTTY;
		ret = dev->custom_ioctl(client, data.custom.cmd,
						data.custom.arg);
		break;
	}
	default:
		return -ENOTTY;
	}

	if (dir & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
			if (cleanup_handle)
				ion_free(client, cleanup_handle);
			return -EFAULT;
		}
	}
	return ret;
}
*inode
, struct file
*file
)
1345 struct ion_client
*client
= file
->private_data
;
1347 pr_debug("%s: %d\n", __func__
, __LINE__
);
1348 ion_client_destroy(client
);

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];

	pr_debug("%s: %d\n", __func__, __LINE__);
	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = ion_client_create(dev, debug_name);
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl = compat_ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_puts(s, "----------------------------------------------------\n");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	seq_puts(s, "----------------------------------------------------\n");
	seq_puts(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_puts(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
			   heap->free_list_size);
	seq_puts(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
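
/*
 * Each heap with a shrinker also gets a "<heap name>_shrink" debugfs file:
 * reading it reports how many objects the heap shrinker considers freeable,
 * and writing a count forces a shrinker scan (see debug_shrink_get/set
 * below and the file creation in ion_device_add_heap()).
 */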

static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = val;

	if (!val) {
		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
		sc.nr_to_scan = objs;
	}

	heap->shrinker.scan_objects(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct dentry *debug_file;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	spin_lock_init(&heap->free_lock);
	heap->free_list_size = 0;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
		ion_heap_init_shrinker(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debug_file = debugfs_create_file(heap->name, 0664,
					 dev->heaps_debug_root, heap,
					 &debug_heap_fops);

	if (!debug_file) {
		char buf[256], *path;

		path = dentry_path(dev->heaps_debug_root, buf, 256);
		pr_err("Failed to create heap debugfs at %s/%s\n",
		       path, heap->name);
	}

	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debug_file = debugfs_create_file(
			debug_name, 0644, dev->heaps_debug_root, heap,
			&debug_shrink_fops);
		if (!debug_file) {
			char buf[256], *path;

			path = dentry_path(dev->heaps_debug_root, buf, 256);
			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
			       path, debug_name);
		}
	}

	up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root) {
		pr_err("ion: failed to create debugfs root directory.\n");
		goto debugfs_done;
	}
	idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
	if (!idev->heaps_debug_root) {
		pr_err("ion: failed to create debugfs heaps directory.\n");
		goto debugfs_done;
	}
	idev->clients_debug_root = debugfs_create_dir("clients",
						idev->debug_root);
	if (!idev->clients_debug_root)
		pr_err("ion: failed to create debugfs clients directory.\n");

debugfs_done:

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	return idev;
}
EXPORT_SYMBOL(ion_device_create);
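
/*
 * Bring-up order (descriptive note): platform code calls ion_device_create()
 * once to register /dev/ion and the debugfs hierarchy, then registers each
 * heap with ion_device_add_heap().  ion_device_destroy() only unwinds the
 * misc device and debugfs entries, as the XXX comment below notes.
 */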

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	debugfs_remove_recursive(dev->debug_root);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}
EXPORT_SYMBOL(ion_device_destroy);

void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;

		if (data->heaps[i].base == 0) {
			phys_addr_t paddr;

			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
			if (!paddr) {
				pr_err("%s: error allocating memblock for heap %d\n",
					__func__, i);
				continue;
			}
			data->heaps[i].base = paddr;
		} else {
			int ret = memblock_reserve(data->heaps[i].base,
						   data->heaps[i].size);
			if (ret)
				pr_err("memblock reserve of %zx@%lx failed\n",
				       data->heaps[i].size,
				       data->heaps[i].base);
		}
		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
			data->heaps[i].name,
			data->heaps[i].base,
			data->heaps[i].size);
	}
}