/*
 *
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev: the actual misc device
 * @buffers: an rb tree of all the existing buffers
 * @buffer_lock: lock protecting the tree of buffers
 * @lock: rwsem protecting the tree of heaps and clients
 * @heaps: list of all the heaps in the system
 * @custom_ioctl: device-specific ioctl hook, may be NULL
 * @clients: an rb tree of all the clients created against this device
 * @debug_root: root of this device's debugfs hierarchy
 * @heaps_debug_root: debugfs directory holding the per-heap entries
 * @clients_debug_root: debugfs directory holding the per-client entries
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
			     unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
	struct dentry *heaps_debug_root;
	struct dentry *clients_debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node: node in the tree of all clients
 * @dev: backpointer to ion device
 * @handles: an rb tree of all the handles in this client
 * @idr: an idr space for allocating handle ids
 * @lock: lock protecting the tree of handles
 * @name: used for debugging
 * @display_name: used for debugging (unique version of @name)
 * @display_serial: used for debugging (to make display_name unique)
 * @task: used for debugging
 * @pid: pid of the process this client belongs to, used for debugging
 * @debug_root: this client's entry in debugfs
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	const char *name;
	char *display_name;
	int display_serial;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref: reference count
 * @client: back pointer to the client the buffer resides in
 * @buffer: pointer to the buffer
 * @node: node in the client's handle rbtree
 * @kmap_cnt: count of times this client has mapped to kernel
 * @id: client-unique id allocated by client->idr
 *
 * Modifications to node and kmap_cnt should be protected by the
 * lock in the client. Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return (buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

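/*
 * For buffers that fault in their user mappings (see
 * ion_buffer_fault_user_mappings() above), bit 0 of each entry in
 * buffer->pages doubles as a per-page dirty flag: it is set when the
 * page is faulted into a userspace mapping and cleared again once the
 * page has been synced for the device. The helpers below pack and
 * unpack that tag.
 */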
static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}

/* this function should only be called while dev->buffer_lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.\n", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(table == NULL,
		      "heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		ret = -EINVAL;
		goto err1;
	}

	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}
	}

	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/*
	 * This will set up dma addresses for the sglist -- it is not
	 * technically correct as per the DMA API -- a specific
	 * device isn't really taking ownership here. However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg. The implicit contract here is that
	 * memory coming from the heaps is ready for dma, i.e. if it has a
	 * cached mapping that mapping has been invalidated.
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	vfree(buffer->pages);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * When a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer. At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system.
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = kref_put(&handle->ref, ion_handle_destroy);
	mutex_unlock(&client->lock);

	return ret;
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
					       int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = idr_find(&client->idr, id);
	if (handle)
		ion_handle_get(handle);
	mutex_unlock(&client->lock);

	return handle ? handle : ERR_PTR(-EINVAL);
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return idr_find(&client->idr, handle->id) == handle;
}

static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.\n", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order. If the heap type is supported by the client, and matches the
	 * request of the caller, allocate from it. Repeat until an allocation
	 * has succeeded or all heaps have been tried.
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
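
/*
 * Illustrative in-kernel usage (a sketch, not part of the original file;
 * assumes an ion_device with a system heap is registered and that
 * ION_HEAP_SYSTEM_MASK from ion.h selects it):
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *	void *vaddr;
 *
 *	client = ion_client_create(idev, "my-driver");
 *	handle = ion_alloc(client, SZ_1M, PAGE_SIZE,
 *			   ION_HEAP_SYSTEM_MASK, ION_FLAG_CACHED);
 *	vaddr = ion_map_kernel(client, handle);
 *	...
 *	ion_unmap_kernel(client, handle);
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */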

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);
		return;
	}
	mutex_unlock(&client->lock);
	ion_handle_put(handle);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
		       __func__, buffer->heap->name, buffer->heap->type);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	if (!handle->kmap_cnt) {
		WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
		return;
	}
	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {NULL};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int ion_get_client_serial(const struct rb_root *root,
				 const unsigned char *name)
{
	int serial = -1;
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct ion_client *client = rb_entry(node, struct ion_client,
						     node);

		if (strcmp(client->name, name))
			continue;
		serial = max(serial, client->display_serial);
	}
	return serial + 1;
}

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client)
		goto err_put_task_struct;

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->task = task;
	client->pid = pid;
	client->name = kstrdup(name, GFP_KERNEL);
	if (!client->name)
		goto err_free_client;

	down_write(&dev->lock);
	client->display_serial = ion_get_client_serial(&dev->clients, name);
	client->display_name = kasprintf(
		GFP_KERNEL, "%s-%d", name, client->display_serial);
	if (!client->display_name) {
		up_write(&dev->lock);
		goto err_free_client_name;
	}
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(client->display_name, 0664,
						 dev->clients_debug_root,
						 client, &debug_client_fops);
	if (!client->debug_root) {
		char buf[256], *path;

		path = dentry_path(dev->clients_debug_root, buf, 256);
		pr_err("Failed to create client debugfs at %s/%s\n",
		       path, client->display_name);
	}

	up_write(&dev->lock);

	return client;

err_free_client_name:
	kfree(client->name);
err_free_client:
	kfree(client);
err_put_task_struct:
	if (task)
		put_task_struct(current->group_leader);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client->display_name);
	kfree(client->name);
	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

void ion_pages_sync_for_device(struct device *dev, struct page *page,
			       size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
	 * for the targeted device, but this works on the currently targeted
	 * hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

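/*
 * Sync a faulted, cached buffer for device access: flush only the pages
 * the CPU has dirtied since the last sync, then zap every live user
 * mapping so the next CPU access refaults and marks its page dirty
 * again. Buffers that do not fault their user mappings are assumed to
 * be device-ready already (see the allocation-time comment in
 * ion_buffer_create()).
 */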
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
						  PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

static const struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
				 VM_DONTDUMP;
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	return PTR_ERR_OR_ZERO(vaddr);
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	mutex_unlock(&client->lock);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf))
		ion_buffer_put(buffer);

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);
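
/*
 * Note: the exported dma-buf holds its own reference to the buffer
 * (taken in ion_share_dma_buf() and dropped in ion_dma_buf_release()),
 * so the handle -- and even the client -- can be torn down while the
 * file descriptor, and therefore the buffer, lives on.
 */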

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		mutex_unlock(&client->lock);
		goto end;
	}

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle)) {
		mutex_unlock(&client->lock);
		goto end;
	}

	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

/* fix up the cases where the ioctl direction bits are incorrect */
static unsigned int ion_ioctl_dir(unsigned int cmd)
{
	switch (cmd) {
	case ION_IOC_SYNC:
	case ION_IOC_FREE:
	case ION_IOC_CUSTOM:
		return _IOC_WRITE;
	default:
		return _IOC_DIR(cmd);
	}
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;
	struct ion_device *dev = client->dev;
	struct ion_handle *cleanup_handle = NULL;
	int ret = 0;
	unsigned int dir;

	union {
		struct ion_fd_data fd;
		struct ion_allocation_data allocation;
		struct ion_handle_data handle;
		struct ion_custom_data custom;
	} data;

	dir = ion_ioctl_dir(cmd);

	if (_IOC_SIZE(cmd) > sizeof(data))
		return -EINVAL;

	if (dir & _IOC_WRITE)
		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_handle *handle;

		handle = ion_alloc(client, data.allocation.len,
				   data.allocation.align,
				   data.allocation.heap_id_mask,
				   data.allocation.flags);
		if (IS_ERR(handle))
			return PTR_ERR(handle);

		data.allocation.handle = handle->id;

		cleanup_handle = handle;
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle *handle;

		handle = ion_handle_get_by_id(client, data.handle.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		ion_free(client, handle);
		ion_handle_put(handle);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_handle *handle;

		handle = ion_handle_get_by_id(client, data.handle.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		data.fd.fd = ion_share_dma_buf_fd(client, handle);
		ion_handle_put(handle);
		if (data.fd.fd < 0)
			ret = data.fd.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_handle *handle;

		handle = ion_import_dma_buf(client, data.fd.fd);
		if (IS_ERR(handle))
			ret = PTR_ERR(handle);
		else
			data.handle.handle = handle->id;
		break;
	}
	case ION_IOC_SYNC:
	{
		ret = ion_sync_for_device(client, data.fd.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		if (!dev->custom_ioctl)
			return -ENOTTY;
		ret = dev->custom_ioctl(client, data.custom.cmd,
					data.custom.arg);
		break;
	}
	default:
		return -ENOTTY;
	}

	if (dir & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
			if (cleanup_handle)
				ion_free(client, cleanup_handle);
			return -EFAULT;
		}
	}
	return ret;
}
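
/*
 * Userspace sketch (illustrative, not part of the original file; assumes
 * the uapi definitions from the matching ion.h header): allocate a buffer
 * and turn the handle into a shareable dma-buf fd. Error checking is
 * omitted for brevity.
 *
 *	struct ion_allocation_data alloc_data = {
 *		.len = 4096,
 *		.align = 4096,
 *		.heap_id_mask = ION_HEAP_SYSTEM_MASK,
 *		.flags = ION_FLAG_CACHED,
 *	};
 *	struct ion_fd_data fd_data;
 *	int ion_fd = open("/dev/ion", O_RDWR);
 *
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc_data);
 *	fd_data.handle = alloc_data.handle;
 *	ioctl(ion_fd, ION_IOC_SHARE, &fd_data);
 *	(fd_data.fd can now be mmap()ed or passed to another process)
 */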

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];

	pr_debug("%s: %d\n", __func__, __LINE__);
	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = ion_client_create(dev, debug_name);
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl = compat_ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_puts(s, "----------------------------------------------------\n");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	seq_puts(s, "----------------------------------------------------\n");
	seq_puts(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_puts(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
			   heap->free_list_size);
	seq_puts(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

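/*
 * debugfs hooks for manually exercising a heap's shrinker: writing N to
 * <heap>_shrink asks the shrinker to scan N objects (writing 0 first
 * queries the count and then scans everything), while reading the file
 * reports how many objects the shrinker could currently free.
 */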
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = val;

	if (!val) {
		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
		sc.nr_to_scan = objs;
	}

	heap->shrinker.scan_objects(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct dentry *debug_file;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	spin_lock_init(&heap->free_lock);
	heap->free_list_size = 0;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
		ion_heap_init_shrinker(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debug_file = debugfs_create_file(heap->name, 0664,
					 dev->heaps_debug_root, heap,
					 &debug_heap_fops);

	if (!debug_file) {
		char buf[256], *path;

		path = dentry_path(dev->heaps_debug_root, buf, 256);
		pr_err("Failed to create heap debugfs at %s/%s\n",
		       path, heap->name);
	}

	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debug_file = debugfs_create_file(
			debug_name, 0644, dev->heaps_debug_root, heap,
			&debug_shrink_fops);
		if (!debug_file) {
			char buf[256], *path;

			path = dentry_path(dev->heaps_debug_root, buf, 256);
			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
			       path, debug_name);
		}
	}

	up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);
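
/*
 * Illustrative bring-up sketch (not part of the original file): a board
 * or platform driver would typically create one device and register each
 * heap with it, e.g. assuming heaps[] was built with ion_heap_create()
 * from ion_priv.h:
 *
 *	idev = ion_device_create(NULL);
 *	for (i = 0; i < num_heaps; i++)
 *		ion_device_add_heap(idev, heaps[i]);
 */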

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root) {
		pr_err("ion: failed to create debugfs root directory.\n");
		goto debugfs_done;
	}
	idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
	if (!idev->heaps_debug_root) {
		pr_err("ion: failed to create debugfs heaps directory.\n");
		goto debugfs_done;
	}
	idev->clients_debug_root = debugfs_create_dir("clients",
						      idev->debug_root);
	if (!idev->clients_debug_root)
		pr_err("ion: failed to create debugfs clients directory.\n");

debugfs_done:

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	return idev;
}
EXPORT_SYMBOL(ion_device_create);

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	debugfs_remove_recursive(dev->debug_root);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}
EXPORT_SYMBOL(ion_device_destroy);

void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;

		if (data->heaps[i].base == 0) {
			phys_addr_t paddr;

			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
			if (!paddr) {
				pr_err("%s: error allocating memblock for heap %d\n",
				       __func__, i);
				continue;
			}
			data->heaps[i].base = paddr;
		} else {
			int ret = memblock_reserve(data->heaps[i].base,
						   data->heaps[i].size);
			if (ret)
				pr_err("memblock reserve of %zx@%lx failed\n",
				       data->heaps[i].size,
				       data->heaps[i].base);
		}
		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
			data->heaps[i].name,
			data->heaps[i].base,
			data->heaps[i].size);
	}
}