/*
 *
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>

#include "ion.h"
#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @clients:		an rb tree of all the clients in the system
 */
struct ion_device {
        struct miscdevice dev;
        struct rb_root buffers;
        struct mutex buffer_lock;
        struct rw_semaphore lock;
        struct plist_head heaps;
        long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
                              unsigned long arg);
        struct rb_root clients;
        struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @task:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the handles tree
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
        struct rb_node node;
        struct ion_device *dev;
        struct rb_root handles;
        struct mutex lock;
        const char *name;
        struct task_struct *task;
        pid_t pid;
        struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
        struct kref ref;
        struct ion_client *client;
        struct ion_buffer *buffer;
        struct rb_node node;
        unsigned int kmap_cnt;
};

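/*
 * Buffers that are allocated cached but without ION_FLAG_CACHED_NEEDS_SYNC
 * are not mapped into userspace up front; their pages are faulted in one at
 * a time (see ion_vm_fault()) so cache maintenance can be limited to the
 * pages a client actually touched.
 */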
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
        return ((buffer->flags & ION_FLAG_CACHED) &&
                !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
        return !!(buffer->flags & ION_FLAG_CACHED);
}

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
                           struct ion_buffer *buffer)
{
        struct rb_node **p = &dev->buffers.rb_node;
        struct rb_node *parent = NULL;
        struct ion_buffer *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_buffer, node);

                if (buffer < entry) {
                        p = &(*p)->rb_left;
                } else if (buffer > entry) {
                        p = &(*p)->rb_right;
                } else {
                        pr_err("%s: buffer already found.", __func__);
                        BUG();
                }
        }

        rb_link_node(&buffer->node, parent, p);
        rb_insert_color(&buffer->node, &dev->buffers);
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);

static bool ion_heap_drain_freelist(struct ion_heap *heap);

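/*
 * Allocate backing memory from @heap and wrap it in a new ion_buffer.
 * If the first allocation attempt fails on a heap that defers frees, the
 * heap's free list is drained and the allocation retried once.  The buffer
 * is mapped for dma via the heap's map_dma op and inserted into
 * dev->buffers before being returned.
 */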
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
                                            struct ion_device *dev,
                                            unsigned long len,
                                            unsigned long align,
                                            unsigned long flags)
{
        struct ion_buffer *buffer;
        struct sg_table *table;
        struct scatterlist *sg;
        int i, ret;

        buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
        if (!buffer)
                return ERR_PTR(-ENOMEM);

        buffer->heap = heap;
        buffer->flags = flags;
        kref_init(&buffer->ref);

        ret = heap->ops->allocate(heap, buffer, len, align, flags);

        if (ret) {
                if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
                        goto err2;

                ion_heap_drain_freelist(heap);
                ret = heap->ops->allocate(heap, buffer, len, align,
                                          flags);
                if (ret)
                        goto err2;
        }

        buffer->dev = dev;
        buffer->size = len;

        table = heap->ops->map_dma(heap, buffer);
        if (IS_ERR_OR_NULL(table)) {
                heap->ops->free(buffer);
                kfree(buffer);
                return ERR_PTR(PTR_ERR(table));
        }
        buffer->sg_table = table;
        if (ion_buffer_fault_user_mappings(buffer)) {
                for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
                            i) {
                        if (sg_dma_len(sg) == PAGE_SIZE)
                                continue;
                        pr_err("%s: cached mappings that will be faulted in "
                               "must have pagewise sg_lists\n", __func__);
                        ret = -EINVAL;
                        goto err;
                }

                ret = ion_buffer_alloc_dirty(buffer);
                if (ret)
                        goto err;
        }

        buffer->dev = dev;
        buffer->size = len;
        INIT_LIST_HEAD(&buffer->vmas);
        mutex_init(&buffer->lock);
        /* this will set up dma addresses for the sglist -- it is not
           technically correct as per the dma api -- a specific
           device isn't really taking ownership here.  However, in practice on
           our systems the only dma_address space is physical addresses.
           Additionally, we can't afford the overhead of invalidating every
           allocation via dma_map_sg.  The implicit contract here is that
           memory coming from the heaps is ready for dma, ie if it has a
           cached mapping that mapping has been invalidated */
        for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
                sg_dma_address(sg) = sg_phys(sg);
        mutex_lock(&dev->buffer_lock);
        ion_buffer_add(dev, buffer);
        mutex_unlock(&dev->buffer_lock);
        return buffer;

err:
        heap->ops->unmap_dma(heap, buffer);
        heap->ops->free(buffer);
err2:
        kfree(buffer);
        return ERR_PTR(ret);
}

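/*
 * _ion_buffer_destroy() releases the heap memory immediately;
 * ion_buffer_destroy() is the kref release callback, which either frees the
 * buffer right away or, for heaps with ION_HEAP_FLAG_DEFER_FREE, queues it
 * on the heap's free list for the deferred free thread.
 */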
static void _ion_buffer_destroy(struct ion_buffer *buffer)
{
        if (WARN_ON(buffer->kmap_cnt > 0))
                buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
        buffer->heap->ops->unmap_dma(buffer->heap, buffer);
        buffer->heap->ops->free(buffer);
        if (buffer->flags & ION_FLAG_CACHED)
                kfree(buffer->dirty);
        kfree(buffer);
}

static void ion_buffer_destroy(struct kref *kref)
{
        struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
        struct ion_heap *heap = buffer->heap;
        struct ion_device *dev = buffer->dev;

        mutex_lock(&dev->buffer_lock);
        rb_erase(&buffer->node, &dev->buffers);
        mutex_unlock(&dev->buffer_lock);

        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) {
                rt_mutex_lock(&heap->lock);
                list_add(&buffer->list, &heap->free_list);
                rt_mutex_unlock(&heap->lock);
                wake_up(&heap->waitqueue);
                return;
        }
        _ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
        kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
        return kref_put(&buffer->ref, ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
        mutex_lock(&buffer->lock);
        buffer->handle_count++;
        mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
        /*
         * when a buffer is removed from a handle, if it is not in
         * any other handles, copy the taskcomm and the pid of the
         * process it's being removed from into the buffer.  At this
         * point there will be no way to track what processes this buffer is
         * being used by, it only exists as a dma_buf file descriptor.
         * The taskcomm and pid can provide a debug hint as to where this fd
         * is in the system
         */
        mutex_lock(&buffer->lock);
        buffer->handle_count--;
        BUG_ON(buffer->handle_count < 0);
        if (!buffer->handle_count) {
                struct task_struct *task;

                task = current->group_leader;
                get_task_comm(buffer->task_comm, task);
                buffer->pid = task_pid_nr(task);
        }
        mutex_unlock(&buffer->lock);
}

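/*
 * A handle pins one reference on the underlying buffer for as long as the
 * client holds it; ion_handle_destroy() drops any kernel mappings still
 * held through the handle before releasing that reference.
 */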
static struct ion_handle *ion_handle_create(struct ion_client *client,
                                            struct ion_buffer *buffer)
{
        struct ion_handle *handle;

        handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
        if (!handle)
                return ERR_PTR(-ENOMEM);
        kref_init(&handle->ref);
        RB_CLEAR_NODE(&handle->node);
        handle->client = client;
        ion_buffer_get(buffer);
        ion_buffer_add_to_handle(buffer);
        handle->buffer = buffer;

        return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
        struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
        struct ion_client *client = handle->client;
        struct ion_buffer *buffer = handle->buffer;

        mutex_lock(&buffer->lock);
        while (handle->kmap_cnt)
                ion_handle_kmap_put(handle);
        mutex_unlock(&buffer->lock);

        if (!RB_EMPTY_NODE(&handle->node))
                rb_erase(&handle->node, &client->handles);

        ion_buffer_remove_from_handle(buffer);
        ion_buffer_put(buffer);

        kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
        return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
        kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
        return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
                                            struct ion_buffer *buffer)
{
        struct rb_node *n;

        for (n = rb_first(&client->handles); n; n = rb_next(n)) {
                struct ion_handle *handle = rb_entry(n, struct ion_handle,
                                                     node);
                if (handle->buffer == buffer)
                        return handle;
        }
        return NULL;
}

static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
        struct rb_node *n = client->handles.rb_node;

        while (n) {
                struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
                                                          node);
                if (handle < handle_node)
                        n = n->rb_left;
                else if (handle > handle_node)
                        n = n->rb_right;
                else
                        return true;
        }
        return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
        struct rb_node **p = &client->handles.rb_node;
        struct rb_node *parent = NULL;
        struct ion_handle *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_handle, node);

                if (handle < entry)
                        p = &(*p)->rb_left;
                else if (handle > entry)
                        p = &(*p)->rb_right;
                else
                        WARN(1, "%s: buffer already found.", __func__);
        }

        rb_link_node(&handle->node, parent, p);
        rb_insert_color(&handle->node, &client->handles);
}

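/*
 * ion_alloc() walks the heaps in priority order (highest id first, see
 * ion_device_add_heap()) and allocates from the first heap in
 * @heap_id_mask that succeeds, returning a new handle in @client or an
 * ERR_PTR() on failure.
 */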
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
                             size_t align, unsigned int heap_id_mask,
                             unsigned int flags)
{
        struct ion_handle *handle;
        struct ion_device *dev = client->dev;
        struct ion_buffer *buffer = NULL;
        struct ion_heap *heap;

        pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
                 len, align, heap_id_mask, flags);
        /*
         * traverse the list of heaps available in this system in priority
         * order.  If the heap type is supported by the client, and matches the
         * request of the caller allocate from it.  Repeat until allocate has
         * succeeded or all heaps have been tried
         */
        if (WARN_ON(!len))
                return ERR_PTR(-EINVAL);

        len = PAGE_ALIGN(len);

        down_read(&dev->lock);
        plist_for_each_entry(heap, &dev->heaps, node) {
                /* if the caller didn't specify this heap id */
                if (!((1 << heap->id) & heap_id_mask))
                        continue;
                buffer = ion_buffer_create(heap, dev, len, align, flags);
                if (!IS_ERR_OR_NULL(buffer))
                        break;
        }
        up_read(&dev->lock);

        if (buffer == NULL)
                return ERR_PTR(-ENODEV);

        if (IS_ERR(buffer))
                return ERR_PTR(PTR_ERR(buffer));

        handle = ion_handle_create(client, buffer);

        /*
         * ion_buffer_create will create a buffer with a ref_cnt of 1,
         * and ion_handle_create will take a second reference, drop one here
         */
        ion_buffer_put(buffer);

        if (!IS_ERR(handle)) {
                mutex_lock(&client->lock);
                ion_handle_add(client, handle);
                mutex_unlock(&client->lock);
        }

        return handle;
}
EXPORT_SYMBOL(ion_alloc);

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
        bool valid_handle;

        BUG_ON(client != handle->client);

        mutex_lock(&client->lock);
        valid_handle = ion_handle_validate(client, handle);

        if (!valid_handle) {
                WARN(1, "%s: invalid handle passed to free.\n", __func__);
                mutex_unlock(&client->lock);
                return;
        }
        ion_handle_put(handle);
        mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
             ion_phys_addr_t *addr, size_t *len)
{
        struct ion_buffer *buffer;
        int ret;

        mutex_lock(&client->lock);
        if (!ion_handle_validate(client, handle)) {
                mutex_unlock(&client->lock);
                return -EINVAL;
        }

        buffer = handle->buffer;

        if (!buffer->heap->ops->phys) {
                pr_err("%s: ion_phys is not implemented by this heap.\n",
                       __func__);
                mutex_unlock(&client->lock);
                return -ENODEV;
        }
        mutex_unlock(&client->lock);
        ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
        return ret;
}
EXPORT_SYMBOL(ion_phys);

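/*
 * Kernel mappings are reference counted at two levels: per buffer
 * (buffer->kmap_cnt, which drives the heap's map_kernel/unmap_kernel ops)
 * and per handle (handle->kmap_cnt), so one client cannot unmap a buffer
 * that is still mapped through another handle.
 */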
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
        void *vaddr;

        if (buffer->kmap_cnt) {
                buffer->kmap_cnt++;
                return buffer->vaddr;
        }
        vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
        if (IS_ERR_OR_NULL(vaddr))
                return vaddr;
        buffer->vaddr = vaddr;
        buffer->kmap_cnt++;
        return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
        struct ion_buffer *buffer = handle->buffer;
        void *vaddr;

        if (handle->kmap_cnt) {
                handle->kmap_cnt++;
                return buffer->vaddr;
        }
        vaddr = ion_buffer_kmap_get(buffer);
        if (IS_ERR_OR_NULL(vaddr))
                return vaddr;
        handle->kmap_cnt++;
        return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
        buffer->kmap_cnt--;
        if (!buffer->kmap_cnt) {
                buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
                buffer->vaddr = NULL;
        }
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
        struct ion_buffer *buffer = handle->buffer;

        handle->kmap_cnt--;
        if (!handle->kmap_cnt)
                ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
        struct ion_buffer *buffer;
        void *vaddr;

        mutex_lock(&client->lock);
        if (!ion_handle_validate(client, handle)) {
                pr_err("%s: invalid handle passed to map_kernel.\n",
                       __func__);
                mutex_unlock(&client->lock);
                return ERR_PTR(-EINVAL);
        }

        buffer = handle->buffer;

        if (!handle->buffer->heap->ops->map_kernel) {
                pr_err("%s: map_kernel is not implemented by this heap.\n",
                       __func__);
                mutex_unlock(&client->lock);
                return ERR_PTR(-ENODEV);
        }

        mutex_lock(&buffer->lock);
        vaddr = ion_handle_kmap_get(handle);
        mutex_unlock(&buffer->lock);
        mutex_unlock(&client->lock);
        return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
        struct ion_buffer *buffer;

        mutex_lock(&client->lock);
        buffer = handle->buffer;
        mutex_lock(&buffer->lock);
        ion_handle_kmap_put(handle);
        mutex_unlock(&buffer->lock);
        mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
        struct ion_client *client = s->private;
        struct rb_node *n;
        size_t sizes[ION_NUM_HEAP_IDS] = {0};
        const char *names[ION_NUM_HEAP_IDS] = {0};
        int i;

        mutex_lock(&client->lock);
        for (n = rb_first(&client->handles); n; n = rb_next(n)) {
                struct ion_handle *handle = rb_entry(n, struct ion_handle,
                                                     node);
                unsigned int id = handle->buffer->heap->id;

                if (!names[id])
                        names[id] = handle->buffer->heap->name;
                sizes[id] += handle->buffer->size;
        }
        mutex_unlock(&client->lock);

        seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
        for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
                if (!names[i])
                        continue;
                seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
        }
        return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
        return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
        .open = ion_debug_client_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

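/*
 * Create a client for @dev.  The pid and task of the caller's group leader
 * are recorded for debugging, the client is inserted into dev->clients,
 * and a debugfs file named after the pid is created under the ion debug
 * directory.
 */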
struct ion_client *ion_client_create(struct ion_device *dev,
                                     const char *name)
{
        struct ion_client *client;
        struct task_struct *task;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct ion_client *entry;
        char debug_name[64];
        pid_t pid;

        get_task_struct(current->group_leader);
        task_lock(current->group_leader);
        pid = task_pid_nr(current->group_leader);
        /* don't bother to store task struct for kernel threads,
           they can't be killed anyway */
        if (current->group_leader->flags & PF_KTHREAD) {
                put_task_struct(current->group_leader);
                task = NULL;
        } else {
                task = current->group_leader;
        }
        task_unlock(current->group_leader);

        client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
        if (!client) {
                if (task)
                        put_task_struct(current->group_leader);
                return ERR_PTR(-ENOMEM);
        }

        client->dev = dev;
        client->handles = RB_ROOT;
        mutex_init(&client->lock);
        client->name = name;
        client->task = task;
        client->pid = pid;

        down_write(&dev->lock);
        p = &dev->clients.rb_node;
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_client, node);

                if (client < entry)
                        p = &(*p)->rb_left;
                else if (client > entry)
                        p = &(*p)->rb_right;
        }
        rb_link_node(&client->node, parent, p);
        rb_insert_color(&client->node, &dev->clients);

        snprintf(debug_name, 64, "%u", client->pid);
        client->debug_root = debugfs_create_file(debug_name, 0664,
                                                 dev->debug_root, client,
                                                 &debug_client_fops);
        up_write(&dev->lock);

        return client;
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
        struct ion_device *dev = client->dev;
        struct rb_node *n;

        pr_debug("%s: %d\n", __func__, __LINE__);
        while ((n = rb_first(&client->handles))) {
                struct ion_handle *handle = rb_entry(n, struct ion_handle,
                                                     node);
                ion_handle_destroy(&handle->ref);
        }
        down_write(&dev->lock);
        if (client->task)
                put_task_struct(client->task);
        rb_erase(&client->node, &dev->clients);
        debugfs_remove_recursive(client->debug_root);
        up_write(&dev->lock);

        kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

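/*
 * Return the buffer's sg_table directly; no reference is taken, so the
 * table is only valid while the caller keeps the handle (and therefore the
 * buffer) alive.
 */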
struct sg_table *ion_sg_table(struct ion_client *client,
                              struct ion_handle *handle)
{
        struct ion_buffer *buffer;
        struct sg_table *table;

        mutex_lock(&client->lock);
        if (!ion_handle_validate(client, handle)) {
                pr_err("%s: invalid handle passed to map_dma.\n",
                       __func__);
                mutex_unlock(&client->lock);
                return ERR_PTR(-EINVAL);
        }
        buffer = handle->buffer;
        table = buffer->sg_table;
        mutex_unlock(&client->lock);
        return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
                                       struct device *dev,
                                       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
                                        enum dma_data_direction direction)
{
        struct dma_buf *dmabuf = attachment->dmabuf;
        struct ion_buffer *buffer = dmabuf->priv;

        ion_buffer_sync_for_device(buffer, attachment->dev, direction);
        return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
                              struct sg_table *table,
                              enum dma_data_direction direction)
{
}

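/*
 * Faulted (cached) buffers track dirty pages with one bit per sg entry.
 * ion_vm_fault() marks a page dirty when userspace touches it;
 * ion_buffer_sync_for_device() flushes only the dirty pages and then zaps
 * the userspace mappings so the pages fault in (and are marked dirty)
 * again on the next access.
 */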
static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
{
        unsigned long pages = buffer->sg_table->nents;
        unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;

        buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
        if (!buffer->dirty)
                return -ENOMEM;
        return 0;
}

struct ion_vma_list {
        struct list_head list;
        struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
                                       struct device *dev,
                                       enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;
        struct ion_vma_list *vma_list;

        pr_debug("%s: syncing for device %s\n", __func__,
                 dev ? dev_name(dev) : "null");

        if (!ion_buffer_fault_user_mappings(buffer))
                return;

        mutex_lock(&buffer->lock);
        for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
                if (!test_bit(i, buffer->dirty))
                        continue;
                dma_sync_sg_for_device(dev, sg, 1, dir);
                clear_bit(i, buffer->dirty);
        }
        list_for_each_entry(vma_list, &buffer->vmas, list) {
                struct vm_area_struct *vma = vma_list->vma;

                zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
                               NULL);
        }
        mutex_unlock(&buffer->lock);
}

int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct ion_buffer *buffer = vma->vm_private_data;
        struct scatterlist *sg;
        int i;

        mutex_lock(&buffer->lock);
        set_bit(vmf->pgoff, buffer->dirty);

        for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
                if (i != vmf->pgoff)
                        continue;
                dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
                vm_insert_page(vma, (unsigned long)vmf->virtual_address,
                               sg_page(sg));
                break;
        }
        mutex_unlock(&buffer->lock);
        return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
        struct ion_buffer *buffer = vma->vm_private_data;
        struct ion_vma_list *vma_list;

        vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
        if (!vma_list)
                return;
        vma_list->vma = vma;
        mutex_lock(&buffer->lock);
        list_add(&vma_list->list, &buffer->vmas);
        mutex_unlock(&buffer->lock);
        pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
        struct ion_buffer *buffer = vma->vm_private_data;
        struct ion_vma_list *vma_list, *tmp;

        pr_debug("%s\n", __func__);
        mutex_lock(&buffer->lock);
        list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
                if (vma_list->vma != vma)
                        continue;
                list_del(&vma_list->list);
                kfree(vma_list);
                pr_debug("%s: deleting %p\n", __func__, vma);
                break;
        }
        mutex_unlock(&buffer->lock);
}

struct vm_operations_struct ion_vma_ops = {
        .open = ion_vm_open,
        .close = ion_vm_close,
        .fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
        struct ion_buffer *buffer = dmabuf->priv;
        int ret = 0;

        if (!buffer->heap->ops->map_user) {
                pr_err("%s: this heap does not define a method for mapping "
                       "to userspace\n", __func__);
                return -EINVAL;
        }

        if (ion_buffer_fault_user_mappings(buffer)) {
                vma->vm_private_data = buffer;
                vma->vm_ops = &ion_vma_ops;
                ion_vm_open(vma);
                return 0;
        }

        if (!(buffer->flags & ION_FLAG_CACHED))
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

        mutex_lock(&buffer->lock);
        /* now map it to userspace */
        ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
        mutex_unlock(&buffer->lock);

        if (ret)
                pr_err("%s: failure mapping buffer to userspace\n",
                       __func__);

        return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
        struct ion_buffer *buffer = dmabuf->priv;
        ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
        struct ion_buffer *buffer = dmabuf->priv;
        return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
                               void *ptr)
{
        return;
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
                                        size_t len,
                                        enum dma_data_direction direction)
{
        struct ion_buffer *buffer = dmabuf->priv;
        void *vaddr;

        if (!buffer->heap->ops->map_kernel) {
                pr_err("%s: map kernel is not implemented by this heap.\n",
                       __func__);
                return -ENODEV;
        }

        mutex_lock(&buffer->lock);
        vaddr = ion_buffer_kmap_get(buffer);
        mutex_unlock(&buffer->lock);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);
        if (!vaddr)
                return -ENOMEM;
        return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
                                       size_t len,
                                       enum dma_data_direction direction)
{
        struct ion_buffer *buffer = dmabuf->priv;

        mutex_lock(&buffer->lock);
        ion_buffer_kmap_put(buffer);
        mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
        .map_dma_buf = ion_map_dma_buf,
        .unmap_dma_buf = ion_unmap_dma_buf,
        .mmap = ion_mmap,
        .release = ion_dma_buf_release,
        .begin_cpu_access = ion_dma_buf_begin_cpu_access,
        .end_cpu_access = ion_dma_buf_end_cpu_access,
        .kmap_atomic = ion_dma_buf_kmap,
        .kunmap_atomic = ion_dma_buf_kunmap,
        .kmap = ion_dma_buf_kmap,
        .kunmap = ion_dma_buf_kunmap,
};

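/*
 * Export a handle's buffer as a dma_buf.  The dma_buf holds its own
 * reference on the buffer (dropped in ion_dma_buf_release()), so it stays
 * valid even after the handle is freed.  ion_share_dma_buf_fd()
 * additionally installs the dma_buf in a file descriptor for passing to
 * userspace.
 */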
struct dma_buf *ion_share_dma_buf(struct ion_client *client,
                                  struct ion_handle *handle)
{
        struct ion_buffer *buffer;
        struct dma_buf *dmabuf;
        bool valid_handle;

        mutex_lock(&client->lock);
        valid_handle = ion_handle_validate(client, handle);
        mutex_unlock(&client->lock);
        if (!valid_handle) {
                WARN(1, "%s: invalid handle passed to share.\n", __func__);
                return ERR_PTR(-EINVAL);
        }

        buffer = handle->buffer;
        ion_buffer_get(buffer);
        dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
        if (IS_ERR(dmabuf)) {
                ion_buffer_put(buffer);
                return dmabuf;
        }

        return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
        struct dma_buf *dmabuf;
        int fd;

        dmabuf = ion_share_dma_buf(client, handle);
        if (IS_ERR(dmabuf))
                return PTR_ERR(dmabuf);

        fd = dma_buf_fd(dmabuf, O_CLOEXEC);
        if (fd < 0)
                dma_buf_put(dmabuf);

        return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
        struct dma_buf *dmabuf;
        struct ion_buffer *buffer;
        struct ion_handle *handle;

        dmabuf = dma_buf_get(fd);
        if (IS_ERR_OR_NULL(dmabuf))
                return ERR_PTR(PTR_ERR(dmabuf));

        /* if this memory came from ion */
        if (dmabuf->ops != &dma_buf_ops) {
                pr_err("%s: can not import dmabuf from another exporter\n",
                       __func__);
                dma_buf_put(dmabuf);
                return ERR_PTR(-EINVAL);
        }
        buffer = dmabuf->priv;

        mutex_lock(&client->lock);
        /* if a handle exists for this buffer just take a reference to it */
        handle = ion_handle_lookup(client, buffer);
        if (!IS_ERR_OR_NULL(handle)) {
                ion_handle_get(handle);
                goto end;
        }
        handle = ion_handle_create(client, buffer);
        if (IS_ERR_OR_NULL(handle))
                goto end;
        ion_handle_add(client, handle);
end:
        mutex_unlock(&client->lock);
        dma_buf_put(dmabuf);
        return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

static int ion_sync_for_device(struct ion_client *client, int fd)
{
        struct dma_buf *dmabuf;
        struct ion_buffer *buffer;

        dmabuf = dma_buf_get(fd);
        if (IS_ERR_OR_NULL(dmabuf))
                return PTR_ERR(dmabuf);

        /* if this memory came from ion */
        if (dmabuf->ops != &dma_buf_ops) {
                pr_err("%s: can not sync dmabuf from another exporter\n",
                       __func__);
                dma_buf_put(dmabuf);
                return -EINVAL;
        }
        buffer = dmabuf->priv;

        dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
                               buffer->sg_table->nents, DMA_BIDIRECTIONAL);
        dma_buf_put(dmabuf);
        return 0;
}

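/*
 * Device ioctls: ION_IOC_ALLOC/FREE manage handles, ION_IOC_SHARE/MAP
 * export a handle as a dma_buf fd, ION_IOC_IMPORT turns such an fd back
 * into a handle, ION_IOC_SYNC flushes a buffer for device access, and
 * ION_IOC_CUSTOM forwards to the device's custom_ioctl hook.
 */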
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        struct ion_client *client = filp->private_data;

        switch (cmd) {
        case ION_IOC_ALLOC:
        {
                struct ion_allocation_data data;

                if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
                        return -EFAULT;
                data.handle = ion_alloc(client, data.len, data.align,
                                        data.heap_id_mask, data.flags);

                if (IS_ERR(data.handle))
                        return PTR_ERR(data.handle);

                if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
                        ion_free(client, data.handle);
                        return -EFAULT;
                }
                break;
        }
        case ION_IOC_FREE:
        {
                struct ion_handle_data data;
                bool valid;

                if (copy_from_user(&data, (void __user *)arg,
                                   sizeof(struct ion_handle_data)))
                        return -EFAULT;
                mutex_lock(&client->lock);
                valid = ion_handle_validate(client, data.handle);
                mutex_unlock(&client->lock);
                if (!valid)
                        return -EINVAL;
                ion_free(client, data.handle);
                break;
        }
        case ION_IOC_SHARE:
        case ION_IOC_MAP:
        {
                struct ion_fd_data data;

                if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
                        return -EFAULT;
                data.fd = ion_share_dma_buf_fd(client, data.handle);
                if (copy_to_user((void __user *)arg, &data, sizeof(data)))
                        return -EFAULT;
                if (data.fd < 0)
                        return data.fd;
                break;
        }
        case ION_IOC_IMPORT:
        {
                struct ion_fd_data data;
                int ret = 0;

                if (copy_from_user(&data, (void __user *)arg,
                                   sizeof(struct ion_fd_data)))
                        return -EFAULT;
                data.handle = ion_import_dma_buf(client, data.fd);
                if (IS_ERR(data.handle)) {
                        ret = PTR_ERR(data.handle);
                        data.handle = NULL;
                }
                if (copy_to_user((void __user *)arg, &data,
                                 sizeof(struct ion_fd_data)))
                        return -EFAULT;
                if (ret < 0)
                        return ret;
                break;
        }
        case ION_IOC_SYNC:
        {
                struct ion_fd_data data;

                if (copy_from_user(&data, (void __user *)arg,
                                   sizeof(struct ion_fd_data)))
                        return -EFAULT;
                ion_sync_for_device(client, data.fd);
                break;
        }
        case ION_IOC_CUSTOM:
        {
                struct ion_device *dev = client->dev;
                struct ion_custom_data data;

                if (!dev->custom_ioctl)
                        return -ENOTTY;
                if (copy_from_user(&data, (void __user *)arg,
                                   sizeof(struct ion_custom_data)))
                        return -EFAULT;
                return dev->custom_ioctl(client, data.cmd, data.arg);
        }
        default:
                return -ENOTTY;
        }
        return 0;
}

static int ion_release(struct inode *inode, struct file *file)
{
        struct ion_client *client = file->private_data;

        pr_debug("%s: %d\n", __func__, __LINE__);
        ion_client_destroy(client);
        return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
        struct miscdevice *miscdev = file->private_data;
        struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
        struct ion_client *client;

        pr_debug("%s: %d\n", __func__, __LINE__);
        client = ion_client_create(dev, "user");
        if (IS_ERR_OR_NULL(client))
                return PTR_ERR(client);
        file->private_data = client;

        return 0;
}

static const struct file_operations ion_fops = {
        .owner = THIS_MODULE,
        .open = ion_open,
        .release = ion_release,
        .unlocked_ioctl = ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
                                   unsigned int id)
{
        size_t size = 0;
        struct rb_node *n;

        mutex_lock(&client->lock);
        for (n = rb_first(&client->handles); n; n = rb_next(n)) {
                struct ion_handle *handle = rb_entry(n,
                                                     struct ion_handle,
                                                     node);
                if (handle->buffer->heap->id == id)
                        size += handle->buffer->size;
        }
        mutex_unlock(&client->lock);
        return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
        struct ion_heap *heap = s->private;
        struct ion_device *dev = heap->dev;
        struct rb_node *n;
        size_t total_size = 0;
        size_t total_orphaned_size = 0;

        seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
        seq_printf(s, "----------------------------------------------------\n");

        for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
                struct ion_client *client = rb_entry(n, struct ion_client,
                                                     node);
                size_t size = ion_debug_heap_total(client, heap->id);
                if (!size)
                        continue;
                if (client->task) {
                        char task_comm[TASK_COMM_LEN];

                        get_task_comm(task_comm, client->task);
                        seq_printf(s, "%16s %16u %16zu\n", task_comm,
                                   client->pid, size);
                } else {
                        seq_printf(s, "%16s %16u %16zu\n", client->name,
                                   client->pid, size);
                }
        }
        seq_printf(s, "----------------------------------------------------\n");
        seq_printf(s, "orphaned allocations (info is from last known client):"
                   "\n");
        mutex_lock(&dev->buffer_lock);
        for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
                struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
                                                     node);
                if (buffer->heap->id != heap->id)
                        continue;
                total_size += buffer->size;
                if (!buffer->handle_count) {
                        seq_printf(s, "%16s %16u %16zu %d %d\n", buffer->task_comm,
                                   buffer->pid, buffer->size, buffer->kmap_cnt,
                                   atomic_read(&buffer->ref.refcount));
                        total_orphaned_size += buffer->size;
                }
        }
        mutex_unlock(&dev->buffer_lock);
        seq_printf(s, "----------------------------------------------------\n");
        seq_printf(s, "%16s %16zu\n", "total orphaned",
                   total_orphaned_size);
        seq_printf(s, "%16s %16zu\n", "total ", total_size);
        seq_printf(s, "----------------------------------------------------\n");

        if (heap->debug_show)
                heap->debug_show(heap, s, unused);

        return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
        return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
        .open = ion_debug_heap_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

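/*
 * Deferred free support: heaps with ION_HEAP_FLAG_DEFER_FREE queue freed
 * buffers on heap->free_list and a low-priority kthread releases them in
 * the background; ion_buffer_create() drains the list synchronously when
 * an allocation fails, to reclaim the memory immediately.
 */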
static bool ion_heap_free_list_is_empty(struct ion_heap *heap)
{
        bool is_empty;

        rt_mutex_lock(&heap->lock);
        is_empty = list_empty(&heap->free_list);
        rt_mutex_unlock(&heap->lock);

        return is_empty;
}

static int ion_heap_deferred_free(void *data)
{
        struct ion_heap *heap = data;

        while (true) {
                struct ion_buffer *buffer;

                wait_event_freezable(heap->waitqueue,
                                     !ion_heap_free_list_is_empty(heap));

                rt_mutex_lock(&heap->lock);
                if (list_empty(&heap->free_list)) {
                        rt_mutex_unlock(&heap->lock);
                        continue;
                }
                buffer = list_first_entry(&heap->free_list, struct ion_buffer,
                                          list);
                list_del(&buffer->list);
                rt_mutex_unlock(&heap->lock);
                _ion_buffer_destroy(buffer);
        }

        return 0;
}

static bool ion_heap_drain_freelist(struct ion_heap *heap)
{
        struct ion_buffer *buffer, *tmp;

        if (ion_heap_free_list_is_empty(heap))
                return false;
        rt_mutex_lock(&heap->lock);
        list_for_each_entry_safe(buffer, tmp, &heap->free_list, list) {
                list_del(&buffer->list);
                _ion_buffer_destroy(buffer);
        }
        BUG_ON(!list_empty(&heap->free_list));
        rt_mutex_unlock(&heap->lock);

        return true;
}

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
        struct sched_param param = { .sched_priority = 0 };

        if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
            !heap->ops->unmap_dma)
                pr_err("%s: can not add heap with invalid ops struct.\n",
                       __func__);

        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) {
                INIT_LIST_HEAD(&heap->free_list);
                rt_mutex_init(&heap->lock);
                init_waitqueue_head(&heap->waitqueue);
                heap->task = kthread_run(ion_heap_deferred_free, heap,
                                         "%s", heap->name);
                if (IS_ERR(heap->task))
                        pr_err("%s: creating thread for deferred free failed\n",
                               __func__);
                else
                        sched_setscheduler(heap->task, SCHED_IDLE, &param);
        }

        heap->dev = dev;
        down_write(&dev->lock);
        /* use negative heap->id to reverse the priority -- when traversing
           the list later attempt higher id numbers first */
        plist_node_init(&heap->node, -heap->id);
        plist_add(&heap->node, &dev->heaps);
        debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
                            &debug_heap_fops);
        up_write(&dev->lock);
}

struct ion_device *ion_device_create(long (*custom_ioctl)
                                     (struct ion_client *client,
                                      unsigned int cmd,
                                      unsigned long arg))
{
        struct ion_device *idev;
        int ret;

        idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
        if (!idev)
                return ERR_PTR(-ENOMEM);

        idev->dev.minor = MISC_DYNAMIC_MINOR;
        idev->dev.name = "ion";
        idev->dev.fops = &ion_fops;
        idev->dev.parent = NULL;
        ret = misc_register(&idev->dev);
        if (ret) {
                pr_err("ion: failed to register misc device.\n");
                kfree(idev);
                return ERR_PTR(ret);
        }

        idev->debug_root = debugfs_create_dir("ion", NULL);
        if (IS_ERR_OR_NULL(idev->debug_root))
                pr_err("ion: failed to create debug files.\n");

        idev->custom_ioctl = custom_ioctl;
        idev->buffers = RB_ROOT;
        mutex_init(&idev->buffer_lock);
        init_rwsem(&idev->lock);
        plist_head_init(&idev->heaps);
        idev->clients = RB_ROOT;
        return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
        misc_deregister(&dev->dev);
        /* XXX need to free the heaps and clients ? */
        kfree(dev);
}

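/*
 * ion_reserve() runs at early boot (__init, memblock): for each heap in the
 * platform data it either reserves the fixed region given by base/size or,
 * when base is 0, carves one out with memblock_alloc_base() and records the
 * chosen address back into the heap data.
 */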
void __init ion_reserve(struct ion_platform_data *data)
{
        int i;

        for (i = 0; i < data->nr; i++) {
                if (data->heaps[i].size == 0)
                        continue;

                if (data->heaps[i].base == 0) {
                        phys_addr_t paddr;
                        paddr = memblock_alloc_base(data->heaps[i].size,
                                                    data->heaps[i].align,
                                                    MEMBLOCK_ALLOC_ANYWHERE);
                        if (!paddr) {
                                pr_err("%s: error allocating memblock for "
                                       "heap %d\n",
                                       __func__, i);
                                continue;
                        }
                        data->heaps[i].base = paddr;
                } else {
                        int ret = memblock_reserve(data->heaps[i].base,
                                                   data->heaps[i].size);
                        if (ret)
                                pr_err("memblock reserve of %zx@%lx failed\n",
                                       data->heaps[i].size,
                                       data->heaps[i].base);
                }
                pr_info("%s: %s reserved base %lx size %zu\n", __func__,
                        data->heaps[i].name,
                        data->heaps[i].base,
                        data->heaps[i].size);
        }
}