/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>

#include "ion.h"
#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @clients:		an rb tree of all the clients in the system
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the handles tree
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
};

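/*
 * Buffers that are mapped cacheable but not flagged as needing an explicit
 * sync (ION_FLAG_CACHED set, ION_FLAG_CACHED_NEEDS_SYNC clear) are faulted
 * into userspace one page at a time (see ion_vm_fault() below), so that
 * dirty pages can be tracked and flushed in ion_buffer_sync_for_device()
 * before a device touches the buffer.
 */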
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return ((buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
			    i) {
			if (sg_dma_len(sg) == PAGE_SIZE)
				continue;
			pr_err("%s: cached mappings that will be faulted in "
			       "must have pagewise sg_lists\n", __func__);
			ret = -EINVAL;
			goto err;
		}

		ret = ion_buffer_alloc_dirty(buffer);
		if (ret)
			goto err;
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/* this will set up dma addresses for the sglist -- it is not
	   technically correct as per the dma api -- a specific
	   device isn't really taking ownership here.  However, in practice on
	   our systems the only dma_address space is physical addresses.
	   Additionally, we can't afford the overhead of invalidating every
	   allocation via dma_map_sg. The implicit contract here is that
	   memory coming from the heaps is ready for dma, i.e. if it has a
	   cached mapping that mapping has been invalidated */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
	kfree(buffer);
	return ERR_PTR(ret);
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);
	if (buffer->flags & ION_FLAG_CACHED)
		kfree(buffer->dirty);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_mask,
			     unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;

	pr_debug("%s: len %zu align %zu heap_mask %u flags %x\n", __func__,
		 len, align, heap_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	down_read(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
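
/*
 * Typical in-kernel usage (illustrative only; all names are from this file):
 *
 *	struct ion_client *client = ion_client_create(idev, -1, "example");
 *	struct ion_handle *handle = ion_alloc(client, size, PAGE_SIZE,
 *					      heap_mask, 0);
 *	void *vaddr = ion_map_kernel(client, handle);
 *	...
 *	ion_unmap_kernel(client, handle);
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */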

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAPS] = {0};
	const char *names[ION_NUM_HEAPS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		if (!names[type])
			names[type] = handle->buffer->heap->name;
		sizes[type] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAPS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16u\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);
	client->name = name;
	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;

	down_write(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	up_write(&dev->lock);

	return client;
}

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
{
	unsigned long pages = buffer->sg_table->nents;
	unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;

	buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
	if (!buffer->dirty)
		return -ENOMEM;
	return 0;
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;
	struct ion_vma_list *vma_list;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (!test_bit(i, buffer->dirty))
			continue;
		dma_sync_sg_for_device(dev, sg, 1, dir);
		clear_bit(i, buffer->dirty);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

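/*
 * Note on fault-based mappings: for buffers selected by
 * ion_buffer_fault_user_mappings(), pages are only inserted into a user
 * vma on demand and marked in buffer->dirty, so that
 * ion_buffer_sync_for_device() can flush exactly the touched pages and
 * zap the user mappings before a device accesses the buffer.
 */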
int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct scatterlist *sg;
	int i;

	mutex_lock(&buffer->lock);
	set_bit(vmf->pgoff, buffer->dirty);

	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (i != vmf->pgoff)
			continue;
		dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
		vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			       sg_page(sg));
		break;
	}
	mutex_unlock(&buffer->lock);
	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping "
		       "to userspace\n", __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;
	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
	return;
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	if (!vaddr)
		return -ENOMEM;
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;
	int fd;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return PTR_ERR(dmabuf);
	}
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf);

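/*
 * File descriptors produced by ion_share_dma_buf() (or by the ION_IOC_SHARE
 * ioctl below) can be handed to other processes or drivers and turned back
 * into an ion_handle with ion_import_dma_buf(); only dma-bufs exported
 * through this file's dma_buf_ops can be imported.
 */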
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

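/*
 * Userspace flow (illustrative): a process opens /dev/ion, allocates with
 * ION_IOC_ALLOC, turns the returned handle into a dma-buf fd with
 * ION_IOC_SHARE, mmap()s that fd, and finally releases the handle with
 * ION_IOC_FREE.  ION_IOC_IMPORT and ION_IOC_SYNC operate on such fds.
 */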
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					data.heap_mask, data.flags);

		if (IS_ERR(data.handle))
			return PTR_ERR(data.handle);

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, data.handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.fd = ion_share_dma_buf(client, data.handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		if (data.fd < 0)
			return data.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;
		int ret = 0;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		data.handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(data.handle)) {
			ret = PTR_ERR(data.handle);
			data.handle = NULL;
		}
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_SYNC:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		ion_sync_for_device(client, data.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, -1, "user");
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   enum ion_heap_type type)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->type == type)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
	seq_printf(s, "----------------------------------------------------\n");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->type);
		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16.s %16u %16u\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16.s %16u %16u\n", client->name,
				   client->pid, size);
		}
	}
	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "orphaned allocations (info is from last known client):"
		   "\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->type != heap->type)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16.s %16u %16u %d %d\n", buffer->task_comm,
				   buffer->pid, buffer->size, buffer->kmap_cnt, buffer->ref);
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "%16.s %16u\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16.s %16u\n", "total ", total_size);
	seq_printf(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct rb_node **p = &dev->heaps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_heap *entry;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	heap->dev = dev;
	down_write(&dev->lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_heap, node);

		if (heap->id < entry->id) {
			p = &(*p)->rb_left;
		} else if (heap->id > entry->id) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: can not insert multiple heaps with "
			       "id %d\n", __func__, heap->id);
			goto end;
		}
	}

	rb_link_node(&heap->node, parent, p);
	rb_insert_color(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
end:
	up_write(&dev->lock);
}

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (IS_ERR_OR_NULL(idev->debug_root))
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	idev->heaps = RB_ROOT;
	idev->clients = RB_ROOT;
	return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

void __init ion_reserve(struct ion_platform_data *data)
{
	int i, ret;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;
		ret = memblock_reserve(data->heaps[i].base,
				       data->heaps[i].size);
		if (ret)
			pr_err("memblock reserve of %x@%lx failed\n",
			       data->heaps[i].size,
			       data->heaps[i].base);
	}
}