/*
 *
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>
#include <linux/sched/task.h>

#include "ion.h"

static struct ion_device *internal_dev;
static int heap_id;

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.\n", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

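/*
 * Allocate a buffer from @heap. If the first attempt fails and the heap
 * frees asynchronously (ION_HEAP_FLAG_DEFER_FREE), drain the heap's
 * freelist and retry once before giving up.
 */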
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	int ret;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;

	ret = heap->ops->allocate(heap, buffer, len, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, flags);
		if (ret)
			goto err2;
	}

	if (!buffer->sg_table) {
		WARN_ONCE(1, "This heap needs to set the sgtable");
		ret = -EINVAL;
		goto err1;
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

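/*
 * Release the heap's backing storage for @buffer and free the bookkeeping
 * struct. A leaked kernel mapping (kmap_cnt > 0) is torn down with a
 * warning rather than left behind.
 */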
void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

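/*
 * Refcounted kernel mapping helpers: the first ion_buffer_kmap_get() call
 * maps the buffer through heap->ops->map_kernel() and caches the result in
 * buffer->vaddr; later calls only bump kmap_cnt. The callers in this file
 * take buffer->lock around these helpers.
 */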
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(!vaddr,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

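/*
 * Duplicate the buffer's sg_table so that each attached device gets a
 * private copy to map: dma_map_sg() writes per-device DMA addresses into
 * the table, so attachments must not share one. The copied entries start
 * with their dma_address cleared.
 */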
static struct sg_table *dup_sg_table(struct sg_table *table)
{
	struct sg_table *new_table;
	int ret, i;
	struct scatterlist *sg, *new_sg;

	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
	if (ret) {
		kfree(new_table);
		return ERR_PTR(-ENOMEM);
	}

	new_sg = new_table->sgl;
	for_each_sg(table->sgl, sg, table->nents, i) {
		memcpy(new_sg, sg, sizeof(*sg));
		new_sg->dma_address = 0;
		new_sg = sg_next(new_sg);
	}

	return new_table;
}

static void free_duped_table(struct sg_table *table)
{
	sg_free_table(table);
	kfree(table);
}

struct ion_dma_buf_attachment {
	struct device *dev;
	struct sg_table *table;
	struct list_head list;
};

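/*
 * dma-buf attach: allocate per-attachment state, give the device its own
 * duplicated sg_table, and track the attachment on buffer->attachments so
 * begin/end_cpu_access can sync caches for every attached device.
 */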
static int ion_dma_buf_attach(struct dma_buf *dmabuf, struct device *dev,
			      struct dma_buf_attachment *attachment)
{
	struct ion_dma_buf_attachment *a;
	struct sg_table *table;
	struct ion_buffer *buffer = dmabuf->priv;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = dup_sg_table(buffer->sg_table);
	if (IS_ERR(table)) {
		kfree(a);
		return -ENOMEM;
	}

	a->table = table;
	a->dev = dev;
	INIT_LIST_HEAD(&a->list);

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void ion_dma_buf_detach(struct dma_buf *dmabuf,
			       struct dma_buf_attachment *attachment)
{
	struct ion_dma_buf_attachment *a = attachment->priv;
	struct ion_buffer *buffer = dmabuf->priv;

	free_duped_table(a->table);
	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	kfree(a);
}

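/*
 * Map/unmap the attachment's private sg_table for DMA. dma_map_sg()
 * returns the number of entries mapped, so zero indicates failure.
 */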
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct ion_dma_buf_attachment *a = attachment->priv;
	struct sg_table *table;

	table = a->table;

	if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
			direction))
		return ERR_PTR(-ENOMEM);

	return table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
	dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
}

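/*
 * Map the buffer into userspace via the heap's map_user op. Buffers
 * allocated without ION_FLAG_CACHED get a write-combined mapping.
 */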
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	_ion_buffer_destroy(buffer);
}

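/*
 * The kmap callbacks return addresses within the cached kernel mapping;
 * buffer->vaddr is only valid while a kmap reference is held, so CPU
 * access must be bracketed by begin/end_cpu_access.
 */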
static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

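/*
 * CPU access bracketing. begin_cpu_access pins a kernel mapping when the
 * heap provides map_kernel (the returned vaddr is not used here; the call
 * only takes a kmap reference that end_cpu_access drops) and syncs the
 * buffer for the CPU on every attachment; end_cpu_access syncs it back
 * toward the devices.
 */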
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;
	struct ion_dma_buf_attachment *a;

	/*
	 * TODO: Move this elsewhere because we don't always need a vaddr
	 */
	if (buffer->heap->ops->map_kernel) {
		mutex_lock(&buffer->lock);
		vaddr = ion_buffer_kmap_get(buffer);
		mutex_unlock(&buffer->lock);
	}

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
				    direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_dma_buf_attachment *a;

	if (buffer->heap->ops->map_kernel) {
		mutex_lock(&buffer->lock);
		ion_buffer_kmap_put(buffer);
		mutex_unlock(&buffer->lock);
	}

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
				       direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

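/* Every ion buffer is exported to userspace as a dma-buf with these ops. */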
static const struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.attach = ion_dma_buf_attach,
	.detach = ion_dma_buf_detach,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.map_atomic = ion_dma_buf_kmap,
	.unmap_atomic = ion_dma_buf_kunmap,
	.map = ion_dma_buf_kmap,
	.unmap = ion_dma_buf_kunmap,
};

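/*
 * Allocate a buffer and hand it to the caller as a dma-buf fd. For
 * illustration only, a userspace client would typically reach this path
 * through the ioctl layer along these lines (a sketch, assuming the
 * matching struct ion_allocation_data from the staging uapi header):
 *
 *	struct ion_allocation_data data = {
 *		.len = 4096,
 *		.heap_id_mask = 1 << heap_id,
 *		.flags = ION_FLAG_CACHED,
 *	};
 *	ioctl(ion_fd, ION_IOC_ALLOC, &data);
 *	// on success, data.fd holds the dma-buf fd produced here
 */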
int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
{
	struct ion_device *dev = internal_dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	int fd;
	struct dma_buf *dmabuf;

	pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
		 len, heap_id_mask, flags);
	/*
	 * Traverse the list of heaps available in this system in priority
	 * order. If the heap type is supported by the client and matches the
	 * caller's request, allocate from it. Repeat until an allocation has
	 * succeeded or all heaps have been tried.
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return -EINVAL;

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (!buffer)
		return -ENODEV;

	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		_ion_buffer_destroy(buffer);
		return PTR_ERR(dmabuf);
	}

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}

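/*
 * Report the heaps registered with the device. Callers use a two-step
 * pattern: a query with a NULL heaps pointer just returns the count, and
 * a second query with a buffer of that size receives the ion_heap_data
 * records.
 */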
int ion_query_heaps(struct ion_heap_query *query)
{
	struct ion_device *dev = internal_dev;
	struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
	int ret = -EINVAL, cnt = 0, max_cnt;
	struct ion_heap *heap;
	struct ion_heap_data hdata;

	memset(&hdata, 0, sizeof(hdata));

	down_read(&dev->lock);
	if (!buffer) {
		query->cnt = dev->heap_cnt;
		ret = 0;
		goto out;
	}

	if (query->cnt <= 0)
		goto out;

	max_cnt = query->cnt;

	plist_for_each_entry(heap, &dev->heaps, node) {
		strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
		hdata.name[sizeof(hdata.name) - 1] = '\0';
		hdata.type = heap->type;
		hdata.heap_id = heap->id;

		if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
			ret = -EFAULT;
			goto out;
		}

		cnt++;
		if (cnt >= max_cnt)
			break;
	}

	query->cnt = cnt;
	ret = 0;
out:
	up_read(&dev->lock);
	return ret;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = ion_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = ion_ioctl,
#endif
};

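/*
 * debugfs hooks for exercising a heap's shrinker by hand: writing a count
 * to <heap>_shrink scans that many objects (0 means all of them), and
 * reading it back reports how many objects are currently reclaimable.
 */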
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = val;

	if (!val) {
		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
		sc.nr_to_scan = objs;
	}

	heap->shrinker.scan_objects(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");

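/*
 * Register @heap with the ion device. Heaps sit on a plist keyed by
 * -heap->id, so ids assigned later sort toward the front of the list and
 * are tried first by ion_alloc().
 */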
void ion_device_add_heap(struct ion_heap *heap)
{
	struct dentry *debug_file;
	struct ion_device *dev = internal_dev;

	if (!heap->ops->allocate || !heap->ops->free)
		pr_err("%s: cannot add heap with invalid ops struct.\n",
		       __func__);

	spin_lock_init(&heap->free_lock);
	heap->free_list_size = 0;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
		ion_heap_init_shrinker(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	heap->id = heap_id++;
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);

	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debug_file = debugfs_create_file(
			debug_name, 0644, dev->debug_root, heap,
			&debug_shrink_fops);
		if (!debug_file) {
			char buf[256], *path;

			path = dentry_path(dev->debug_root, buf, 256);
			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
			       path, debug_name);
		}
	}

	dev->heap_cnt++;
	up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);

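/*
 * Create the single global /dev/ion misc device at boot (subsys_initcall).
 * A missing debugfs root is logged but not fatal.
 */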
static int ion_device_create(void)
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(*idev), GFP_KERNEL);
	if (!idev)
		return -ENOMEM;

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ret;
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root) {
		pr_err("ion: failed to create debugfs root directory.\n");
		goto debugfs_done;
	}

debugfs_done:
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	internal_dev = idev;
	return 0;
}
subsys_initcall(ion_device_create);