/*
 *
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>

#include "ion.h"
#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @custom_ioctl:	optional device-specific ioctl handler
 * @clients:		an rb tree of all the clients attached to this device
 * @debug_root:		debugfs root directory for this device
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @idr:		an idr space for allocating handle ids
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @task:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @id:			client-unique id allocated by client->idr
 *
 * Modifications to node, kmap_cnt or the kernel mapping should be protected
 * by the lock in the client.  Other fields are never changed after
 * initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return ((buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}

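/*
 * A struct page pointer is at least word aligned, so bit 0 is free.  The
 * helpers above use it to carry per-page dirty state in buffer->pages[]
 * for buffers that fault in their user mappings, e.g. (illustrative only):
 *
 *	ion_buffer_page_dirty(buffer->pages + i);	   - tag page i dirty
 *	if (ion_buffer_page_is_dirty(buffer->pages[i]))   - test the tag
 *		do_sync(ion_buffer_page(buffer->pages[i])); - strip tag before use
 *	ion_buffer_page_clean(buffer->pages + i);	   - clear the tag
 *
 * do_sync() above is a stand-in for whatever cache maintenance the caller
 * needs; see ion_buffer_sync_for_device() below for the real user.
 */
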
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
				     struct ion_device *dev,
				     unsigned long len,
				     unsigned long align,
				     unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(table == NULL, "heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err1;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg_dma_len(sg) / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}

		if (ret)
			goto err;
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/* this will set up dma addresses for the sglist -- it is not
	   technically correct as per the dma api -- a specific
	   device isn't really taking ownership here.  However, in practice on
	   our systems the only dma_address space is physical addresses.
	   Additionally, we can't afford the overhead of invalidating every
	   allocation via dma_map_sg.  The implicit contract here is that
	   memory coming from the heaps is ready for dma, i.e. if it has a
	   cached mapping that mapping has been invalidated */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
err1:
	if (buffer->pages)
		vfree(buffer->pages);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	if (buffer->pages)
		vfree(buffer->pages);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by; it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system.
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

static struct ion_handle *ion_uhandle_get(struct ion_client *client, int id)
{
	return idr_find(&client->idr, id);
}

static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	return (ion_uhandle_get(client, handle->id) == handle);
}

static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int rc;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	do {
		int id;
		rc = idr_pre_get(&client->idr, GFP_KERNEL);
		if (!rc)
			return -ENOMEM;
		rc = idr_get_new_above(&client->idr, handle, 1, &id);
		handle->id = id;
	} while (rc == -EAGAIN);

	if (rc < 0)
		return rc;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

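/*
 * Note on the idr usage above: this is the old two-step idr API.
 * idr_pre_get() preallocates memory and idr_get_new_above() hands out an
 * id >= 1 (so 0 never names a valid handle); it returns -EAGAIN when the
 * preallocation has been consumed, hence the retry loop.
 */
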
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller, allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}
	mutex_unlock(&client->lock);

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
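
/*
 * Illustrative sketch only (not part of the driver): how an in-kernel user
 * of this API might allocate, map and release a buffer.  The device pointer
 * and heap id mask are assumed to come from the caller/board code.
 */
static int __maybe_unused ion_example_kernel_user(struct ion_device *idev,
						  unsigned int heap_id_mask)
{
	struct ion_client *client;
	struct ion_handle *handle;
	void *vaddr;
	int ret = 0;

	client = ion_client_create(idev, "example");
	if (IS_ERR(client))
		return PTR_ERR(client);

	/* one page, page aligned, with a cached mapping */
	handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE, heap_id_mask,
			   ION_FLAG_CACHED);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_client;
	}

	vaddr = ion_map_kernel(client, handle);
	if (!IS_ERR(vaddr)) {
		/* ... CPU accesses through vaddr go here ... */
		ion_unmap_kernel(client, handle);
	}

	ion_free(client, handle);
out_client:
	ion_client_destroy(client);
	return ret;
}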

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL, "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->name = name;
	client->task = task;
	client->pid = pid;

	down_write(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	up_write(&dev->lock);

	return client;
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_remove_all(&client->idr);
	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			__dma_page_cpu_to_dev(page, 0, PAGE_SIZE, dir);
		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);

	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			     ion_buffer_page(buffer->pages[vmf->pgoff]));
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

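/*
 * Two mmap strategies are used below.  Buffers that are cached but do not
 * need explicit sync (ion_buffer_fault_user_mappings()) are mapped lazily:
 * ion_vm_fault() inserts pages one at a time and tags them dirty via the
 * low bit of the buffer->pages[] entries, so ion_buffer_sync_for_device()
 * can later clean only the touched pages and zap the user mappings.  All
 * other buffers are mapped up front through heap->ops->map_user().
 */
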
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping "
		       "to userspace\n", __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;
	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
	return;
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle))
		goto end;
	ret = ion_handle_add(client, handle);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);
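
/*
 * Illustrative sketch only (not part of the driver): passing a buffer from
 * one client to another through the dma-buf fd returned by
 * ion_share_dma_buf_fd().  Both clients are assumed to belong to the same
 * ion device; error handling is elided and the fd remains installed in the
 * caller's fd table (normally it would be handed to userspace or closed).
 */
static int __maybe_unused ion_example_share(struct ion_client *exporter,
					    struct ion_handle *handle,
					    struct ion_client *importer)
{
	struct ion_handle *imported;
	int fd;

	fd = ion_share_dma_buf_fd(exporter, handle);
	if (fd < 0)
		return fd;

	/* the importer gets its own handle on the same ion_buffer */
	imported = ion_import_dma_buf(importer, fd);
	if (IS_ERR(imported))
		return PTR_ERR(imported);

	ion_free(importer, imported);
	return 0;
}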

static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;
		struct ion_handle *handle;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		handle = ion_alloc(client, data.len, data.align,
				   data.heap_id_mask, data.flags);

		if (IS_ERR(handle))
			return PTR_ERR(handle);

		data.handle = (ion_user_handle_t)handle->id;

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		struct ion_handle *handle;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		handle = ion_uhandle_get(client, (int)data.handle);
		mutex_unlock(&client->lock);
		if (!handle)
			return -EINVAL;
		ion_free(client, handle);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_fd_data data;
		struct ion_handle *handle;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		handle = ion_uhandle_get(client, (int)data.handle);
		data.fd = ion_share_dma_buf_fd(client, handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		if (data.fd < 0)
			return data.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;
		struct ion_handle *handle;
		int ret = 0;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(handle))
			ret = PTR_ERR(handle);
		else
			data.handle = (ion_user_handle_t)handle->id;

		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_SYNC:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		ion_sync_for_device(client, data.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}

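/*
 * Illustrative userspace view of the ioctl interface above (sketch only,
 * error handling elided; struct ion_allocation_data and the ION_IOC_*
 * numbers come from the ion.h header shipped with this driver):
 *
 *	int ion_fd = open("/dev/ion", O_RDWR);
 *	struct ion_allocation_data alloc = {
 *		.len = 4096, .align = 4096,
 *		.heap_id_mask = 1 << heap_id, .flags = 0,
 *	};
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);       - returns a handle token
 *	struct ion_fd_data share = { .handle = alloc.handle };
 *	ioctl(ion_fd, ION_IOC_SHARE, &share);       - share.fd is a dma-buf fd
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       share.fd, 0);
 *	...
 *	struct ion_handle_data free_data = { .handle = alloc.handle };
 *	ioctl(ion_fd, ION_IOC_FREE, &free_data);
 */
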
static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, "user");
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner          = THIS_MODULE,
	.open           = ion_open,
	.release        = ion_release,
	.unlocked_ioctl = ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
	seq_printf(s, "----------------------------------------------------\n");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);
		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16.s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16.s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "orphaned allocations (info is from last known client):"
		   "\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16.s %16u %16zu %d %d\n", buffer->task_comm,
				   buffer->pid, buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "%16.s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16.s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16.s %16zu\n", "deferred free",
			   heap->free_list_size);
	seq_printf(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

#ifdef DEBUG_HEAP_SHRINKER
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	if (!val)
		return 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	sc.nr_to_scan = objs;

	heap->shrinker.shrink(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
#endif

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/* use negative heap->id to reverse the priority -- when traversing
	   the list later attempt higher id numbers first */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
#ifdef DEBUG_HEAP_SHRINKER
	if (heap->shrinker.shrink) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debugfs_create_file(debug_name, 0644, dev->debug_root, heap,
				    &debug_shrink_fops);
	}
#endif
	up_write(&dev->lock);
}

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root)
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	return idev;
}

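/*
 * Illustrative sketch only (not part of the driver): board/platform code is
 * expected to wire things up roughly as below, creating one ion_device and
 * registering each heap with it.  ion_heap_create() and struct
 * ion_platform_heap come from ion_priv.h / ion.h; error handling is elided.
 */
static void __maybe_unused ion_example_probe(struct ion_platform_data *pdata)
{
	struct ion_device *idev = ion_device_create(NULL);
	int i;

	if (IS_ERR(idev))
		return;

	for (i = 0; i < pdata->nr; i++) {
		struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);

		if (!IS_ERR_OR_NULL(heap))
			ion_device_add_heap(idev, heap);
	}
}
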
void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;

		if (data->heaps[i].base == 0) {
			phys_addr_t paddr;
			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
			if (!paddr) {
				pr_err("%s: error allocating memblock for heap %d\n",
				       __func__, i);
				continue;
			}
			data->heaps[i].base = paddr;
		} else {
			int ret = memblock_reserve(data->heaps[i].base,
						   data->heaps[i].size);
			if (ret)
				pr_err("memblock reserve of %zx@%lx failed\n",
				       data->heaps[i].size,
				       data->heaps[i].base);
		}
		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
			data->heaps[i].name,
			data->heaps[i].base,
			data->heaps[i].size);
	}
}