/*
 *
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>
#include <linux/sched/task.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return (buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

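/*
 * For buffers that are faulted into userspace on demand, the dirty state of
 * each page is tracked in the low bit of its struct page pointer stored in
 * buffer->pages; the helpers below tag, test and clear that bit.
 */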
static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	if (buffer->sg_table == NULL) {
		WARN_ONCE(1, "This heap needs to set the sgtable");
		ret = -EINVAL;
		goto err1;
	}

	table = buffer->sg_table;
	buffer->dev = dev;
	buffer->size = len;

	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err1;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/*
	 * this will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here. However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg. The implicit contract here is that
	 * memory coming from the heaps is ready for dma, ie if it has a
	 * cached mapping that mapping has been invalidated
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
	}
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	vfree(buffer->pages);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer. At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

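/*
 * An ion_handle is a client-local reference to a buffer: creating one takes
 * a reference on the underlying ion_buffer and bumps its handle count.
 */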
static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

int ion_handle_put_nolock(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = ion_handle_put_nolock(handle);
	mutex_unlock(&client->lock);

	return ret;
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
					       int id)
{
	struct ion_handle *handle;

	handle = idr_find(&client->idr, id);
	if (handle)
		ion_handle_get(handle);

	return handle ? handle : ERR_PTR(-EINVAL);
}

struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
					int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = ion_handle_get_by_id_nolock(client, id);
	mutex_unlock(&client->lock);

	return handle;
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return idr_find(&client->idr, handle->id) == handle;
}

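/*
 * Register a new handle with the client: allocate its user-visible id from
 * the client's idr and link it into the rb-tree keyed by buffer pointer,
 * which is what ion_handle_lookup() walks when importing a dma-buf.
 */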
static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order. If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it. Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);

void ion_free_nolock(struct ion_client *client,
		     struct ion_handle *handle)
{
	if (!ion_handle_validate(client, handle)) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put_nolock(handle);
}

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	ion_free_nolock(client, handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

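/*
 * Kernel mappings are reference counted at two levels: per buffer (the
 * actual heap map_kernel/unmap_kernel calls) and per handle, so one handle
 * can be unmapped without tearing down a mapping another handle still uses.
 */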
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	if (!handle->kmap_cnt) {
		WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
		return;
	}
	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static struct mutex debugfs_mutex;
static struct rb_root *ion_root_client;
static int is_client_alive(struct ion_client *client)
{
	struct rb_node *node;
	struct ion_client *tmp;
	struct ion_device *dev;

	node = ion_root_client->rb_node;
	dev = container_of(ion_root_client, struct ion_device, clients);

	down_read(&dev->lock);
	while (node) {
		tmp = rb_entry(node, struct ion_client, node);
		if (client < tmp) {
			node = node->rb_left;
		} else if (client > tmp) {
			node = node->rb_right;
		} else {
			up_read(&dev->lock);
			return 1;
		}
	}

	up_read(&dev->lock);
	return 0;
}

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {NULL};
	int i;

	mutex_lock(&debugfs_mutex);
	if (!is_client_alive(client)) {
		seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n",
			   client);
		mutex_unlock(&debugfs_mutex);
		return 0;
	}

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	mutex_unlock(&debugfs_mutex);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int ion_get_client_serial(const struct rb_root *root,
				 const unsigned char *name)
{
	int serial = -1;
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct ion_client *client = rb_entry(node, struct ion_client,
						     node);

		if (strcmp(client->name, name))
			continue;
		serial = max(serial, client->display_serial);
	}
	return serial + 1;
}

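/*
 * Create a client for the given device. The client records the calling
 * process (unless it is a kernel thread), gets a "<name>-<serial>" debugfs
 * entry and is linked into the device's client rb-tree under dev->lock.
 */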
struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		goto err_put_task_struct;

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->task = task;
	client->pid = pid;
	client->name = kstrdup(name, GFP_KERNEL);
	if (!client->name)
		goto err_free_client;

	down_write(&dev->lock);
	client->display_serial = ion_get_client_serial(&dev->clients, name);
	client->display_name = kasprintf(
		GFP_KERNEL, "%s-%d", name, client->display_serial);
	if (!client->display_name) {
		up_write(&dev->lock);
		goto err_free_client_name;
	}
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(client->display_name, 0664,
						 dev->clients_debug_root,
						 client, &debug_client_fops);
	if (!client->debug_root) {
		char buf[256], *path;

		path = dentry_path(dev->clients_debug_root, buf, 256);
		pr_err("Failed to create client debugfs at %s/%s\n",
		       path, client->display_name);
	}

	up_write(&dev->lock);

	return client;

err_free_client_name:
	kfree(client->name);
err_free_client:
	kfree(client);
err_put_task_struct:
	if (task)
		put_task_struct(current->group_leader);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	mutex_lock(&debugfs_mutex);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client->display_name);
	kfree(client->name);
	kfree(client);
	mutex_unlock(&debugfs_mutex);
}
EXPORT_SYMBOL(ion_client_destroy);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

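/*
 * Helper used by the heaps to make a set of pages visible to a device: it
 * wraps the pages in a single-entry scatterlist and calls
 * dma_sync_sg_for_device() on it.
 */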
void ion_pages_sync_for_device(struct device *dev, struct page *page,
			       size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
	 * for the targeted device, but this works on the currently targeted
	 * hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

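/*
 * Sync a buffer that uses fault-in userspace mappings: flush every page that
 * has been dirtied through a fault, then zap all live userspace mappings so
 * later accesses fault again and are tracked.
 */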
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
						  PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start);
	}
	mutex_unlock(&buffer->lock);
}

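/*
 * Fault handler for buffers mapped with fault-in semantics: mark the faulting
 * page dirty and insert its pfn into the user mapping.
 */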
static int ion_vm_fault(struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vmf->vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vmf->vma, vmf->address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

static const struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

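/*
 * mmap of the dma-buf: cached buffers that need fault tracking get the
 * ion_vma_ops fault handler, everything else is mapped up front by the
 * heap's map_user op (write-combined when the buffer is uncached).
 */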
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
							VM_DONTDUMP;
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	return PTR_ERR_OR_ZERO(vaddr);
}

static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);

	return 0;
}

static const struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

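/*
 * Export a handle's buffer as a dma-buf. The dma-buf takes its own reference
 * on the buffer, so the handle can be freed while the fd stays valid.
 */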
struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	mutex_unlock(&client->lock);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);

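/*
 * Import a dma-buf that was exported by ion: reuse an existing handle for
 * the buffer if this client already has one, otherwise create and register a
 * new handle. Buffers exported by other drivers are rejected.
 */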
struct ion_handle *ion_import_dma_buf(struct ion_client *client,
				      struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		mutex_unlock(&client->lock);
		goto end;
	}

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle)) {
		mutex_unlock(&client->lock);
		goto end;
	}

	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	handle = ion_import_dma_buf(client, dmabuf);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf_fd);

int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

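/*
 * ION_IOC_HEAP_QUERY backend: with a NULL user buffer only the number of
 * heaps is reported, otherwise up to query->cnt ion_heap_data records are
 * copied to userspace.
 */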
int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query)
{
	struct ion_device *dev = client->dev;
	struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
	int ret = -EINVAL, cnt = 0, max_cnt;
	struct ion_heap *heap;
	struct ion_heap_data hdata;

	memset(&hdata, 0, sizeof(hdata));

	down_read(&dev->lock);
	if (!buffer) {
		query->cnt = dev->heap_cnt;
		ret = 0;
		goto out;
	}

	if (query->cnt <= 0)
		goto out;

	max_cnt = query->cnt;

	plist_for_each_entry(heap, &dev->heaps, node) {
		strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
		hdata.name[sizeof(hdata.name) - 1] = '\0';
		hdata.type = heap->type;
		hdata.heap_id = heap->id;

		if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
			ret = -EFAULT;
			goto out;
		}

		cnt++;
		if (cnt >= max_cnt)
			break;
	}

	query->cnt = cnt;
	ret = 0;
out:
	up_read(&dev->lock);
	return ret;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];

	pr_debug("%s: %d\n", __func__, __LINE__);
	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = ion_client_create(dev, debug_name);
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl = compat_ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_puts(s, "----------------------------------------------------\n");

	mutex_lock(&debugfs_mutex);
	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	mutex_unlock(&debugfs_mutex);

	seq_puts(s, "----------------------------------------------------\n");
	seq_puts(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   kref_read(&buffer->ref));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_puts(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
			   heap->free_list_size);
	seq_puts(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

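/*
 * debugfs knobs for heap shrinkers: writing N scans N objects (writing 0
 * scans everything currently counted), reading reports the object count.
 */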
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = val;

	if (!val) {
		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
		sc.nr_to_scan = objs;
	}

	heap->shrinker.scan_objects(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");

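/*
 * Register a heap with the device: set up deferred freeing and the shrinker
 * if the heap wants them, add it to the priority-sorted plist (higher id
 * wins) and create its debugfs entries.
 */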
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct dentry *debug_file;

	if (!heap->ops->allocate || !heap->ops->free)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	spin_lock_init(&heap->free_lock);
	heap->free_list_size = 0;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
		ion_heap_init_shrinker(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debug_file = debugfs_create_file(heap->name, 0664,
					 dev->heaps_debug_root, heap,
					 &debug_heap_fops);

	if (!debug_file) {
		char buf[256], *path;

		path = dentry_path(dev->heaps_debug_root, buf, 256);
		pr_err("Failed to create heap debugfs at %s/%s\n",
		       path, heap->name);
	}

	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debug_file = debugfs_create_file(
			debug_name, 0644, dev->heaps_debug_root, heap,
			&debug_shrink_fops);
		if (!debug_file) {
			char buf[256], *path;

			path = dentry_path(dev->heaps_debug_root, buf, 256);
			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
			       path, debug_name);
		}
	}

	dev->heap_cnt++;
	up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(*idev), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root) {
		pr_err("ion: failed to create debugfs root directory.\n");
		goto debugfs_done;
	}
	idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
	if (!idev->heaps_debug_root) {
		pr_err("ion: failed to create debugfs heaps directory.\n");
		goto debugfs_done;
	}
	idev->clients_debug_root = debugfs_create_dir("clients",
						idev->debug_root);
	if (!idev->clients_debug_root)
		pr_err("ion: failed to create debugfs clients directory.\n");

debugfs_done:

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	ion_root_client = &idev->clients;
	mutex_init(&debugfs_mutex);
	return idev;
}
EXPORT_SYMBOL(ion_device_create);

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	debugfs_remove_recursive(dev->debug_root);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}
EXPORT_SYMBOL(ion_device_destroy);