/*
 * drivers/staging/android/ion/ion_cma_heap.c
 *
 * Copyright (C) Linaro 2012
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/cma.h>
#include <linux/scatterlist.h>

#include "ion.h"

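/* per-CMA-area heap: the generic ion_heap plus the CMA area that backs it */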
struct ion_cma_heap {
	struct ion_heap heap;
	struct cma *cma;
};

#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)

/* ION CMA heap operations */
static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
			    unsigned long len,
			    unsigned long flags)
{
	struct ion_cma_heap *cma_heap = to_cma_heap(heap);
	struct sg_table *table;
	struct page *pages;
	unsigned long size = PAGE_ALIGN(len);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	unsigned long align = get_order(size);
	int ret;

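	/*
	 * cma_alloc() takes its alignment as a page order; cap it at
	 * CONFIG_CMA_ALIGNMENT, the largest order CMA guarantees.
	 */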
	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

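	/* carve nr_pages physically contiguous pages out of the CMA area */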
	pages = cma_alloc(cma_heap->cma, nr_pages, align, GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	table = kmalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		goto err;

	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret)
		goto free_mem;

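	/* one contiguous chunk, so a single scatterlist entry describes it */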
	sg_set_page(table->sgl, pages, size, 0);

	buffer->priv_virt = pages;
	buffer->sg_table = table;
	return 0;

free_mem:
	kfree(table);
err:
	cma_release(cma_heap->cma, pages, nr_pages);
	return -ENOMEM;
}

static void ion_cma_free(struct ion_buffer *buffer)
{
	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
	struct page *pages = buffer->priv_virt;
	unsigned long nr_pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;

	/* release memory */
	cma_release(cma_heap->cma, pages, nr_pages);
	/* release sg table */
	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
}

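/*
 * mmap and kernel mapping can use the generic sg_table-based helpers,
 * since the buffer is fully described by the sg_table set up above.
 */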
static struct ion_heap_ops ion_cma_ops = {
	.allocate = ion_cma_allocate,
	.free = ion_cma_free,
	.map_user = ion_heap_map_user,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
};

static struct ion_heap *__ion_cma_heap_create(struct cma *cma)
{
	struct ion_cma_heap *cma_heap;

	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);

	if (!cma_heap)
		return ERR_PTR(-ENOMEM);

	cma_heap->heap.ops = &ion_cma_ops;
	/* remember which reserved CMA area backs this heap */
	cma_heap->cma = cma;
	cma_heap->heap.type = ION_HEAP_TYPE_DMA;
	return &cma_heap->heap;
}

static int __ion_add_cma_heaps(struct cma *cma, void *data)
{
	struct ion_heap *heap;

	heap = __ion_cma_heap_create(cma);
	if (IS_ERR(heap))
		return PTR_ERR(heap);

	heap->name = cma_get_name(cma);

	ion_device_add_heap(heap);
	return 0;
}

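/* walk every CMA area declared for this system and register a heap for it */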
static int ion_add_cma_heaps(void)
{
	cma_for_each_area(__ion_add_cma_heaps, NULL);
	return 0;
}
device_initcall(ion_add_cma_heaps);
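/*
 * Illustrative usage sketch, not part of this file: with the post-4.12
 * ION UAPI (drivers/staging/android/uapi/ion.h), userspace reaches these
 * heaps through /dev/ion, discovers the id of a heap whose type is
 * ION_HEAP_TYPE_DMA via ION_IOC_HEAP_QUERY, and allocates a dma-buf from
 * it. "cma_heap_id" below is a hypothetical value obtained that way.
 *
 *	int ion_fd = open("/dev/ion", O_RDWR);
 *	struct ion_allocation_data alloc = {
 *		.len = 4096,
 *		.heap_id_mask = 1u << cma_heap_id,
 *		.flags = 0,
 *	};
 *
 *	if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc) == 0) {
 *		// alloc.fd now holds a dma-buf fd backed by contiguous
 *		// CMA pages; it can be mmap()ed or passed to a driver.
 *		void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED, alloc.fd, 0);
 *	}
 */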