/*
 * drivers/staging/android/ion/ion_chunk_heap.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
e3c2eb7c RSZ |
16 | #include <linux/dma-mapping.h> |
17 | #include <linux/err.h> | |
18 | #include <linux/genalloc.h> | |
19 | #include <linux/io.h> | |
20 | #include <linux/mm.h> | |
21 | #include <linux/scatterlist.h> | |
22 | #include <linux/slab.h> | |
23 | #include <linux/vmalloc.h> | |
24 | #include "ion.h" | |
25 | #include "ion_priv.h" | |
26 | ||
e3c2eb7c RSZ |
/*
 * A chunk heap carves a fixed physical region into equal-size chunks
 * and hands buffers out as scatterlists of those chunks.
 */
struct ion_chunk_heap {
	struct ion_heap heap;		/* embedded generic ion heap */
	struct gen_pool *pool;		/* allocator managing the region */
	ion_phys_addr_t base;		/* physical base of the region */
	unsigned long chunk_size;	/* allocation granule, in bytes */
	unsigned long size;		/* total region size, in bytes */
	unsigned long allocated;	/* bytes currently handed out */
};
35 | ||
36 | static int ion_chunk_heap_allocate(struct ion_heap *heap, | |
37 | struct ion_buffer *buffer, | |
38 | unsigned long size, unsigned long align, | |
39 | unsigned long flags) | |
40 | { | |
41 | struct ion_chunk_heap *chunk_heap = | |
42 | container_of(heap, struct ion_chunk_heap, heap); | |
43 | struct sg_table *table; | |
44 | struct scatterlist *sg; | |
45 | int ret, i; | |
46 | unsigned long num_chunks; | |
ea89faff | 47 | unsigned long allocated_size; |
e3c2eb7c | 48 | |
dd608dd2 CC |
49 | if (align > chunk_heap->chunk_size) |
50 | return -EINVAL; | |
51 | ||
ea89faff CC |
52 | allocated_size = ALIGN(size, chunk_heap->chunk_size); |
53 | num_chunks = allocated_size / chunk_heap->chunk_size; | |
e3c2eb7c | 54 | |
ea89faff | 55 | if (allocated_size > chunk_heap->size - chunk_heap->allocated) |
e3c2eb7c RSZ |
56 | return -ENOMEM; |
57 | ||
b6152016 | 58 | table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); |
e3c2eb7c RSZ |
59 | if (!table) |
60 | return -ENOMEM; | |
61 | ret = sg_alloc_table(table, num_chunks, GFP_KERNEL); | |
62 | if (ret) { | |
63 | kfree(table); | |
64 | return ret; | |
65 | } | |
66 | ||
67 | sg = table->sgl; | |
68 | for (i = 0; i < num_chunks; i++) { | |
69 | unsigned long paddr = gen_pool_alloc(chunk_heap->pool, | |
70 | chunk_heap->chunk_size); | |
71 | if (!paddr) | |
72 | goto err; | |
1d804535 CC |
73 | sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)), |
74 | chunk_heap->chunk_size, 0); | |
e3c2eb7c RSZ |
75 | sg = sg_next(sg); |
76 | } | |
77 | ||
78 | buffer->priv_virt = table; | |
ea89faff | 79 | chunk_heap->allocated += allocated_size; |
e3c2eb7c RSZ |
80 | return 0; |
81 | err: | |
82 | sg = table->sgl; | |
83 | for (i -= 1; i >= 0; i--) { | |
3e6110fd | 84 | gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)), |
06e0dcae | 85 | sg->length); |
e3c2eb7c RSZ |
86 | sg = sg_next(sg); |
87 | } | |
88 | sg_free_table(table); | |
89 | kfree(table); | |
90 | return -ENOMEM; | |
91 | } | |
92 | ||
93 | static void ion_chunk_heap_free(struct ion_buffer *buffer) | |
94 | { | |
95 | struct ion_heap *heap = buffer->heap; | |
96 | struct ion_chunk_heap *chunk_heap = | |
97 | container_of(heap, struct ion_chunk_heap, heap); | |
98 | struct sg_table *table = buffer->priv_virt; | |
99 | struct scatterlist *sg; | |
100 | int i; | |
ea89faff CC |
101 | unsigned long allocated_size; |
102 | ||
103 | allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size); | |
e3c2eb7c | 104 | |
0b6b2cde RSZ |
105 | ion_heap_buffer_zero(buffer); |
106 | ||
e946b209 CC |
107 | if (ion_buffer_cached(buffer)) |
108 | dma_sync_sg_for_device(NULL, table->sgl, table->nents, | |
69b2b20e | 109 | DMA_BIDIRECTIONAL); |
e946b209 | 110 | |
e3c2eb7c | 111 | for_each_sg(table->sgl, sg, table->nents, i) { |
3e6110fd | 112 | gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)), |
06e0dcae | 113 | sg->length); |
e3c2eb7c | 114 | } |
ea89faff | 115 | chunk_heap->allocated -= allocated_size; |
e3c2eb7c RSZ |
116 | sg_free_table(table); |
117 | kfree(table); | |
118 | } | |
119 | ||
f63958d8 CC |
120 | static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap, |
121 | struct ion_buffer *buffer) | |
e3c2eb7c RSZ |
122 | { |
123 | return buffer->priv_virt; | |
124 | } | |
125 | ||
/*
 * Intentionally empty: the sg_table lives for the buffer's whole
 * lifetime and is torn down in ion_chunk_heap_free(), so there is
 * nothing to undo per mapping.
 */
static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
				     struct ion_buffer *buffer)
{
}
130 | ||
/*
 * Heap callbacks.  map_user/map_kernel/unmap_kernel reuse the generic
 * ion helpers; only allocation and DMA mapping are heap-specific.
 */
static struct ion_heap_ops chunk_heap_ops = {
	.allocate = ion_chunk_heap_allocate,
	.free = ion_chunk_heap_free,
	.map_dma = ion_chunk_heap_map_dma,
	.unmap_dma = ion_chunk_heap_unmap_dma,
	.map_user = ion_heap_map_user,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
};
140 | ||
141 | struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data) | |
142 | { | |
143 | struct ion_chunk_heap *chunk_heap; | |
df6cf5c8 CC |
144 | int ret; |
145 | struct page *page; | |
146 | size_t size; | |
147 | ||
148 | page = pfn_to_page(PFN_DOWN(heap_data->base)); | |
149 | size = heap_data->size; | |
150 | ||
151 | ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL); | |
152 | ||
153 | ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL)); | |
154 | if (ret) | |
155 | return ERR_PTR(ret); | |
57b5cd06 | 156 | |
e3c2eb7c RSZ |
157 | chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL); |
158 | if (!chunk_heap) | |
159 | return ERR_PTR(-ENOMEM); | |
160 | ||
161 | chunk_heap->chunk_size = (unsigned long)heap_data->priv; | |
162 | chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) + | |
163 | PAGE_SHIFT, -1); | |
164 | if (!chunk_heap->pool) { | |
57b5cd06 RSZ |
165 | ret = -ENOMEM; |
166 | goto error_gen_pool_create; | |
e3c2eb7c RSZ |
167 | } |
168 | chunk_heap->base = heap_data->base; | |
169 | chunk_heap->size = heap_data->size; | |
170 | chunk_heap->allocated = 0; | |
57b5cd06 | 171 | |
e3c2eb7c RSZ |
172 | gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1); |
173 | chunk_heap->heap.ops = &chunk_heap_ops; | |
174 | chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK; | |
fe2faea7 | 175 | chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE; |
cef85381 GF |
176 | pr_debug("%s: base %lu size %zu align %ld\n", __func__, |
177 | chunk_heap->base, heap_data->size, heap_data->align); | |
e3c2eb7c RSZ |
178 | |
179 | return &chunk_heap->heap; | |
57b5cd06 | 180 | |
57b5cd06 RSZ |
181 | error_gen_pool_create: |
182 | kfree(chunk_heap); | |
183 | return ERR_PTR(ret); | |
e3c2eb7c RSZ |
184 | } |
185 | ||
186 | void ion_chunk_heap_destroy(struct ion_heap *heap) | |
187 | { | |
188 | struct ion_chunk_heap *chunk_heap = | |
189 | container_of(heap, struct ion_chunk_heap, heap); | |
190 | ||
191 | gen_pool_destroy(chunk_heap->pool); | |
192 | kfree(chunk_heap); | |
193 | chunk_heap = NULL; | |
194 | } |