/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

struct page_info {
        struct page *page;
        unsigned long order;
        struct list_head list;
};

static unsigned int orders[] = {8, 4, 0};

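/*
 * Allocate the largest chunk of pages, no bigger than max_order, that still
 * fits in the remaining size.  Returns a page_info describing the allocation,
 * or NULL if no order in orders[] could be satisfied.
 */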
static struct page_info *alloc_largest_available(unsigned long size,
                                                 bool split_pages,
                                                 unsigned int max_order)
{
        struct page *page;
        struct page_info *info;
        int i;

        for (i = 0; i < ARRAY_SIZE(orders); i++) {
                if (size < (1 << orders[i]) * PAGE_SIZE)
                        continue;
                if (max_order < orders[i])
                        continue;
                page = alloc_pages(GFP_HIGHUSER | __GFP_ZERO |
                                   __GFP_NOWARN | __GFP_NORETRY, orders[i]);
                if (!page)
                        continue;
                if (split_pages)
                        split_page(page, orders[i]);
                /* allocate the whole struct, not just a pointer's worth */
                info = kmalloc(sizeof(*info), GFP_KERNEL);
                if (!info) {
                        if (split_pages) {
                                int j;

                                for (j = 0; j < (1 << orders[i]); j++)
                                        __free_page(page + j);
                        } else {
                                __free_pages(page, orders[i]);
                        }
                        return NULL;
                }
                info->page = page;
                info->order = orders[i];
                return info;
        }
        return NULL;
}

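/*
 * Build the buffer from a list of variable-order page allocations, trying the
 * largest order first; max_order only shrinks, so an order that could not be
 * satisfied is never retried.  The chunks are collected into an sg_table
 * stored in buffer->priv_virt (one entry per 4K page when the buffer faults
 * user mappings, one entry per chunk otherwise).
 */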
static int ion_system_heap_allocate(struct ion_heap *heap,
                                    struct ion_buffer *buffer,
                                    unsigned long size, unsigned long align,
                                    unsigned long flags)
{
        struct sg_table *table;
        struct scatterlist *sg;
        int ret;
        struct list_head pages;
        struct page_info *info, *tmp_info;
        int i = 0;
        long size_remaining = PAGE_ALIGN(size);
        bool split_pages = ion_buffer_fault_user_mappings(buffer);
        unsigned int max_order = orders[0];

        INIT_LIST_HEAD(&pages);
        while (size_remaining > 0) {
                info = alloc_largest_available(size_remaining, split_pages,
                                               max_order);
                if (!info)
                        goto err;
                list_add_tail(&info->list, &pages);
                size_remaining -= (1 << info->order) * PAGE_SIZE;
                max_order = info->order;
                i++;
        }

        table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!table)
                goto err;

        if (split_pages)
                ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
                                     GFP_KERNEL);
        else
                ret = sg_alloc_table(table, i, GFP_KERNEL);

        if (ret)
                goto err1;

        sg = table->sgl;
        list_for_each_entry_safe(info, tmp_info, &pages, list) {
                struct page *page = info->page;

                if (split_pages) {
                        for (i = 0; i < (1 << info->order); i++) {
                                sg_set_page(sg, page + i, PAGE_SIZE, 0);
                                sg = sg_next(sg);
                        }
                } else {
                        sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
                                    0);
                        sg = sg_next(sg);
                }
                list_del(&info->list);
                kfree(info);
        }

        dma_sync_sg_for_device(NULL, table->sgl, table->nents,
                               DMA_BIDIRECTIONAL);

        buffer->priv_virt = table;
        return 0;
err1:
        kfree(table);
err:
        /* use the _safe iterator: each entry is freed as the list is walked */
        list_for_each_entry_safe(info, tmp_info, &pages, list) {
                if (split_pages)
                        for (i = 0; i < (1 << info->order); i++)
                                __free_page(info->page + i);
                else
                        __free_pages(info->page, info->order);

                kfree(info);
        }
        return -ENOMEM;
}

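/*
 * Free every chunk referenced by the buffer's sg_table, then release the
 * table itself.
 */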
void ion_system_heap_free(struct ion_buffer *buffer)
{
        int i;
        struct scatterlist *sg;
        struct sg_table *table = buffer->priv_virt;

        for_each_sg(table->sgl, sg, table->nents, i)
                __free_pages(sg_page(sg), get_order(sg_dma_len(sg)));
        if (buffer->sg_table)
                sg_free_table(buffer->sg_table);
        kfree(buffer->sg_table);
}

struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
                                         struct ion_buffer *buffer)
{
        return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
                               struct ion_buffer *buffer)
{
}

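/*
 * Map the buffer's pages into a contiguous kernel virtual range with vmap().
 * The mapping is cached or write-combined depending on ION_FLAG_CACHED.
 */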
void *ion_system_heap_map_kernel(struct ion_heap *heap,
                                 struct ion_buffer *buffer)
{
        struct scatterlist *sg;
        int i, j;
        void *vaddr;
        pgprot_t pgprot;
        struct sg_table *table = buffer->priv_virt;
        int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
        struct page **pages = vmalloc(sizeof(struct page *) * npages);
        struct page **tmp = pages;

        if (!pages)
                return NULL;

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        for_each_sg(table->sgl, sg, table->nents, i) {
                int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
                struct page *page = sg_page(sg);

                BUG_ON(i >= npages);
                for (j = 0; j < npages_this_entry; j++)
                        *(tmp++) = page++;
        }
        vaddr = vmap(pages, npages, VM_MAP, pgprot);
        vfree(pages);

        return vaddr;
}

void ion_system_heap_unmap_kernel(struct ion_heap *heap,
                                  struct ion_buffer *buffer)
{
        vunmap(buffer->vaddr);
}

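/*
 * Map the buffer into a userspace vma chunk by chunk; vm_pgoff is treated as
 * a number of scatterlist entries to skip.
 */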
int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                             struct vm_area_struct *vma)
{
        struct sg_table *table = buffer->priv_virt;
        unsigned long addr = vma->vm_start;
        unsigned long offset = vma->vm_pgoff;
        struct scatterlist *sg;
        int i;

        for_each_sg(table->sgl, sg, table->nents, i) {
                if (offset) {
                        offset--;
                        continue;
                }
                remap_pfn_range(vma, addr, page_to_pfn(sg_page(sg)),
                                sg_dma_len(sg), vma->vm_page_prot);
                addr += sg_dma_len(sg);
                if (addr >= vma->vm_end)
                        return 0;
        }
        return 0;
}

static struct ion_heap_ops vmalloc_ops = {
        .allocate = ion_system_heap_allocate,
        .free = ion_system_heap_free,
        .map_dma = ion_system_heap_map_dma,
        .unmap_dma = ion_system_heap_unmap_dma,
        .map_kernel = ion_system_heap_map_kernel,
        .unmap_kernel = ion_system_heap_unmap_kernel,
        .map_user = ion_system_heap_map_user,
};

struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
        struct ion_heap *heap;

        heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);
        heap->ops = &vmalloc_ops;
        heap->type = ION_HEAP_TYPE_SYSTEM;
        return heap;
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
        kfree(heap);
}

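/*
 * The "contig" variant below backs each buffer with a single physically
 * contiguous kzalloc() allocation.
 */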
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
                                           struct ion_buffer *buffer,
                                           unsigned long len,
                                           unsigned long align,
                                           unsigned long flags)
{
        buffer->priv_virt = kzalloc(len, GFP_KERNEL);
        if (!buffer->priv_virt)
                return -ENOMEM;
        return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
        kfree(buffer->priv_virt);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
                                       struct ion_buffer *buffer,
                                       ion_phys_addr_t *addr, size_t *len)
{
        *addr = virt_to_phys(buffer->priv_virt);
        *len = buffer->size;
        return 0;
}

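/*
 * Wrap the contiguous allocation in a single-entry sg_table for DMA users.
 */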
struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
                                                struct ion_buffer *buffer)
{
        struct sg_table *table;
        int ret;

        table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!table)
                return ERR_PTR(-ENOMEM);
        ret = sg_alloc_table(table, 1, GFP_KERNEL);
        if (ret) {
                kfree(table);
                return ERR_PTR(ret);
        }
        sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
                    0);
        return table;
}

void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
                                      struct ion_buffer *buffer)
{
        sg_free_table(buffer->sg_table);
        kfree(buffer->sg_table);
}

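/*
 * Remap the contiguous allocation directly into the vma by physical pfn.
 */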
int ion_system_contig_heap_map_user(struct ion_heap *heap,
                                    struct ion_buffer *buffer,
                                    struct vm_area_struct *vma)
{
        unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

        return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}

static struct ion_heap_ops kmalloc_ops = {
        .allocate = ion_system_contig_heap_allocate,
        .free = ion_system_contig_heap_free,
        .phys = ion_system_contig_heap_phys,
        .map_dma = ion_system_contig_heap_map_dma,
        .unmap_dma = ion_system_contig_heap_unmap_dma,
        .map_kernel = ion_system_heap_map_kernel,
        .unmap_kernel = ion_system_heap_unmap_kernel,
        .map_user = ion_system_contig_heap_map_user,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
        struct ion_heap *heap;

        heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);
        heap->ops = &kmalloc_ops;
        heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
        return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
        kfree(heap);
}