/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

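/*
 * Allocation flags for the two classes of chunk below.  High-order
 * allocations are opportunistic: __GFP_NORETRY and clearing __GFP_WAIT
 * make them fail quickly instead of entering reclaim, and
 * __GFP_NO_KSWAPD avoids waking kswapd for them.  Order-0 allocations
 * keep the default behaviour so they can block until memory is found.
 */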
static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
                                            __GFP_NOWARN | __GFP_NORETRY |
                                            __GFP_NO_KSWAPD) & ~__GFP_WAIT;
static unsigned int low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
                                           __GFP_NOWARN);
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);
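
/*
 * Map an allocation order to its index in orders[]/pools[].  Callers
 * only pass orders taken from orders[], so a miss indicates a bug.
 */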
static int order_to_index(unsigned int order)
{
        int i;

        for (i = 0; i < num_orders; i++)
                if (order == orders[i])
                        return i;
        BUG();
        return -1;
}

static unsigned int order_to_size(int order)
{
        return PAGE_SIZE << order;
}

struct ion_system_heap {
        struct ion_heap heap;
        struct ion_page_pool **pools;
};

struct page_info {
        struct page *page;
        unsigned int order;
        struct list_head list;
};

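/*
 * Allocate one physically contiguous chunk of 2^order pages.  Uncached
 * buffers come from the per-order page pool; cached buffers are taken
 * straight from the page allocator and pushed out of the CPU cache
 * with __dma_page_cpu_to_dev() before use.
 */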
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
                                      struct ion_buffer *buffer,
                                      unsigned long order)
{
        bool cached = ion_buffer_cached(buffer);
        bool split_pages = ion_buffer_fault_user_mappings(buffer);
        struct ion_page_pool *pool = heap->pools[order_to_index(order)];
        struct page *page;

        if (!cached) {
                page = ion_page_pool_alloc(pool);
        } else {
                gfp_t gfp_flags = low_order_gfp_flags;

                if (order > 4)
                        gfp_flags = high_order_gfp_flags;
                page = alloc_pages(gfp_flags, order);
                if (!page)
                        return NULL;
                __dma_page_cpu_to_dev(page, 0, PAGE_SIZE << order,
                                      DMA_BIDIRECTIONAL);
        }
        if (!page)
                return NULL;

        if (split_pages)
                split_page(page, order);
        return page;
}

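/*
 * Return one chunk to the system.  Uncached chunks are zeroed page by
 * page through the caller-supplied vm_struct mapping and then handed
 * back to the pool; cached chunks go straight back to the page
 * allocator.
 */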
static void free_buffer_page(struct ion_system_heap *heap,
                             struct ion_buffer *buffer, struct page *page,
                             unsigned int order, struct vm_struct *vm_struct)
{
        bool cached = ion_buffer_cached(buffer);
        bool split_pages = ion_buffer_fault_user_mappings(buffer);
        int i;

        if (!cached) {
                struct ion_page_pool *pool = heap->pools[order_to_index(order)];
                /*
                 * Zero the pages before returning them to the pool for
                 * security.  This uses vmap as we want to set the pgprot so
                 * the writes occur to noncached mappings, as the pool's
                 * purpose is to keep the pages out of the cache.
                 */
                for (i = 0; i < (1 << order); i++) {
                        struct page *sub_page = page + i;
                        struct page **pages = &sub_page;

                        map_vm_area(vm_struct,
                                    pgprot_writecombine(PAGE_KERNEL),
                                    &pages);
                        memset(vm_struct->addr, 0, PAGE_SIZE);
                        unmap_kernel_range((unsigned long)vm_struct->addr,
                                           PAGE_SIZE);
                }
                ion_page_pool_free(pool, page);
        } else if (split_pages) {
                for (i = 0; i < (1 << order); i++)
                        __free_page(page + i);
        } else {
                __free_pages(page, order);
        }
}

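/*
 * Pick the largest supported order that still fits in the remaining
 * size and does not exceed max_order, and fall back to each smaller
 * order in turn if the allocation fails.
 */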
static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
                                                 struct ion_buffer *buffer,
                                                 unsigned long size,
                                                 unsigned int max_order)
{
        struct page *page;
        struct page_info *info;
        int i;

        for (i = 0; i < num_orders; i++) {
                if (size < order_to_size(orders[i]))
                        continue;
                if (max_order < orders[i])
                        continue;

                page = alloc_buffer_page(heap, buffer, orders[i]);
                if (!page)
                        continue;

                info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
                info->page = page;
                info->order = orders[i];
                return info;
        }
        return NULL;
}

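/*
 * Build the buffer from a list of variable-order chunks, largest
 * first, then describe the result in an sg_table.  max_order is
 * lowered to the order of each successful chunk so the chunk orders
 * only ever decrease across the buffer.
 */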
static int ion_system_heap_allocate(struct ion_heap *heap,
                                    struct ion_buffer *buffer,
                                    unsigned long size, unsigned long align,
                                    unsigned long flags)
{
        struct ion_system_heap *sys_heap = container_of(heap,
                                                        struct ion_system_heap,
                                                        heap);
        struct sg_table *table;
        struct scatterlist *sg;
        int ret;
        struct list_head pages;
        struct page_info *info, *tmp_info;
        int i = 0;
        long size_remaining = PAGE_ALIGN(size);
        unsigned int max_order = orders[0];
        bool split_pages = ion_buffer_fault_user_mappings(buffer);
        struct vm_struct *vm_struct;
        pte_t *ptes;

        INIT_LIST_HEAD(&pages);
        while (size_remaining > 0) {
                info = alloc_largest_available(sys_heap, buffer,
                                               size_remaining, max_order);
                if (!info)
                        goto err;
                list_add_tail(&info->list, &pages);
                size_remaining -= (1 << info->order) * PAGE_SIZE;
                max_order = info->order;
                i++;
        }

        table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!table)
                goto err;

        if (split_pages)
                ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
                                     GFP_KERNEL);
        else
                ret = sg_alloc_table(table, i, GFP_KERNEL);

        if (ret)
                goto err1;

        sg = table->sgl;
        list_for_each_entry_safe(info, tmp_info, &pages, list) {
                struct page *page = info->page;

                if (split_pages) {
                        for (i = 0; i < (1 << info->order); i++) {
                                sg_set_page(sg, page + i, PAGE_SIZE, 0);
                                sg = sg_next(sg);
                        }
                } else {
                        sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
                                    0);
                        sg = sg_next(sg);
                }
                list_del(&info->list);
                kfree(info);
        }

        buffer->priv_virt = table;
        return 0;
err1:
        kfree(table);
err:
        vm_struct = get_vm_area(PAGE_SIZE, &ptes);
        list_for_each_entry(info, &pages, list) {
                free_buffer_page(sys_heap, buffer, info->page, info->order,
                                 vm_struct);
                kfree(info);
        }
        free_vm_area(vm_struct);
        return -ENOMEM;
}

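/*
 * A single page-sized vm area is reserved up front and reused for
 * zeroing every page of every chunk, so kernel address space is only
 * allocated once per free rather than once per page.
 */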
void ion_system_heap_free(struct ion_buffer *buffer)
{
        struct ion_heap *heap = buffer->heap;
        struct ion_system_heap *sys_heap = container_of(heap,
                                                        struct ion_system_heap,
                                                        heap);
        struct sg_table *table = buffer->sg_table;
        struct scatterlist *sg;
        LIST_HEAD(pages);
        struct vm_struct *vm_struct;
        pte_t *ptes;
        int i;

        vm_struct = get_vm_area(PAGE_SIZE, &ptes);

        for_each_sg(table->sgl, sg, table->nents, i)
                free_buffer_page(sys_heap, buffer, sg_page(sg),
                                 get_order(sg_dma_len(sg)), vm_struct);
        free_vm_area(vm_struct);
        sg_free_table(table);
        kfree(table);
}

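/*
 * The sg_table describing the buffer was built at allocation time and
 * stashed in priv_virt, so mapping for DMA just hands it back.
 */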
struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
                                         struct ion_buffer *buffer)
{
        return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
                               struct ion_buffer *buffer)
{
}

static struct ion_heap_ops system_heap_ops = {
        .allocate = ion_system_heap_allocate,
        .free = ion_system_heap_free,
        .map_dma = ion_system_heap_map_dma,
        .unmap_dma = ion_system_heap_unmap_dma,
        .map_kernel = ion_heap_map_kernel,
        .unmap_kernel = ion_heap_unmap_kernel,
        .map_user = ion_heap_map_user,
};

static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
                                      void *unused)
{
        struct ion_system_heap *sys_heap = container_of(heap,
                                                        struct ion_system_heap,
                                                        heap);
        int i;

        for (i = 0; i < num_orders; i++) {
                struct ion_page_pool *pool = sys_heap->pools[i];

                seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
                           pool->high_count, pool->order,
                           (1 << pool->order) * PAGE_SIZE * pool->high_count);
                seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
                           pool->low_count, pool->order,
                           (1 << pool->order) * PAGE_SIZE * pool->low_count);
        }
        return 0;
}

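/*
 * Create the system heap and one page pool per supported order.  Pools
 * for orders above 4 use the opportunistic high-order GFP flags.
 */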
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
        struct ion_system_heap *heap;
        int i;

        heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);
        heap->heap.ops = &system_heap_ops;
        heap->heap.type = ION_HEAP_TYPE_SYSTEM;
        heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
                              GFP_KERNEL);
        if (!heap->pools)
                goto err_alloc_pools;
        for (i = 0; i < num_orders; i++) {
                struct ion_page_pool *pool;
                gfp_t gfp_flags = low_order_gfp_flags;

                if (orders[i] > 4)
                        gfp_flags = high_order_gfp_flags;
                pool = ion_page_pool_create(gfp_flags, orders[i]);
                if (!pool)
                        goto err_create_pool;
                heap->pools[i] = pool;
        }
        heap->heap.debug_show = ion_system_heap_debug_show;
        return &heap->heap;
err_create_pool:
        for (i = 0; i < num_orders; i++)
                if (heap->pools[i])
                        ion_page_pool_destroy(heap->pools[i]);
        kfree(heap->pools);
err_alloc_pools:
        kfree(heap);
        return ERR_PTR(-ENOMEM);
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
        struct ion_system_heap *sys_heap = container_of(heap,
                                                        struct ion_system_heap,
                                                        heap);
        int i;

        for (i = 0; i < num_orders; i++)
                ion_page_pool_destroy(sys_heap->pools[i]);
        kfree(sys_heap->pools);
        kfree(sys_heap);
}

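/*
 * The "system contig" heap below backs each buffer with a single
 * kzalloc() allocation, so buffers are physically contiguous but
 * limited to sizes kmalloc can satisfy.
 */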
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
                                           struct ion_buffer *buffer,
                                           unsigned long len,
                                           unsigned long align,
                                           unsigned long flags)
{
        buffer->priv_virt = kzalloc(len, GFP_KERNEL);
        if (!buffer->priv_virt)
                return -ENOMEM;
        return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
        kfree(buffer->priv_virt);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
                                       struct ion_buffer *buffer,
                                       ion_phys_addr_t *addr, size_t *len)
{
        *addr = virt_to_phys(buffer->priv_virt);
        *len = buffer->size;
        return 0;
}

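/*
 * A contiguous buffer is described by a single-entry scatterlist
 * covering the whole kzalloc'd region.
 */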
struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
                                                struct ion_buffer *buffer)
{
        struct sg_table *table;
        int ret;

        table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!table)
                return ERR_PTR(-ENOMEM);
        ret = sg_alloc_table(table, 1, GFP_KERNEL);
        if (ret) {
                kfree(table);
                return ERR_PTR(ret);
        }
        sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
                    0);
        return table;
}

void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
                                      struct ion_buffer *buffer)
{
        sg_free_table(buffer->sg_table);
        kfree(buffer->sg_table);
}

int ion_system_contig_heap_map_user(struct ion_heap *heap,
                                    struct ion_buffer *buffer,
                                    struct vm_area_struct *vma)
{
        unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

        return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}

static struct ion_heap_ops kmalloc_ops = {
        .allocate = ion_system_contig_heap_allocate,
        .free = ion_system_contig_heap_free,
        .phys = ion_system_contig_heap_phys,
        .map_dma = ion_system_contig_heap_map_dma,
        .unmap_dma = ion_system_contig_heap_unmap_dma,
        .map_kernel = ion_heap_map_kernel,
        .unmap_kernel = ion_heap_unmap_kernel,
        .map_user = ion_system_contig_heap_map_user,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
        struct ion_heap *heap;

        heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);
        heap->ops = &kmalloc_ops;
        heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
        return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
        kfree(heap);
}