/*
 * drivers/staging/android/ion/ion_chunk_heap.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

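/*
 * A chunk heap hands out buffers carved from a reserved, physically
 * contiguous region. The region is split into fixed-size chunks
 * tracked by a genalloc pool; each buffer is a list of such chunks,
 * so it is physically contiguous at chunk granularity only. That
 * bounds fragmentation while still giving hardware that can handle
 * chunk-sized segments units larger than a single page.
 */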
struct ion_chunk_heap {
	struct ion_heap heap;		/* embedded generic ion heap */
	struct gen_pool *pool;		/* allocator handing out fixed-size chunks */
	ion_phys_addr_t base;		/* physical base of the carveout */
	unsigned long chunk_size;	/* chunk granularity in bytes */
	unsigned long size;		/* total carveout size in bytes */
	unsigned long allocated;	/* bytes currently handed out */
};

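/*
 * Round the request up to whole chunks, pull that many chunks from the
 * pool, and describe them with an sg_table carrying one entry per
 * chunk. The table is stashed in buffer->priv_virt for map_dma and
 * free to pick up later.
 */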
static int ion_chunk_heap_allocate(struct ion_heap *heap,
				   struct ion_buffer *buffer,
				   unsigned long size, unsigned long align,
				   unsigned long flags)
{
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret, i;
	unsigned long num_chunks;
	unsigned long allocated_size;

	/* Chunks are naturally chunk_size aligned; stricter is impossible. */
	if (align > chunk_heap->chunk_size)
		return -EINVAL;

	allocated_size = ALIGN(size, chunk_heap->chunk_size);
	num_chunks = allocated_size / chunk_heap->chunk_size;

	if (allocated_size > chunk_heap->size - chunk_heap->allocated)
		return -ENOMEM;

	table = kmalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ret;
	}

	/* Fill one scatterlist entry per chunk pulled from the pool. */
	sg = table->sgl;
	for (i = 0; i < num_chunks; i++) {
		unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
						     chunk_heap->chunk_size);
		if (!paddr)
			goto err;
		sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)),
			    chunk_heap->chunk_size, 0);
		sg = sg_next(sg);
	}

	buffer->priv_virt = table;
	chunk_heap->allocated += allocated_size;
	return 0;
err:
	/* Unwind: give the i chunks we did get back to the pool. */
	sg = table->sgl;
	for (i -= 1; i >= 0; i--) {
		gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
			      sg->length);
		sg = sg_next(sg);
	}
	sg_free_table(table);
	kfree(table);
	return -ENOMEM;
}

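/*
 * Give a buffer's chunks back to the pool. The buffer is zeroed first
 * so stale contents cannot leak to the next client of the same chunks.
 */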
static void ion_chunk_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table = buffer->priv_virt;
	struct scatterlist *sg;
	int i;
	unsigned long allocated_size;

	allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size);

	ion_heap_buffer_zero(buffer);

	/* Make sure the zeroes reach memory before the chunks are reused. */
	if (ion_buffer_cached(buffer))
		dma_sync_sg_for_device(NULL, table->sgl, table->nents,
				       DMA_BIDIRECTIONAL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
			      sg->length);
	}
	chunk_heap->allocated -= allocated_size;
	sg_free_table(table);
	kfree(table);
}

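/*
 * The sg_table was built at allocation time, so mapping for DMA is
 * simply handing it back, and unmapping has nothing to undo.
 */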
static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
					       struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
				     struct ion_buffer *buffer)
{
}

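/*
 * Kernel and userspace mappings come from the generic ion_heap
 * helpers, which work here because every chunk is ordinary
 * struct-page-backed memory.
 */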
static struct ion_heap_ops chunk_heap_ops = {
	.allocate = ion_chunk_heap_allocate,
	.free = ion_chunk_heap_free,
	.map_dma = ion_chunk_heap_map_dma,
	.unmap_dma = ion_chunk_heap_unmap_dma,
	.map_user = ion_heap_map_user,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
};

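/*
 * Build a chunk heap over the carveout described by @heap_data: base
 * and size give the physical region, and the chunk size arrives
 * through the opaque priv pointer. Because the pool's minimum
 * allocation order is derived from the chunk size, the chunk size
 * should be a power-of-two multiple of PAGE_SIZE.
 *
 * A minimal sketch of what platform code might pass in; the id, name,
 * base, size, and chunk size below are made-up example values, not
 * taken from any real board file:
 *
 *	static struct ion_platform_heap example_chunk_heap = {
 *		.id   = 1,
 *		.type = ION_HEAP_TYPE_CHUNK,
 *		.name = "chunk",
 *		.base = 0x90000000,
 *		.size = SZ_16M,
 *		.priv = (void *)SZ_64K,
 *	};
 */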
struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_chunk_heap *chunk_heap;
	int ret;
	struct page *page;
	size_t size;

	page = pfn_to_page(PFN_DOWN(heap_data->base));
	size = heap_data->size;

	ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);

	ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
	if (ret)
		return ERR_PTR(ret);

	chunk_heap = kzalloc(sizeof(*chunk_heap), GFP_KERNEL);
	if (!chunk_heap)
		return ERR_PTR(-ENOMEM);

	/* The chunk size rides in through the opaque priv field. */
	chunk_heap->chunk_size = (unsigned long)heap_data->priv;
	chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
					   PAGE_SHIFT, -1);
	if (!chunk_heap->pool) {
		ret = -ENOMEM;
		goto error_gen_pool_create;
	}
	chunk_heap->base = heap_data->base;
	chunk_heap->size = heap_data->size;
	chunk_heap->allocated = 0;

	/* gen_pool_add() allocates internal tracking and so can fail. */
	ret = gen_pool_add(chunk_heap->pool, chunk_heap->base,
			   heap_data->size, -1);
	if (ret) {
		gen_pool_destroy(chunk_heap->pool);
		goto error_gen_pool_create;
	}
	chunk_heap->heap.ops = &chunk_heap_ops;
	chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
	chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
	pr_debug("%s: base %lu size %zu align %lu\n", __func__,
		 chunk_heap->base, heap_data->size, heap_data->align);

	return &chunk_heap->heap;

error_gen_pool_create:
	kfree(chunk_heap);
	return ERR_PTR(ret);
}

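/*
 * Counterpart to ion_chunk_heap_create(). All buffers must already
 * have been freed, so that the pool is empty when it is destroyed.
 */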
void ion_chunk_heap_destroy(struct ion_heap *heap)
{
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);

	gen_pool_destroy(chunk_heap->pool);
	kfree(chunk_heap);
}