/*
 * drivers/staging/android/ion/ion_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

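/*
 * Map a buffer into the kernel virtual address space: collect its pages
 * from the scatterlist and map them contiguously with vmap().  The mapping
 * is write-combined unless the buffer was allocated cached.
 */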
void *ion_heap_map_kernel(struct ion_heap *heap,
                          struct ion_buffer *buffer)
{
        struct scatterlist *sg;
        int i, j;
        void *vaddr;
        pgprot_t pgprot;
        struct sg_table *table = buffer->sg_table;
        int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
        struct page **pages = vmalloc(sizeof(struct page *) * npages);
        struct page **tmp = pages;

        if (!pages)
                return NULL;

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        for_each_sg(table->sgl, sg, table->nents, i) {
                int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
                struct page *page = sg_page(sg);

                BUG_ON(i >= npages);
                for (j = 0; j < npages_this_entry; j++)
                        *(tmp++) = page++;
        }
        vaddr = vmap(pages, npages, VM_MAP, pgprot);
        vfree(pages);

        if (!vaddr)
                return ERR_PTR(-ENOMEM);

        return vaddr;
}

void ion_heap_unmap_kernel(struct ion_heap *heap,
                           struct ion_buffer *buffer)
{
        vunmap(buffer->vaddr);
}

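/*
 * Map a buffer into a userspace VMA: walk the scatterlist and remap each
 * physically contiguous run with remap_pfn_range(), skipping the part of
 * the buffer before the offset given by vma->vm_pgoff.
 */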
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                      struct vm_area_struct *vma)
{
        struct sg_table *table = buffer->sg_table;
        unsigned long addr = vma->vm_start;
        unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
        struct scatterlist *sg;
        int i;
        int ret;

        for_each_sg(table->sgl, sg, table->nents, i) {
                struct page *page = sg_page(sg);
                unsigned long remainder = vma->vm_end - addr;
                unsigned long len = sg->length;

                if (offset >= sg->length) {
                        offset -= sg->length;
                        continue;
                } else if (offset) {
                        page += offset / PAGE_SIZE;
                        len = sg->length - offset;
                        offset = 0;
                }
                len = min(len, remainder);
                ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
                                vma->vm_page_prot);
                if (ret)
                        return ret;
                addr += len;
                if (addr >= vma->vm_end)
                        return 0;
        }
        return 0;
}

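/* Temporarily map a batch of pages with vm_map_ram() and zero them. */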
static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
        void *addr = vm_map_ram(pages, num, -1, pgprot);

        if (!addr)
                return -ENOMEM;
        memset(addr, 0, PAGE_SIZE * num);
        vm_unmap_ram(addr, num);

        return 0;
}

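/*
 * Zero every page referenced by a scatterlist, mapping and clearing the
 * pages in batches of up to 32 so the transient mappings stay small.
 */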
static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
                                pgprot_t pgprot)
{
        int p = 0;
        int ret = 0;
        struct sg_page_iter piter;
        struct page *pages[32];

        for_each_sg_page(sgl, &piter, nents, 0) {
                pages[p++] = sg_page_iter_page(&piter);
                if (p == ARRAY_SIZE(pages)) {
                        ret = ion_heap_clear_pages(pages, p, pgprot);
                        if (ret)
                                return ret;
                        p = 0;
                }
        }
        if (p)
                ret = ion_heap_clear_pages(pages, p, pgprot);

        return ret;
}

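/*
 * Zero all pages backing a buffer, using a cached or write-combined
 * mapping to match the buffer's allocation flags.
 */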
int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
        struct sg_table *table = buffer->sg_table;
        pgprot_t pgprot;

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
}

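/* Zero a physically contiguous allocation via a one-entry scatterlist. */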
int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
{
        struct scatterlist sg;

        sg_init_table(&sg, 1);
        sg_set_page(&sg, page, size, 0);
        return ion_heap_sglist_zero(&sg, 1, pgprot);
}

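/* Queue a buffer on the heap's free list and wake the deferred-free thread. */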
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
        spin_lock(&heap->free_lock);
        list_add(&buffer->list, &heap->free_list);
        heap->free_list_size += buffer->size;
        spin_unlock(&heap->free_lock);
        wake_up(&heap->waitqueue);
}

size_t ion_heap_freelist_size(struct ion_heap *heap)
{
        size_t size;

        spin_lock(&heap->free_lock);
        size = heap->free_list_size;
        spin_unlock(&heap->free_lock);

        return size;
}

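/*
 * Drain up to @size bytes from the heap's free list, destroying each
 * buffer outside the lock.  When @skip_pools is true the buffers are
 * marked so their pages bypass any page pool and are freed outright.
 * A @size of zero drains the entire list.
 */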
static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
                                       bool skip_pools)
{
        struct ion_buffer *buffer;
        size_t total_drained = 0;

        if (ion_heap_freelist_size(heap) == 0)
                return 0;

        spin_lock(&heap->free_lock);
        if (size == 0)
                size = heap->free_list_size;

        while (!list_empty(&heap->free_list)) {
                if (total_drained >= size)
                        break;
                buffer = list_first_entry(&heap->free_list, struct ion_buffer,
                                list);
                list_del(&buffer->list);
                heap->free_list_size -= buffer->size;
                if (skip_pools)
                        buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
                total_drained += buffer->size;
                spin_unlock(&heap->free_lock);
                ion_buffer_destroy(buffer);
                spin_lock(&heap->free_lock);
        }
        spin_unlock(&heap->free_lock);

        return total_drained;
}

size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
        return _ion_heap_freelist_drain(heap, size, false);
}

size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size)
{
        return _ion_heap_freelist_drain(heap, size, true);
}

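/*
 * Body of the per-heap deferred-free kthread: sleep until the free list
 * is non-empty, then pop and destroy buffers one at a time.
 */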
static int ion_heap_deferred_free(void *data)
{
        struct ion_heap *heap = data;

        while (true) {
                struct ion_buffer *buffer;

                wait_event_freezable(heap->waitqueue,
                                ion_heap_freelist_size(heap) > 0);

                spin_lock(&heap->free_lock);
                if (list_empty(&heap->free_list)) {
                        spin_unlock(&heap->free_lock);
                        continue;
                }
                buffer = list_first_entry(&heap->free_list, struct ion_buffer,
                                list);
                list_del(&buffer->list);
                heap->free_list_size -= buffer->size;
                spin_unlock(&heap->free_lock);
                ion_buffer_destroy(buffer);
        }

        return 0;
}

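/*
 * Set up deferred freeing for a heap: initialise the free list and
 * waitqueue, start the free thread, and drop it to SCHED_IDLE so it only
 * runs at the lowest scheduling priority.
 */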
int ion_heap_init_deferred_free(struct ion_heap *heap)
{
        struct sched_param param = { .sched_priority = 0 };

        INIT_LIST_HEAD(&heap->free_list);
        init_waitqueue_head(&heap->waitqueue);
        heap->task = kthread_run(ion_heap_deferred_free, heap,
                        "%s", heap->name);
        if (IS_ERR(heap->task)) {
                pr_err("%s: creating thread for deferred free failed\n",
                        __func__);
                return PTR_ERR_OR_ZERO(heap->task);
        }
        sched_setscheduler(heap->task, SCHED_IDLE, &param);
        return 0;
}

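/*
 * Shrinker count callback: report how many pages could be reclaimed from
 * the deferred free list plus whatever the heap's own shrink op reports.
 */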
static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
                                           struct shrink_control *sc)
{
        struct ion_heap *heap = container_of(shrinker, struct ion_heap,
                        shrinker);
        int total = 0;

        total = ion_heap_freelist_size(heap) / PAGE_SIZE;
        if (heap->ops->shrink)
                total += heap->ops->shrink(heap, sc->gfp_mask, 0);
        return total;
}

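/*
 * Shrinker scan callback: reclaim up to sc->nr_to_scan pages, draining the
 * deferred free list first and then asking the heap to shrink its pools.
 */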
static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
                                          struct shrink_control *sc)
{
        struct ion_heap *heap = container_of(shrinker, struct ion_heap,
                        shrinker);
        int freed = 0;
        int to_scan = sc->nr_to_scan;

        if (to_scan == 0)
                return 0;

        /*
         * shrink the free list first, no point in zeroing the memory if we're
         * just going to reclaim it. Also, skip any possible page pooling.
         */
        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
                freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) /
                                PAGE_SIZE;

        to_scan -= freed;
        if (to_scan <= 0)
                return freed;

        if (heap->ops->shrink)
                freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan);
        return freed;
}

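/* Register the heap with the VM shrinker framework. */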
void ion_heap_init_shrinker(struct ion_heap *heap)
{
        heap->shrinker.count_objects = ion_heap_shrink_count;
        heap->shrinker.scan_objects = ion_heap_shrink_scan;
        heap->shrinker.seeks = DEFAULT_SEEKS;
        heap->shrinker.batch = 0;
        register_shrinker(&heap->shrinker);
}

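/*
 * Create a heap of the type requested by the platform data and tag it with
 * the supplied name and id.
 */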
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
        struct ion_heap *heap = NULL;

        switch (heap_data->type) {
        case ION_HEAP_TYPE_SYSTEM_CONTIG:
                heap = ion_system_contig_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_SYSTEM:
                heap = ion_system_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_CARVEOUT:
                heap = ion_carveout_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_CHUNK:
                heap = ion_chunk_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_DMA:
                heap = ion_cma_heap_create(heap_data);
                break;
        default:
                pr_err("%s: Invalid heap type %d\n", __func__,
                        heap_data->type);
                return ERR_PTR(-EINVAL);
        }

        if (IS_ERR_OR_NULL(heap)) {
                pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
                        __func__, heap_data->name, heap_data->type,
                        heap_data->base, heap_data->size);
                return ERR_PTR(-EINVAL);
        }

        heap->name = heap_data->name;
        heap->id = heap_data->id;
        return heap;
}
EXPORT_SYMBOL(ion_heap_create);

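/* Destroy a heap previously created with ion_heap_create(). */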
void ion_heap_destroy(struct ion_heap *heap)
{
        if (!heap)
                return;

        switch (heap->type) {
        case ION_HEAP_TYPE_SYSTEM_CONTIG:
                ion_system_contig_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_SYSTEM:
                ion_system_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_CARVEOUT:
                ion_carveout_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_CHUNK:
                ion_chunk_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_DMA:
                ion_cma_heap_destroy(heap);
                break;
        default:
                pr_err("%s: Invalid heap type %d\n", __func__,
                        heap->type);
        }
}
EXPORT_SYMBOL(ion_heap_destroy);