/*
 * drivers/staging/android/ion/ion_chunk_heap.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

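/*
 * Book-keeping for a chunk heap: a physically contiguous region
 * (base/size) carved into fixed-size chunks handed out through a
 * genalloc pool.
 */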
struct ion_chunk_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
	unsigned long chunk_size;
	unsigned long size;
	unsigned long allocated;
};

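/*
 * Round the request up to a whole number of chunks, pull one chunk at a
 * time from the pool and describe each as a scatterlist entry. On a
 * partial failure, every chunk taken so far is returned to the pool.
 */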
static int ion_chunk_heap_allocate(struct ion_heap *heap,
				   struct ion_buffer *buffer,
				   unsigned long size, unsigned long align,
				   unsigned long flags)
{
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret, i;
	unsigned long num_chunks;
	unsigned long allocated_size;

	if (align > chunk_heap->chunk_size)
		return -EINVAL;

	allocated_size = ALIGN(size, chunk_heap->chunk_size);
	num_chunks = allocated_size / chunk_heap->chunk_size;

	if (allocated_size > chunk_heap->size - chunk_heap->allocated)
		return -ENOMEM;

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ret;
	}

	sg = table->sgl;
	for (i = 0; i < num_chunks; i++) {
		unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
						     chunk_heap->chunk_size);
		if (!paddr)
			goto err;
		sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)),
			    chunk_heap->chunk_size, 0);
		sg = sg_next(sg);
	}

	buffer->priv_virt = table;
	chunk_heap->allocated += allocated_size;
	return 0;
err:
	sg = table->sgl;
	for (i -= 1; i >= 0; i--) {
		gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
			      sg->length);
		sg = sg_next(sg);
	}
	sg_free_table(table);
	kfree(table);
	return -ENOMEM;
}

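/*
 * Zero the buffer, write it back out of the CPU caches if it was mapped
 * cached, then return every chunk in the scatterlist to the pool.
 */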
static void ion_chunk_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table = buffer->priv_virt;
	struct scatterlist *sg;
	int i;
	unsigned long allocated_size;

	allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size);

	ion_heap_buffer_zero(buffer);

	if (ion_buffer_cached(buffer))
		dma_sync_sg_for_device(NULL, table->sgl, table->nents,
				       DMA_BIDIRECTIONAL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
			      sg->length);
	}
	chunk_heap->allocated -= allocated_size;
	sg_free_table(table);
	kfree(table);
}

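/*
 * The sg_table built at allocation time already describes the buffer,
 * so the DMA map/unmap operations are trivial.
 */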
static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
					       struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
				     struct ion_buffer *buffer)
{
}

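/* Heap operations; kernel/user mappings reuse the generic ion helpers. */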
static struct ion_heap_ops chunk_heap_ops = {
	.allocate = ion_chunk_heap_allocate,
	.free = ion_chunk_heap_free,
	.map_dma = ion_chunk_heap_map_dma,
	.unmap_dma = ion_chunk_heap_unmap_dma,
	.map_user = ion_heap_map_user,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
};

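/*
 * Create a chunk heap over the region described by @heap_data. The
 * backing memory is synced and zeroed before a genalloc pool, with a
 * minimum allocation order of one chunk, is built on top of it.
 */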
struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_chunk_heap *chunk_heap;
	int ret;
	struct page *page;
	size_t size;

	page = pfn_to_page(PFN_DOWN(heap_data->base));
	size = heap_data->size;

	ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);

	ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
	if (ret)
		return ERR_PTR(ret);

	chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
	if (!chunk_heap)
		return ERR_PTR(-ENOMEM);

	chunk_heap->chunk_size = (unsigned long)heap_data->priv;
	chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
					   PAGE_SHIFT, -1);
	if (!chunk_heap->pool) {
		ret = -ENOMEM;
		goto error_gen_pool_create;
	}
	chunk_heap->base = heap_data->base;
	chunk_heap->size = heap_data->size;
	chunk_heap->allocated = 0;

	gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
	chunk_heap->heap.ops = &chunk_heap_ops;
	chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
	chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
	pr_debug("%s: base %lu size %zu align %ld\n", __func__,
		 chunk_heap->base, heap_data->size, heap_data->align);

	return &chunk_heap->heap;

error_gen_pool_create:
	kfree(chunk_heap);
	return ERR_PTR(ret);
}

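/* Tear down the genalloc pool and free the heap book-keeping. */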
void ion_chunk_heap_destroy(struct ion_heap *heap)
{
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);

	gen_pool_destroy(chunk_heap->pool);
	kfree(chunk_heap);
	chunk_heap = NULL;
}