/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
# define DEBUG
#endif
#endif

#include <asm/page.h>
#include <asm/dma-contiguous.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-isolation.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm_types.h>
#include <linux/dma-contiguous.h>

struct cma {
	unsigned long	base_pfn;
	unsigned long	count;
	unsigned long	*bitmap;
	struct mutex	lock;
};

struct cma *dma_contiguous_default_area;

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif

/*
 * Default global CMA area size can be defined in kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users who want to set the size of the global CMA area for their
 * system should use the cma= kernel parameter.
 */
static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline = -1;
static phys_addr_t base_cmdline;
static phys_addr_t limit_cmdline;

static int __init early_cma(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	size_cmdline = memparse(p, &p);
	if (*p != '@')
		return 0;
	base_cmdline = memparse(p + 1, &p);
	if (*p != '-') {
		limit_cmdline = base_cmdline + size_cmdline;
		return 0;
	}
	limit_cmdline = memparse(p + 1, &p);

	return 0;
}
early_param("cma", early_cma);
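
/*
 * Illustrative examples of the "cma=" forms accepted by the parser
 * above (sizes go through memparse(), so K/M/G suffixes work):
 *
 *	cma=64M				- 64 MiB placed anywhere
 *	cma=64M@0x20000000		- 64 MiB fixed at the given base
 *					  (limit defaults to base + size)
 *	cma=64M@0x20000000-0x40000000	- 64 MiB somewhere in [base, limit)
 */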

#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
	struct memblock_region *reg;
	unsigned long total_pages = 0;

	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		total_pages += memblock_region_memory_end_pfn(reg) -
			       memblock_region_memory_base_pfn(reg);

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
	return 0;
}

#endif
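
/*
 * Worked example for the percentage path (hypothetical numbers): on a
 * machine with 512 MiB of memory (131072 pages of 4 KiB) and
 * CONFIG_CMA_SIZE_PERCENTAGE=10, the helper above returns
 * 13107 << PAGE_SHIFT bytes, i.e. roughly 51 MiB.
 */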

/**
 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	phys_addr_t selected_size = 0;
	phys_addr_t selected_base = 0;
	phys_addr_t selected_limit = limit;
	bool fixed = false;

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

	if (size_cmdline != -1) {
		selected_size = size_cmdline;
		selected_base = base_cmdline;
		selected_limit = min_not_zero(limit_cmdline, limit);
		if (base_cmdline + size_cmdline == limit_cmdline)
			fixed = true;
	} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		selected_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		selected_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		selected_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		selected_size = max(size_bytes, cma_early_percent_memory());
#endif
	}

	if (selected_size && !dma_contiguous_default_area) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);

		dma_contiguous_reserve_area(selected_size, selected_base,
					    selected_limit,
					    &dma_contiguous_default_area,
					    fixed);
	}
}
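
/*
 * A minimal sketch of the expected caller (illustrative only; the
 * function and limit names below are hypothetical, as the real call
 * site lives in arch specific early-boot code):
 *
 *	void __init arch_reserve_memory(void)
 *	{
 *		...
 *		dma_contiguous_reserve(arch_dma_limit);
 *	}
 */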

static DEFINE_MUTEX(cma_mutex);

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	/* one bit in the bitmap tracks one page of the area */
	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	/*
	 * Walk the area one pageblock at a time: every page must be valid
	 * and belong to the same zone before the pageblock is handed over
	 * to the page allocator as MIGRATE_CMA.
	 */
	do {
		unsigned j;
		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			if (page_zone(pfn_to_page(pfn)) != zone)
				return -EINVAL;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);
	return 0;
}

static struct cma cma_areas[MAX_CMA_AREAS];
static unsigned cma_area_count;

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);
		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes).
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @res_cma: Pointer to store the created cma region.
 * @fixed: hint about where to place the reserved area
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows creation of custom reserved areas for specific
 * devices.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma,
				       bool fixed)
{
	struct cma *cma = &cma_areas[cma_area_count];
	phys_addr_t alignment;
	int ret = 0;

	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
		 (unsigned long)size, (unsigned long)base,
		 (unsigned long)limit);

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	/* Sanitise input arguments */
	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
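	/*
	 * Example (assuming 4 KiB pages and the common MAX_ORDER of 11):
	 * alignment is PAGE_SIZE << 10 == 4 MiB, so base, size and limit
	 * below are all rounded to 4 MiB boundaries.
	 */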
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	/* Reserve memory */
	if (base && fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = memblock_alloc_range(size, alignment, base,
							limit);
		if (!addr) {
			ret = -ENOMEM;
			goto err;
		} else {
			base = addr;
		}
	}

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	*res_cma = cma;
	cma_area_count++;

	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
		(unsigned long)base);

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(base, size);
	return 0;
err:
	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
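
/*
 * A minimal usage sketch (illustrative; the names "camera_cma",
 * "board_reserve" and "camera_dev" are hypothetical): reserve a
 * dedicated 16 MiB area during early boot, then attach it to a device
 * once it exists, e.g. via the dev_set_cma_area() helper:
 *
 *	static struct cma *camera_cma;
 *
 *	void __init board_reserve(void)
 *	{
 *		dma_contiguous_reserve_area(SZ_16M, 0, 0, &camera_cma, false);
 *	}
 *
 *	...
 *	dev_set_cma_area(&camera_dev, camera_cma);
 */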

static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
{
	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
	mutex_unlock(&cma->lock);
}

/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device-specific contiguous memory area if available, or the default
 * global one. Requires the architecture specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	unsigned long mask, pfn, pageno, start = 0;
	struct cma *cma = dev_get_cma_area(dev);
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = (1 << align) - 1;

	for (;;) {
		mutex_lock(&cma->lock);
		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		if (pageno >= cma->count) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, pageno, count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + pageno;
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		} else if (ret != -EBUSY) {
			clear_cma_bitmap(cma, pfn, count);
			break;
		}
		clear_cma_bitmap(cma, pfn, count);
		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = pageno + mask + 1;
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
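
/*
 * Example of the alignment handling above (assuming 4 KiB pages): a
 * call with align == 8 gives mask == 255, so the bitmap search only
 * returns free ranges starting at a multiple of 256 pages, i.e. the
 * resulting buffer is 1 MiB aligned.
 */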

/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when the provided pages do not belong to the contiguous
 * area, and true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	struct cma *cma = dev_get_cma_area(dev);
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	clear_cma_bitmap(cma, pfn, count);

	return true;
}
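
/*
 * A minimal end-to-end sketch (illustrative; "dev" and the buffer size
 * are hypothetical) showing how a caller might pair the two functions
 * above:
 *
 *	int count = SZ_1M >> PAGE_SHIFT;
 *	struct page *page;
 *
 *	page = dma_alloc_from_contiguous(dev, count, get_order(SZ_1M));
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	if (!dma_release_from_contiguous(dev, page, count))
 *		pr_warn("pages were not part of the CMA area\n");
 */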