/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
# define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}
EXPORT_SYMBOL(cma_get_base);

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}
EXPORT_SYMBOL(cma_get_size);

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset, in order_per_bit units, from the area's base PFN to
 * the first PFN that is aligned to align_order.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;

	return (ALIGN(cma->base_pfn, (1UL << align_order))
		- cma->base_pfn) >> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

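/*
 * Worked example for the three helpers above (illustrative, values
 * assumed): for an area with order_per_bit = 0 (one bitmap bit per page)
 * and base_pfn = 0x10000, a request with align_order = 4 gives:
 *
 *	cma_bitmap_aligned_mask()	-> (1UL << (4 - 0)) - 1 = 0xf
 *	cma_bitmap_aligned_offset()	-> (ALIGN(0x10000, 16) - 0x10000) >> 0 = 0
 *	cma_bitmap_pages_to_bits(cma, 32) -> ALIGN(32, 1) >> 0 = 32
 *
 * With order_per_bit = 2 each bit covers four pages, so the same 32-page
 * request needs only ALIGN(32, 4) >> 2 = 8 bits, and the alignment mask
 * shrinks to (1UL << (4 - 2)) - 1 = 0x3.
 */
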
static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA reserved
			 * range to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

err:
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

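/*
 * Worked example for the activation loop above (values assumed): with
 * 4 KiB pages and pageblock_order = 10, pageblock_nr_pages is 1024, so a
 * 16 MiB CMA area (cma->count = 4096 pages) is activated as
 * i = 4096 >> 10 = 4 pageblocks, each handed to
 * init_cma_reserved_pageblock() in turn.
 */
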
static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from memory that has
 * already been reserved.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}

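/*
 * Usage sketch (hypothetical, not part of this file): a driver that
 * reserved a block early in boot with memblock_reserve() could hand it
 * over to CMA like this. FOO_BASE, foo_cma and foo_cma_init() are
 * assumed names, not existing kernel symbols:
 *
 *	static struct cma *foo_cma;
 *
 *	static int __init foo_cma_init(void)
 *	{
 *		// 16 MiB at FOO_BASE, reserved earlier via memblock_reserve()
 *		return cma_init_reserved_mem(FOO_BASE, SZ_16M, 0, &foo_cma);
 *	}
 */
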
/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the caller to create custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

#ifdef CONFIG_X86
	/*
	 * high_memory isn't direct mapped memory so retrieving its physical
	 * address isn't appropriate. But it would be useful to check the
	 * physical address of the highmem boundary so it's justifiable to get
	 * the physical address from it. On x86 there is a validation check for
	 * this case, so the following workaround is needed to avoid it.
	 */
	highmem_start = __pa_nodebug(high_memory);
#else
	highmem_start = __pa(high_memory);
#endif
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pages by the buddy allocator. In that case
	 * a later request for contiguous memory could fail, which is not
	 * what we want, so align the area to the larger of the maximum
	 * buddy order and the pageblock order.
	 */
	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

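	/*
	 * Worked example (assuming 4 KiB pages, MAX_ORDER = 11 and
	 * pageblock_order = 10): the minimum alignment computed above is
	 * PAGE_SIZE << 10 = 4 MiB, so e.g. a 5 MiB request is rounded up
	 * to an 8 MiB size and base/limit are snapped to 4 MiB boundaries.
	 */
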
	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the requested region must not cross
	 * the low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit,
						    MEMBLOCK_NONE);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit,
						    MEMBLOCK_NONE);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
	if (ret)
		goto err;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}

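/*
 * Usage sketch (hypothetical): architecture setup code typically calls
 * cma_declare_contiguous() while memblock is still active, e.g. from an
 * early init hook. The names below are assumptions for illustration only:
 *
 *	static struct cma *board_cma;
 *
 *	void __init board_reserve_cma(void)
 *	{
 *		// 64 MiB anywhere below 1 GiB, default alignment,
 *		// one bitmap bit per page, not at a fixed base
 *		if (cma_declare_contiguous(0, SZ_64M, SZ_1G, 0, 0,
 *					   false, &board_cma))
 *			pr_warn("board: CMA reservation failed\n");
 *	}
 */
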
/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates a contiguous range of pages from the given
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the
 * contiguous area, and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}
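
/*
 * Usage sketch (hypothetical): cma_alloc() and cma_release() are meant to
 * be used as a pair against the same area. Assuming a previously
 * initialised region "foo_cma", a 1 MiB buffer could be handled as:
 *
 *	struct page *page;
 *	size_t nr = SZ_1M >> PAGE_SHIFT;
 *
 *	page = cma_alloc(foo_cma, nr, 0);	// no extra alignment needed
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	if (!cma_release(foo_cma, page, nr))
 *		pr_warn("pages were not part of the CMA area\n");
 */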