// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name ? cma->name : "(undefined)";
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}
/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in units of order_per_bit.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
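
/*
 * Worked example for the three helpers above (numbers are for
 * illustration only). Assume order_per_bit == 2, so one bitmap bit
 * covers four pages:
 *
 *  - cma_bitmap_aligned_mask(cma, 4) == (1UL << (4 - 2)) - 1 == 3,
 *    i.e. a 16-page-aligned (order 4) request must start at a bit
 *    index that is a multiple of four;
 *  - with base_pfn == 1028, cma_bitmap_aligned_offset(cma, 4) ==
 *    (1028 & 15) >> 2 == 1, the bit offset that compensates for the
 *    base PFN itself not being 16-page aligned;
 *  - cma_bitmap_pages_to_bits(cma, 7) == ALIGN(7, 4) >> 2 == 2, so a
 *    7-page request consumes two bits (eight pages) of the bitmap.
 */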

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
	if (!cma->bitmap) {
		cma->count = 0;
		return -ENOMEM;
	}

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto not_in_zone;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

not_in_zone:
	pr_err("CMA area %s could not be activated\n", cma->name);
	bitmap_free(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}
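
/*
 * Sizing note (illustrative numbers only): with 4 KiB pages and
 * pageblock_order == 10, each iteration of the loop above hands one
 * 4 MiB pageblock back to the page allocator, so a 64 MiB area is
 * activated as sixteen pageblocks. The area size is a whole number of
 * pageblocks thanks to the alignment enforced at reservation time.
 */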

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	if (name) {
		cma->name = name;
	} else {
		cma->name = kasprintf(GFP_KERNEL, "cma%d", cma_area_count);
		if (!cma->name)
			return -ENOMEM;
	}
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
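
/*
 * Illustrative sketch only (struct reserved_mem and the rmem variable
 * come from a typical reserved-memory setup hook and are not part of
 * this file): a caller that has already memblock-reserved a region can
 * turn it into a CMA area like this:
 *
 *	struct cma *cma;
 *	int ret;
 *
 *	ret = cma_init_reserved_mem(rmem->base, rmem->size, 0,
 *				    rmem->name, &cma);
 *	if (ret)
 *		return ret;
 */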

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem().
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. It allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pages by the page allocator's buddy
	 * algorithm; in that case a later contiguous allocation could fail,
	 * which is not what we want. Enforce pageblock alignment to avoid
	 * this.
	 */
	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	if (fixed && base & (alignment - 1)) {
		ret = -EINVAL;
		pr_err("Region at %pa must be aligned to %pa bytes\n",
			&base, &alignment);
		goto err;
	}
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	if (base + size > limit) {
		ret = -EINVAL;
		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
			&size, &base, &limit);
		goto err;
	}

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_phys_alloc_range(size, alignment,
							 highmem_start, limit);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_phys_alloc_range(size, alignment, base,
							 limit);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects, but this address isn't mapped or accessible, so
		 * tell kmemleak to ignore it.
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto free_mem;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

free_mem:
	memblock_free(base, size);
err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
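
/*
 * Illustrative sketch only ("foo" is a hypothetical client): early arch
 * setup code could reserve a 64 MiB area at any suitable base, with the
 * default alignment and one page per bitmap bit:
 *
 *	static struct cma *foo_cma;
 *
 *	static void __init foo_reserve(void)
 *	{
 *		if (cma_declare_contiguous(0, SZ_64M, 0, 0, 0,
 *					   false, "foo", &foo_cma))
 *			pr_warn("foo: CMA reservation failed\n");
 *	}
 */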

#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit, nr_zero;
	unsigned long start = 0;
	unsigned long nr_part, nr_total = 0;
	unsigned long nbits = cma_bitmap_maxno(cma);

	mutex_lock(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
		if (next_zero_bit >= nbits)
			break;
		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		nr_part = nr_zero << cma->order_per_bit;
		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
			next_zero_bit);
		nr_total += nr_part;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
	mutex_unlock(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif
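
/*
 * Example of the resulting output (illustrative values): for an area of
 * 256 pages with order_per_bit == 0 and two free runs of 16 and 8 pages
 * starting at bits 0 and 64, the loop above prints:
 *
 *	cma: number of available pages: 16@0+8@64=> 24 free of 256 total pages
 *
 * i.e. one "<pages>@<bit offset>" entry per free run, joined by '+'.
 */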

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation.
 *
 * This function allocates a contiguous range of pages from the specified
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       bool no_warn)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	size_t i;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
					 GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	/*
	 * CMA can allocate multiple page blocks, which results in different
	 * blocks being marked with different tags. Reset the tags to ignore
	 * those page blocks.
	 */
	if (page) {
		for (i = 0; i < count; i++)
			page_kasan_tag_reset(page + i);
	}

	if (ret && !no_warn) {
		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
		       __func__, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
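
/*
 * Illustrative sketch only: a driver holding a struct cma pointer (for
 * example one returned through @res_cma above) could take sixteen pages
 * aligned to a 16-page (order 4) boundary, and must hand them back with
 * cma_release() when done:
 *
 *	struct page *page = cma_alloc(cma, 16, 4, false);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	cma_release(cma, page, 16);
 */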

/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
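
/*
 * Illustrative sketch only (foo_count_pages is hypothetical): a non-zero
 * return value from the callback stops the walk early and is propagated
 * to the caller. Summing the pages of all registered areas:
 *
 *	static int foo_count_pages(struct cma *cma, void *data)
 *	{
 *		*(unsigned long *)data += cma_get_size(cma) >> PAGE_SHIFT;
 *		return 0;
 *	}
 *
 *	unsigned long total = 0;
 *
 *	cma_for_each_area(foo_count_pages, &total);
 */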