// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name;
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The result is expressed in units of order_per_bit.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static void __init cma_activate_area(struct cma *cma)
{
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;	/* nr of pageblocks */
	struct zone *zone;

	cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
	if (!cma->bitmap)
		goto out_error;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto not_in_zone;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return;

not_in_zone:
	bitmap_free(cma->bitmap);
out_error:
	cma->count = 0;
	pr_err("CMA area %s could not be activated\n", cma->name);
	return;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++)
		cma_activate_area(&cma_areas[i]);

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];

	if (name)
		snprintf(cma->name, CMA_MAX_NAME, "%s", name);
	else
		snprintf(cma->name, CMA_MAX_NAME, "cma%d", cma_area_count);

	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}

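/*
 * Illustrative sketch of a caller (not part of this file): code that has
 * already reserved a block through memblock can hand it over to CMA. The
 * rmem_base/rmem_size names below are hypothetical, and the block must
 * satisfy the alignment checks in cma_init_reserved_mem():
 *
 *	struct cma *cma;
 *	int ret;
 *
 *	if (memblock_reserve(rmem_base, rmem_size))
 *		return -EBUSY;
 *	ret = cma_init_reserved_mem(rmem_base, rmem_size, 0,
 *				    "example-region", &cma);
 *	if (ret)
 *		memblock_free(rmem_base, rmem_size);
 *	return ret;
 */
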
/**
 * cma_declare_contiguous_nid() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region.
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous_nid(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma,
			int nid)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable migratetype pages by the buddy allocator, in which case
	 * a contiguous allocation would no longer be possible. Avoid that
	 * by aligning the area to at least a pageblock.
	 */
	alignment = max(alignment,  (phys_addr_t)PAGE_SIZE <<
			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	if (fixed && base & (alignment - 1)) {
		ret = -EINVAL;
		pr_err("Region at %pa must be aligned to %pa bytes\n",
			&base, &alignment);
		goto err;
	}
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	if (base + size > limit) {
		ret = -EINVAL;
		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
			&size, &base, &limit);
		goto err;
	}

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range_nid(size, alignment,
					highmem_start, limit, nid, true);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range_nid(size, alignment, base,
					limit, nid, true);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects, but this address is not mapped and therefore not
		 * accessible.
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto free_mem;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

free_mem:
	memblock_free(base, size);
err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}

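/*
 * Illustrative sketch of an arch-setup-style caller (not part of this
 * file), reserving a 16 MiB area anywhere below 4 GiB on any node; the
 * size, limit, and name are made up for the example:
 *
 *	struct cma *cma;
 *	int ret;
 *
 *	ret = cma_declare_contiguous_nid(0, SZ_16M, SZ_4G, 0, 0, false,
 *					 "example", &cma, NUMA_NO_NODE);
 *	if (ret)
 *		pr_warn("example CMA reservation failed: %d\n", ret);
 */
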
#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit, nr_zero;
	unsigned long start = 0;
	unsigned long nr_part, nr_total = 0;
	unsigned long nbits = cma_bitmap_maxno(cma);

	mutex_lock(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
		if (next_zero_bit >= nbits)
			break;
		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		nr_part = nr_zero << cma->order_per_bit;
		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
			next_zero_bit);
		nr_total += nr_part;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
	mutex_unlock(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation
 *
 * This function allocates part of contiguous memory from a specific
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       bool no_warn)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	size_t i;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count || !cma->bitmap)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
				GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));

		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	/*
	 * CMA can allocate multiple page blocks, which results in different
	 * blocks being marked with different tags. Reset the tags to ignore
	 * those page blocks.
	 */
	if (page) {
		for (i = 0; i < count; i++)
			page_kasan_tag_reset(page + i);
	}

	if (ret && !no_warn && printk_ratelimit()) {
		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
		       __func__, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

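/*
 * Illustrative sketch of a driver-style caller (not part of this file),
 * assuming "cma" was obtained from one of the reservation calls above and
 * nr_pages is a hypothetical request size; note that cma_alloc() and
 * cma_release() pair up around the caller's use of the pages:
 *
 *	struct page *page;
 *
 *	page = cma_alloc(cma, nr_pages, get_order(SZ_1M), false);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	if (!cma_release(cma, page, nr_pages))
 *		pr_warn("pages did not belong to the CMA area\n");
 */
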
/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the
 * contiguous area, and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p, count %u)\n", __func__, (void *)pages, count);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
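
/*
 * Illustrative sketch of a cma_for_each_area() user (not part of this
 * file): the callback runs once per registered area, and a non-zero
 * return value stops the walk and is propagated back to the caller:
 *
 *	static int print_one_cma(struct cma *cma, void *data)
 *	{
 *		phys_addr_t base = cma_get_base(cma);
 *
 *		pr_info("%s: base %pa, size %lu\n",
 *			cma_get_name(cma), &base, cma_get_size(cma));
 *		return 0;
 *	}
 *
 *	...
 *	cma_for_each_area(print_one_cma, NULL);
 */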