// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#define INIT_MEMBLOCK_REGIONS			128
#define INIT_PHYSMEM_REGIONS			4

#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
# define INIT_MEMBLOCK_RESERVED_REGIONS		INIT_MEMBLOCK_REGIONS
#endif
/**
 * DOC: memblock overview
 *
 * Memblock is a method of managing memory regions during the early
 * boot period when the usual kernel memory allocators are not up and
 * running.
 *
 * Memblock views the system memory as collections of contiguous
 * regions. There are several types of these collections:
 *
 * * ``memory`` - describes the physical memory available to the
 *   kernel; this may differ from the actual physical memory installed
 *   in the system, for instance when the memory is restricted with
 *   the ``mem=`` command line parameter
 * * ``reserved`` - describes the regions that were allocated
 * * ``physmem`` - describes the actual physical memory available during
 *   boot regardless of the possible restrictions and memory hot(un)plug;
 *   the ``physmem`` type is only available on some architectures.
 *
 * Each region is represented by struct memblock_region that
 * defines the region extents, its attributes and NUMA node id on NUMA
 * systems. Every memory type is described by the struct memblock_type
 * which contains an array of memory regions along with
 * the allocator metadata. The "memory" and "reserved" types are nicely
 * wrapped with struct memblock. This structure is statically
 * initialized at build time. The region arrays are initially sized to
 * %INIT_MEMBLOCK_REGIONS for "memory" and %INIT_MEMBLOCK_RESERVED_REGIONS
 * for "reserved". The region array for "physmem" is initially sized to
 * %INIT_PHYSMEM_REGIONS.
 * The memblock_allow_resize() enables automatic resizing of the region
 * arrays during addition of new regions. This feature should be used
 * with care so that memory allocated for the region array will not
 * overlap with areas that should be reserved, for example initrd.
 *
 * The early architecture setup should tell memblock what the physical
 * memory layout is by using memblock_add() or memblock_add_node()
 * functions. The first function does not assign the region to a NUMA
 * node and it is appropriate for UMA systems. Yet, it is possible to
 * use it on NUMA systems as well and assign the region to a NUMA node
 * later in the setup process using memblock_set_node(). The
 * memblock_add_node() performs such an assignment directly.
 *
 * Once memblock is set up, the memory can be allocated using one of the
 * API variants:
 *
 * * memblock_phys_alloc*() - these functions return the **physical**
 *   address of the allocated memory
 * * memblock_alloc*() - these functions return the **virtual** address
 *   of the allocated memory.
 *
 * Note that both API variants use implicit assumptions about allowed
 * memory ranges and the fallback methods. Consult the documentation
 * of memblock_alloc_internal() and memblock_alloc_range_nid()
 * functions for a more elaborate description.
 *
 * As the system boot progresses, the architecture specific mem_init()
 * function frees all the memory to the buddy page allocator.
 *
 * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the
 * memblock data structures (except "physmem") will be discarded after the
 * system initialization completes.
 */
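
/*
 * A minimal usage sketch (illustrative only; the addresses, sizes and
 * the initrd_start/initrd_size variables below are hypothetical, not
 * taken from any particular platform). Early setup code typically
 * describes RAM, protects firmware ranges, and then allocates:
 *
 *	memblock_add(0x80000000, SZ_512M);
 *	memblock_reserve(initrd_start, initrd_size);
 *	ptr = memblock_alloc(PAGE_SIZE, SMP_CACHE_BYTES);
 *
 * where memblock_alloc() returns zeroed memory by its virtual address.
 */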
#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
#endif
struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_RESERVED_REGIONS,
	.reserved.name		= "reserved",

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
struct memblock_type physmem = {
	.regions		= memblock_physmem_init_regions,
	.cnt			= 1,	/* empty dummy entry */
	.max			= INIT_PHYSMEM_REGIONS,
	.name			= "physmem",
};
#endif
/*
 * keep a pointer to &memblock.memory in the text section to use it in
 * __next_mem_range() and its helpers.
 * For architectures that do not keep memblock data after init, this
 * pointer will be reset to NULL at memblock_discard().
 */
static __refdata struct memblock_type *memblock_memory = &memblock.memory;
#define for_each_memblock_type(i, memblock_type, rgn)		\
	for (i = 0, rgn = &memblock_type->regions[0];		\
	     i < memblock_type->cnt;				\
	     i++, rgn = &memblock_type->regions[i])

#define memblock_dbg(fmt, ...)						\
	do {								\
		if (memblock_debug)					\
			pr_info(fmt, ##__VA_ARGS__);			\
	} while (0)
static int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;
static enum memblock_flags __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}
/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, PHYS_ADDR_MAX - base);
}
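
/*
 * Worked example (illustrative numbers only): with a 64-bit phys_addr_t,
 * a region based at PHYS_ADDR_MAX - SZ_1M with *@size == SZ_2M is capped
 * to SZ_1M, so that @base + *@size no longer wraps past zero.
 */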
/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;
	return i < type->cnt;
}
/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}
/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}
/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					enum memblock_flags flags)
{
	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
	    end == MEMBLOCK_ALLOC_KASAN)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);

	if (memblock_bottom_up())
		return __memblock_find_range_bottom_up(start, end, size, align,
						       nid, flags);
	else
		return __memblock_find_range_top_down(start, end, size, align,
						      nid, flags);
}
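
/*
 * Illustration (hypothetical layout): with a single free range at
 * [16M, 64M) and a request for 1M aligned to 1M, the default top-down
 * search returns 63M, while bottom-up mode (see memblock_set_bottom_up())
 * would return 16M.
 */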
/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	enum memblock_flags flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}
static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}
#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
/**
 * memblock_discard - discard memory and reserved arrays if they were allocated
 */
void __init memblock_discard(void)
{
	phys_addr_t addr, size;

	if (memblock.reserved.regions != memblock_reserved_init_regions) {
		addr = __pa(memblock.reserved.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.reserved.max);
		__memblock_free_late(addr, size);
	}

	if (memblock.memory.regions != memblock_memory_init_regions) {
		addr = __pa(memblock.memory.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.memory.max);
		__memblock_free_late(addr, size);
	}

	memblock_memory = NULL;
}
#endif
/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * Return:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr, new_end;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new one aligned to PAGE_SIZE,
	 * so we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}

	new_end = addr + new_size - 1;
	memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
			type->name, type->max * 2, &addr, &new_end);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock.  Otherwise, we
	 * needn't do it
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}
/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}
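
/*
 * E.g. (hypothetical): two regions [0x1000-0x2000) and [0x2000-0x3000)
 * with identical node ids and flags collapse into one [0x1000-0x3000)
 * region.
 */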
/**
 * memblock_insert_region - insert new memblock region
 * @type:	memblock type to insert into
 * @idx:	index for the insertion point
 * @base:	base address of the new region
 * @size:	size of the new region
 * @nid:	node id of the new region
 * @flags:	flags of the new region
 *
 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   enum memblock_flags flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}
/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, enum memblock_flags flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_NEED_MULTIPLE_NODES
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}
/**
 * memblock_add_node - add new memblock region within a NUMA node
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}
/**
 * memblock_add - add new memblock region
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}
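
/*
 * Overlapping adds are safe; e.g. (hypothetical) memblock_add(0x0, SZ_1M)
 * followed by memblock_add(SZ_512K, SZ_1M) leaves a single merged region
 * [0x0-0x180000) in memblock.memory.
 */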
/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base, @base + @size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}
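
/*
 * E.g. (hypothetical): isolating [0x2000, 0x3000) from a single region
 * [0x1000-0x4000) splits it into [0x1000-0x2000), [0x2000-0x3000) and
 * [0x3000-0x4000), returning *@start_rgn == 1 and *@end_rgn == 2.
 */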
static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}
int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_remove_range(&memblock.memory, base, size);
}
/**
 * memblock_free - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}
int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0);
}
#endif
/**
 * memblock_setclr_flag - set or clear flag for a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set or clear the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears flag
 *
 * Return: 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++) {
		struct memblock_region *r = &type->regions[i];

		if (set)
			r->flags |= flag;
		else
			r->flags &= ~flag;
	}

	memblock_merge_regions(type);
	return 0;
}
/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}

/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
}
static bool should_skip_region(struct memblock_type *type,
			       struct memblock_region *m,
			       int nid, int flags)
{
	int m_nid = memblock_get_region_node(m);

	/* we never skip regions when iterating memblock.reserved or physmem */
	if (type != memblock_memory)
		return false;

	/* only memory regions are associated with nodes, check it */
	if (nid != NUMA_NO_NODE && nid != m_nid)
		return true;

	/* skip hotpluggable memory regions if needed */
	if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
		return true;

	/* if we want mirror memory skip non-mirror memory regions */
	if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
		return true;

	/* skip nomap memory unless we were asked for it explicitly */
	if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
		return true;

	return false;
}
/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.	For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	"Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int	    m_nid = memblock_get_region_node(m);

		if (should_skip_region(type_a, m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
					  enum memblock_flags flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		if (type_b != NULL)
			idx_b = type_b->cnt;
		else
			idx_b = 0;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		if (should_skip_region(type_a, m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */

			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;
	int r_nid;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];
		r_nid = memblock_get_region_node(r);

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r_nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r_nid;
}
/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
#endif
	return 0;
}
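
/*
 * E.g. (hypothetical): an architecture that discovers its NUMA topology
 * late can call memblock_add(base, size) early and, once the node map is
 * known, memblock_set_node(base, size, &memblock.memory, nid).
 */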
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/**
 * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone()
 *
 * @idx: pointer to u64 loop variable
 * @zone: zone in which all of the memory blocks reside
 * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL
 * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL
 *
 * This function is meant to be a zone/pfn specific wrapper for the
 * for_each_mem_range type iterators. Specifically they are used in the
 * deferred memory init routines and as such we were duplicating much of
 * this logic throughout the code. So instead of having it in multiple
 * locations it seemed like it would make more sense to centralize this to
 * one new iterator that does everything they need.
 */
void __init_memblock
__next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
			     unsigned long *out_spfn, unsigned long *out_epfn)
{
	int zone_nid = zone_to_nid(zone);
	phys_addr_t spa, epa;
	int nid;

	__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
			 &memblock.memory, &memblock.reserved,
			 &spa, &epa, &nid);

	while (*idx != U64_MAX) {
		unsigned long epfn = PFN_DOWN(epa);
		unsigned long spfn = PFN_UP(spa);

		/*
		 * Verify the end is at least past the start of the zone and
		 * that we have at least one PFN to initialize.
		 */
		if (zone->zone_start_pfn < epfn && spfn < epfn) {
			/* if we went too far just stop searching */
			if (zone_end_pfn(zone) <= spfn) {
				*idx = U64_MAX;
				break;
			}

			if (out_spfn)
				*out_spfn = max(zone->zone_start_pfn, spfn);
			if (out_epfn)
				*out_epfn = min(zone_end_pfn(zone), epfn);

			return;
		}

		__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
				 &memblock.memory, &memblock.reserved,
				 &spa, &epa, &nid);
	}

	/* signal end of iteration */
	if (out_spfn)
		*out_spfn = ULONG_MAX;
	if (out_epfn)
		*out_epfn = 0;
}

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
/**
 * memblock_alloc_range_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (phys address)
 * @end: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control the allocation fall back to other nodes
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
 *
 * If the specified node can not hold the requested memory and @exact_nid
 * is false, the allocation falls back to any node in the system.
 *
 * For systems with memory mirroring, the allocation is attempted first
 * from the regions with mirroring enabled and then retried from any
 * memory region.
 *
 * In addition, the function sets the min_count to 0 using kmemleak_alloc_phys
 * for the allocated boot memory block, so that it is never reported as leaks.
 *
 * Return:
 * Physical address of allocated memory block on success, %0 on failure.
 */
phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					bool exact_nid)
{
	enum memblock_flags flags = choose_memblock_flags();
	phys_addr_t found;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (!align) {
		/* Can't use WARNs this early in boot on powerpc */
		dump_stack();
		align = SMP_CACHE_BYTES;
	}

again:
	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size))
		goto done;

	if (nid != NUMA_NO_NODE && !exact_nid) {
		found = memblock_find_in_range_node(size, align, start,
						    end, NUMA_NO_NODE,
						    flags);
		if (found && !memblock_reserve(found, size))
			goto done;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	return 0;

done:
	/* Skip kmemleak for kasan_init() due to high volume. */
	if (end != MEMBLOCK_ALLOC_KASAN)
		/*
		 * The min_count is set to 0 so that memblock allocated
		 * blocks are never reported as leaks. This is because many
		 * of these blocks are only referred via the physical
		 * address which is not looked up by kmemleak.
		 */
		kmemleak_alloc_phys(found, size, 0, 0);

	return found;
}
/**
 * memblock_phys_alloc_range - allocate a memory block inside specified range
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (physical address)
 * @end: the upper bound of the memory region to allocate (physical address)
 *
 * Allocate @size bytes in the range between @start and @end.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */
phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
					     phys_addr_t align,
					     phys_addr_t start,
					     phys_addr_t end)
{
	memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, &start, &end,
		     (void *)_RET_IP_);
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					false);
}
/**
 * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Allocates memory block from the specified NUMA node. If the node
 * has no available memory, attempts to allocate from any node in the
 * system.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */
phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	return memblock_alloc_range_nid(size, align, 0,
					MEMBLOCK_ALLOC_ACCESSIBLE, nid, false);
}
/**
 * memblock_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control the allocation fall back to other nodes
 *
 * Allocates memory block using memblock_alloc_range_nid() and
 * converts the returned physical address to virtual.
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to memory below @min_addr. Other constraints, such
 * as node and mirrored memory will be handled again in
 * memblock_alloc_range_nid().
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid, bool exact_nid)
{
	phys_addr_t alloc;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of memblock_free_all)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;

	alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid,
					 exact_nid);

	/* retry allocation without lower limit */
	if (!alloc && min_addr)
		alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid,
						 exact_nid);

	if (!alloc)
		return NULL;

	return phys_to_virt(alloc);
}
/**
 * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node
 * without zeroing memory
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_exact_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	ptr = memblock_alloc_internal(size, align,
					   min_addr, max_addr, nid, true);
	if (ptr && size > 0)
		page_init_poison(ptr, size);

	return ptr;
}
/**
 * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
 * memory and without panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory, does not panic if request
 * cannot be satisfied.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_try_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	ptr = memblock_alloc_internal(size, align,
					   min_addr, max_addr, nid, false);
	if (ptr && size > 0)
		page_init_poison(ptr, size);

	return ptr;
}
/**
 * memblock_alloc_try_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. This function zeroes the allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);
	ptr = memblock_alloc_internal(size, align,
					   min_addr, max_addr, nid, false);
	if (ptr)
		memset(ptr, 0, size);

	return ptr;
}
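
/*
 * E.g. (hypothetical): allocating a per-node scratch buffer during early
 * init, with fallback to any node when @nid has no free memory:
 *
 *	buf = memblock_alloc_try_nid(SZ_4K, SMP_CACHE_BYTES, 0,
 *				     MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 */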
/**
 * __memblock_free_late - free pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the memblock allocator has already been torn
 * down, but we are still initializing the system.  Pages are released directly
 * to the buddy allocator.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t cursor, end;

	end = base + size - 1;
	memblock_dbg("%s: [%pa-%pa] %pS\n",
		     __func__, &base, &end, (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		memblock_free_pages(pfn_to_page(cursor), cursor, 0);
		totalram_pages_inc();
	}
}
/*
 * Remaining API functions
 */

phys_addr_t __init_memblock memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init_memblock memblock_reserved_size(void)
{
	return memblock.reserved.total_size;
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}
static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
{
	phys_addr_t max_addr = PHYS_ADDR_MAX;
	struct memblock_region *r;

	/*
	 * translate the memory @limit size into the max address within one of
	 * the memory memblock regions, if the @limit exceeds the total size
	 * of those regions, max_addr will keep original value PHYS_ADDR_MAX
	 */
	for_each_mem_region(r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	return max_addr;
}
void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      PHYS_ADDR_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      PHYS_ADDR_MAX);
}
void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	if (!size)
		return;

	ret = memblock_isolate_range(&memblock.memory, base, size,
						&start_rgn, &end_rgn);
	if (ret)
		return;

	/* remove all the MAP regions */
	for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	for (i = start_rgn - 1; i >= 0; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	/* truncate the reserved regions */
	memblock_remove_range(&memblock.reserved, 0, base);
	memblock_remove_range(&memblock.reserved,
			base + size, PHYS_ADDR_MAX);
}
void __init memblock_mem_limit_remove_map(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	memblock_cap_memory_range(0, max_addr);
}
static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}
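
/*
 * E.g. (hypothetical): with sorted regions [0-4K) and [8K-12K), searching
 * for address 9K returns index 1, while address 5K falls into the gap
 * between the regions and returns -1.
 */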
bool __init_memblock memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

bool __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
{
	int i = memblock_search(&memblock.memory, addr);

	if (i == -1)
		return false;
	return !memblock_is_nomap(&memblock.memory.regions[i]);
}
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return memblock_get_region_node(&type->regions[mid]);
}
/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) is a subset of a memory block.
 *
 * Return:
 * true if the region is a subset of a memory block, false otherwise.
 */
bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return false;
	return (memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}
/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) intersects a reserved
 * memory block.
 *
 * Return:
 * True if they intersect, false if not.
 */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size);
}
void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_mem_region(r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}
void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}
static void __init_memblock memblock_dump(struct memblock_type *type)
{
	phys_addr_t base, end, size;
	enum memblock_flags flags;
	int idx;
	struct memblock_region *rgn;

	pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt);

	for_each_memblock_type(idx, type, rgn) {
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		end = base + size - 1;
		flags = rgn->flags;
#ifdef CONFIG_NEED_MULTIPLE_NODES
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
			type->name, idx, &base, &end, &size, nid_buf, flags);
	}
}
static void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %pa reserved size = %pa\n",
		&memblock.memory.total_size,
		&memblock.reserved.total_size);

	memblock_dump(&memblock.memory);
	memblock_dump(&memblock.reserved);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	memblock_dump(&physmem);
#endif
}

void __init_memblock memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}
void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);
static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		memblock_free(pg, pgend - pg);
}
/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, end, prev_end = 0;
	int i;

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) ||
	    IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
		return;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#else
		/*
		 * Align down here since the VM subsystem insists that the
		 * memmap entries are valid from the bank start aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		start = round_down(start, MAX_ORDER_NR_PAGES);
#endif

		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(end, MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	int order;

	while (start < end) {
		order = min(MAX_ORDER - 1UL, __ffs(start));

		while (start + (1UL << order) > end)
			order--;

		memblock_free_pages(pfn_to_page(start), start, order);

		start += (1UL << order);
	}
}
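
/*
 * E.g. (hypothetical): for the pfn range [3, 9) this frees an order-0
 * block at pfn 3, an order-2 block at pfn 4 and an order-0 block at
 * pfn 8, covering the range with the largest buddy-aligned chunks.
 */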
static unsigned long __init __free_memory_core(phys_addr_t start,
				 phys_addr_t end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = min_t(unsigned long,
				      PFN_DOWN(end), max_low_pfn);

	if (start_pfn >= end_pfn)
		return 0;

	__free_pages_memory(start_pfn, end_pfn);

	return end_pfn - start_pfn;
}
static unsigned long __init free_low_memory_core_early(void)
{
	unsigned long count = 0;
	phys_addr_t start, end;
	u64 i;

	memblock_clear_hotplug(0, -1);

	for_each_reserved_mem_range(i, &start, &end)
		reserve_bootmem_region(start, end);

	/*
	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
	 * because in some cases, such as when Node0 has no RAM installed,
	 * low memory may be on Node1.
	 */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
				NULL)
		count += __free_memory_core(start, end);

	return count;
}
static int reset_managed_pages_done __initdata;

void reset_node_managed_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		atomic_long_set(&z->managed_pages, 0);
}

void __init reset_all_zones_managed_pages(void)
{
	struct pglist_data *pgdat;

	if (reset_managed_pages_done)
		return;

	for_each_online_pgdat(pgdat)
		reset_node_managed_pages(pgdat);

	reset_managed_pages_done = 1;
}
/**
 * memblock_free_all - release free pages to the buddy allocator
 */
void __init memblock_free_all(void)
{
	unsigned long pages;

	free_unused_memmap();
	reset_all_zones_managed_pages();

	pages = free_low_memory_core_early();
	totalram_pages_add(pages);
}
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;
	phys_addr_t end;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		end = reg->base + reg->size - 1;

		seq_printf(m, "%4d: ", i);
		seq_printf(m, "%pa..%pa\n", &reg->base, &end);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(memblock_debug);

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	debugfs_create_file("memory", 0444, root,
			    &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", 0444, root,
			    &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", 0444, root, &physmem,
			    &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */
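
/*
 * E.g. (hypothetical session): booting with memblock=debug and
 * CONFIG_ARCH_KEEP_MEMBLOCK, the region lists can be inspected at
 * runtime; the values below are made up:
 *
 *	# cat /sys/kernel/debug/memblock/memory
 *	   0: 0x0000000080000000..0x00000000bfffffff
 */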