/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm-generic/sections.h>
#include <linux/io.h>

#include "internal.h"

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_REGIONS,

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	.physmem.regions	= memblock_physmem_init_regions,
	.physmem.cnt		= 1,	/* empty dummy entry */
	.physmem.max		= INIT_PHYSMEM_REGIONS,
#endif

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
#ifdef CONFIG_MOVABLE_NODE
bool movable_node_enabled __initdata_memblock = false;
#endif
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

ulong __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* inline so we don't get a warning when pr_debug is compiled out */
static __init_memblock const char *
memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}

/*
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When allocation direction is bottom-up, the @start should be greater
 * than the end of the kernel image. Otherwise, it will be trimmed. The
 * reason is that we want the bottom-up allocation just near the kernel
 * image so it is highly likely that the allocated memory and the kernel
 * will reside in the same node.
 *
 * If bottom-up allocation fails, we retry the allocation top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid, ulong flags)
{
	phys_addr_t kernel_end, ret;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode
	 * is set and @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid, flags);
		if (ret)
			return ret;

		/*
		 * we always limit bottom-up allocation above the kernel,
		 * but top-down allocation doesn't have the limit, so
		 * retrying top-down allocation may succeed when bottom-up
		 * allocation failed.
		 *
		 * bottom-up allocation is expected to fail very rarely,
		 * so we use WARN_ONCE() here to see the stack trace if
		 * a failure happens.
		 */
		WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid,
					      flags);
}

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	ulong flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

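/*
 * Example (illustrative sketch, not from this file): early platform code
 * that needs a block of physical memory typically pairs the lookup above
 * with an explicit reservation, because memblock_find_in_range() only
 * locates a candidate range and does not claim it. The CRASH_BASE and
 * CRASH_SIZE names below are hypothetical placeholders:
 *
 *	phys_addr_t crash_base;
 *
 *	crash_base = memblock_find_in_range(CRASH_BASE, CRASH_BASE + SZ_256M,
 *					    CRASH_SIZE, SZ_1M);
 *	if (crash_base)
 *		memblock_reserve(crash_base, CRASH_SIZE);
 */
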
static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK

phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
					phys_addr_t *addr)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	*addr = __pa(memblock.reserved.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.reserved.max);
}

phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info(
					phys_addr_t *addr)
{
	if (memblock.memory.regions == memblock_memory_init_regions)
		return 0;

	*addr = __pa(memblock.memory.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.memory.max);
}

#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * RETURNS:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE,
	 * so that we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() and we use it, or
	 * we use MEMBLOCK for allocations. That means that this is unsafe to
	 * use when bootmem is currently active (unless bootmem itself is
	 * implemented on top of MEMBLOCK which isn't the case yet)
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}

	memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]",
			memblock_type_name(type), type->max * 2, (u64)addr,
			(u64)addr + new_size - 1);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if it comes from memblock. Otherwise, we
	 * needn't do it.
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}

/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 * @flags: flags of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid, unsigned long flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, unsigned long flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i, nr_new;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
			nr_new++;
			if (insert)
				memblock_insert_region(type, i++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, i, base, end - base,
					       nid, flags);
	}

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				       int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

static int __init_memblock memblock_add_region(phys_addr_t base,
						phys_addr_t size,
						int nid,
						unsigned long flags)
{
	struct memblock_type *_rgn = &memblock.memory;

	memblock_dbg("memblock_add: [%#016llx-%#016llx] flags %#02lx %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     flags, (void *)_RET_IP_);

	return memblock_add_range(_rgn, base, size, nid, flags);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(base, size, MAX_NUMNODES, 0);
}

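/*
 * Example (illustrative sketch, not from this file): architecture setup
 * code normally feeds the firmware-provided memory map into memblock with
 * memblock_add() before any early allocation happens, e.g. while parsing
 * devicetree "memory" nodes. The bank/nr_banks names below are
 * hypothetical:
 *
 *	for (i = 0; i < nr_banks; i++)
 *		memblock_add(bank[i].start, bank[i].size);
 */
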
/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, i, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, i--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = i;
			*end_rgn = i + 1;
		}
	}

	return 0;
}

int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return memblock_remove_range(&memblock.memory, base, size);
}

int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     (void *)_RET_IP_);

	kmemleak_free_part(__va(base), size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

static int __init_memblock memblock_reserve_region(phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   unsigned long flags)
{
	struct memblock_type *type = &memblock.reserved;

	memblock_dbg("memblock_reserve: [%#016llx-%#016llx] flags %#02lx %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     flags, (void *)_RET_IP_);

	return memblock_add_range(type, base, size, nid, flags);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	return memblock_reserve_region(base, size, MAX_NUMNODES, 0);
}

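/*
 * Example (illustrative sketch, not from this file): a boot-time user
 * typically reserves a firmware or initrd range early and frees it once
 * the contents have been consumed; memblock_free() only drops the
 * reservation, it does not return pages to the buddy allocator. The
 * initrd_start/initrd_size names below are hypothetical placeholders:
 *
 *	memblock_reserve(initrd_start, initrd_size);
 *	...
 *	memblock_free(initrd_start, initrd_size);
 */
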
/**
 * memblock_setclr_flag - set or clear flag for a memory region
 *
 * This function isolates region [@base, @base + @size), and sets/clears flag
 *
 * Return 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		if (set)
			memblock_set_region_flags(&type->regions[i], flag);
		else
			memblock_clear_region_flags(&type->regions[i], flag);

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

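/*
 * Example (illustrative sketch, not from this file, modelled on the EFI
 * code): firmware parsing applies these flags while walking its memory
 * descriptors, e.g. marking reliable ranges as mirrored so that
 * choose_memblock_flags() steers allocations into them. The md variable
 * below is a hypothetical firmware memory descriptor:
 *
 *	if (md->attribute & EFI_MEMORY_MORE_RELIABLE)
 *		memblock_mark_mirror(md->phys_addr,
 *				     md->num_pages << EFI_PAGE_SHIFT);
 */
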
/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.  For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
				      struct memblock_type *type_a,
				      struct memblock_type *type_b,
				      phys_addr_t *out_start,
				      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	    "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

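/*
 * Example (illustrative sketch, not from this file): callers rarely use
 * __next_mem_range() directly; they iterate free memory through the
 * for_each_free_mem_range() wrapper, which walks memblock.memory while
 * excluding memblock.reserved, exactly as encoded in the split u64 index
 * described above:
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		pr_info("free range: %pa..%pa\n", &start, &end);
 */
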
/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		idx_b = type_b->cnt;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */

			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

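/*
 * Example (illustrative sketch, not from this file): NUMA initialization
 * code tags each discovered memory range with its node so that the
 * nid-aware iterators and allocators above can honor node affinity. The
 * numa_meminfo walk below is modelled on x86 but simplified:
 *
 *	for (i = 0; i < mi->nr_blks; i++)
 *		memblock_set_node(mi->blk[i].start,
 *				  mi->blk[i].end - mi->blk[i].start,
 *				  &memblock.memory, mi->blk[i].nid);
 */
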
static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid, ulong flags)
{
	phys_addr_t found;

	if (!align)
		align = SMP_CACHE_BYTES;

	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size)) {
		/*
		 * The min_count is set to 0 so that memblock allocations are
		 * never reported as leaks.
		 */
		kmemleak_alloc(__va(found), size, 0, 0);
		return found;
	}
	return 0;
}

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					ulong flags)
{
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					flags);
}

static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t max_addr,
					int nid, ulong flags)
{
	return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	ulong flags = choose_memblock_flags();
	phys_addr_t ret;

again:
	ret = memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE,
				      nid, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}
	return ret;
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE,
				       MEMBLOCK_NONE);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

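/*
 * Example (illustrative sketch, not from this file): these physical
 * allocators return a phys_addr_t that the caller maps or converts
 * itself, e.g. carving out an early page-table pool. The
 * PGTABLE_POOL_SIZE and setup_pgtable_pool() names below are
 * hypothetical placeholders:
 *
 *	phys_addr_t pool;
 *
 *	pool = memblock_alloc(PGTABLE_POOL_SIZE, PAGE_SIZE);
 *	setup_pgtable_pool(__va(pool), PGTABLE_POOL_SIZE);
 *
 * Note that memblock_alloc() and memblock_alloc_base() panic on failure,
 * so no error check is needed; use __memblock_alloc_base() when the
 * caller wants to handle failure itself.
 */
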
/**
 * memblock_virt_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * The @min_addr limit is dropped if it cannot be satisfied and the allocation
 * will fall back to memory below @min_addr. Also, allocation may fall back
 * to any node in the system if the specified node cannot
 * hold the requested memory.
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
 *
 * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
 *
 * The phys address of allocated boot memory block is converted to virtual and
 * allocated memory is reset to 0.
 *
 * In addition, this function sets the min_count to 0 using kmemleak_alloc for
 * the allocated boot memory block, so that it is never reported as a leak.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_virt_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	phys_addr_t alloc;
	void *ptr;
	ulong flags = choose_memblock_flags();

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of free_all_bootmem)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (!align)
		align = SMP_CACHE_BYTES;

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;

again:
	alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
					    nid, flags);
	if (alloc)
		goto done;

	if (nid != NUMA_NO_NODE) {
		alloc = memblock_find_in_range_node(size, align, min_addr,
						    max_addr, NUMA_NO_NODE,
						    flags);
		if (alloc)
			goto done;
	}

	if (min_addr) {
		min_addr = 0;
		goto again;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	return NULL;
done:
	memblock_reserve(alloc, size);
	ptr = phys_to_virt(alloc);
	memset(ptr, 0, size);

	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks. This is because many of these blocks
	 * are only referred via the physical address which is not
	 * looked up by kmemleak.
	 */
	kmemleak_alloc(ptr, size, 0, 0);

	return ptr;
}

/**
 * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public version of _memblock_virt_alloc_try_nid_nopanic() which provides
 * additional debug information (including caller info), if enabled.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid_nopanic(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	return memblock_virt_alloc_internal(size, align, min_addr,
					     max_addr, nid);
}

/**
 * memblock_virt_alloc_try_nid - allocate boot memory block with panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public panicking version of _memblock_virt_alloc_try_nid_nopanic()
 * which provides debug information (including caller info), if enabled,
 * and panics if the request can not be satisfied.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	ptr = memblock_virt_alloc_internal(size, align,
					   min_addr, max_addr, nid);
	if (ptr)
		return ptr;

	panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",
	      __func__, (u64)size, (u64)align, nid, (u64)min_addr,
	      (u64)max_addr);
	return NULL;
}

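/*
 * Example (illustrative sketch, not from this file): early code that needs
 * zeroed, already-mapped memory before the page allocator is up uses these
 * helpers instead of the phys_addr_t API; the pgdat example below mirrors
 * how boot code typically allocates per-node structures on their own node:
 *
 *	struct pglist_data *pgdat;
 *
 *	pgdat = memblock_virt_alloc_try_nid(sizeof(*pgdat),
 *					    SMP_CACHE_BYTES, 0,
 *					    BOOTMEM_ALLOC_ACCESSIBLE, nid);
 */
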
/**
 * __memblock_free_early - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_virt_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part(__va(base), size);
	memblock_remove_range(&memblock.reserved, base, size);
}

/*
 * __memblock_free_late - free bootmem block pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are released directly
 * to the buddy allocator, no bootmem metadata is updated because it is gone.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	u64 cursor, end;

	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part(__va(base), size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), 0);
		totalram_pages++;
	}
}
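
/*
 * Example (illustrative sketch, not from this file): __memblock_free_late()
 * suits reservations that become unnecessary only after free_all_bootmem(),
 * e.g. returning an unused portion of a crash-kernel carveout to the page
 * allocator late in boot. The crash_* names below are hypothetical:
 *
 *	__memblock_free_late(crash_base + crash_used,
 *			     crash_size - crash_used);
 */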

/*
 * Remaining API functions
 */

phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return PFN_PHYS(pages);
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
	struct memblock_region *r;

	if (!limit)
		return;

	/* find out max address */
	for_each_memblock(memory, r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      (phys_addr_t)ULLONG_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      (phys_addr_t)ULLONG_MAX);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return type->regions[mid].nid;
}
#endif

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) is a subset of a memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) intersects a reserved memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{
	unsigned long long base, size;
	unsigned long flags;
	int i;

	pr_info(" %s.cnt = 0x%lx\n", name, type->cnt);

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		flags = rgn->flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s flags: %#lx\n",
			name, i, base, base + size - 1, size, nid_buf, flags);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %#llx reserved size = %#llx\n",
		(unsigned long long)memblock.memory.total_size,
		(unsigned long long)memblock.reserved.total_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

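/*
 * Example (illustrative, not from this file): booting with the command
 * line parameter parsed above enables the memblock_dbg() tracing used
 * throughout this file, e.g.:
 *
 *	linux ... memblock=debug
 *
 * Every memblock_add/memblock_reserve/memblock_free call is then logged
 * with its caller via %pF, which is useful for auditing early-boot memory
 * consumers.
 */
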
#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", S_IRUGO, root, &memblock.physmem, &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */