/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

struct memblock memblock __initdata_memblock;

int memblock_debug __initdata_memblock;
int memblock_can_resize __initdata_memblock;
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;

/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

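/*
 * Regions are half-open intervals [base, base + size).  With hypothetical
 * values, [0x1000, 0x2000) and [0x1800, 0x2800) overlap (0x1000 < 0x2800
 * and 0x1800 < 0x2000), while [0x1000, 0x2000) and [0x2000, 0x3000)
 * merely touch and do not count as overlapping.
 */
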
long __init_memblock memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}

/*
 * Find, allocate, deallocate or reserve unreserved regions. All allocations
 * are top-down.
 */

static phys_addr_t __init_memblock memblock_find_region(phys_addr_t start, phys_addr_t end,
					  phys_addr_t size, phys_addr_t align)
{
	phys_addr_t base, res_base;
	long j;

	/* Bail out early if a huge size is requested */
	if (end < size)
		return 0;

	base = round_down(end - size, align);

	/* Prevent allocations returning 0 as it's also used to
	 * indicate an allocation failure
	 */
	if (start == 0)
		start = PAGE_SIZE;

	while (start <= base) {
		j = memblock_overlaps_region(&memblock.reserved, base, size);
		if (j < 0)
			return base;
		res_base = memblock.reserved.regions[j].base;
		if (res_base < size)
			break;
		base = round_down(res_base - size, align);
	}

	return 0;
}

/*
 * Find a free area with specified alignment in a specific range.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start, phys_addr_t end,
					phys_addr_t size, phys_addr_t align)
{
	long i;

	BUG_ON(0 == size);

	/* Pump up max_addr */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* We do a top-down search, this tends to limit memory
	 * fragmentation by keeping early boot allocs near the
	 * top of memory
	 */
	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
		phys_addr_t memblockbase = memblock.memory.regions[i].base;
		phys_addr_t memblocksize = memblock.memory.regions[i].size;
		phys_addr_t bottom, top, found;

		if (memblocksize < size)
			continue;
		if ((memblockbase + memblocksize) <= start)
			break;
		bottom = max(memblockbase, start);
		top = min(memblockbase + memblocksize, end);
		if (bottom >= top)
			continue;
		found = memblock_find_region(bottom, top, size, align);
		if (found)
			return found;
	}
	return 0;
}

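/*
 * Usage sketch (hypothetical size and limit): to locate 16KB of
 * page-aligned space below 4GB without claiming it, a caller could do
 *
 *	phys_addr_t addr = memblock_find_in_range(0, 0x100000000ULL,
 *						  0x4000, PAGE_SIZE);
 *
 * A return of 0 means nothing was found.  The range only becomes
 * unavailable to others after a subsequent memblock_reserve().
 */
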
/*
 * Free memblock.reserved.regions
 */
int __init_memblock memblock_free_reserved_regions(void)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	return memblock_free(__pa(memblock.reserved.regions),
		 sizeof(struct memblock_region) * memblock.reserved.max);
}

/*
 * Reserve memblock.reserved.regions
 */
int __init_memblock memblock_reserve_reserved_regions(void)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	return memblock_reserve(__pa(memblock.reserved.regions),
		 sizeof(struct memblock_region) * memblock.reserved.max);
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

/* Defined below but needed now */
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);

static int __init_memblock memblock_double_array(struct memblock_type *type)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() and we use it or
	 * we use MEMBLOCK for allocations. That means that this is unsafe to use
	 * when bootmem is currently active (unless bootmem itself is implemented
	 * on top of MEMBLOCK which isn't the case yet)
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab is
	 * active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else
		addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t));
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries!\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}
	new_array = __va(addr);

	memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]\n",
		 memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);

	/* Found space, we now need to move the array over before
	 * we add the reserved region since it may be our reserved
	 * array itself that is full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* If we use SLAB that's it, we are done */
	if (use_slab)
		return 0;

	/* Add the new reserved region now. Should not fail! */
	BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size));

	/* If the array wasn't our static init one, then free it. We only do
	 * that before SLAB is available as later on, we don't know whether
	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
	 * anyway.
	 */
	if (old_array != memblock_memory_init_regions &&
	    old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_size);

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next)) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		memmove(next, next + 1, (type->cnt - (i + 1)) * sizeof(*next));
		type->cnt--;
	}
}

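/*
 * For example (hypothetical regions): if a type holds [0x0, 0x1000) and
 * [0x1000, 0x2000) with the same node id, one pass of the loop above
 * leaves the single region [0x0, 0x2000).  Regions separated by a hole,
 * or with differing node ids, are left alone.
 */
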
/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size, int nid)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
}

/**
 * memblock_add_region - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static long __init_memblock memblock_add_region(struct memblock_type *type,
						phys_addr_t base, phys_addr_t size)
{
	bool insert = false;
	phys_addr_t obase = base, end = base + size;
	int i, nr_new;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1);
		type->regions[0].base = base;
		type->regions[0].size = size;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
			nr_new++;
			if (insert)
				memblock_insert_region(type, i++, base,
						       rbase - base, MAX_NUMNODES);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, i, base, end - base,
					       MAX_NUMNODES);
	}

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}

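/*
 * Worked example (hypothetical values, same node): adding [0x800, 0x1800)
 * to a type already holding [0x1000, 0x1400) first counts the uncovered
 * pieces [0x800, 0x1000) and [0x1400, 0x1800) (nr_new == 2), doubles the
 * array if those two won't fit, inserts both on the second pass, and the
 * final merge collapses the three adjacent regions into [0x800, 0x1800).
 */
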
long __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size);
}

static long __init_memblock __memblock_remove(struct memblock_type *type,
					      phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size;
	int i;

	/* Walk through the array for collisions */
	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rend = rgn->base + rgn->size;

		/* Nothing more to do, exit */
		if (rgn->base > end || rgn->size == 0)
			break;

		/* If we fully enclose the block, drop it */
		if (base <= rgn->base && end >= rend) {
			memblock_remove_region(type, i--);
			continue;
		}

		/* If we are fully enclosed within a block
		 * then we need to split it and we are done
		 */
		if (base > rgn->base && end < rend) {
			rgn->size = base - rgn->base;
			if (!memblock_add_region(type, end, rend - end))
				return 0;
			/* Failure to split is bad, we at least
			 * restore the block before erroring
			 */
			rgn->size = rend - rgn->base;
			WARN_ON(1);
			return -1;
		}

		/* Check if we need to trim the bottom of a block */
		if (rgn->base < end && rend > end) {
			rgn->size -= end - rgn->base;
			rgn->base = end;
			break;
		}

		/* And check if we need to trim the top of a block */
		if (base < rend)
			rgn->size -= rend - base;

	}
	return 0;
}

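/*
 * The walk above covers four overlap cases.  Removing [0x1000, 0x2000)
 * (hypothetical values):
 *  - drops an enclosed region such as [0x1200, 0x1800) entirely;
 *  - splits an enclosing region such as [0x800, 0x2800) into
 *    [0x800, 0x1000) plus [0x2000, 0x2800);
 *  - trims the bottom of [0x1800, 0x2800) to [0x2000, 0x2800);
 *  - trims the top of [0x800, 0x1800) to [0x800, 0x1000).
 */
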
long __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.memory, base, size);
}

long __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.reserved, base, size);
}

long __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	BUG_ON(0 == size);

	return memblock_add_region(_rgn, base, size);
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
					  unsigned long *out_start_pfn,
					  unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}

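/*
 * Callers normally go through the for_each_mem_pfn_range() iterator
 * rather than calling this directly.  A sketch (hypothetical loop body):
 *
 *	unsigned long start_pfn, end_pfn;
 *	int i, nid;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		pr_debug("pfns [%lx-%lx) on node %d\n",
 *			 start_pfn, end_pfn, nid);
 */
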
/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock memory regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	struct memblock_type *type = &memblock.memory;
	phys_addr_t end = base + size;
	int i;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type) < 0)
			return -ENOMEM;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size = rend - rgn->base;
			memblock_insert_region(type, i, rbase, base - rbase,
					       rgn->nid);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size = rend - rgn->base;
			memblock_insert_region(type, i--, rbase, end - rbase,
					       rgn->nid);
		} else {
			/* @rgn is fully contained, set ->nid */
			rgn->nid = nid;
		}
	}

	memblock_merge_regions(type);
	return 0;
}
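
/*
 * Example (hypothetical layout): with memory [0x0, 0x4000) present,
 * memblock_set_node(0x1000, 0x2000, 1) leaves [0x0, 0x1000),
 * [0x1000, 0x3000) on node 1, and [0x3000, 0x4000): at most two extra
 * regions, which is why two spare slots are ensured up front.
 */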
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t found;

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = round_up(size, align);

	found = memblock_find_in_range(0, max_addr, size, align);
	if (found && !memblock_add_region(&memblock.reserved, found, size))
		return found;

	return 0;
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

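/*
 * Early-boot usage sketch (hypothetical size): allocate a cache-aligned
 * scratch buffer and get a kernel virtual pointer to it:
 *
 *	phys_addr_t pa = memblock_alloc(0x1000, SMP_CACHE_BYTES);
 *	void *buf = __va(pa);
 *
 * memblock_alloc() panics instead of returning 0, and the block is
 * already recorded in memblock.reserved on return.
 */
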
/*
 * Additional node-local top-down allocators.
 *
 * WARNING: Only available after early_node_map[] has been populated,
 * on some architectures, that is after all the calls to add_active_range()
 * have been done to populate it.
 */

static phys_addr_t __init memblock_nid_range_rev(phys_addr_t start,
						 phys_addr_t end, int *nid)
{
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
	unsigned long start_pfn, end_pfn;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, nid)
		if (end > PFN_PHYS(start_pfn) && end <= PFN_PHYS(end_pfn))
			return max(start, PFN_PHYS(start_pfn));
#endif
	*nid = 0;
	return start;
}

phys_addr_t __init memblock_find_in_range_node(phys_addr_t start,
					       phys_addr_t end,
					       phys_addr_t size,
					       phys_addr_t align, int nid)
{
	struct memblock_type *mem = &memblock.memory;
	int i;

	BUG_ON(0 == size);

	/* Pump up max_addr */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	for (i = mem->cnt - 1; i >= 0; i--) {
		struct memblock_region *r = &mem->regions[i];
		phys_addr_t base = max(start, r->base);
		phys_addr_t top = min(end, r->base + r->size);

		while (base < top) {
			phys_addr_t tbase, ret;
			int tnid;

			tbase = memblock_nid_range_rev(base, top, &tnid);
			if (nid == MAX_NUMNODES || tnid == nid) {
				ret = memblock_find_region(tbase, top, size, align);
				if (ret)
					return ret;
			}
			top = tbase;
		}
	}

	return 0;
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t found;

	/*
	 * We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = round_up(size, align);

	found = memblock_find_in_range_node(0, MEMBLOCK_ALLOC_ACCESSIBLE,
					    size, align, nid);
	if (found && !memblock_add_region(&memblock.reserved, found, size))
		return found;

	return 0;
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

/*
 * Remaining API functions
 */

/* You must call memblock_analyze() before this. */
phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory_size;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

/* You must call memblock_analyze() after this. */
void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
{
	unsigned long i;
	phys_addr_t limit;
	struct memblock_region *p;

	if (!memory_limit)
		return;

	/* Truncate the memblock regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < memblock.memory.cnt; i++) {
		if (limit > memblock.memory.regions[i].size) {
			limit -= memblock.memory.regions[i].size;
			continue;
		}

		memblock.memory.regions[i].size = limit;
		memblock.memory.cnt = i + 1;
		break;
	}

	memory_limit = memblock_end_of_DRAM();

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		p = &memblock.reserved.regions[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			memblock_remove_region(&memblock.reserved, i);
			i--;
		}
	}
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

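/*
 * The binary search relies on the regions array being sorted by base and
 * non-overlapping, which memblock_add_region() maintains.  With a
 * hypothetical array based at 0x0, 0x2000 and 0x5000, looking up 0x2100
 * hits the middle region on the first probe.
 */
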
int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= (base + size);
}

int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt  = 0x%lx\n", name, type->cnt);

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s\n",
			name, i, base, base + size - 1, size, nid_buf);
	}
}

void __init_memblock memblock_dump_all(void)
{
	if (!memblock_debug)
		return;

	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_analyze(void)
{
	int i;

	/* Check marker in the unused last array entry */
	WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);
	WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);

	memblock.memory_size = 0;

	for (i = 0; i < memblock.memory.cnt; i++)
		memblock.memory_size += memblock.memory.regions[i].size;

	/* We allow resizing from there */
	memblock_can_resize = 1;
}

void __init memblock_init(void)
{
	static int init_done __initdata = 0;

	if (init_done)
		return;
	init_done = 1;

	/* Hookup the initial arrays */
	memblock.memory.regions = memblock_memory_init_regions;
	memblock.memory.max = INIT_MEMBLOCK_REGIONS;
	memblock.reserved.regions = memblock_reserved_init_regions;
	memblock.reserved.max = INIT_MEMBLOCK_REGIONS;

	/* Write a marker in the unused last array entry */
	memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;
	memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;

	/* Create a dummy zero size MEMBLOCK which will get coalesced away later.
	 * This simplifies the memblock_add() code below...
	 */
	memblock.memory.regions[0].base = 0;
	memblock.memory.regions[0].size = 0;
	memblock_set_region_node(&memblock.memory.regions[0], MAX_NUMNODES);
	memblock.memory.cnt = 1;

	/* Ditto. */
	memblock.reserved.regions[0].base = 0;
	memblock.reserved.regions[0].size = 0;
	memblock_set_region_node(&memblock.reserved.regions[0], MAX_NUMNODES);
	memblock.reserved.cnt = 1;

	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
}

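/*
 * Boot-time ordering sketch (calls as named in this file): architecture
 * code runs memblock_init() to hook up the static arrays, memblock_add()
 * to populate memory, then memblock_analyze() to compute memory_size and
 * enable resizing, after which memblock_alloc() and friends may be used.
 */
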
static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

#if defined(CONFIG_DEBUG_FS) && !defined(ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);
	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */