/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp. June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_REGIONS,

	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

/* inline so we don't get a warning when pr_debug is compiled out */
static __init_memblock const char *
memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}
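
/*
 * Worked example of the capping above (editor's illustrative note): with a
 * 64-bit phys_addr_t, base = ULLONG_MAX - 0x1000 and *size = 0x4000 yields
 * *size = min(0x4000, 0x1000) = 0x1000, so base + *size == ULLONG_MAX and
 * the sum can no longer wrap around.
 */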

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}
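
/*
 * Illustrative note: the test above treats ranges as half-open
 * [base, base + size).  For example, [0x1000, 0x2000) and [0x1800, 0x2800)
 * overlap (0x1000 < 0x2800 && 0x1800 < 0x2000), while the merely adjacent
 * [0x1000, 0x2000) and [0x2000, 0x3000) do not (0x2000 < 0x2000 is false).
 */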

static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %MAX_NUMNODES for any node
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * RETURNS:
 * Found address on success, %0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align, int nid)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);

	for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}
	return 0;
}
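
/*
 * Worked example of the top-down search above (illustrative): for a free
 * range clamped to [0x1000, 0x9000) with size = 0x3000 and align = 0x4000,
 * cand = round_down(0x9000 - 0x3000, 0x4000) = 0x4000, which is >= 0x1000,
 * so 0x4000 is returned - the highest aligned fit in the range.
 */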

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * RETURNS:
 * Found address on success, %0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	return memblock_find_in_range_node(start, end, size, align,
					   MAX_NUMNODES);
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
					phys_addr_t *addr)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	*addr = __pa(memblock.reserved.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.reserved.max);
}

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size)
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * RETURNS:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE so that
	 * we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() is true and we
	 * allocate with kmalloc(), or we allocate from MEMBLOCK itself. That
	 * means this is unsafe to use while bootmem is the active allocator
	 * (unless bootmem itself is implemented on top of MEMBLOCK, which
	 * isn't the case yet).
	 *
	 * This should not be an issue for now, however, as we currently only
	 * call into MEMBLOCK while it's still active, or much later, when
	 * slab is up for memory hotplug operations.
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}

	memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]",
			memblock_type_name(type), type->max * 2, (u64)addr,
			(u64)addr + new_size - 1);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock. Otherwise,
	 * we needn't do it.
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next)) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}
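
/*
 * Illustrative note: after a merge pass, contiguous regions such as
 * [0x0, 0x1000) and [0x1000, 0x2000) on the same node collapse into the
 * single region [0x0, 0x2000); a gap between them or differing node ids
 * keeps them as separate entries.
 */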

/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size, int nid)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_region - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type. The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions. @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_add_region(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size, int nid)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i, nr_new;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice. Once with %false @insert and
	 * then with %true. The first counts the number of regions needed
	 * to accommodate the new area. The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps. If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
			nr_new++;
			if (insert)
				memblock_insert_region(type, i++, base,
						       rbase - base, nid);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, i, base, end - base, nid);
	}

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}
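
/*
 * Worked example of the two-pass scheme above (illustrative): adding
 * [0x1000, 0x4000) to a type that already holds [0x2000, 0x3000) first
 * counts two needed entries for the uncovered pieces [0x1000, 0x2000) and
 * [0x3000, 0x4000), resizes the array if necessary, inserts them on the
 * second pass, and finally merges all three into one region
 * [0x1000, 0x4000).
 */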

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	return memblock_add_region(&memblock.memory, base, size, nid);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size, MAX_NUMNODES);
}
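
/*
 * Typical early-boot usage (editor's sketch with made-up addresses; real
 * calls live in arch setup code that parses e820 or the device tree):
 *
 *	memblock_add(0x00000000, SZ_1G);	// register 1 GiB of RAM
 *	memblock_reserve(0x01000000, SZ_4M);	// keep firmware data intact
 */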

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size). Crossing regions are split at the boundaries,
 * which may create at most two more regions. The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below. Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, i, rbase, base - rbase,
					       memblock_get_region_node(rgn));
		} else if (rend > end) {
			/*
			 * @rgn intersects from above. Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, i--, rbase, end - rbase,
					       memblock_get_region_node(rgn));
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = i;
			*end_rgn = i + 1;
		}
	}

	return 0;
}
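
/*
 * Worked example (illustrative): isolating [0x1000, 0x3000) from a single
 * region [0x0, 0x4000) splits it into [0x0, 0x1000), [0x1000, 0x3000) and
 * [0x3000, 0x4000); *start_rgn/*end_rgn then bracket the middle entry so
 * callers can remove or retag exactly the requested range.
 */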

static int __init_memblock __memblock_remove(struct memblock_type *type,
					     phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.memory, base, size);
}

int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg(" memblock_free: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size,
		     (void *)_RET_IP_);

	return __memblock_remove(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	memblock_dbg("memblock_reserve: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size,
		     (void *)_RET_IP_);

	return memblock_add_region(_rgn, base, size, MAX_NUMNODES);
}
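
/*
 * Example pairing of memblock_reserve()/memblock_free() (editor's sketch
 * with a hypothetical address):
 *
 *	phys_addr_t buf = 0x02000000;
 *	if (!memblock_reserve(buf, PAGE_SIZE)) {
 *		// ... use the page during early boot ...
 *		memblock_free(buf, PAGE_SIZE);	// return it to the free set
 *	}
 */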

/**
 * __next_free_mem_range - next function for for_each_free_mem_range()
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first free area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration. The lower 32bit of
 * *@idx contains index into memory region and the upper 32bit indexes the
 * areas before each reserved region. For example, if reserved regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_free_mem_range(u64 *idx, int nid,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end, int *out_nid)
{
	struct memblock_type *mem = &memblock.memory;
	struct memblock_type *rsv = &memblock.reserved;
	int mi = *idx & 0xffffffff;
	int ri = *idx >> 32;

	for ( ; mi < mem->cnt; mi++) {
		struct memblock_region *m = &mem->regions[mi];
		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;

		/* only memory regions are associated with nodes, check it */
		if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
			continue;

		/* scan areas before each reservation for intersection */
		for ( ; ri < rsv->cnt + 1; ri++) {
			struct memblock_region *r = &rsv->regions[ri];
			phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
			phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;

			/* if ri advanced past mi, break out to advance mi */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = memblock_get_region_node(m);
				/*
				 * The region which ends first is advanced
				 * for the next iteration.
				 */
				if (m_end <= r_end)
					mi++;
				else
					ri++;
				*idx = (u32)mi | (u64)ri << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
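
/*
 * Example iteration (illustrative sketch; for_each_free_mem_range() is the
 * wrapper macro that drives this function):
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
 *		pr_info("free: [%#llx-%#llx)\n", (u64)start, (u64)end);
 */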

/**
 * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse()
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_free_mem_range().
 */
void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid,
					       phys_addr_t *out_start,
					       phys_addr_t *out_end, int *out_nid)
{
	struct memblock_type *mem = &memblock.memory;
	struct memblock_type *rsv = &memblock.reserved;
	int mi = *idx & 0xffffffff;
	int ri = *idx >> 32;

	if (*idx == (u64)ULLONG_MAX) {
		mi = mem->cnt - 1;
		ri = rsv->cnt;
	}

	for ( ; mi >= 0; mi--) {
		struct memblock_region *m = &mem->regions[mi];
		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;

		/* only memory regions are associated with nodes, check it */
		if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
			continue;

		/* scan areas before each reservation for intersection */
		for ( ; ri >= 0; ri--) {
			struct memblock_region *r = &rsv->regions[ri];
			phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
			phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;

			/* if ri advanced past mi, break out to advance mi */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = memblock_get_region_node(m);

				if (m_start >= r_start)
					mi--;
				else
					ri--;
				*idx = (u32)mi | (u64)ri << 32;
				return;
			}
		}
	}

	*idx = ULLONG_MAX;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
					  unsigned long *out_start_pfn,
					  unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}
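
/*
 * Example iteration (illustrative sketch; for_each_mem_pfn_range() is the
 * wrapper macro built on this function):
 *
 *	unsigned long start_pfn, end_pfn;
 *	int i, nid;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		pr_info("node %d: pfns [%lu-%lu)\n", nid, start_pfn, end_pfn);
 */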

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock memory regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	struct memblock_type *type = &memblock.memory;
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t max_addr,
					int nid)
{
	phys_addr_t found;

	if (WARN_ON(!align))
		align = __alignof__(long long);

	/* align @size to avoid excessive fragmentation on reserved array */
	size = round_up(size, align);

	found = memblock_find_in_range_node(0, max_addr, size, align, nid);
	if (found && !memblock_reserve(found, size))
		return found;

	return 0;
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	return memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	return memblock_alloc_base_nid(size, align, max_addr, MAX_NUMNODES);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
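
/*
 * Usage sketch for the allocator family above (illustrative): a caller
 * that prefers node-local memory but will take any node rather than fail.
 * Note that memblock_alloc_base() panics instead of returning 0, so the
 * result needs no NULL check:
 *
 *	phys_addr_t pa = memblock_alloc_try_nid(SZ_64K, SMP_CACHE_BYTES, nid);
 *	void *va = __va(pa);	// assuming the range is in the linear map
 */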

/*
 * Remaining API functions
 */

phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return (phys_addr_t)pages << PAGE_SHIFT;
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	unsigned long i;
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;

	if (!limit)
		return;

	/* find out max address */
	for (i = 0; i < memblock.memory.cnt; i++) {
		struct memblock_region *r = &memblock.memory.regions[i];

		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	/* truncate both memory and reserved regions */
	__memblock_remove(&memblock.memory, max_addr, (phys_addr_t)ULLONG_MAX);
	__memblock_remove(&memblock.reserved, max_addr, (phys_addr_t)ULLONG_MAX);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}
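
/*
 * Worked example (illustrative): with regions [0x0, 0x1000) and
 * [0x2000, 0x3000), memblock_search(type, 0x2800) returns index 1, while
 * 0x1800 falls in the gap and yields -1.  The binary search is valid
 * because memblock_add_region() keeps the array sorted and
 * non-overlapping.
 */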

int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) is a subset of a memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) intersects a reserved memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	int i;
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_type *mem = &memblock.memory;

	for (i = 0; i < mem->cnt; i++) {
		orig_start = mem->regions[i].base;
		orig_end = mem->regions[i].base + mem->regions[i].size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			mem->regions[i].base = start;
			mem->regions[i].size = end - start;
		} else {
			memblock_remove_region(mem, i);
			i--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt = 0x%lx\n", name, type->cnt);

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s\n",
			name, i, base, base + size - 1, size, nid_buf);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %#llx reserved size = %#llx\n",
		(unsigned long long)memblock.memory.total_size,
		(unsigned long long)memblock.reserved.total_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);
	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS && !CONFIG_ARCH_DISCARD_MEMBLOCK */