/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/memblock.h>

struct memblock memblock;

static int memblock_debug, memblock_can_resize;
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1];
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1];

#define MEMBLOCK_ERROR	(~(phys_addr_t)0)

/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/*
 * Address comparison utilities
 */

static phys_addr_t memblock_align_down(phys_addr_t addr, phys_addr_t size)
{
	return addr & ~(size - 1);
}

static phys_addr_t memblock_align_up(phys_addr_t addr, phys_addr_t size)
{
	return (addr + (size - 1)) & ~(size - 1);
}

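/*
 * Note that both helpers assume the alignment is a power of two: the
 * ~(size - 1) mask trick is only a valid rounding for power-of-two
 * sizes (e.g. memblock_align_up(0x1234, 0x1000) == 0x2000).  Callers
 * are expected to pass power-of-two alignments.
 */
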
static unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
					    phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

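/*
 * The overlap test above treats regions as half-open intervals
 * [base, base + size): two regions that merely touch, such as
 * [0x1000, 0x2000) and [0x2000, 0x3000), do not overlap.  Touching
 * regions are the adjacency case handled separately below.
 */
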
static long memblock_addrs_adjacent(phys_addr_t base1, phys_addr_t size1,
				    phys_addr_t base2, phys_addr_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

static long memblock_regions_adjacent(struct memblock_type *type,
				      unsigned long r1, unsigned long r2)
{
	phys_addr_t base1 = type->regions[r1].base;
	phys_addr_t size1 = type->regions[r1].size;
	phys_addr_t base2 = type->regions[r2].base;
	phys_addr_t size2 = type->regions[r2].size;

	return memblock_addrs_adjacent(base1, size1, base2, size2);
}

long memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}

/*
 * Find, allocate, deallocate or reserve unreserved regions. All allocations
 * are top-down.
 */

static phys_addr_t __init memblock_find_region(phys_addr_t start, phys_addr_t end,
					       phys_addr_t size, phys_addr_t align)
{
	phys_addr_t base, res_base;
	long j;

	base = memblock_align_down((end - size), align);
	while (start <= base) {
		j = memblock_overlaps_region(&memblock.reserved, base, size);
		if (j < 0)
			return base;
		res_base = memblock.reserved.regions[j].base;
		if (res_base < size)
			break;
		base = memblock_align_down(res_base - size, align);
	}

	return MEMBLOCK_ERROR;
}
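/*
 * Illustrative walk-through (hypothetical numbers): looking for 0x100
 * bytes in [0, 0x10000) with [0xff00, 0x10000) already reserved, the
 * first candidate base is 0xff00 (end - size, aligned down).  That
 * collides with the reservation, so the search retries just below it
 * at res_base - size = 0xfe00, which succeeds.  Candidates thus hop
 * downward from one reservation to the next until one fits or the
 * start of the range is crossed.
 */
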
static phys_addr_t __init memblock_find_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	long i;
	phys_addr_t base = 0;
	phys_addr_t res_base;

	BUG_ON(0 == size);

	size = memblock_align_up(size, align);

	/* Pump up max_addr */
	if (max_addr == MEMBLOCK_ALLOC_ACCESSIBLE)
		max_addr = memblock.current_limit;

	/* We do a top-down search; this tends to limit memory
	 * fragmentation by keeping early boot allocations near the
	 * top of memory.
	 */
	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
		phys_addr_t memblockbase = memblock.memory.regions[i].base;
		phys_addr_t memblocksize = memblock.memory.regions[i].size;

		if (memblocksize < size)
			continue;
		base = min(memblockbase + memblocksize, max_addr);
		res_base = memblock_find_region(memblockbase, base, size, align);
		if (res_base != MEMBLOCK_ERROR)
			return res_base;
	}
	return MEMBLOCK_ERROR;
}

static void memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	unsigned long i;

	for (i = r; i < type->cnt - 1; i++) {
		type->regions[i].base = type->regions[i + 1].base;
		type->regions[i].size = type->regions[i + 1].size;
	}
	type->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void memblock_coalesce_regions(struct memblock_type *type,
				      unsigned long r1, unsigned long r2)
{
	type->regions[r1].size += type->regions[r2].size;
	memblock_remove_region(type, r2);
}

/* Defined below but needed now */
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);

static int memblock_double_array(struct memblock_type *type)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	pr_debug("memblock: %s array full, doubling...\n", memblock_type_name(type));

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;

	/* Try to find some space for it.
	 *
	 * WARNING: we assume that either slab is available and we use it,
	 * or we allocate from MEMBLOCK directly.  That means this is unsafe
	 * to use while bootmem is active (unless bootmem itself is
	 * implemented on top of MEMBLOCK, which isn't the case yet).
	 *
	 * This shouldn't be an issue for now though, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when
	 * slab is active for memory hotplug operations.
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array);
	} else
		addr = memblock_find_base(new_size, sizeof(phys_addr_t), MEMBLOCK_ALLOC_ACCESSIBLE);
	if (addr == MEMBLOCK_ERROR) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries!\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}
	new_array = __va(addr);

	/* Found space, we now need to move the array over before
	 * we add the reserved region since it may be our reserved
	 * array itself that is full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* If we use SLAB that's it, we are done */
	if (use_slab)
		return 0;

	/* Add the new reserved region now. Should not fail! */
	BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size) < 0);

	/* If the array wasn't our static init one, then free it. We only do
	 * that before SLAB is available as later on, we don't know whether
	 * to use kfree or free_bootmem_pages().  Shouldn't be a big deal
	 * anyway.
	 */
	if (old_array != memblock_memory_init_regions &&
	    old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_size);

	return 0;
}

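/*
 * Sizing example (assuming INIT_MEMBLOCK_REGIONS is 128 and a 64-bit
 * phys_addr_t, i.e. 16-byte regions): the first doubling grows a type
 * from 128 to 256 entries, trading 2KB of static array for a 4KB
 * dynamic one.  Doublings are rare enough that the copy cost is noise.
 */
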
int __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
					phys_addr_t addr2, phys_addr_t size2)
{
	return 1;
}

static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if ((type->cnt == 1) && (type->regions[0].size == 0)) {
		type->regions[0].base = base;
		type->regions[0].size = size;
		return 0;
	}

	/* First try and coalesce this MEMBLOCK with another. */
	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize);
		/* Check if arch allows coalescing */
		if (adjacent != 0 && type == &memblock.memory &&
		    !memblock_memory_can_coalesce(base, size, rgnbase, rgnsize))
			break;
		if (adjacent > 0) {
			type->regions[i].base -= size;
			type->regions[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			type->regions[i].size += size;
			coalesced++;
			break;
		}
	}

	/* If we plugged a hole, we may want to also coalesce with the
	 * next region
	 */
	if ((i < type->cnt - 1) && memblock_regions_adjacent(type, i, i+1) &&
	    ((type != &memblock.memory ||
	      memblock_memory_can_coalesce(type->regions[i].base,
					   type->regions[i].size,
					   type->regions[i+1].base,
					   type->regions[i+1].size)))) {
		memblock_coalesce_regions(type, i, i+1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;

	/* If we are out of space, we fail. It's too late to resize the array
	 * but then this shouldn't have happened in the first place.
	 */
	if (WARN_ON(type->cnt >= type->max))
		return -1;

	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
	for (i = type->cnt - 1; i >= 0; i--) {
		if (base < type->regions[i].base) {
			type->regions[i+1].base = type->regions[i].base;
			type->regions[i+1].size = type->regions[i].size;
		} else {
			type->regions[i+1].base = base;
			type->regions[i+1].size = size;
			break;
		}
	}

	if (base < type->regions[0].base) {
		type->regions[0].base = base;
		type->regions[0].size = size;
	}
	type->cnt++;

	/* The array is full? Try to resize it. If that fails, we undo
	 * our allocation and return an error.
	 */
	if (type->cnt == type->max && memblock_double_array(type)) {
		type->cnt--;
		return -1;
	}

	return 0;
}

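/*
 * Coalescing example: given an existing region [0x1000, 0x2000),
 * adding [0x2000, 0x3000) extends it in place to [0x1000, 0x3000)
 * (the adjacent < 0 case), while adding [0x0, 0x1000) lowers its base
 * instead (the adjacent > 0 case).  Only blocks that cannot be merged
 * consume a new slot in the sorted array.
 */
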
long memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size);
}

static long __memblock_remove(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < type->cnt; i++) {
		rgnbegin = type->regions[i].base;
		rgnend = rgnbegin + type->regions[i].size;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == type->cnt)
		return -1;

	/* Check to see if we are removing entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		memblock_remove_region(type, i);
		return 0;
	}

	/* Check to see if region is matching at the front */
	if (rgnbegin == base) {
		type->regions[i].base = end;
		type->regions[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		type->regions[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	type->regions[i].size = base - type->regions[i].base;
	return memblock_add_region(type, end, rgnend - end);
}

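/*
 * Split example: removing [0x2000, 0x3000) from a region covering
 * [0x1000, 0x4000) shrinks it to [0x1000, 0x2000) and re-adds the
 * tail [0x3000, 0x4000) as a new region.  A range that straddles two
 * distinct regions is not matched by the search above and fails
 * with -1.
 */
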
long memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.memory, base, size);
}

long __init memblock_free(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.reserved, base, size);
}

long __init memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	BUG_ON(0 == size);

	return memblock_add_region(_rgn, base, size);
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t found;

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = memblock_align_up(size, align);

	found = memblock_find_base(size, align, max_addr);
	if (found != MEMBLOCK_ERROR &&
	    memblock_add_region(&memblock.reserved, found, size) >= 0)
		return found;

	return 0;
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

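/*
 * Typical early-boot usage (an illustrative sketch, not code from this
 * file):
 *
 *	phys_addr_t pa = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 *	void *va = __va(pa);
 *
 * memblock_alloc() panics on failure; callers that can recover should
 * use __memblock_alloc_base() directly and check for a zero return.
 */
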
/*
 * Additional node-local allocators. The search for node memory is
 * bottom-up and walks memblock regions within a node bottom-up as well,
 * but allocation within a memblock region is top-down.
 */

phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
{
	*nid = 0;

	return end;
}

static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
						    phys_addr_t size,
						    phys_addr_t align, int nid)
{
	phys_addr_t start, end;

	start = mp->base;
	end = start + mp->size;

	start = memblock_align_up(start, align);
	while (start < end) {
		phys_addr_t this_end;
		int this_nid;

		this_end = memblock_nid_range(start, end, &this_nid);
		if (this_nid == nid) {
			phys_addr_t ret = memblock_find_region(start, this_end, size, align);
			if (ret != MEMBLOCK_ERROR &&
			    memblock_add_region(&memblock.reserved, ret, size) >= 0)
				return ret;
		}
		start = this_end;
	}

	return MEMBLOCK_ERROR;
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	struct memblock_type *mem = &memblock.memory;
	int i;

	BUG_ON(0 == size);

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = memblock_align_up(size, align);

	/* We do a bottom-up search for a region with the right
	 * nid since that's easier considering how memblock_nid_range()
	 * works
	 */
	for (i = 0; i < mem->cnt; i++) {
		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
							    size, align, nid);
		if (ret != MEMBLOCK_ERROR)
			return ret;
	}

	return memblock_alloc(size, align);
}

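/*
 * Note that if no region with a matching nid can satisfy the request,
 * memblock_alloc_nid() silently falls back to an allocation from any
 * node rather than failing.
 */
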
/* You must call memblock_analyze() before this. */
phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory_size;
}

phys_addr_t memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

/* You must call memblock_analyze() after this. */
void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
{
	unsigned long i;
	phys_addr_t limit;
	struct memblock_region *p;

	if (!memory_limit)
		return;

	/* Truncate the memblock regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < memblock.memory.cnt; i++) {
		if (limit > memblock.memory.regions[i].size) {
			limit -= memblock.memory.regions[i].size;
			continue;
		}

		memblock.memory.regions[i].size = limit;
		memblock.memory.cnt = i + 1;
		break;
	}

	memory_limit = memblock_end_of_DRAM();

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		p = &memblock.reserved.regions[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			memblock_remove_region(&memblock.reserved, i);
			i--;
		}
	}
}

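/*
 * Worked example: with memory regions [0, 2G) and [4G, 6G) and a 3G
 * limit, the first region is kept whole (leaving 1G of budget) and the
 * second is truncated to [4G, 5G).  Reserved regions are then clipped
 * against the new end of DRAM (5G here), and any that end up empty are
 * dropped.
 */
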
static int memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

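/*
 * The binary search above is valid because memblock_add_region() keeps
 * each region array sorted by base address and free of overlaps, so a
 * single [base, base + size) probe per midpoint is sufficient.
 */
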
int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

int memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= (base + size);
}

int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}

void __init memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

static void memblock_dump(struct memblock_type *region, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);

	for (i = 0; i < region->cnt; i++) {
		base = region->regions[i].base;
		size = region->regions[i].size;

		pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
			name, i, base, base + size - 1, size);
	}
}

void memblock_dump_all(void)
{
	if (!memblock_debug)
		return;

	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_analyze(void)
{
	int i;

	/* Check marker in the unused last array entry */
	WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);
	WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);

	memblock.memory_size = 0;

	for (i = 0; i < memblock.memory.cnt; i++)
		memblock.memory_size += memblock.memory.regions[i].size;

	/* We allow resizing from this point on */
	memblock_can_resize = 1;
}

void __init memblock_init(void)
{
	/* Hookup the initial arrays */
	memblock.memory.regions = memblock_memory_init_regions;
	memblock.memory.max = INIT_MEMBLOCK_REGIONS;
	memblock.reserved.regions = memblock_reserved_init_regions;
	memblock.reserved.max = INIT_MEMBLOCK_REGIONS;

	/* Write a marker in the unused last array entry */
	memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;
	memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;

	/* Create a dummy zero size MEMBLOCK which will get coalesced away
	 * later. This simplifies the memblock_add() code...
	 */
	memblock.memory.regions[0].base = 0;
	memblock.memory.regions[0].size = 0;
	memblock.memory.cnt = 1;

	/* Ditto. */
	memblock.reserved.regions[0].base = 0;
	memblock.reserved.regions[0].size = 0;
	memblock.reserved.cnt = 1;

	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
}

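/*
 * Expected bring-up sequence (an illustrative sketch; the exact calls
 * come from arch setup code, not this file):
 *
 *	memblock_init();			// hook up the static arrays
 *	memblock_add(base, size);		// register RAM found by firmware
 *	memblock_reserve(kbase, ksize);		// protect the kernel image etc.
 *	memblock_analyze();			// compute totals, allow resizing
 *
 * Allocations through memblock_alloc*() only make sense once memory
 * has been registered.
 */
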
static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

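/*
 * Booting with "memblock=debug" on the kernel command line sets
 * memblock_debug above, which enables the memblock_dump_all() output.
 */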