/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/memblock.h>

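/*
 * The single global memblock instance and the static arrays it starts
 * out with. Each array has one slot beyond INIT_MEMBLOCK_REGIONS;
 * memblock_init() poisons that last entry and memblock_analyze()
 * checks it, to catch overflows of the static arrays.
 */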
struct memblock memblock;

static int memblock_debug;
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1];
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1];

#define MEMBLOCK_ERROR	(~(phys_addr_t)0)

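/* "memblock=debug" on the kernel command line enables the dump below. */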
static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

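/* Pretty-print one region array; used by memblock_dump_all(). */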
static void memblock_dump(struct memblock_type *region, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);

	for (i = 0; i < region->cnt; i++) {
		base = region->regions[i].base;
		size = region->regions[i].size;

		pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
			name, i, base, base + size - 1, size);
	}
}

void memblock_dump_all(void)
{
	if (!memblock_debug)
		return;

	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

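/* Two address ranges overlap iff each starts before the other ends. */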
static unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
					    phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

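/*
 * Return 1 if [base2, base2+size2) immediately follows
 * [base1, base1+size1), -1 for the reverse, and 0 if the ranges are
 * not adjacent.
 */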
static long memblock_addrs_adjacent(phys_addr_t base1, phys_addr_t size1,
				    phys_addr_t base2, phys_addr_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

static long memblock_regions_adjacent(struct memblock_type *type,
				      unsigned long r1, unsigned long r2)
{
	phys_addr_t base1 = type->regions[r1].base;
	phys_addr_t size1 = type->regions[r1].size;
	phys_addr_t base2 = type->regions[r2].base;
	phys_addr_t size2 = type->regions[r2].size;

	return memblock_addrs_adjacent(base1, size1, base2, size2);
}

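/* Delete entry r by sliding every following entry down one slot. */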
static void memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	unsigned long i;

	for (i = r; i < type->cnt - 1; i++) {
		type->regions[i].base = type->regions[i + 1].base;
		type->regions[i].size = type->regions[i + 1].size;
	}
	type->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void memblock_coalesce_regions(struct memblock_type *type,
				      unsigned long r1, unsigned long r2)
{
	type->regions[r1].size += type->regions[r2].size;
	memblock_remove_region(type, r2);
}

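/*
 * Typical boot-time call sequence (an illustrative sketch; the exact
 * ordering is the responsibility of architecture setup code, not of
 * this file):
 *
 *	memblock_init();			// hook up the static arrays
 *	memblock_add(base, size);		// register a range of RAM
 *	memblock_analyze();			// compute memblock.memory_size
 *	p = memblock_alloc(size, align);	// carve out an early allocation
 */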
void __init memblock_init(void)
{
	/* Hookup the initial arrays */
	memblock.memory.regions = memblock_memory_init_regions;
	memblock.memory.max = INIT_MEMBLOCK_REGIONS;
	memblock.reserved.regions = memblock_reserved_init_regions;
	memblock.reserved.max = INIT_MEMBLOCK_REGIONS;

	/* Write a marker in the unused last array entry */
	memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;
	memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;

	/* Create a dummy zero size MEMBLOCK which will get coalesced away later.
	 * This simplifies the memblock_add() code below...
	 */
	memblock.memory.regions[0].base = 0;
	memblock.memory.regions[0].size = 0;
	memblock.memory.cnt = 1;

	/* Ditto. */
	memblock.reserved.regions[0].base = 0;
	memblock.reserved.regions[0].size = 0;
	memblock.reserved.cnt = 1;

	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
}

void __init memblock_analyze(void)
{
	int i;

	/* Check marker in the unused last array entry */
	WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);
	WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);

	memblock.memory_size = 0;

	for (i = 0; i < memblock.memory.cnt; i++)
		memblock.memory_size += memblock.memory.regions[i].size;
}

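/*
 * Insert (base, size) into the sorted region array, first trying to
 * coalesce it with an existing region. Returns a positive count of
 * merges performed, 0 on a plain insert (or when the range is already
 * present), or -1 if the array is full.
 */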
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if ((type->cnt == 1) && (type->regions[0].size == 0)) {
		type->regions[0].base = base;
		type->regions[0].size = size;
		return 0;
	}

	/* First try and coalesce this MEMBLOCK with another. */
	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			type->regions[i].base -= size;
			type->regions[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			type->regions[i].size += size;
			coalesced++;
			break;
		}
	}

	if ((i < type->cnt - 1) && memblock_regions_adjacent(type, i, i+1)) {
		memblock_coalesce_regions(type, i, i+1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;
	if (type->cnt >= type->max)
		return -1;

	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
	for (i = type->cnt - 1; i >= 0; i--) {
		if (base < type->regions[i].base) {
			type->regions[i+1].base = type->regions[i].base;
			type->regions[i+1].size = type->regions[i].size;
		} else {
			type->regions[i+1].base = base;
			type->regions[i+1].size = size;
			break;
		}
	}

	if (base < type->regions[0].base) {
		type->regions[0].base = base;
		type->regions[0].size = size;
	}
	type->cnt++;

	return 0;
}

long memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size);
}

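/*
 * Remove (base, size) from the region that fully contains it. Four
 * cases: the range matches a region exactly, trims its front, trims
 * its end, or splits it in two.
 */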
static long __memblock_remove(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < type->cnt; i++) {
		rgnbegin = type->regions[i].base;
		rgnend = rgnbegin + type->regions[i].size;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == type->cnt)
		return -1;

	/* Check to see if we are removing the entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		memblock_remove_region(type, i);
		return 0;
	}

	/* Check to see if the region is matching at the front */
	if (rgnbegin == base) {
		type->regions[i].base = end;
		type->regions[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		type->regions[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	type->regions[i].size = base - type->regions[i].base;
	return memblock_add_region(type, end, rgnend - end);
}

long memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.memory, base, size);
}

long __init memblock_free(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.reserved, base, size);
}

long __init memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	BUG_ON(0 == size);

	return memblock_add_region(_rgn, base, size);
}

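/*
 * Return the index of the first region in 'type' overlapping
 * [base, base+size), or -1 if there is no overlap.
 */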
long memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}

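/* Both helpers assume the alignment 'size' is a power of two. */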
static phys_addr_t memblock_align_down(phys_addr_t addr, phys_addr_t size)
{
	return addr & ~(size - 1);
}

static phys_addr_t memblock_align_up(phys_addr_t addr, phys_addr_t size)
{
	return (addr + (size - 1)) & ~(size - 1);
}

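/*
 * Scan [start, end) top-down for a free block of 'size' bytes aligned
 * to 'align', dropping below each reserved region that is in the way.
 * Returns the base address on success, MEMBLOCK_ERROR on failure.
 */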
static phys_addr_t __init memblock_find_region(phys_addr_t start, phys_addr_t end,
					       phys_addr_t size, phys_addr_t align)
{
	phys_addr_t base, res_base;
	long j;

	base = memblock_align_down((end - size), align);
	while (start <= base) {
		j = memblock_overlaps_region(&memblock.reserved, base, size);
		if (j < 0)
			return base;
		res_base = memblock.reserved.regions[j].base;
		if (res_base < size)
			break;
		base = memblock_align_down(res_base - size, align);
	}

	return MEMBLOCK_ERROR;
}

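/*
 * Weak default for architectures without NUMA information: the whole
 * [start, end) range lives on node 0.
 */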
phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
{
	*nid = 0;

	return end;
}

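/*
 * Try to allocate from the parts of region 'mp' that
 * memblock_nid_range() attributes to node 'nid'.
 */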
static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
						    phys_addr_t size,
						    phys_addr_t align, int nid)
{
	phys_addr_t start, end;

	start = mp->base;
	end = start + mp->size;

	start = memblock_align_up(start, align);
	while (start < end) {
		phys_addr_t this_end;
		int this_nid;

		this_end = memblock_nid_range(start, end, &this_nid);
		if (this_nid == nid) {
			phys_addr_t ret = memblock_find_region(start, this_end, size, align);
			if (ret != MEMBLOCK_ERROR &&
			    memblock_add_region(&memblock.reserved, ret, size) >= 0)
				return ret;
		}
		start = this_end;
	}

	return MEMBLOCK_ERROR;
}

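/*
 * Allocate 'size' bytes on node 'nid', falling back to any node when
 * the requested node has no suitable free range.
 */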
phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	struct memblock_type *mem = &memblock.memory;
	int i;

	BUG_ON(0 == size);

	/* We do a bottom-up search for a region with the right
	 * nid since that's easier considering how memblock_nid_range()
	 * works
	 */
	size = memblock_align_up(size, align);

	for (i = 0; i < mem->cnt; i++) {
		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
							    size, align, nid);
		if (ret != MEMBLOCK_ERROR)
			return ret;
	}

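	/* Nothing suitable on the requested node; fall back to any node. */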
	return memblock_alloc(size, align);
}

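/* Allocate from anywhere below memblock.current_limit. */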
phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

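/* Like __memblock_alloc_base(), but panic on failure. */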
phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

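/*
 * Allocate 'size' bytes aligned to 'align' below 'max_addr'. Returns
 * the physical base address, or 0 on failure; note that 0, not
 * MEMBLOCK_ERROR, is the failure value at this level.
 */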
phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	long i;
	phys_addr_t base = 0;
	phys_addr_t res_base;

	BUG_ON(0 == size);

	size = memblock_align_up(size, align);

	/* Pump up max_addr */
	if (max_addr == MEMBLOCK_ALLOC_ACCESSIBLE)
		max_addr = memblock.current_limit;

	/* We do a top-down search, this tends to limit memory
	 * fragmentation by keeping early boot allocs near the
	 * top of memory
	 */
	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
		phys_addr_t memblockbase = memblock.memory.regions[i].base;
		phys_addr_t memblocksize = memblock.memory.regions[i].size;

		if (memblocksize < size)
			continue;
		base = min(memblockbase + memblocksize, max_addr);
		res_base = memblock_find_region(memblockbase, base, size, align);
		if (res_base != MEMBLOCK_ERROR &&
		    memblock_add_region(&memblock.reserved, res_base, size) >= 0)
			return res_base;
	}
	return 0;
}

/* You must call memblock_analyze() before this. */
phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory_size;
}

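/* Return the first address past the highest registered memory region. */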
phys_addr_t memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

/* You must call memblock_analyze() after this. */
void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
{
	unsigned long i;
	phys_addr_t limit;
	struct memblock_region *p;

	if (!memory_limit)
		return;

	/* Truncate the memblock regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < memblock.memory.cnt; i++) {
		if (limit > memblock.memory.regions[i].size) {
			limit -= memblock.memory.regions[i].size;
			continue;
		}

		memblock.memory.regions[i].size = limit;
		memblock.memory.cnt = i + 1;
		break;
	}

	memory_limit = memblock_end_of_DRAM();

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		p = &memblock.reserved.regions[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			memblock_remove_region(&memblock.reserved, i);
			i--;
		}
	}
}

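/*
 * Binary search for the region containing 'addr'; relies on the
 * region array being sorted by base address and non-overlapping.
 * Returns the region index, or -1 if 'addr' is not covered.
 */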
static int memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

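/* Return 1 if all of [base, base+size) lies within registered memory. */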
int memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
	       (memblock.memory.regions[idx].base +
		memblock.memory.regions[idx].size) >= (base + size);
}

int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}

void __init memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}