/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

struct memblock memblock __initdata_memblock;

int memblock_debug __initdata_memblock;
int memblock_can_resize __initdata_memblock;
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;

/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/*
 * Address comparison utilities
 */

static phys_addr_t __init_memblock memblock_align_down(phys_addr_t addr, phys_addr_t size)
{
	return addr & ~(size - 1);
}

static phys_addr_t __init_memblock memblock_align_up(phys_addr_t addr, phys_addr_t size)
{
	return (addr + (size - 1)) & ~(size - 1);
}
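
/*
 * Worked example (illustrative, not from the original source): with a
 * 4KB alignment,
 *
 *	memblock_align_down(0x12345, 0x1000) == 0x12000
 *	memblock_align_up(0x12345, 0x1000)   == 0x13000
 *
 * Both helpers assume 'size' is a power of two; the mask arithmetic is
 * wrong for any other value.
 */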

static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
					phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static long __init_memblock memblock_addrs_adjacent(phys_addr_t base1, phys_addr_t size1,
					phys_addr_t base2, phys_addr_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

static long __init_memblock memblock_regions_adjacent(struct memblock_type *type,
					unsigned long r1, unsigned long r2)
{
	phys_addr_t base1 = type->regions[r1].base;
	phys_addr_t size1 = type->regions[r1].size;
	phys_addr_t base2 = type->regions[r2].base;
	phys_addr_t size2 = type->regions[r2].size;

	return memblock_addrs_adjacent(base1, size1, base2, size2);
}

long __init_memblock memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}
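
/*
 * Illustrative semantics (not from the original source): regions behave as
 * half-open intervals [base, base + size), so [0x1000, 0x2000) and
 * [0x2000, 0x3000) do not overlap but are adjacent:
 *
 *	memblock_addrs_overlap(0x1000, 0x1000, 0x2000, 0x1000)  == 0
 *	memblock_addrs_adjacent(0x1000, 0x1000, 0x2000, 0x1000) == 1
 *
 * memblock_overlaps_region() returns the index of the first overlapping
 * region, or -1 if none overlaps.
 */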

/*
 * Find, allocate, deallocate or reserve unreserved regions. All allocations
 * are top-down.
 */

static phys_addr_t __init memblock_find_region(phys_addr_t start, phys_addr_t end,
					phys_addr_t size, phys_addr_t align)
{
	phys_addr_t base, res_base;
	long j;

	/* Prevent allocations returning 0 as it's also used to
	 * indicate an allocation failure
	 */
	if (start == 0)
		start = PAGE_SIZE;

	base = memblock_align_down((end - size), align);
	while (start <= base) {
		j = memblock_overlaps_region(&memblock.reserved, base, size);
		if (j < 0)
			return base;
		res_base = memblock.reserved.regions[j].base;
		if (res_base < size)
			break;
		base = memblock_align_down(res_base - size, align);
	}

	return MEMBLOCK_ERROR;
}
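
/*
 * Worked example (illustrative): searching [0x1000, 0x10000) for 0x2000
 * bytes aligned to 0x1000, with [0x8000, 0x9000) already reserved. The
 * first candidate is align_down(0x10000 - 0x2000) = 0xE000, which does not
 * collide with the reservation, so 0xE000 is returned. Had it collided,
 * the next candidate would have been placed directly below the reserved
 * region's base and the scan would have continued downward.
 */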

static phys_addr_t __init_memblock memblock_find_base(phys_addr_t size,
			phys_addr_t align, phys_addr_t start, phys_addr_t end)
{
	long i;

	BUG_ON(0 == size);

	size = memblock_align_up(size, align);

	/* Pump up max_addr */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* We do a top-down search; this tends to limit memory
	 * fragmentation by keeping early boot allocs near the
	 * top of memory
	 */
	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
		phys_addr_t memblockbase = memblock.memory.regions[i].base;
		phys_addr_t memblocksize = memblock.memory.regions[i].size;
		phys_addr_t bottom, top, found;

		if (memblocksize < size)
			continue;
		if ((memblockbase + memblocksize) <= start)
			break;
		bottom = max(memblockbase, start);
		top = min(memblockbase + memblocksize, end);
		if (bottom >= top)
			continue;
		found = memblock_find_region(bottom, top, size, align);
		if (found != MEMBLOCK_ERROR)
			return found;
	}
	return MEMBLOCK_ERROR;
}

/*
 * Find a free area with specified alignment in a specific range.
 */
u64 __init_memblock memblock_find_in_range(u64 start, u64 end, u64 size, u64 align)
{
	return memblock_find_base(size, align, start, end);
}
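
/*
 * Illustrative call (hypothetical values): find, but do not reserve,
 * 1MB aligned to 1MB anywhere below 4GB:
 *
 *	u64 addr = memblock_find_in_range(0, 0x100000000ULL,
 *					  0x100000, 0x100000);
 *	if (addr == MEMBLOCK_ERROR)
 *		... fall back or panic, caller's choice ...
 */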

/*
 * Free memblock.reserved.regions
 */
int __init_memblock memblock_free_reserved_regions(void)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	return memblock_free(__pa(memblock.reserved.regions),
		 sizeof(struct memblock_region) * memblock.reserved.max);
}

/*
 * Reserve memblock.reserved.regions
 */
int __init_memblock memblock_reserve_reserved_regions(void)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	return memblock_reserve(__pa(memblock.reserved.regions),
		 sizeof(struct memblock_region) * memblock.reserved.max);
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	unsigned long i;

	for (i = r; i < type->cnt - 1; i++) {
		type->regions[i].base = type->regions[i + 1].base;
		type->regions[i].size = type->regions[i + 1].size;
	}
	type->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void __init_memblock memblock_coalesce_regions(struct memblock_type *type,
		unsigned long r1, unsigned long r2)
{
	type->regions[r1].size += type->regions[r2].size;
	memblock_remove_region(type, r2);
}

/* Defined below but needed now */
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);

static int __init_memblock memblock_double_array(struct memblock_type *type)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab is available and we use it,
	 * or we allocate from MEMBLOCK directly. That means this is unsafe
	 * to use while bootmem is active (unless bootmem itself is
	 * implemented on top of MEMBLOCK, which isn't the case yet).
	 *
	 * This should not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when
	 * slab is active for memory hotplug operations.
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array);
	} else
		addr = memblock_find_base(new_size, sizeof(phys_addr_t), 0, MEMBLOCK_ALLOC_ACCESSIBLE);
	if (addr == MEMBLOCK_ERROR) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}
	new_array = __va(addr);

	memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]\n",
		 memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);

	/* Found space, we now need to move the array over before
	 * we add the reserved region since it may be our reserved
	 * array itself that is full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* If we use SLAB that's it, we are done */
	if (use_slab)
		return 0;

	/* Add the new reserved region now. Should not fail ! */
	BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size) < 0);

	/* If the array wasn't our static init one, then free it. We only do
	 * that before SLAB is available as later on, we don't know whether
	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
	 * anyway.
	 */
	if (old_array != memblock_memory_init_regions &&
	    old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_size);

	return 0;
}
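
/*
 * Illustrative growth sequence (assuming INIT_MEMBLOCK_REGIONS is 128, as
 * the static arrays above suggest): the first doubling grows a type from
 * 128 to 256 entries, the next to 512, and so on. Before slab is up, the
 * new array itself comes out of memblock and is recorded in
 * memblock.reserved, which is why the copy happens before the reservation.
 */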

extern int __init_memblock __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
					phys_addr_t addr2, phys_addr_t size2)
{
	return 1;
}

static long __init_memblock memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if ((type->cnt == 1) && (type->regions[0].size == 0)) {
		type->regions[0].base = base;
		type->regions[0].size = size;
		return 0;
	}

	/* First try and coalesce this MEMBLOCK with another. */
	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize);
		/* Check if arch allows coalescing */
		if (adjacent != 0 && type == &memblock.memory &&
		    !memblock_memory_can_coalesce(base, size, rgnbase, rgnsize))
			break;
		if (adjacent > 0) {
			type->regions[i].base -= size;
			type->regions[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			type->regions[i].size += size;
			coalesced++;
			break;
		}
	}

	/* If we plugged a hole, we may want to also coalesce with the
	 * next region
	 */
	if ((i < type->cnt - 1) && memblock_regions_adjacent(type, i, i+1) &&
	    ((type != &memblock.memory || memblock_memory_can_coalesce(type->regions[i].base,
								       type->regions[i].size,
								       type->regions[i+1].base,
								       type->regions[i+1].size)))) {
		memblock_coalesce_regions(type, i, i+1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;

	/* If we are out of space, we fail. It's too late to resize the array
	 * but then this shouldn't have happened in the first place.
	 */
	if (WARN_ON(type->cnt >= type->max))
		return -1;

	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
	for (i = type->cnt - 1; i >= 0; i--) {
		if (base < type->regions[i].base) {
			type->regions[i+1].base = type->regions[i].base;
			type->regions[i+1].size = type->regions[i].size;
		} else {
			type->regions[i+1].base = base;
			type->regions[i+1].size = size;
			break;
		}
	}

	if (base < type->regions[0].base) {
		type->regions[0].base = base;
		type->regions[0].size = size;
	}
	type->cnt++;

	/* The array is full ? Try to resize it. If that fails, we undo
	 * our allocation and return an error
	 */
	if (type->cnt == type->max && memblock_double_array(type)) {
		type->cnt--;
		return -1;
	}

	return 0;
}
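
/*
 * Worked example (illustrative): with an existing region [0x1000, 0x2000),
 * adding base = 0x2000, size = 0x1000 takes the "adjacent < 0" branch and
 * simply grows the existing entry to [0x1000, 0x3000); no new slot is
 * consumed. Adding a region that exactly fills the gap between two entries
 * triggers the second coalesce as well and merges all three into one.
 */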

long __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size);
}

static long __init_memblock __memblock_remove(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < type->cnt; i++) {
		rgnbegin = type->regions[i].base;
		rgnend = rgnbegin + type->regions[i].size;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == type->cnt)
		return -1;

	/* Check to see if we are removing the entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		memblock_remove_region(type, i);
		return 0;
	}

	/* Check to see if the region is matching at the front */
	if (rgnbegin == base) {
		type->regions[i].base = end;
		type->regions[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		type->regions[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	type->regions[i].size = base - type->regions[i].base;
	return memblock_add_region(type, end, rgnend - end);
}
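
/*
 * Worked example (illustrative): removing [0x2000, 0x3000) from a region
 * [0x1000, 0x4000) hits the split case: the entry is trimmed to
 * [0x1000, 0x2000) and a new region [0x3000, 0x4000) is added for the
 * remainder above the hole.
 */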

long __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.memory, base, size);
}

long __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.reserved, base, size);
}

long __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	BUG_ON(0 == size);

	return memblock_add_region(_rgn, base, size);
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t found;

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = memblock_align_up(size, align);

	found = memblock_find_base(size, align, 0, max_addr);
	if (found != MEMBLOCK_ERROR &&
	    memblock_add_region(&memblock.reserved, found, size) >= 0)
		return found;

	return 0;
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
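
/*
 * Typical early-boot usage (illustrative sketch, not from this file):
 *
 *	phys_addr_t pa = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 *	void *p = __va(pa);
 *
 * memblock_alloc() panics on failure (via memblock_alloc_base()), while
 * __memblock_alloc_base() returns 0 instead, letting the caller recover.
 */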

/*
 * Additional node-local allocators. The search for node memory is bottom-up
 * and walks memblock regions within that node bottom-up as well, but the
 * allocation within a memblock region is top-down. XXX I plan to fix that
 * at some stage.
 *
 * WARNING: Only available after early_node_map[] has been populated,
 * on some architectures, that is after all the calls to add_active_range()
 * have been done to populate it.
 */

phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
{
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
	/*
	 * This code originates from sparc which really wants us to walk by
	 * addresses and returns the nid. This is not very convenient for
	 * early_pfn_map[] users as the map isn't sorted yet, and it really
	 * wants to be walked by nid.
	 *
	 * For now, I implement the inefficient method below which walks the
	 * early map multiple times. Eventually we may want to use an ARCH
	 * config option to implement a completely different method for both
	 * cases.
	 */
	unsigned long start_pfn, end_pfn;
	int i;

	for (i = 0; i < MAX_NUMNODES; i++) {
		get_pfn_range_for_nid(i, &start_pfn, &end_pfn);
		if (start < PFN_PHYS(start_pfn) || start >= PFN_PHYS(end_pfn))
			continue;
		*nid = i;
		return min(end, PFN_PHYS(end_pfn));
	}
#endif
	*nid = 0;

	return end;
}

static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
					phys_addr_t size,
					phys_addr_t align, int nid)
{
	phys_addr_t start, end;

	start = mp->base;
	end = start + mp->size;

	start = memblock_align_up(start, align);
	while (start < end) {
		phys_addr_t this_end;
		int this_nid;

		this_end = memblock_nid_range(start, end, &this_nid);
		if (this_nid == nid) {
			phys_addr_t ret = memblock_find_region(start, this_end, size, align);
			if (ret != MEMBLOCK_ERROR &&
			    memblock_add_region(&memblock.reserved, ret, size) >= 0)
				return ret;
		}
		start = this_end;
	}

	return MEMBLOCK_ERROR;
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	struct memblock_type *mem = &memblock.memory;
	int i;

	BUG_ON(0 == size);

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = memblock_align_up(size, align);

	/* We do a bottom-up search for a region with the right
	 * nid since that's easier considering how memblock_nid_range()
	 * works
	 */
	for (i = 0; i < mem->cnt; i++) {
		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
					size, align, nid);
		if (ret != MEMBLOCK_ERROR)
			return ret;
	}

	return 0;
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
}

/*
 * Remaining API functions
 */
/* You must call memblock_analyze() before this. */
phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory_size;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

/* You must call memblock_analyze() after this. */
void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
{
	unsigned long i;
	phys_addr_t limit;
	struct memblock_region *p;

	if (!memory_limit)
		return;

	/* Truncate the memblock regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < memblock.memory.cnt; i++) {
		if (limit > memblock.memory.regions[i].size) {
			limit -= memblock.memory.regions[i].size;
			continue;
		}

		memblock.memory.regions[i].size = limit;
		memblock.memory.cnt = i + 1;
		break;
	}

	memory_limit = memblock_end_of_DRAM();

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		p = &memblock.reserved.regions[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			memblock_remove_region(&memblock.reserved, i);
			i--;
		}
	}
}

static int memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}
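
/*
 * Worked example (illustrative): with two sorted regions [0x1000, 0x2000)
 * and [0x4000, 0x5000), memblock_search() returns 0 for addr = 0x1800,
 * 1 for addr = 0x4000, and -1 for addr = 0x3000, which falls in the hole.
 * The binary search relies on the region table being sorted and
 * non-overlapping, which memblock_add_region() maintains.
 */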

int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= (base + size);
}

int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

static void __init_memblock memblock_dump(struct memblock_type *region, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);

	for (i = 0; i < region->cnt; i++) {
		base = region->regions[i].base;
		size = region->regions[i].size;

		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes\n",
			name, i, base, base + size - 1, size);
	}
}

void __init_memblock memblock_dump_all(void)
{
	if (!memblock_debug)
		return;

	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_analyze(void)
{
	int i;

	/* Check marker in the unused last array entry */
	WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);
	WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);

	memblock.memory_size = 0;

	for (i = 0; i < memblock.memory.cnt; i++)
		memblock.memory_size += memblock.memory.regions[i].size;

	/* We allow resizing from there */
	memblock_can_resize = 1;
}

void __init memblock_init(void)
{
	/* Hookup the initial arrays */
	memblock.memory.regions = memblock_memory_init_regions;
	memblock.memory.max = INIT_MEMBLOCK_REGIONS;
	memblock.reserved.regions = memblock_reserved_init_regions;
	memblock.reserved.max = INIT_MEMBLOCK_REGIONS;

	/* Write a marker in the unused last array entry */
	memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;
	memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;

	/* Create a dummy zero size MEMBLOCK which will get coalesced away later.
	 * This simplifies the memblock_add() code.
	 */
	memblock.memory.regions[0].base = 0;
	memblock.memory.regions[0].size = 0;
	memblock.memory.cnt = 1;

	/* Ditto. */
	memblock.reserved.regions[0].base = 0;
	memblock.reserved.regions[0].size = 0;
	memblock.reserved.cnt = 1;

	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
}
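
/*
 * Illustrative boot-time sequence (hypothetical addresses; the actual
 * callers live in architecture setup code, not in this file):
 *
 *	memblock_init();
 *	memblock_add(0, 0x40000000);			// register 1GB of RAM
 *	memblock_reserve(__pa(_stext), _end - _stext);	// protect the kernel image
 *	memblock_analyze();		// totals computed, resizing allowed
 *	memblock_dump_all();		// only prints with memblock=debug
 */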

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

#if defined(CONFIG_DEBUG_FS) && !defined(ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);
	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */