/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>

#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"
static phys_addr_t phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;
static int __init early_initrd(char *p)
{
	phys_addr_t start;
	unsigned long size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);
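
/*
 * Example: the option is parsed as "initrd=<start>,<size>", where both
 * values go through memparse() and may therefore use K/M/G suffixes.
 * On a hypothetical board whose RAM starts at 0x60000000, booting with
 *
 *	initrd=0x60800000,8M
 *
 * would leave phys_initrd_start = 0x60800000 and
 * phys_initrd_size = 0x800000 (8 MiB).
 */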
static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);
static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
/*
 * This keeps memory configuration data used by a couple of memory
 * initialization functions, as well as show_mem() for the skipping
 * of holes in the memory map.  It is populated by arm_add_memory().
 */
struct meminfo meminfo;
void show_mem(unsigned int filter)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, i;
	struct meminfo *mi = &meminfo;

	printk("Mem-info:\n");
	show_free_areas(filter);

	for_each_bank (i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end  = pfn_to_page(pfn2 - 1) + 1;

		do {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
			page++;
		} while (page < end);
	}

	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}
static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	struct meminfo *mi = &meminfo;
	int i;

	/* This assumes the meminfo array is properly sorted */
	*min = bank_pfn_start(&mi->bank[0]);
	for_each_bank (i, mi)
		if (mi->bank[i].highmem)
			break;
	*max_low = bank_pfn_end(&mi->bank[i - 1]);
	*max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]);
}
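
/*
 * Worked example with hypothetical banks: bank[0] covering lowmem PFNs
 * 0x60000-0x80000 and bank[1] covering highmem PFNs 0x80000-0xa0000.
 * The loop stops at the first highmem bank, so *min = 0x60000,
 * *max_low = 0x80000 (end of the last lowmem bank) and
 * *max_high = 0xa0000 (end of the last bank overall).
 */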
#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA.  The default here places no restriction on DMA
 * allocations.  This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;
static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
	unsigned long dma_size)
{
	if (size[0] <= dma_size)
		return;

	size[ZONE_NORMAL] = size[0] - dma_size;
	size[ZONE_DMA] = dma_size;
	hole[ZONE_NORMAL] = hole[0];
	hole[ZONE_DMA] = 0;
}
#endif
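
/*
 * Worked example with made-up numbers: if the machine has 512 MiB of
 * lowmem (size[0] = 0x20000 pages with 4 KiB pages) and dma_size is
 * 0x4000 pages (64 MiB), the split above leaves size[ZONE_DMA] = 0x4000
 * and size[ZONE_NORMAL] = 0x1c000, and any holes previously accounted
 * to zone 0 are re-attributed to ZONE_NORMAL.
 */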
void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}
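
/*
 * For instance (hypothetical platform values): with PHYS_OFFSET at
 * 0x80000000 and a machine_desc that sets dma_zone_size to SZ_256M,
 * arm_dma_limit becomes 0x8fffffff and arm_dma_pfn_limit 0x8ffff
 * (with 4 KiB pages); a machine_desc without dma_zone_size leaves the
 * default 32-bit limit of 0xffffffff.
 */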
static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
#ifdef CONFIG_HIGHMEM
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
#endif
	}

#ifdef CONFIG_ZONE_DMA
	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	if (arm_dma_zone_size)
		arm_adjust_dma_zone(zone_size, zhole_size,
			arm_dma_zone_size >> PAGE_SHIFT);
#endif

	free_area_init_node(0, zone_size, min, zhole_size);
}
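
/*
 * Worked example of the hole accounting with made-up PFNs: if
 * min = 0x60000 and max_low = 0x80000, zone_size[0] starts at 0x20000
 * pages.  With two memblock regions covering PFNs 0x60000-0x70000 and
 * 0x78000-0x80000, the loop subtracts 0x10000 + 0x8000 pages, leaving
 * zhole_size[0] = 0x8000 - the 128 MiB gap between the regions
 * (assuming 4 KiB pages).
 */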
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	return memblock_is_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif
#ifndef CONFIG_SPARSEMEM
static void __init arm_memory_present(void)
{
}
#else
static void __init arm_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		memory_present(0, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
}
#endif
static bool arm_memblock_steal_permitted = true;

phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
	memblock_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}
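
/*
 * Usage note: the alloc/free/remove sequence above hands the caller a
 * physical range that memblock no longer knows about at all, which is
 * why stealing must happen before arm_memblock_init() clears
 * arm_memblock_steal_permitted.  A platform's reserve hook might,
 * hypothetically, grab a naturally aligned megabyte for firmware-owned
 * memory with arm_memblock_steal(SZ_1M, SZ_1M).
 */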
void __init arm_memblock_init(struct meminfo *mi,
	const struct machine_desc *mdesc)
{
	int i;

	for (i = 0; i < mi->nr_banks; i++)
		memblock_add(mi->bank[i].start, mi->bank[i].size);

	/* Register the kernel text, kernel data and initrd with memblock. */
#ifdef CONFIG_XIP_KERNEL
	memblock_reserve(__pa(_sdata), _end - _sdata);
#else
	memblock_reserve(__pa(_stext), _end - _stext);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	/* FDT scan will populate initrd_start */
	if (initrd_start && !phys_initrd_size) {
		phys_initrd_start = __virt_to_phys(initrd_start);
		phys_initrd_size = initrd_end - initrd_start;
	}
	initrd_start = initrd_end = 0;
	if (phys_initrd_size &&
	    !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
		       (u64)phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
	if (phys_initrd_size &&
	    memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       (u64)phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
	if (phys_initrd_size) {
		memblock_reserve(phys_initrd_start, phys_initrd_size);

		/* Now convert initrd to virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
#endif

	arm_mm_memblock_reserve();
	arm_dt_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	early_init_fdt_scan_reserved_mem();

	/*
	 * reserve memory for DMA contiguous allocations,
	 * must come from DMA area inside low memory
	 */
	dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));

	arm_memblock_steal_permitted = false;
}
void __init bootmem_init(void)
{
	unsigned long min, max_low, max_high;

	memblock_allow_resize();
	max_low = max_high = 0;

	find_limits(&min, &max_low, &max_high);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	arm_memory_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	zone_sizes_init(min, max_low, max_high);

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 */
	min_low_pfn = min;
	max_low_pfn = max_low;
	max_pfn = max_high;
}
/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}
static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		memblock_free_early(pg, pgend - pg);
}
/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(struct meminfo *mi)
{
	unsigned long bank_start, prev_bank_end = 0;
	unsigned int i;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];

		bank_start = bank_pfn_start(bank);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		bank_start = min(bank_start,
				 ALIGN(prev_bank_end, PAGES_PER_SECTION));
#else
		/*
		 * Align down here since the VM subsystem insists that the
		 * memmap entries are valid from the bank start aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES);
#endif
		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_bank_end && prev_bank_end < bank_start)
			free_memmap(prev_bank_end, bank_start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION))
		free_memmap(prev_bank_end,
			    ALIGN(prev_bank_end, PAGES_PER_SECTION));
#endif
}
#ifdef CONFIG_HIGHMEM
static inline void free_area_high(unsigned long pfn, unsigned long end)
{
	for (; pfn < end; pfn++)
		free_highmem_page(pfn_to_page(pfn));
}
#endif
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn;
	struct memblock_region *mem, *res;

	/* set highmem page free */
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			if (res_end < start)
				continue;
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			if (res_start != start)
				free_area_high(start, res_start);
			start = res_end;
			if (start == end)
				break;
		}

		/* And now free anything which remains */
		if (start < end)
			free_area_high(start, end);
	}
#endif
}
/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_HAVE_TCM
	/* These pointers are filled in on TCM detection */
	extern u32 dtcm_end;
	extern u32 itcm_end;
#endif

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap(&meminfo);

	free_all_bootmem();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

	free_highpages();

	mem_init_print_info(NULL);
#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
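
	/*
	 * How the helpers feed the table below: each macro expands to three
	 * printk arguments - base, top, and the span converted to kB or MB.
	 * For example, MLM(PAGE_OFFSET, (unsigned long)high_memory) becomes
	 * "PAGE_OFFSET, high_memory, (high_memory - PAGE_OFFSET) >> 20",
	 * which fills one "0x%08lx - 0x%08lx  (%4ld MB)" row of the dump.
	 */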
	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
#ifdef CONFIG_MODULES
			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"      .data : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"       .bss : 0x%p" " - 0x%p" "   (%4d kB)\n",

			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
				(PAGE_SIZE)),
#ifdef CONFIG_HAVE_TCM
			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
			MLK(FIXADDR_START, FIXADDR_TOP),
			MLM(VMALLOC_START, VMALLOC_END),
			MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
				(PAGE_SIZE)),
#endif
#ifdef CONFIG_MODULES
			MLM(MODULES_VADDR, MODULES_END),
#endif

			MLK_ROUNDUP(_text, _etext),
			MLK_ROUNDUP(__init_begin, __init_end),
			MLK_ROUNDUP(_sdata, _edata),
			MLK_ROUNDUP(__bss_start, __bss_stop));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP
	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif
#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif
	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}
void free_initmem(void)
{
#ifdef CONFIG_HAVE_TCM
	extern char __tcm_start, __tcm_end;

	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
	free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
#endif

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		free_initmem_default(-1);
}
#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd) {
		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
		free_reserved_area((void *)start, (void *)end, -1, "initrd");
	}
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif