// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/sizes.h>

#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlb.h>
#include <asm/alternative.h>

/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
 * executes, which assigns it its actual value. So use a default value
 * that cannot be mistaken for a real physical address.
 */
s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);
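
/*
 * Highest physical address usable by 32-bit-DMA-only devices; computed in
 * arm64_memblock_init() and consumed by the CMA and swiotlb setup below.
 */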
phys_addr_t arm64_dma_phys_limit __ro_after_init;

#ifdef CONFIG_KEXEC_CORE
/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves the memory region specified by the "crashkernel="
 * kernel command line parameter. The reserved memory is used by the
 * dump-capture kernel when the primary kernel crashes.
 */
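/*
 * Illustrative examples (addresses are arbitrary): "crashkernel=512M" lets
 * memblock pick a 2 MB aligned base below ARCH_LOW_ADDRESS_LIMIT, while
 * "crashkernel=512M@0x60000000" requests that exact base and is rejected
 * if the region is not usable memory.
 */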
static void __init reserve_crashkernel(void)
{
        unsigned long long crash_base, crash_size;
        int ret;

        ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
                                &crash_size, &crash_base);
        /* no crashkernel= or invalid value specified */
        if (ret || !crash_size)
                return;

        crash_size = PAGE_ALIGN(crash_size);

        if (crash_base == 0) {
                /* Current arm64 boot protocol requires 2MB alignment */
                crash_base = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT,
                                                    crash_size, SZ_2M);
                if (crash_base == 0) {
                        pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
                                crash_size);
                        return;
                }
        } else {
                /* User specifies base address explicitly. */
                if (!memblock_is_region_memory(crash_base, crash_size)) {
                        pr_warn("cannot reserve crashkernel: region is not memory\n");
                        return;
                }

                if (memblock_is_region_reserved(crash_base, crash_size)) {
                        pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
                        return;
                }

                if (!IS_ALIGNED(crash_base, SZ_2M)) {
                        pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
                        return;
                }
        }
        memblock_reserve(crash_base, crash_size);

        pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
                crash_base, crash_base + crash_size, crash_size >> 20);

        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
}
#else
static void __init reserve_crashkernel(void)
{
}
#endif /* CONFIG_KEXEC_CORE */

#ifdef CONFIG_CRASH_DUMP
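/*
 * Flat device tree scanner: look for the "linux,elfcorehdr" property under
 * /chosen, by which the primary kernel tells the dump-capture kernel where
 * the ELF core header was saved.
 */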
static int __init early_init_dt_scan_elfcorehdr(unsigned long node,
                const char *uname, int depth, void *data)
{
        const __be32 *reg;
        int len;

        if (depth != 1 || strcmp(uname, "chosen") != 0)
                return 0;

        reg = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
        if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
                return 1;

        elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &reg);
        elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &reg);

        return 1;
}

/*
 * reserve_elfcorehdr() - reserves memory for the ELF core header
 *
 * This function reserves the memory occupied by an ELF core header described
 * in the device tree. This region contains all the information about the
 * primary kernel's core image, and a dump-capture kernel uses it to access
 * the primary kernel's memory.
 */
static void __init reserve_elfcorehdr(void)
{
        of_scan_flat_dt(early_init_dt_scan_elfcorehdr, NULL);

        if (!elfcorehdr_size)
                return;

        if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
                pr_warn("elfcorehdr region overlaps reserved memory\n");
                return;
        }

        memblock_reserve(elfcorehdr_addr, elfcorehdr_size);

        pr_info("Reserving %lldKB of memory at 0x%llx for elfcorehdr\n",
                elfcorehdr_size >> 10, elfcorehdr_addr);
}
#else
static void __init reserve_elfcorehdr(void)
{
}
#endif /* CONFIG_CRASH_DUMP */

/*
 * Return the maximum physical address for ZONE_DMA32 (DMA_BIT_MASK(32)). It
 * currently assumes that for memory starting above 4G, 32-bit devices will
 * use a DMA offset.
 */
static phys_addr_t __init max_zone_dma_phys(void)
{
        phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
        return min(offset + (1ULL << 32), memblock_end_of_DRAM());
}
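
/*
 * Worked example (values are illustrative): with DRAM starting at
 * 0x8_8000_0000, masking with GENMASK_ULL(63, 32) gives offset =
 * 0x8_0000_0000, so the limit is min(0x9_0000_0000, end of DRAM): the
 * first 4 GB window that a 32-bit device behind a DMA offset can reach.
 */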

#ifdef CONFIG_NUMA

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};

        if (IS_ENABLED(CONFIG_ZONE_DMA32))
                max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
        max_zone_pfns[ZONE_NORMAL] = max;

        free_area_init_nodes(max_zone_pfns);
}

#else

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
        struct memblock_region *reg;
        unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
        unsigned long max_dma = min;

        memset(zone_size, 0, sizeof(zone_size));

        /* 4GB maximum for 32-bit only capable devices */
#ifdef CONFIG_ZONE_DMA32
        max_dma = PFN_DOWN(arm64_dma_phys_limit);
        zone_size[ZONE_DMA32] = max_dma - min;
#endif
        zone_size[ZONE_NORMAL] = max - max_dma;

        memcpy(zhole_size, zone_size, sizeof(zhole_size));
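
        /*
         * zhole_size starts out as each zone's full span; walking the
         * memblock regions subtracts every range actually backed by memory,
         * so only the holes remain for free_area_init_node().
         */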
        for_each_memblock(memory, reg) {
                unsigned long start = memblock_region_memory_base_pfn(reg);
                unsigned long end = memblock_region_memory_end_pfn(reg);

                if (start >= max)
                        continue;

#ifdef CONFIG_ZONE_DMA32
                if (start < max_dma) {
                        unsigned long dma_end = min(end, max_dma);
                        zhole_size[ZONE_DMA32] -= dma_end - start;
                }
#endif
                if (end > max_dma) {
                        unsigned long normal_end = min(end, max);
                        unsigned long normal_start = max(start, max_dma);
                        zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
                }
        }

        free_area_init_node(0, zone_size, min, zhole_size);
}

#endif /* CONFIG_NUMA */

int pfn_valid(unsigned long pfn)
{
        phys_addr_t addr = pfn << PAGE_SHIFT;
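
        /*
         * The shift may truncate: if shifting the address back down does not
         * recover the original pfn, the pfn lies beyond what phys_addr_t can
         * represent and cannot be valid.
         */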
        if ((addr >> PAGE_SHIFT) != pfn)
                return 0;

#ifdef CONFIG_SPARSEMEM
        if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
                return 0;

        if (!valid_section(__nr_to_section(pfn_to_section_nr(pfn))))
                return 0;
#endif
        return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);

static phys_addr_t memory_limit = PHYS_ADDR_MAX;

/*
 * Limit the memory size that was specified via FDT.
 */
static int __init early_mem(char *p)
{
        if (!p)
                return 1;

        memory_limit = memparse(p, &p) & PAGE_MASK;
        pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

        return 0;
}
early_param("mem", early_mem);
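
/*
 * Example (illustrative): booting with "mem=512M" caps the usable memory at
 * 512 MB; memparse() accepts the usual K/M/G suffixes, and the result is
 * truncated to a page boundary.
 */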

static int __init early_init_dt_scan_usablemem(unsigned long node,
                const char *uname, int depth, void *data)
{
        struct memblock_region *usablemem = data;
        const __be32 *reg;
        int len;

        if (depth != 1 || strcmp(uname, "chosen") != 0)
                return 0;

        reg = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
        if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
                return 1;

        usablemem->base = dt_mem_next_cell(dt_root_addr_cells, &reg);
        usablemem->size = dt_mem_next_cell(dt_root_size_cells, &reg);

        return 1;
}
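
/*
 * When running as a kdump capture kernel, /chosen/linux,usable-memory-range
 * confines us to the window the primary kernel reserved; cap the memblock
 * memory to that range so we never touch the crashed kernel's pages.
 */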
static void __init fdt_enforce_memory_region(void)
{
        struct memblock_region reg = {
                .size = 0,
        };

        of_scan_flat_dt(early_init_dt_scan_usablemem, &reg);

        if (reg.size)
                memblock_cap_memory_range(reg.base, reg.size);
}

void __init arm64_memblock_init(void)
{
        const s64 linear_region_size = -(s64)PAGE_OFFSET;
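
        /*
         * PAGE_OFFSET is the sign-extended base of the linear map, so its
         * two's-complement negation is the region's size. E.g. (illustrative)
         * with VA_BITS == 48, PAGE_OFFSET == 0xffff_8000_0000_0000 and the
         * linear region spans 1 << 47 bytes, as the BUILD_BUG_ON below checks.
         */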

        /* Handle linux,usable-memory-range property */
        fdt_enforce_memory_region();

        /* Remove memory above our supported physical address size */
        memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);

        /*
         * Ensure that the linear region takes up exactly half of the kernel
         * virtual address space. This way, we can distinguish a linear address
         * from a kernel/module/vmalloc address by testing a single bit.
         */
        BUILD_BUG_ON(linear_region_size != BIT(VA_BITS - 1));

        /*
         * Select a suitable value for the base of physical memory.
         */
        memstart_addr = round_down(memblock_start_of_DRAM(),
                                   ARM64_MEMSTART_ALIGN);

        /*
         * Remove the memory that we will not be able to cover with the
         * linear mapping. Take care not to clip the kernel which may be
         * high in memory.
         */
        memblock_remove(max_t(u64, memstart_addr + linear_region_size,
                        __pa_symbol(_end)), ULLONG_MAX);
        if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
                /* ensure that memstart_addr remains sufficiently aligned */
                memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
                                         ARM64_MEMSTART_ALIGN);
                memblock_remove(0, memstart_addr);
        }

        /*
         * Apply the memory limit if it was set. Since the kernel may be loaded
         * high up in memory, add back the kernel region that must be accessible
         * via the linear mapping.
         */
        if (memory_limit != PHYS_ADDR_MAX) {
                memblock_mem_limit_remove_map(memory_limit);
                memblock_add(__pa_symbol(_text), (u64)(_end - _text));
        }

        if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
                /*
                 * Add back the memory we just removed if removing it would
                 * make the initrd inaccessible via the linear mapping.
                 * Otherwise, this is a no-op.
                 */
                u64 base = phys_initrd_start & PAGE_MASK;
                u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;

                /*
                 * We can only add back the initrd memory if we don't end up
                 * with more memory than we can address via the linear mapping.
                 * It is up to the bootloader to position the kernel and the
                 * initrd reasonably close to each other (i.e., within 32 GB of
                 * each other) so that all granule/#levels combinations can
                 * always access both.
                 */
                if (WARN(base < memblock_start_of_DRAM() ||
                         base + size > memblock_start_of_DRAM() +
                                       linear_region_size,
                        "initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
                        phys_initrd_size = 0;
                } else {
                        memblock_remove(base, size); /* clear MEMBLOCK_ flags */
                        memblock_add(base, size);
                        memblock_reserve(base, size);
                }
        }

        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
                extern u16 memstart_offset_seed;
                u64 range = linear_region_size -
                            (memblock_end_of_DRAM() - memblock_start_of_DRAM());

                /*
                 * If the size of the linear region exceeds, by a sufficient
                 * margin, the size of the region that the available physical
                 * memory spans, randomize the linear region as well.
                 */
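                /*
                 * memstart_offset_seed is a 16-bit random value;
                 * (range * seed) >> 16 scales the slack by seed / 65536, so
                 * the base moves down by a random whole number of
                 * ARM64_MEMSTART_ALIGN units while all of DRAM stays within
                 * the linear region.
                 */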
                if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
                        range /= ARM64_MEMSTART_ALIGN;
                        memstart_addr -= ARM64_MEMSTART_ALIGN *
                                         ((range * memstart_offset_seed) >> 16);
                }
        }

        /*
         * Register the kernel text, kernel data, initrd, and initial
         * pagetables with memblock.
         */
        memblock_reserve(__pa_symbol(_text), _end - _text);
        if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
                /* the generic initrd code expects virtual addresses */
                initrd_start = __phys_to_virt(phys_initrd_start);
                initrd_end = initrd_start + phys_initrd_size;
        }

        early_init_fdt_scan_reserved_mem();

        /* 4GB maximum for 32-bit only capable devices */
        if (IS_ENABLED(CONFIG_ZONE_DMA32))
                arm64_dma_phys_limit = max_zone_dma_phys();
        else
                arm64_dma_phys_limit = PHYS_MASK + 1;

        reserve_crashkernel();

        reserve_elfcorehdr();
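
        /*
         * high_memory is the first virtual address past the linear map of
         * DRAM; taking __va() of the last byte and adding one keeps the
         * translated address inside the range the linear mapping covers.
         */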
        high_memory = __va(memblock_end_of_DRAM() - 1) + 1;

        dma_contiguous_reserve(arm64_dma_phys_limit);
}

void __init bootmem_init(void)
{
        unsigned long min, max;
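
        /* Round inward so min/max cover only fully page-backed DRAM. */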
        min = PFN_UP(memblock_start_of_DRAM());
        max = PFN_DOWN(memblock_end_of_DRAM());

        early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

        max_pfn = max_low_pfn = max;
        min_low_pfn = min;

        arm64_numa_init();
        /*
         * Sparsemem tries to allocate bootmem in memory_present(), so must be
         * done after the fixed reservations.
         */
        memblocks_present();

        sparse_init();
        zone_sizes_init(min, max);

        memblock_dump_all();
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
        struct page *start_pg, *end_pg;
        unsigned long pg, pgend;

        /*
         * Convert start_pfn/end_pfn to a struct page pointer.
         */
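        /*
         * Translating (pfn - 1) and stepping one struct page forward avoids
         * calling pfn_to_page() on a PFN that may sit inside a memmap hole.
         */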
        start_pg = pfn_to_page(start_pfn - 1) + 1;
        end_pg = pfn_to_page(end_pfn - 1) + 1;

        /*
         * Convert to physical addresses, and round start upwards and end
         * downwards.
         */
        pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
        pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

        /*
         * If there are free pages between these, free the section of the
         * memmap array.
         */
        if (pg < pgend)
                memblock_free(pg, pgend - pg);
}

/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
        unsigned long start, prev_end = 0;
        struct memblock_region *reg;

        for_each_memblock(memory, reg) {
                start = __phys_to_pfn(reg->base);

#ifdef CONFIG_SPARSEMEM
                /*
                 * Take care not to free memmap entries that don't exist due
                 * to SPARSEMEM sections which aren't present.
                 */
                start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
                /*
                 * If we had a previous bank, and there is a gap between the
                 * current bank and the previous, free it.
                 */
                if (prev_end && prev_end < start)
                        free_memmap(prev_end, start);

                /*
                 * Align up here since the VM subsystem insists that the
                 * memmap entries are valid from the bank end aligned to
                 * MAX_ORDER_NR_PAGES.
                 */
                prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
                                 MAX_ORDER_NR_PAGES);
        }

#ifdef CONFIG_SPARSEMEM
        if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
                free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free. This is done after various parts of the system have claimed the
 * memory they need beyond the kernel image.
 */
void __init mem_init(void)
{
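        /*
         * Only pay for SWIOTLB bounce buffering when some RAM lies above the
         * 32-bit DMA limit, or when it was forced on the command line.
         */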
        if (swiotlb_force == SWIOTLB_FORCE ||
            max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
                swiotlb_init(1);
        else
                swiotlb_force = SWIOTLB_NO_FORCE;

        set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);

#ifndef CONFIG_SPARSEMEM_VMEMMAP
        free_unused_memmap();
#endif
        /* this will put all unused low memory onto the freelists */
        memblock_free_all();

        mem_init_print_info(NULL);

        /*
         * Check boundaries twice: Some fundamental inconsistencies can be
         * detected at build time already.
         */
#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
#endif

        if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
                extern int sysctl_overcommit_memory;
                /*
                 * On a machine this small we won't get anywhere without
                 * overcommit, so turn it on by default.
                 */
                sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
        }
}

void free_initmem(void)
{
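        /*
         * lm_alias() converts the image symbols to their linear-map alias:
         * the pages are freed through the linear mapping, while the
         * kernel-image mapping of the region is torn down just below.
         */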
        free_reserved_area(lm_alias(__init_begin),
                           lm_alias(__init_end),
                           0, "unused kernel");
        /*
         * Unmap the __init region but leave the VM area in place. This
         * prevents the region from being reused for kernel modules, which
         * is not supported by kallsyms.
         */
        unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
        free_reserved_area((void *)start, (void *)end, 0, "initrd");
        memblock_free(__virt_to_phys(start), end - start);
}
#endif

/*
 * Dump out memory limit information on panic.
 */
static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p)
{
        if (memory_limit != PHYS_ADDR_MAX) {
                pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
        } else {
                pr_emerg("Memory Limit: none\n");
        }
        return 0;
}

static struct notifier_block mem_limit_notifier = {
        .notifier_call = dump_mem_limit,
};

static int __init register_mem_limit_dumper(void)
{
        atomic_notifier_chain_register(&panic_notifier_list,
                                       &mem_limit_notifier);
        return 0;
}
__initcall(register_mem_limit_dumper);