/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/kexec.h>

#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>

/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
 * executes, which assigns it its actual value. So use a default value
 * that cannot be mistaken for a real physical address.
 */
s64 memstart_addr __ro_after_init = -1;
phys_addr_t arm64_dma_phys_limit __ro_after_init;

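/*
 * Parse the "initrd=<start>,<size>" command line parameter, e.g.
 * "initrd=0x84000000,16M" (illustrative values). The start address is
 * physical here; it is converted to a virtual address later, in
 * arm64_memblock_init(). memparse() accepts the usual K/M/G suffixes.
 */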
#ifdef CONFIG_BLK_DEV_INITRD
static int __init early_initrd(char *p)
{
	unsigned long start, size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		initrd_start = start;
		initrd_end = start + size;
	}
	return 0;
}
early_param("initrd", early_initrd);
#endif

#ifdef CONFIG_KEXEC_CORE
/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves the memory area specified by the "crashkernel="
 * kernel command line parameter. The reserved memory is used by the
 * dump-capture kernel when the primary kernel crashes.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_base, crash_size;
	int ret;

	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&crash_size, &crash_base);
	/* no crashkernel= or invalid value specified */
	if (ret || !crash_size)
		return;

	crash_size = PAGE_ALIGN(crash_size);

	if (crash_base == 0) {
		/* Current arm64 boot protocol requires 2MB alignment */
		crash_base = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT,
						crash_size, SZ_2M);
		if (crash_base == 0) {
			pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
				crash_size);
			return;
		}
	} else {
		/* User specifies base address explicitly. */
		if (!memblock_is_region_memory(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region is not memory\n");
			return;
		}

		if (memblock_is_region_reserved(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
			return;
		}

		if (!IS_ALIGNED(crash_base, SZ_2M)) {
			pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
			return;
		}
	}
	memblock_reserve(crash_base, crash_size);

	pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
		crash_base, crash_base + crash_size, crash_size >> 20);

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}
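/*
 * The parameter follows the usual "crashkernel=size[@offset]" syntax:
 * e.g. "crashkernel=256M" lets the kernel pick a 2 MB aligned base below
 * ARCH_LOW_ADDRESS_LIMIT, while "crashkernel=256M@0x60000000"
 * (illustrative base) requests a fixed region, which must be 2 MB
 * aligned and lie entirely within memory.
 */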

static void __init kexec_reserve_crashkres_pages(void)
{
#ifdef CONFIG_HIBERNATION
	phys_addr_t addr;
	struct page *page;

	if (!crashk_res.end)
		return;

	/*
	 * To reduce the size of the hibernation image, all the crash
	 * kernel pages are initially marked as Reserved.
	 */
	for (addr = crashk_res.start; addr < (crashk_res.end + 1);
			addr += PAGE_SIZE) {
		page = phys_to_page(addr);
		SetPageReserved(page);
	}
#endif
}
#else
static void __init reserve_crashkernel(void)
{
}

static void __init kexec_reserve_crashkres_pages(void)
{
}
#endif /* CONFIG_KEXEC_CORE */

/*
 * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
 * currently assumes that for memory starting above 4G, 32-bit devices will
 * use a DMA offset.
 */
static phys_addr_t __init max_zone_dma_phys(void)
{
	phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
	return min(offset + (1ULL << 32), memblock_end_of_DRAM());
}
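/*
 * For example (illustrative addresses): with DRAM starting at 0x80000000
 * the offset is 0 and the limit is min(4 GB, end of DRAM); with DRAM
 * starting at 0x8080000000 the offset is 0x8000000000 and the limit
 * becomes 0x8100000000, again capped at the end of DRAM.
 */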

#ifdef CONFIG_NUMA

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};

	if (IS_ENABLED(CONFIG_ZONE_DMA))
		max_zone_pfns[ZONE_DMA] = PFN_DOWN(max_zone_dma_phys());
	max_zone_pfns[ZONE_NORMAL] = max;

	free_area_init_nodes(max_zone_pfns);
}

#else

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	struct memblock_region *reg;
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	unsigned long max_dma = min;

	memset(zone_size, 0, sizeof(zone_size));

	/* 4GB maximum for 32-bit only capable devices */
#ifdef CONFIG_ZONE_DMA
	max_dma = PFN_DOWN(arm64_dma_phys_limit);
	zone_size[ZONE_DMA] = max_dma - min;
#endif
	zone_size[ZONE_NORMAL] = max - max_dma;

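	/*
	 * Each zone's hole size starts out as the zone's full span; the
	 * loop below subtracts every page range that memblock actually
	 * provides, so only genuine holes remain.
	 */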
	memcpy(zhole_size, zone_size, sizeof(zhole_size));

	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start >= max)
			continue;

#ifdef CONFIG_ZONE_DMA
		if (start < max_dma) {
			unsigned long dma_end = min(end, max_dma);
			zhole_size[ZONE_DMA] -= dma_end - start;
		}
#endif
		if (end > max_dma) {
			unsigned long normal_end = min(end, max);
			unsigned long normal_start = max(start, max_dma);
			zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
		}
	}

	free_area_init_node(0, zone_size, min, zhole_size);
}

#endif /* CONFIG_NUMA */

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
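	/*
	 * memblock_is_map_memory() is used rather than memblock_is_memory()
	 * so that ranges marked MEMBLOCK_NOMAP (for example, regions that
	 * firmware withholds from the linear map) are not reported as
	 * valid RAM.
	 */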
	return memblock_is_map_memory(pfn << PAGE_SHIFT);
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void __init arm64_memory_present(void)
{
}
#else
static void __init arm64_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		int nid = memblock_get_region_node(reg);

		memory_present(nid, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
	}
}
#endif

static phys_addr_t memory_limit = (phys_addr_t)ULLONG_MAX;

/*
 * Limit the amount of memory reported via FDT ("mem=" early parameter).
 */
static int __init early_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);
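/*
 * For example, booting with "mem=512M" (illustrative value) caps usable
 * memory at 512 MiB; the & PAGE_MASK above rounds the limit down to a
 * page boundary.
 */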

static int __init early_init_dt_scan_usablemem(unsigned long node,
		const char *uname, int depth, void *data)
{
	struct memblock_region *usablemem = data;
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	usablemem->base = dt_mem_next_cell(dt_root_addr_cells, &reg);
	usablemem->size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}

static void __init fdt_enforce_memory_region(void)
{
	struct memblock_region reg = {
		.size = 0,
	};

	of_scan_flat_dt(early_init_dt_scan_usablemem, &reg);

	if (reg.size)
		memblock_cap_memory_range(reg.base, reg.size);
}
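/*
 * The "linux,usable-memory-range" property of the /chosen node is how a
 * kdump (dump-capture) kernel is confined to the memory that the crashed
 * kernel reserved for it.
 */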

void __init arm64_memblock_init(void)
{
	const s64 linear_region_size = -(s64)PAGE_OFFSET;
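	/*
	 * PAGE_OFFSET marks the start of the linear map, so in two's
	 * complement -(s64)PAGE_OFFSET is the distance from PAGE_OFFSET
	 * to the top of the address space, i.e. the size of the linear
	 * region.
	 */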

	/* Handle linux,usable-memory-range property */
	fdt_enforce_memory_region();

	/*
	 * Ensure that the linear region takes up exactly half of the kernel
	 * virtual address space. This way, we can distinguish a linear address
	 * from a kernel/module/vmalloc address by testing a single bit.
	 */
	BUILD_BUG_ON(linear_region_size != BIT(VA_BITS - 1));

	/*
	 * Select a suitable value for the base of physical memory.
	 */
	memstart_addr = round_down(memblock_start_of_DRAM(),
				   ARM64_MEMSTART_ALIGN);

	/*
	 * Remove the memory that we will not be able to cover with the
	 * linear mapping. Take care not to clip the kernel which may be
	 * high in memory.
	 */
	memblock_remove(max_t(u64, memstart_addr + linear_region_size,
			__pa(_end)), ULLONG_MAX);
	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
		/* ensure that memstart_addr remains sufficiently aligned */
		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
					 ARM64_MEMSTART_ALIGN);
		memblock_remove(0, memstart_addr);
	}

	/*
	 * Apply the memory limit if it was set. Since the kernel may be loaded
	 * high up in memory, add back the kernel region that must be accessible
	 * via the linear mapping.
	 */
	if (memory_limit != (phys_addr_t)ULLONG_MAX) {
		memblock_mem_limit_remove_map(memory_limit);
		memblock_add(__pa(_text), (u64)(_end - _text));
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_start) {
		/*
		 * Add back the memory we just removed if doing so has made
		 * the initrd inaccessible via the linear mapping; otherwise
		 * this is a no-op.
		 */
		u64 base = initrd_start & PAGE_MASK;
		u64 size = PAGE_ALIGN(initrd_end) - base;

		/*
		 * We can only add back the initrd memory if we don't end up
		 * with more memory than we can address via the linear mapping.
		 * It is up to the bootloader to position the kernel and the
		 * initrd reasonably close to each other (i.e., within 32 GB of
		 * each other) so that all granule/#levels combinations can
		 * always access both.
		 */
		if (WARN(base < memblock_start_of_DRAM() ||
			 base + size > memblock_start_of_DRAM() +
				       linear_region_size,
			"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
			initrd_start = 0;
		} else {
			memblock_remove(base, size); /* clear MEMBLOCK_ flags */
			memblock_add(base, size);
			memblock_reserve(base, size);
		}
	}

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern u16 memstart_offset_seed;
		u64 range = linear_region_size -
			    (memblock_end_of_DRAM() - memblock_start_of_DRAM());

		/*
		 * If the size of the linear region exceeds, by a sufficient
		 * margin, the size of the region that the available physical
		 * memory spans, randomize the linear region as well.
		 */
		if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
			range = range / ARM64_MEMSTART_ALIGN + 1;
			memstart_addr -= ARM64_MEMSTART_ALIGN *
					 ((range * memstart_offset_seed) >> 16);
		}
	}
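	/*
	 * Worked example for the randomization above (illustrative
	 * numbers): with a 256 GB linear region, 4 GB of DRAM and an
	 * ARM64_MEMSTART_ALIGN of 1 GB, there are 252 / 1 + 1 = 253
	 * candidate slots; the 16-bit seed selects one of them, and
	 * memstart_addr is moved down by that many 1 GB steps.
	 */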

	/*
	 * Register the kernel text, kernel data, initrd, and initial
	 * pagetables with memblock.
	 */
	memblock_reserve(__pa(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		memblock_reserve(initrd_start, initrd_end - initrd_start);

		/* the generic initrd code expects virtual addresses */
		initrd_start = __phys_to_virt(initrd_start);
		initrd_end = __phys_to_virt(initrd_end);
	}
#endif

	early_init_fdt_scan_reserved_mem();

	/* 4GB maximum for 32-bit only capable devices */
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		arm64_dma_phys_limit = max_zone_dma_phys();
	else
		arm64_dma_phys_limit = PHYS_MASK + 1;

	reserve_crashkernel();

	dma_contiguous_reserve(arm64_dma_phys_limit);

	memblock_allow_resize();
}

void __init bootmem_init(void)
{
	unsigned long min, max;

	min = PFN_UP(memblock_start_of_DRAM());
	max = PFN_DOWN(memblock_end_of_DRAM());

	early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

	max_pfn = max_low_pfn = max;

	arm64_numa_init();
	/*
	 * Sparsemem tries to allocate bootmem in memory_present(), so it
	 * must be done after the fixed reservations.
	 */
	arm64_memory_present();

	sparse_init();
	zone_sizes_init(min, max);

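	/*
	 * high_memory is the first virtual address past the linear map of
	 * RAM; taking __va() of the last mapped byte and adding one avoids
	 * converting an address that lies outside the mapped range.
	 */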
	high_memory = __va((max << PAGE_SHIFT) - 1) + 1;
	memblock_dump_all();
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		free_bootmem(pg, pgend - pg);
}

/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		start = __phys_to_pfn(reg->base);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist due
		 * to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * If we had a previous bank, and there is a gap between it
		 * and the current bank, free the memmap covering the gap.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free. This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
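	/*
	 * A SWIOTLB bounce buffer is only needed when some RAM lies above
	 * what 32-bit DMA masks can reach, or when "swiotlb=force" was
	 * given on the command line; otherwise bouncing is disabled.
	 */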
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
		swiotlb_init(1);
	else
		swiotlb_force = SWIOTLB_NO_FORCE;

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

#ifndef CONFIG_SPARSEMEM_VMEMMAP
	free_unused_memmap();
#endif
	/* this will put all unused low memory onto the freelists */
	free_all_bootmem();

	kexec_reserve_crashkres_pages();

	mem_init_print_info(NULL);

#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLG(b, t) b, t, ((t) - (b)) >> 30
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
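/*
 * Each helper expands to a "base, top, size" triple for the format
 * strings below, with the size scaled to KB, MB or GB; MLK_ROUNDUP
 * rounds partial kilobytes up instead of truncating.
 */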

	pr_notice("Virtual kernel memory layout:\n");
#ifdef CONFIG_KASAN
	pr_notice("    kasan   : 0x%16lx - 0x%16lx   (%6ld GB)\n",
		MLG(KASAN_SHADOW_START, KASAN_SHADOW_END));
#endif
	pr_notice("    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n",
		MLM(MODULES_VADDR, MODULES_END));
	pr_notice("    vmalloc : 0x%16lx - 0x%16lx   (%6ld GB)\n",
		MLG(VMALLOC_START, VMALLOC_END));
	pr_notice("      .text : 0x%p" " - 0x%p" "   (%6ld KB)\n",
		MLK_ROUNDUP(_text, _etext));
	pr_notice("    .rodata : 0x%p" " - 0x%p" "   (%6ld KB)\n",
		MLK_ROUNDUP(__start_rodata, __init_begin));
	pr_notice("      .init : 0x%p" " - 0x%p" "   (%6ld KB)\n",
		MLK_ROUNDUP(__init_begin, __init_end));
	pr_notice("      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n",
		MLK_ROUNDUP(_sdata, _edata));
	pr_notice("       .bss : 0x%p" " - 0x%p" "   (%6ld KB)\n",
		MLK_ROUNDUP(__bss_start, __bss_stop));
	pr_notice("    fixed   : 0x%16lx - 0x%16lx   (%6ld KB)\n",
		MLK(FIXADDR_START, FIXADDR_TOP));
	pr_notice("    PCI I/O : 0x%16lx - 0x%16lx   (%6ld MB)\n",
		MLM(PCI_IO_START, PCI_IO_END));
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	pr_notice("    vmemmap : 0x%16lx - 0x%16lx   (%6ld GB maximum)\n",
		MLG(VMEMMAP_START, VMEMMAP_START + VMEMMAP_SIZE));
	pr_notice("              0x%16lx - 0x%16lx   (%6ld MB actual)\n",
		MLM((unsigned long)phys_to_page(memblock_start_of_DRAM()),
		    (unsigned long)virt_to_page(high_memory)));
#endif
	pr_notice("    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n",
		MLM(__phys_to_virt(memblock_start_of_DRAM()),
		    (unsigned long)high_memory));

#undef MLK
#undef MLM
#undef MLG
#undef MLK_ROUNDUP

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can be
	 * detected at build time already.
	 */
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64);
#endif

	/*
	 * Make sure we chose the upper bound of sizeof(struct page)
	 * correctly.
	 */
	BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT));

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
	free_reserved_area(__va(__pa(__init_begin)), __va(__pa(__init_end)),
			   0, "unused kernel");
	/*
	 * Unmap the __init region but leave the VM area in place. This
	 * prevents the region from being reused for kernel modules, which
	 * is not supported by kallsyms.
	 */
	unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd __initdata;

void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd)
		free_reserved_area((void *)start, (void *)end, 0, "initrd");
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
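/*
 * Booting with "keepinitrd" on the command line keeps the initrd memory
 * instead of releasing it to the page allocator after boot.
 */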
#endif

/*
 * Dump out memory limit information on panic.
 */
static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p)
{
	if (memory_limit != (phys_addr_t)ULLONG_MAX) {
		pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
	} else {
		pr_emerg("Memory Limit: none\n");
	}
	return 0;
}

static struct notifier_block mem_limit_notifier = {
	.notifier_call = dump_mem_limit,
};

static int __init register_mem_limit_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &mem_limit_notifier);
	return 0;
}
__initcall(register_mem_limit_dumper);