/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/memremap.h>
#include <linux/nmi.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/cacheflush.h>
#include <asm/init.h>
#include <asm/uv/uv.h>
#include <asm/setup.h>

#include "mm_internal.h"

#include "ident_map.c"

/*
 * NOTE: pagetable_init() allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */

pteval_t __supported_pte_mask __read_mostly = ~0;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

int force_personality32;

/*
 * noexec32=on|off
 * Control non-executable heap for 32-bit processes.
 * To control the stack too, use noexec=off.
 *
 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off	PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
        if (!strcmp(str, "on"))
                force_personality32 &= ~READ_IMPLIES_EXEC;
        else if (!strcmp(str, "off"))
                force_personality32 |= READ_IMPLIES_EXEC;
        return 1;
}
__setup("noexec32=", nonx32_setup);
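
/*
 * Example (directly from the handler above): booting with "noexec32=off"
 * sets READ_IMPLIES_EXEC in force_personality32, so PROT_READ gives 32-bit
 * tasks executable mappings as well; "noexec32=on" restores the default
 * non-executable behaviour.
 */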

/*
 * When memory is added, make sure all the processes' MMs have
 * suitable PGD entries in the local PGD-level page.
 */
void sync_global_pgds(unsigned long start, unsigned long end)
{
        unsigned long addr;

        for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
                pgd_t *pgd_ref = pgd_offset_k(addr);
                const p4d_t *p4d_ref;
                struct page *page;

                /*
                 * With folded p4d, pgd_none() is always false, so we need
                 * to handle synchronization at the p4d level.
                 */
                BUILD_BUG_ON(pgd_none(*pgd_ref));
                p4d_ref = p4d_offset(pgd_ref, addr);

                if (p4d_none(*p4d_ref))
                        continue;

                spin_lock(&pgd_lock);
                list_for_each_entry(page, &pgd_list, lru) {
                        pgd_t *pgd;
                        p4d_t *p4d;
                        spinlock_t *pgt_lock;

                        pgd = (pgd_t *)page_address(page) + pgd_index(addr);
                        p4d = p4d_offset(pgd, addr);
                        /* the pgt_lock is only needed for Xen */
                        pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
                        spin_lock(pgt_lock);

                        if (!p4d_none(*p4d_ref) && !p4d_none(*p4d))
                                BUG_ON(p4d_page_vaddr(*p4d)
                                       != p4d_page_vaddr(*p4d_ref));

                        if (p4d_none(*p4d))
                                set_p4d(p4d, *p4d_ref);

                        spin_unlock(pgt_lock);
                }
                spin_unlock(&pgd_lock);
        }
}

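/*
 * sync_global_pgds() is called from kernel_physical_mapping_init() and
 * vmemmap_populate() further down in this file, after new kernel mappings
 * have been created, so that every mm on the pgd_list picks up the new
 * p4d entries.
 */
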
/*
 * NOTE: This function is marked __ref because it calls the __init function
 * alloc_bootmem_pages(). It's safe to do so ONLY while after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
        void *ptr;

        if (after_bootmem)
                ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
        else
                ptr = alloc_bootmem_pages(PAGE_SIZE);

        if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
                panic("set_pte_phys: cannot allocate page data %s\n",
                        after_bootmem ? "after bootmem" : "");
        }

        pr_debug("spp_getpage %p\n", ptr);

        return ptr;
}

static p4d_t *fill_p4d(pgd_t *pgd, unsigned long vaddr)
{
        if (pgd_none(*pgd)) {
                p4d_t *p4d = (p4d_t *)spp_getpage();
                pgd_populate(&init_mm, pgd, p4d);
                if (p4d != p4d_offset(pgd, 0))
                        printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
                               p4d, p4d_offset(pgd, 0));
        }
        return p4d_offset(pgd, vaddr);
}

static pud_t *fill_pud(p4d_t *p4d, unsigned long vaddr)
{
        if (p4d_none(*p4d)) {
                pud_t *pud = (pud_t *)spp_getpage();
                p4d_populate(&init_mm, p4d, pud);
                if (pud != pud_offset(p4d, 0))
                        printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
                               pud, pud_offset(p4d, 0));
        }
        return pud_offset(p4d, vaddr);
}

static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
        if (pud_none(*pud)) {
                pmd_t *pmd = (pmd_t *) spp_getpage();
                pud_populate(&init_mm, pud, pmd);
                if (pmd != pmd_offset(pud, 0))
                        printk(KERN_ERR "PAGETABLE BUG #02! %p <-> %p\n",
                               pmd, pmd_offset(pud, 0));
        }
        return pmd_offset(pud, vaddr);
}

static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
        if (pmd_none(*pmd)) {
                pte_t *pte = (pte_t *) spp_getpage();
                pmd_populate_kernel(&init_mm, pmd, pte);
                if (pte != pte_offset_kernel(pmd, 0))
                        printk(KERN_ERR "PAGETABLE BUG #03!\n");
        }
        return pte_offset_kernel(pmd, vaddr);
}

static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
{
        pmd_t *pmd = fill_pmd(pud, vaddr);
        pte_t *pte = fill_pte(pmd, vaddr);

        set_pte(pte, new_pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
{
        p4d_t *p4d = p4d_page + p4d_index(vaddr);
        pud_t *pud = fill_pud(p4d, vaddr);

        __set_pte_vaddr(pud, vaddr, new_pte);
}

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
        pud_t *pud = pud_page + pud_index(vaddr);

        __set_pte_vaddr(pud, vaddr, new_pte);
}

void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
        pgd_t *pgd;
        p4d_t *p4d_page;

        pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

        pgd = pgd_offset_k(vaddr);
        if (pgd_none(*pgd)) {
                printk(KERN_ERR
                        "PGD FIXMAP MISSING, it should be setup in head.S!\n");
                return;
        }

        p4d_page = p4d_offset(pgd, 0);
        set_pte_vaddr_p4d(p4d_page, vaddr, pteval);
}
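
/*
 * Sketch of a typical caller (see arch/x86/mm/pgtable.c): the fixmap code
 * establishes a single kernel mapping with roughly
 *
 *	set_pte_vaddr(fix_to_virt(idx), pfn_pte(phys >> PAGE_SHIFT, flags));
 *
 * which walks pgd -> p4d -> pud -> pmd -> pte through the fill_*() helpers
 * above, allocating any missing intermediate tables via spp_getpage().
 */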

pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;

        pgd = pgd_offset_k(vaddr);
        p4d = fill_p4d(pgd, vaddr);
        pud = fill_pud(p4d, vaddr);
        return fill_pmd(pud, vaddr);
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
        pmd_t *pmd;

        pmd = populate_extra_pmd(vaddr);
        return fill_pte(pmd, vaddr);
}

/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
                                        enum page_cache_mode cache)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pgprot_t prot;

        pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) |
                pgprot_val(pgprot_4k_2_large(cachemode2pgprot(cache)));
        BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
        for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
                pgd = pgd_offset_k((unsigned long)__va(phys));
                if (pgd_none(*pgd)) {
                        p4d = (p4d_t *) spp_getpage();
                        set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE |
                                           _PAGE_USER));
                }
                p4d = p4d_offset(pgd, (unsigned long)__va(phys));
                if (p4d_none(*p4d)) {
                        pud = (pud_t *) spp_getpage();
                        set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE |
                                           _PAGE_USER));
                }
                pud = pud_offset(p4d, (unsigned long)__va(phys));
                if (pud_none(*pud)) {
                        pmd = (pmd_t *) spp_getpage();
                        set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
                                           _PAGE_USER));
                }
                pmd = pmd_offset(pud, phys);
                BUG_ON(!pmd_none(*pmd));
                set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
        }
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
        __init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
        __init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC);
}

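/*
 * These large-page mappings are consumed by platform code; the SGI UV
 * APIC driver (arch/x86/kernel/apic/x2apic_uv_x.c), for example, is
 * understood to use init_extra_mapping_uc()/_wb() to map chipset MMIO
 * space with 2MB pages.
 */
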
/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end - _text)
 *
 * phys_base holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _brk_end.  _brk_end
 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
        unsigned long vaddr = __START_KERNEL_map;
        unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
        unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
        pmd_t *pmd = level2_kernel_pgt;

        /*
         * Native path, max_pfn_mapped is not set yet.
         * Xen has valid max_pfn_mapped set in
         * arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
         */
        if (max_pfn_mapped)
                vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);

        for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
                if (pmd_none(*pmd))
                        continue;
                if (vaddr < (unsigned long) _text || vaddr > end)
                        set_pmd(pmd, __pmd(0));
        }
}

/*
 * Create PTE level page table mapping for physical addresses.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
              pgprot_t prot)
{
        unsigned long pages = 0, paddr_next;
        unsigned long paddr_last = paddr_end;
        pte_t *pte;
        int i;

        pte = pte_page + pte_index(paddr);
        i = pte_index(paddr);

        for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) {
                paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;
                if (paddr >= paddr_end) {
                        if (!after_bootmem &&
                            !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
                                              E820_TYPE_RAM) &&
                            !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
                                              E820_TYPE_RESERVED_KERN))
                                set_pte(pte, __pte(0));
                        continue;
                }

                /*
                 * We will re-use the existing mapping.
                 * Xen for example has some special requirements, like mapping
                 * pagetable pages as RO. So assume that whoever pre-set up
                 * these mappings knew what they were doing.
                 */
                if (!pte_none(*pte)) {
                        if (!after_bootmem)
                                pages++;
                        continue;
                }

                if (0)
                        pr_info(" pte=%p addr=%lx pte=%016lx\n", pte, paddr,
                                pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
                pages++;
                set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
                paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
        }

        update_page_count(PG_LEVEL_4K, pages);

        return paddr_last;
}

/*
 * Create PMD level page table mapping for physical addresses. The virtual
 * and physical addresses have to be aligned at this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
              unsigned long page_size_mask, pgprot_t prot)
{
        unsigned long pages = 0, paddr_next;
        unsigned long paddr_last = paddr_end;

        int i = pmd_index(paddr);

        for (; i < PTRS_PER_PMD; i++, paddr = paddr_next) {
                pmd_t *pmd = pmd_page + pmd_index(paddr);
                pte_t *pte;
                pgprot_t new_prot = prot;

                paddr_next = (paddr & PMD_MASK) + PMD_SIZE;
                if (paddr >= paddr_end) {
                        if (!after_bootmem &&
                            !e820__mapped_any(paddr & PMD_MASK, paddr_next,
                                              E820_TYPE_RAM) &&
                            !e820__mapped_any(paddr & PMD_MASK, paddr_next,
                                              E820_TYPE_RESERVED_KERN))
                                set_pmd(pmd, __pmd(0));
                        continue;
                }

                if (!pmd_none(*pmd)) {
                        if (!pmd_large(*pmd)) {
                                spin_lock(&init_mm.page_table_lock);
                                pte = (pte_t *)pmd_page_vaddr(*pmd);
                                paddr_last = phys_pte_init(pte, paddr,
                                                           paddr_end, prot);
                                spin_unlock(&init_mm.page_table_lock);
                                continue;
                        }
                        /*
                         * If we are ok with PG_LEVEL_2M mapping, then we will
                         * use the existing mapping.
                         *
                         * Otherwise, we will split the large page mapping but
                         * use the same existing protection bits except for
                         * large page, so that we don't violate Intel's TLB
                         * Application note (317080) which says, while changing
                         * the page sizes, new and old translations should
                         * not differ with respect to page frame and
                         * attributes.
                         */
                        if (page_size_mask & (1 << PG_LEVEL_2M)) {
                                if (!after_bootmem)
                                        pages++;
                                paddr_last = paddr_next;
                                continue;
                        }
                        new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
                }

                if (page_size_mask & (1<<PG_LEVEL_2M)) {
                        pages++;
                        spin_lock(&init_mm.page_table_lock);
                        set_pte((pte_t *)pmd,
                                pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT,
                                        __pgprot(pgprot_val(prot) | _PAGE_PSE)));
                        spin_unlock(&init_mm.page_table_lock);
                        paddr_last = paddr_next;
                        continue;
                }

                pte = alloc_low_page();
                paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot);

                spin_lock(&init_mm.page_table_lock);
                pmd_populate_kernel(&init_mm, pmd, pte);
                spin_unlock(&init_mm.page_table_lock);
        }
        update_page_count(PG_LEVEL_2M, pages);
        return paddr_last;
}

/*
 * Create PUD level page table mapping for physical addresses. The virtual
 * and physical addresses do not have to be aligned at this level. KASLR can
 * randomize virtual addresses up to this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
              unsigned long page_size_mask)
{
        unsigned long pages = 0, paddr_next;
        unsigned long paddr_last = paddr_end;
        unsigned long vaddr = (unsigned long)__va(paddr);
        int i = pud_index(vaddr);

        for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
                pud_t *pud;
                pmd_t *pmd;
                pgprot_t prot = PAGE_KERNEL;

                vaddr = (unsigned long)__va(paddr);
                pud = pud_page + pud_index(vaddr);
                paddr_next = (paddr & PUD_MASK) + PUD_SIZE;

                if (paddr >= paddr_end) {
                        if (!after_bootmem &&
                            !e820__mapped_any(paddr & PUD_MASK, paddr_next,
                                              E820_TYPE_RAM) &&
                            !e820__mapped_any(paddr & PUD_MASK, paddr_next,
                                              E820_TYPE_RESERVED_KERN))
                                set_pud(pud, __pud(0));
                        continue;
                }

                if (!pud_none(*pud)) {
                        if (!pud_large(*pud)) {
                                pmd = pmd_offset(pud, 0);
                                paddr_last = phys_pmd_init(pmd, paddr,
                                                           paddr_end,
                                                           page_size_mask,
                                                           prot);
                                __flush_tlb_all();
                                continue;
                        }
                        /*
                         * If we are ok with PG_LEVEL_1G mapping, then we will
                         * use the existing mapping.
                         *
                         * Otherwise, we will split the gbpage mapping but use
                         * the same existing protection bits except for large
                         * page, so that we don't violate Intel's TLB
                         * Application note (317080) which says, while changing
                         * the page sizes, new and old translations should
                         * not differ with respect to page frame and
                         * attributes.
                         */
                        if (page_size_mask & (1 << PG_LEVEL_1G)) {
                                if (!after_bootmem)
                                        pages++;
                                paddr_last = paddr_next;
                                continue;
                        }
                        prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
                }

                if (page_size_mask & (1<<PG_LEVEL_1G)) {
                        pages++;
                        spin_lock(&init_mm.page_table_lock);
                        set_pte((pte_t *)pud,
                                pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
                                        PAGE_KERNEL_LARGE));
                        spin_unlock(&init_mm.page_table_lock);
                        paddr_last = paddr_next;
                        continue;
                }

                pmd = alloc_low_page();
                paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
                                           page_size_mask, prot);

                spin_lock(&init_mm.page_table_lock);
                pud_populate(&init_mm, pud, pmd);
                spin_unlock(&init_mm.page_table_lock);
        }
        __flush_tlb_all();

        update_page_count(PG_LEVEL_1G, pages);

        return paddr_last;
}

/*
 * Create page table mappings for the physical memory for specific physical
 * addresses. The virtual and physical addresses have to be aligned down to
 * PMD level. It returns the last physical address mapped.
 */
unsigned long __meminit
kernel_physical_mapping_init(unsigned long paddr_start,
                             unsigned long paddr_end,
                             unsigned long page_size_mask)
{
        bool pgd_changed = false;
        unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;

        paddr_last = paddr_end;
        vaddr = (unsigned long)__va(paddr_start);
        vaddr_end = (unsigned long)__va(paddr_end);
        vaddr_start = vaddr;

        for (; vaddr < vaddr_end; vaddr = vaddr_next) {
                pgd_t *pgd = pgd_offset_k(vaddr);
                p4d_t *p4d;
                pud_t *pud;

                vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;

                BUILD_BUG_ON(pgd_none(*pgd));
                p4d = p4d_offset(pgd, vaddr);
                if (p4d_val(*p4d)) {
                        pud = (pud_t *)p4d_page_vaddr(*p4d);
                        paddr_last = phys_pud_init(pud, __pa(vaddr),
                                                   __pa(vaddr_end),
                                                   page_size_mask);
                        continue;
                }

                pud = alloc_low_page();
                paddr_last = phys_pud_init(pud, __pa(vaddr), __pa(vaddr_end),
                                           page_size_mask);

                spin_lock(&init_mm.page_table_lock);
                p4d_populate(&init_mm, p4d, pud);
                spin_unlock(&init_mm.page_table_lock);
                pgd_changed = true;
        }

        if (pgd_changed)
                sync_global_pgds(vaddr_start, vaddr_end - 1);

        __flush_tlb_all();

        return paddr_last;
}
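
/*
 * Sketch of how this is driven (mirrors the bit tests above; the actual
 * caller lives in arch/x86/mm/init.c): boot code probes the CPU and builds
 * page_size_mask roughly as
 *
 *	unsigned long page_size_mask = 0;
 *
 *	if (boot_cpu_has(X86_FEATURE_PSE))
 *		page_size_mask |= 1 << PG_LEVEL_2M;
 *	if (boot_cpu_has(X86_FEATURE_GBPAGES))
 *		page_size_mask |= 1 << PG_LEVEL_1G;
 *
 * and then calls kernel_physical_mapping_init(start, end, page_size_mask)
 * for each usable physical range.
 */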

#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
        memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
}
#endif

void __init paging_init(void)
{
        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();

        /*
         * Clear the default setting for node 0.
         * Note: don't use nodes_clear() here; when NUMA support is not
         * compiled in, that really clears the state, and a later
         * node_set_state() will not set it back.
         */
        node_clear_state(0, N_MEMORY);
        if (N_MEMORY != N_NORMAL_MEMORY)
                node_clear_state(0, N_NORMAL_MEMORY);

        zone_sizes_init();
}

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory
 * need updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
        unsigned long end_pfn = PFN_UP(start + size);

        if (end_pfn > max_pfn) {
                max_pfn = end_pfn;
                max_low_pfn = end_pfn;
                high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
        }
}

/*
 * Memory is always added to the NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
        struct pglist_data *pgdat = NODE_DATA(nid);
        struct zone *zone = pgdat->node_zones +
                zone_for_memory(nid, start, size, ZONE_NORMAL, for_device);
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        init_memory_mapping(start, start + size);

        ret = __add_pages(nid, zone, start_pfn, nr_pages);
        WARN_ON_ONCE(ret);

        /* update max_pfn, max_low_pfn and high_memory */
        update_end_of_memory_vars(start, size);

        return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);
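
/*
 * Rough hotplug flow for orientation (the generic side lives in
 * mm/memory_hotplug.c): add_memory() resolves the target node and calls
 * the arch_add_memory() above, which first extends the direct mapping via
 * init_memory_mapping() and then hands the new pfn range to __add_pages().
 */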

#define PAGE_INUSE 0xFD

static void __meminit free_pagetable(struct page *page, int order)
{
        unsigned long magic;
        unsigned int nr_pages = 1 << order;
        struct vmem_altmap *altmap = to_vmem_altmap((unsigned long) page);

        if (altmap) {
                vmem_altmap_free(altmap, nr_pages);
                return;
        }

        /* bootmem pages have the reserved flag set */
        if (PageReserved(page)) {
                __ClearPageReserved(page);

                magic = (unsigned long)page->freelist;
                if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
                        while (nr_pages--)
                                put_page_bootmem(page++);
                } else
                        while (nr_pages--)
                                free_reserved_page(page++);
        } else
                free_pages((unsigned long)page_address(page), order);
}

static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
        pte_t *pte;
        int i;

        for (i = 0; i < PTRS_PER_PTE; i++) {
                pte = pte_start + i;
                if (!pte_none(*pte))
                        return;
        }

        /* free a pte table */
        free_pagetable(pmd_page(*pmd), 0);
        spin_lock(&init_mm.page_table_lock);
        pmd_clear(pmd);
        spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
        pmd_t *pmd;
        int i;

        for (i = 0; i < PTRS_PER_PMD; i++) {
                pmd = pmd_start + i;
                if (!pmd_none(*pmd))
                        return;
        }

        /* free a pmd table */
        free_pagetable(pud_page(*pud), 0);
        spin_lock(&init_mm.page_table_lock);
        pud_clear(pud);
        spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
{
        pud_t *pud;
        int i;

        for (i = 0; i < PTRS_PER_PUD; i++) {
                pud = pud_start + i;
                if (!pud_none(*pud))
                        return;
        }

        /* free a pud table */
        free_pagetable(p4d_page(*p4d), 0);
        spin_lock(&init_mm.page_table_lock);
        p4d_clear(p4d);
        spin_unlock(&init_mm.page_table_lock);
}

static void __meminit
remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
                 bool direct)
{
        unsigned long next, pages = 0;
        pte_t *pte;
        void *page_addr;
        phys_addr_t phys_addr;

        pte = pte_start + pte_index(addr);
        for (; addr < end; addr = next, pte++) {
                next = (addr + PAGE_SIZE) & PAGE_MASK;
                if (next > end)
                        next = end;

                if (!pte_present(*pte))
                        continue;

                /*
                 * We mapped [0,1G) memory as identity mapping when
                 * initializing, in arch/x86/kernel/head_64.S. These
                 * pagetables cannot be removed.
                 */
                phys_addr = pte_val(*pte) + (addr & PAGE_MASK);
                if (phys_addr < (phys_addr_t)0x40000000)
                        return;

                if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) {
                        /*
                         * Do not free direct mapping pages since they were
                         * freed when offlining, or are simply not in use.
                         */
                        if (!direct)
                                free_pagetable(pte_page(*pte), 0);

                        spin_lock(&init_mm.page_table_lock);
                        pte_clear(&init_mm, addr, pte);
                        spin_unlock(&init_mm.page_table_lock);

                        /* For non-direct mappings, "pages" means nothing. */
                        pages++;
                } else {
                        /*
                         * If we are here, we are freeing vmemmap pages since
                         * direct mapped memory ranges to be freed are aligned.
                         *
                         * If we are not removing the whole page, it means
                         * other page structs in this page are being used and
                         * we cannot remove them. So fill the unused
                         * page_structs with 0xFD, and remove the page when it
                         * is wholly filled with 0xFD.
                         */
                        memset((void *)addr, PAGE_INUSE, next - addr);

                        page_addr = page_address(pte_page(*pte));
                        if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
                                free_pagetable(pte_page(*pte), 0);

                                spin_lock(&init_mm.page_table_lock);
                                pte_clear(&init_mm, addr, pte);
                                spin_unlock(&init_mm.page_table_lock);
                        }
                }
        }

        /* Call free_pte_table() in remove_pmd_table(). */
        flush_tlb_all();
        if (direct)
                update_page_count(PG_LEVEL_4K, -pages);
}

static void __meminit
remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
                 bool direct)
{
        unsigned long next, pages = 0;
        pte_t *pte_base;
        pmd_t *pmd;
        void *page_addr;

        pmd = pmd_start + pmd_index(addr);
        for (; addr < end; addr = next, pmd++) {
                next = pmd_addr_end(addr, end);

                if (!pmd_present(*pmd))
                        continue;

                if (pmd_large(*pmd)) {
                        if (IS_ALIGNED(addr, PMD_SIZE) &&
                            IS_ALIGNED(next, PMD_SIZE)) {
                                if (!direct)
                                        free_pagetable(pmd_page(*pmd),
                                                       get_order(PMD_SIZE));

                                spin_lock(&init_mm.page_table_lock);
                                pmd_clear(pmd);
                                spin_unlock(&init_mm.page_table_lock);
                                pages++;
                        } else {
                                /* If here, we are freeing vmemmap pages. */
                                memset((void *)addr, PAGE_INUSE, next - addr);

                                page_addr = page_address(pmd_page(*pmd));
                                if (!memchr_inv(page_addr, PAGE_INUSE,
                                                PMD_SIZE)) {
                                        free_pagetable(pmd_page(*pmd),
                                                       get_order(PMD_SIZE));

                                        spin_lock(&init_mm.page_table_lock);
                                        pmd_clear(pmd);
                                        spin_unlock(&init_mm.page_table_lock);
                                }
                        }

                        continue;
                }

                pte_base = (pte_t *)pmd_page_vaddr(*pmd);
                remove_pte_table(pte_base, addr, next, direct);
                free_pte_table(pte_base, pmd);
        }

        /* Call free_pmd_table() in remove_pud_table(). */
        if (direct)
                update_page_count(PG_LEVEL_2M, -pages);
}

static void __meminit
remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
                 bool direct)
{
        unsigned long next, pages = 0;
        pmd_t *pmd_base;
        pud_t *pud;
        void *page_addr;

        pud = pud_start + pud_index(addr);
        for (; addr < end; addr = next, pud++) {
                next = pud_addr_end(addr, end);

                if (!pud_present(*pud))
                        continue;

                if (pud_large(*pud)) {
                        if (IS_ALIGNED(addr, PUD_SIZE) &&
                            IS_ALIGNED(next, PUD_SIZE)) {
                                if (!direct)
                                        free_pagetable(pud_page(*pud),
                                                       get_order(PUD_SIZE));

                                spin_lock(&init_mm.page_table_lock);
                                pud_clear(pud);
                                spin_unlock(&init_mm.page_table_lock);
                                pages++;
                        } else {
                                /* If here, we are freeing vmemmap pages. */
                                memset((void *)addr, PAGE_INUSE, next - addr);

                                page_addr = page_address(pud_page(*pud));
                                if (!memchr_inv(page_addr, PAGE_INUSE,
                                                PUD_SIZE)) {
                                        free_pagetable(pud_page(*pud),
                                                       get_order(PUD_SIZE));

                                        spin_lock(&init_mm.page_table_lock);
                                        pud_clear(pud);
                                        spin_unlock(&init_mm.page_table_lock);
                                }
                        }

                        continue;
                }

                pmd_base = pmd_offset(pud, 0);
                remove_pmd_table(pmd_base, addr, next, direct);
                free_pmd_table(pmd_base, pud);
        }

        if (direct)
                update_page_count(PG_LEVEL_1G, -pages);
}

static void __meminit
remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
                 bool direct)
{
        unsigned long next, pages = 0;
        pud_t *pud_base;
        p4d_t *p4d;

        p4d = p4d_start + p4d_index(addr);
        for (; addr < end; addr = next, p4d++) {
                next = p4d_addr_end(addr, end);

                if (!p4d_present(*p4d))
                        continue;

                BUILD_BUG_ON(p4d_large(*p4d));

                pud_base = pud_offset(p4d, 0);
                remove_pud_table(pud_base, addr, next, direct);
                free_pud_table(pud_base, p4d);
        }

        if (direct)
                update_page_count(PG_LEVEL_512G, -pages);
}

/* start and end are both virtual addresses. */
static void __meminit
remove_pagetable(unsigned long start, unsigned long end, bool direct)
{
        unsigned long next;
        unsigned long addr;
        pgd_t *pgd;
        p4d_t *p4d;

        for (addr = start; addr < end; addr = next) {
                next = pgd_addr_end(addr, end);

                pgd = pgd_offset_k(addr);
                if (!pgd_present(*pgd))
                        continue;

                p4d = p4d_offset(pgd, 0);
                remove_p4d_table(p4d, addr, next, direct);
        }

        flush_tlb_all();
}

void __ref vmemmap_free(unsigned long start, unsigned long end)
{
        remove_pagetable(start, end, false);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void __meminit
kernel_physical_mapping_remove(unsigned long start, unsigned long end)
{
        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);

        remove_pagetable(start, end, true);
}

int __ref arch_remove_memory(u64 start, u64 size)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        struct page *page = pfn_to_page(start_pfn);
        struct vmem_altmap *altmap;
        struct zone *zone;
        int ret;

        /* With altmap the first mapped page is offset from @start */
        altmap = to_vmem_altmap((unsigned long) page);
        if (altmap)
                page += vmem_altmap_offset(altmap);
        zone = page_zone(page);
        ret = __remove_pages(zone, start_pfn, nr_pages);
        WARN_ON_ONCE(ret);
        kernel_physical_mapping_remove(start, start + size);

        return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_vsyscall;

static void __init register_page_bootmem_info(void)
{
#ifdef CONFIG_NUMA
        int i;

        for_each_online_node(i)
                register_page_bootmem_info_node(NODE_DATA(i));
#endif
}

void __init mem_init(void)
{
        pci_iommu_alloc();

        /* clear_bss() has already cleared empty_zero_page */

        register_page_bootmem_info();

        /* this will put all memory onto the freelists */
        free_all_bootmem();
        after_bootmem = 1;

        /* Register memory areas for /proc/kcore */
        kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR,
                   PAGE_SIZE, KCORE_OTHER);

        mem_init_print_info(NULL);
}

int kernel_set_to_readonly;

void set_kernel_text_rw(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long end = PFN_ALIGN(__stop___ex_table);

        if (!kernel_set_to_readonly)
                return;

        pr_debug("Set kernel text: %lx - %lx for read write\n",
                 start, end);

        /*
         * Make the kernel identity mapping for text RW. Kernel text
         * mapping will always be RO. Refer to the comment in
         * static_protections() in pageattr.c
         */
        set_memory_rw(start, (end - start) >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long end = PFN_ALIGN(__stop___ex_table);

        if (!kernel_set_to_readonly)
                return;

        pr_debug("Set kernel text: %lx - %lx for read only\n",
                 start, end);

        /*
         * Set the kernel identity mapping for text RO.
         */
        set_memory_ro(start, (end - start) >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long rodata_start = PFN_ALIGN(__start_rodata);
        unsigned long end = (unsigned long) &__end_rodata_hpage_align;
        unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
        unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
        unsigned long all_end;

        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
               (end - start) >> 10);
        set_memory_ro(start, (end - start) >> PAGE_SHIFT);

        kernel_set_to_readonly = 1;

        /*
         * The rodata/data/bss/brk section (but not the kernel text!)
         * should also be not-executable.
         *
         * We align all_end to PMD_SIZE because the existing mapping
         * is a full PMD. If we would align _brk_end to PAGE_SIZE we
         * would split the PMD, and the remainder between _brk_end and
         * the end of the PMD would remain mapped executable.
         *
         * Any PMD which was set up after the one which covers _brk_end
         * has been zapped already via cleanup_highmap().
         */
        all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
        set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
        set_memory_rw(start, (end-start) >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: again\n");
        set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif

        free_init_pages("unused kernel",
                        (unsigned long) __va(__pa_symbol(text_end)),
                        (unsigned long) __va(__pa_symbol(rodata_start)));
        free_init_pages("unused kernel",
                        (unsigned long) __va(__pa_symbol(rodata_end)),
                        (unsigned long) __va(__pa_symbol(_sdata)));

        debug_checkwx();
}

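/*
 * Note on the canonical-address check below: shifting the address right
 * by __VIRTUAL_MASK_SHIFT (arithmetically, via the cast to long) leaves
 * only the sign-extension bits, so "above" must be all zeroes or all ones
 * for a canonical x86-64 address; anything else is rejected without a
 * page-table walk.
 */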
int kern_addr_valid(unsigned long addr)
{
        unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (above != 0 && above != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        p4d = p4d_offset(pgd, addr);
        if (p4d_none(*p4d))
                return 0;

        pud = pud_offset(p4d, addr);
        if (pud_none(*pud))
                return 0;

        if (pud_large(*pud))
                return pfn_valid(pud_pfn(*pud));

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;

        if (pmd_large(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;

        return pfn_valid(pte_pfn(*pte));
}

static unsigned long probe_memory_block_size(void)
{
        unsigned long bz = MIN_MEMORY_BLOCK_SIZE;

        /* if system is UV or has 64GB of RAM or more, use large blocks */
        if (is_uv_system() || ((max_pfn << PAGE_SHIFT) >= (64UL << 30)))
                bz = 2UL << 30; /* 2GB */

        pr_info("x86/mm: Memory block size: %ldMB\n", bz >> 20);

        return bz;
}

static unsigned long memory_block_size_probed;
unsigned long memory_block_size_bytes(void)
{
        if (!memory_block_size_probed)
                memory_block_size_probed = probe_memory_block_size();

        return memory_block_size_probed;
}

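/*
 * For reference: this value is what the generic memory sysfs driver
 * (drivers/base/memory.c) uses to size the /sys/devices/system/memory/
 * memoryN blocks that memory hotplug operates on, which is why it is
 * probed once and then cached.
 */
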
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

static int __meminit vmemmap_populate_hugepages(unsigned long start,
                unsigned long end, int node, struct vmem_altmap *altmap)
{
        unsigned long addr;
        unsigned long next;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        for (addr = start; addr < end; addr = next) {
                next = pmd_addr_end(addr, end);

                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
                        return -ENOMEM;

                p4d = vmemmap_p4d_populate(pgd, addr, node);
                if (!p4d)
                        return -ENOMEM;

                pud = vmemmap_pud_populate(p4d, addr, node);
                if (!pud)
                        return -ENOMEM;

                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
                        void *p;

                        p = __vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
                        if (p) {
                                pte_t entry;

                                entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
                                                PAGE_KERNEL_LARGE);
                                set_pmd(pmd, __pmd(pte_val(entry)));

                                /* check to see if we have contiguous blocks */
                                if (p_end != p || node_start != node) {
                                        if (p_start)
                                                pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
                                                       addr_start, addr_end-1, p_start, p_end-1, node_start);
                                        addr_start = addr;
                                        node_start = node;
                                        p_start = p;
                                }

                                addr_end = addr + PMD_SIZE;
                                p_end = p + PMD_SIZE;
                                continue;
                        } else if (altmap)
                                return -ENOMEM; /* no fallback */
                } else if (pmd_large(*pmd)) {
                        vmemmap_verify((pte_t *)pmd, node, addr, next);
                        continue;
                }
                pr_warn_once("vmemmap: falling back to regular page backing\n");
                if (vmemmap_populate_basepages(addr, next, node))
                        return -ENOMEM;
        }
        return 0;
}

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        struct vmem_altmap *altmap = to_vmem_altmap(start);
        int err;

        if (boot_cpu_has(X86_FEATURE_PSE))
                err = vmemmap_populate_hugepages(start, end, node, altmap);
        else if (altmap) {
                pr_err_once("%s: no cpu support for altmap allocations\n",
                                __func__);
                err = -ENOMEM;
        } else
                err = vmemmap_populate_basepages(start, end, node);
        if (!err)
                sync_global_pgds(start, end - 1);
        return err;
}
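
/*
 * For orientation (generic side in mm/sparse-vmemmap.c): when a memory
 * section is brought up, the sparse code calls vmemmap_populate() for that
 * section's struct page range; with PSE available, each 2MB worth of
 * struct pages is backed by a single PMD-level huge page, otherwise the
 * code falls back to 4k base pages.
 */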

#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
void register_page_bootmem_memmap(unsigned long section_nr,
                                  struct page *start_page, unsigned long size)
{
        unsigned long addr = (unsigned long)start_page;
        unsigned long end = (unsigned long)(start_page + size);
        unsigned long next;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        unsigned int nr_pages;
        struct page *page;

        for (; addr < end; addr = next) {
                pte_t *pte = NULL;

                pgd = pgd_offset_k(addr);
                if (pgd_none(*pgd)) {
                        next = (addr + PAGE_SIZE) & PAGE_MASK;
                        continue;
                }
                get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);

                p4d = p4d_offset(pgd, addr);
                if (p4d_none(*p4d)) {
                        next = (addr + PAGE_SIZE) & PAGE_MASK;
                        continue;
                }
                get_page_bootmem(section_nr, p4d_page(*p4d), MIX_SECTION_INFO);

                pud = pud_offset(p4d, addr);
                if (pud_none(*pud)) {
                        next = (addr + PAGE_SIZE) & PAGE_MASK;
                        continue;
                }
                get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);

                if (!boot_cpu_has(X86_FEATURE_PSE)) {
                        next = (addr + PAGE_SIZE) & PAGE_MASK;
                        pmd = pmd_offset(pud, addr);
                        if (pmd_none(*pmd))
                                continue;
                        get_page_bootmem(section_nr, pmd_page(*pmd),
                                         MIX_SECTION_INFO);

                        pte = pte_offset_kernel(pmd, addr);
                        if (pte_none(*pte))
                                continue;
                        get_page_bootmem(section_nr, pte_page(*pte),
                                         SECTION_INFO);
                } else {
                        next = pmd_addr_end(addr, end);

                        pmd = pmd_offset(pud, addr);
                        if (pmd_none(*pmd))
                                continue;

                        nr_pages = 1 << (get_order(PMD_SIZE));
                        page = pmd_page(*pmd);
                        while (nr_pages--)
                                get_page_bootmem(section_nr, page++,
                                                 SECTION_INFO);
                }
        }
}
#endif

void __meminit vmemmap_populate_print_last(void)
{
        if (p_start) {
                pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
                        addr_start, addr_end-1, p_start, p_end-1, node_start);
                p_start = NULL;
                p_end = NULL;
                node_start = 0;
        }
}
#endif