/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/nmi.h>
#include <linux/gfp.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/cacheflush.h>
#include <asm/init.h>
#include <asm/uv/uv.h>
#include <asm/setup.h>

#include "mm_internal.h"

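/*
 * Identity-mapping helpers: walk [addr, end) and install a 1:1
 * (virt == phys) mapping, allocating any missing intermediate tables
 * through info->alloc_pgt_page().
 */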
static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
			   unsigned long addr, unsigned long end)
{
	addr &= PMD_MASK;
	for (; addr < end; addr += PMD_SIZE) {
		pmd_t *pmd = pmd_page + pmd_index(addr);

		if (!pmd_present(*pmd))
			set_pmd(pmd, __pmd(addr | pmd_flag));
	}
}

static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next) {
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		next = (addr & PUD_MASK) + PUD_SIZE;
		if (next > end)
			next = end;

		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, 0);
			ident_pmd_init(info->pmd_flag, pmd, addr, next);
			continue;
		}
		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
		if (!pmd)
			return -ENOMEM;
		ident_pmd_init(info->pmd_flag, pmd, addr, next);
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	}

	return 0;
}

int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
			      unsigned long addr, unsigned long end)
{
	unsigned long next;
	int result;
	int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;

	for (; addr < end; addr = next) {
		pgd_t *pgd = pgd_page + pgd_index(addr) + off;
		pud_t *pud;

		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
		if (next > end)
			next = end;

		if (pgd_present(*pgd)) {
			pud = pud_offset(pgd, 0);
			result = ident_pud_init(info, pud, addr, next);
			if (result)
				return result;
			continue;
		}

		pud = (pud_t *)info->alloc_pgt_page(info->context);
		if (!pud)
			return -ENOMEM;
		result = ident_pud_init(info, pud, addr, next);
		if (result)
			return result;
		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
	}

	return 0;
}

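/*
 * A minimal usage sketch for kernel_ident_mapping_init() above
 * (hypothetical caller; the names below are illustrative, not from this
 * file): the caller supplies a page-table allocator and PMD flags
 * through struct x86_mapping_info.
 *
 *	struct x86_mapping_info info = {
 *		.alloc_pgt_page	= my_alloc_pgt_page,	// caller's allocator
 *		.context	= my_alloc_state,	// passed back to it
 *		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
 *	};
 *	if (kernel_ident_mapping_init(&info, pgd, mstart, mend))
 *		...;	// -ENOMEM: the allocator ran out of pages
 */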
static int __init parse_direct_gbpages_off(char *arg)
{
	direct_gbpages = 0;
	return 0;
}
early_param("nogbpages", parse_direct_gbpages_off);

static int __init parse_direct_gbpages_on(char *arg)
{
	direct_gbpages = 1;
	return 0;
}
early_param("gbpages", parse_direct_gbpages_on);

/*
 * NOTE: pagetable_init() allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */

pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

int force_personality32;

/*
 * noexec32=on|off
 * Control non-executable heap for 32bit processes.
 * To control the stack too, use noexec=off
 *
 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off	PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
	if (!strcmp(str, "on"))
		force_personality32 &= ~READ_IMPLIES_EXEC;
	else if (!strcmp(str, "off"))
		force_personality32 |= READ_IMPLIES_EXEC;
	return 1;
}
__setup("noexec32=", nonx32_setup);

/*
 * When memory is added or removed, make sure all the process MMs have
 * suitable PGD entries in their local PGD level page.
 */
void sync_global_pgds(unsigned long start, unsigned long end)
{
	unsigned long address;

	for (address = start; address <= end; address += PGDIR_SIZE) {
		const pgd_t *pgd_ref = pgd_offset_k(address);
		struct page *page;

		if (pgd_none(*pgd_ref))
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			/* the pgt_lock is only needed for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);
			else
				BUG_ON(pgd_page_vaddr(*pgd)
				       != pgd_page_vaddr(*pgd_ref));

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}

/*
 * NOTE: This function is marked __ref because it calls the __init function
 * alloc_bootmem_pages(). It's safe to do so ONLY while after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);

	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
		panic("set_pte_phys: cannot allocate page data %s\n",
			after_bootmem ? "after bootmem" : "");
	}

	pr_debug("spp_getpage %p\n", ptr);

	return ptr;
}

static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
{
	if (pgd_none(*pgd)) {
		pud_t *pud = (pud_t *)spp_getpage();
		pgd_populate(&init_mm, pgd, pud);
		if (pud != pud_offset(pgd, 0))
			printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
			       pud, pud_offset(pgd, 0));
	}
	return pud_offset(pgd, vaddr);
}

static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
	if (pud_none(*pud)) {
		pmd_t *pmd = (pmd_t *) spp_getpage();
		pud_populate(&init_mm, pud, pmd);
		if (pmd != pmd_offset(pud, 0))
			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
			       pmd, pmd_offset(pud, 0));
	}
	return pmd_offset(pud, vaddr);
}

static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) spp_getpage();
		pmd_populate_kernel(&init_mm, pmd, pte);
		if (pte != pte_offset_kernel(pmd, 0))
			printk(KERN_ERR "PAGETABLE BUG #02!\n");
	}
	return pte_offset_kernel(pmd, vaddr);
}

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pud = pud_page + pud_index(vaddr);
	pmd = fill_pmd(pud, vaddr);
	pte = fill_pte(pmd, vaddr);

	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	pud_t *pud_page;

	pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR
			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud_page = (pud_t*)pgd_page_vaddr(*pgd);
	set_pte_vaddr_pud(pud_page, vaddr, pteval);
}

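/*
 * Populate the kernel page table down to the PMD (or PTE) level for
 * vaddr, allocating any missing intermediate tables, and return a
 * pointer to the corresponding entry.
 */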
pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	pgd_t *pgd;
	pud_t *pud;

	pgd = pgd_offset_k(vaddr);
	pud = fill_pud(pgd, vaddr);
	return fill_pmd(pud, vaddr);
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return fill_pte(pmd, vaddr);
}

/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
					pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
	for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
		pgd = pgd_offset_k((unsigned long)__va(phys));
		if (pgd_none(*pgd)) {
			pud = (pud_t *) spp_getpage();
			set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pud = pud_offset(pgd, (unsigned long)__va(phys));
		if (pud_none(*pud)) {
			pmd = (pmd_t *) spp_getpage();
			set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pmd = pmd_offset(pud, phys);
		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
	}
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, PAGE_KERNEL_LARGE);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, PAGE_KERNEL_LARGE_NOCACHE);
}

/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end - _text)
 *
 * phys_addr holds the negative offset to the kernel, which is added
 * to the compile-time-generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _brk_end. _brk_end
 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
	unsigned long vaddr = __START_KERNEL_map;
	unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
	unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt;

	/*
	 * Native path, max_pfn_mapped is not set yet.
	 * Xen has a valid max_pfn_mapped set in
	 * arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
	 */
	if (max_pfn_mapped)
		vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);

	for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > end)
			set_pmd(pmd, __pmd(0));
	}
}

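/*
 * phys_pte_init()/phys_pmd_init()/phys_pud_init() build the kernel's
 * direct mapping of physical memory one page-table level at a time.
 * Each returns the last address it actually mapped so the caller can
 * report how far the direct mapping extends.
 */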
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
	      pgprot_t prot)
{
	unsigned long pages = 0, next;
	unsigned long last_map_addr = end;
	int i;

	pte_t *pte = pte_page + pte_index(addr);

	for (i = pte_index(addr); i < PTRS_PER_PTE; i++, addr = next, pte++) {
		next = (addr & PAGE_MASK) + PAGE_SIZE;
		if (addr >= end) {
			if (!after_bootmem &&
			    !e820_any_mapped(addr & PAGE_MASK, next, E820_RAM) &&
			    !e820_any_mapped(addr & PAGE_MASK, next, E820_RESERVED_KERN))
				set_pte(pte, __pte(0));
			continue;
		}

		/*
		 * We will re-use the existing mapping.
		 * Xen for example has some special requirements, like mapping
		 * pagetable pages as RO. So assume whoever pre-set up these
		 * mappings knew what they were doing.
		 */
		if (pte_val(*pte)) {
			if (!after_bootmem)
				pages++;
			continue;
		}

		if (0)
			printk("   pte=%p addr=%lx pte=%016lx\n",
			       pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
		pages++;
		set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot));
		last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
	}

	update_page_count(PG_LEVEL_4K, pages);

	return last_map_addr;
}

static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
	      unsigned long page_size_mask, pgprot_t prot)
{
	unsigned long pages = 0, next;
	unsigned long last_map_addr = end;

	int i = pmd_index(address);

	for (; i < PTRS_PER_PMD; i++, address = next) {
		pmd_t *pmd = pmd_page + pmd_index(address);
		pte_t *pte;
		pgprot_t new_prot = prot;

		next = (address & PMD_MASK) + PMD_SIZE;
		if (address >= end) {
			if (!after_bootmem &&
			    !e820_any_mapped(address & PMD_MASK, next, E820_RAM) &&
			    !e820_any_mapped(address & PMD_MASK, next, E820_RESERVED_KERN))
				set_pmd(pmd, __pmd(0));
			continue;
		}

		if (pmd_val(*pmd)) {
			if (!pmd_large(*pmd)) {
				spin_lock(&init_mm.page_table_lock);
				pte = (pte_t *)pmd_page_vaddr(*pmd);
				last_map_addr = phys_pte_init(pte, address,
								end, prot);
				spin_unlock(&init_mm.page_table_lock);
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_2M mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the large page mapping but
			 * use the same existing protection bits except for
			 * large page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_2M)) {
				if (!after_bootmem)
					pages++;
				last_map_addr = next;
				continue;
			}
			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
		}

		if (page_size_mask & (1<<PG_LEVEL_2M)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte((pte_t *)pmd,
				pfn_pte((address & PMD_MASK) >> PAGE_SHIFT,
					__pgprot(pgprot_val(prot) | _PAGE_PSE)));
			spin_unlock(&init_mm.page_table_lock);
			last_map_addr = next;
			continue;
		}

		pte = alloc_low_page();
		last_map_addr = phys_pte_init(pte, address, end, new_prot);

		spin_lock(&init_mm.page_table_lock);
		pmd_populate_kernel(&init_mm, pmd, pte);
		spin_unlock(&init_mm.page_table_lock);
	}
	update_page_count(PG_LEVEL_2M, pages);
	return last_map_addr;
}

static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
	      unsigned long page_size_mask)
{
	unsigned long pages = 0, next;
	unsigned long last_map_addr = end;
	int i = pud_index(addr);

	for (; i < PTRS_PER_PUD; i++, addr = next) {
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;
		pgprot_t prot = PAGE_KERNEL;

		next = (addr & PUD_MASK) + PUD_SIZE;
		if (addr >= end) {
			if (!after_bootmem &&
			    !e820_any_mapped(addr & PUD_MASK, next, E820_RAM) &&
			    !e820_any_mapped(addr & PUD_MASK, next, E820_RESERVED_KERN))
				set_pud(pud, __pud(0));
			continue;
		}

		if (pud_val(*pud)) {
			if (!pud_large(*pud)) {
				pmd = pmd_offset(pud, 0);
				last_map_addr = phys_pmd_init(pmd, addr, end,
							 page_size_mask, prot);
				__flush_tlb_all();
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_1G mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the gbpage mapping but use
			 * the same existing protection bits except for large
			 * page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_1G)) {
				if (!after_bootmem)
					pages++;
				last_map_addr = next;
				continue;
			}
			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
		}

		if (page_size_mask & (1<<PG_LEVEL_1G)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte((pte_t *)pud,
				pfn_pte((addr & PUD_MASK) >> PAGE_SHIFT,
					PAGE_KERNEL_LARGE));
			spin_unlock(&init_mm.page_table_lock);
			last_map_addr = next;
			continue;
		}

		pmd = alloc_low_page();
		last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
					      prot);

		spin_lock(&init_mm.page_table_lock);
		pud_populate(&init_mm, pud, pmd);
		spin_unlock(&init_mm.page_table_lock);
	}
	__flush_tlb_all();

	update_page_count(PG_LEVEL_1G, pages);

	return last_map_addr;
}

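/*
 * Map the physical range [start, end) into the kernel's direct mapping.
 * page_size_mask selects which large-page sizes may be used: bit
 * PG_LEVEL_2M allows 2MB pages and bit PG_LEVEL_1G allows 1GB pages.
 * Returns the last physical address that was actually mapped.
 */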
unsigned long __meminit
kernel_physical_mapping_init(unsigned long start,
			     unsigned long end,
			     unsigned long page_size_mask)
{
	bool pgd_changed = false;
	unsigned long next, last_map_addr = end;
	unsigned long addr;

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);
	addr = start;

	for (; start < end; start = next) {
		pgd_t *pgd = pgd_offset_k(start);
		pud_t *pud;

		next = (start & PGDIR_MASK) + PGDIR_SIZE;

		if (pgd_val(*pgd)) {
			pud = (pud_t *)pgd_page_vaddr(*pgd);
			last_map_addr = phys_pud_init(pud, __pa(start),
						 __pa(end), page_size_mask);
			continue;
		}

		pud = alloc_low_page();
		last_map_addr = phys_pud_init(pud, __pa(start), __pa(end),
						 page_size_mask);

		spin_lock(&init_mm.page_table_lock);
		pgd_populate(&init_mm, pgd, pud);
		spin_unlock(&init_mm.page_table_lock);
		pgd_changed = true;
	}

	if (pgd_changed)
		sync_global_pgds(addr, end - 1);

	__flush_tlb_all();

	return last_map_addr;
}

#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
}
#endif

void __init paging_init(void)
{
	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	/*
	 * Clear the default memory-state setting for node 0.
	 * Note: don't use nodes_clear() here; when NUMA support is not
	 * compiled in it really clears the state, and a later
	 * node_set_state() will not set it back.
	 */
	node_clear_state(0, N_MEMORY);
	if (N_MEMORY != N_NORMAL_MEMORY)
		node_clear_state(0, N_NORMAL_MEMORY);

	zone_sizes_init();
}

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory
 * need updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
	unsigned long end_pfn = PFN_UP(start + size);

	if (end_pfn > max_pfn) {
		max_pfn = end_pfn;
		max_low_pfn = end_pfn;
		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
	}
}

/*
 * Memory is always added to the NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	init_memory_mapping(start, start + size);

	ret = __add_pages(nid, zone, start_pfn, nr_pages);
	WARN_ON_ONCE(ret);

	/* update max_pfn, max_low_pfn and high_memory */
	update_end_of_memory_vars(start, size);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

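/*
 * Poison byte for partially used vmemmap pages: when only part of a page
 * of page structs can be freed, the unused part is filled with PAGE_INUSE,
 * and the page itself is freed once it is wholly filled with this pattern
 * (see remove_pte_table() below).
 */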
#define PAGE_INUSE 0xFD

static void __meminit free_pagetable(struct page *page, int order)
{
	struct zone *zone;
	bool bootmem = false;
	unsigned long magic;
	unsigned int nr_pages = 1 << order;

	/* bootmem page has reserved flag */
	if (PageReserved(page)) {
		__ClearPageReserved(page);
		bootmem = true;

		magic = (unsigned long)page->lru.next;
		if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
			while (nr_pages--)
				put_page_bootmem(page++);
		} else
			__free_pages_bootmem(page, order);
	} else
		free_pages((unsigned long)page_address(page), order);

	/*
	 * SECTION_INFO pages and MIX_SECTION_INFO pages
	 * are all allocated by bootmem.
	 */
	if (bootmem) {
		zone = page_zone(page);
		zone_span_writelock(zone);
		zone->present_pages += nr_pages;
		zone_span_writeunlock(zone);
		totalram_pages += nr_pages;
	}
}

static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (pte_val(*pte))
			return;
	}

	/* free a pte table */
	free_pagetable(pmd_page(*pmd), 0);
	spin_lock(&init_mm.page_table_lock);
	pmd_clear(pmd);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (pmd_val(*pmd))
			return;
	}

	/* free a pmd table */
	free_pagetable(pud_page(*pud), 0);
	spin_lock(&init_mm.page_table_lock);
	pud_clear(pud);
	spin_unlock(&init_mm.page_table_lock);
}

/* Return true if pgd is changed, otherwise return false. */
static bool __meminit free_pud_table(pud_t *pud_start, pgd_t *pgd)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (pud_val(*pud))
			return false;
	}

	/* free a pud table */
	free_pagetable(pgd_page(*pgd), 0);
	spin_lock(&init_mm.page_table_lock);
	pgd_clear(pgd);
	spin_unlock(&init_mm.page_table_lock);

	return true;
}

static void __meminit
remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
		 bool direct)
{
	unsigned long next, pages = 0;
	pte_t *pte;
	void *page_addr;
	phys_addr_t phys_addr;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		/*
		 * We mapped [0,1G) memory as identity mapping when
		 * initializing, in arch/x86/kernel/head_64.S. These
		 * pagetables cannot be removed.
		 */
		phys_addr = pte_val(*pte) + (addr & PAGE_MASK);
		if (phys_addr < (phys_addr_t)0x40000000)
			return;

		if (IS_ALIGNED(addr, PAGE_SIZE) &&
		    IS_ALIGNED(next, PAGE_SIZE)) {
			/*
			 * Do not free direct mapping pages since they were
			 * freed when offlining, or are simply not in use.
			 */
			if (!direct)
				free_pagetable(pte_page(*pte), 0);

			spin_lock(&init_mm.page_table_lock);
			pte_clear(&init_mm, addr, pte);
			spin_unlock(&init_mm.page_table_lock);

			/* For the non-direct mapping, the pages count means nothing. */
			pages++;
		} else {
			/*
			 * If we are here, we are freeing vmemmap pages since
			 * direct mapped memory ranges to be freed are aligned.
			 *
			 * If we are not removing the whole page, it means
			 * other page structs in this page are being used and
			 * we cannot remove them. So fill the unused page_structs
			 * with 0xFD, and remove the page when it is wholly
			 * filled with 0xFD.
			 */
			memset((void *)addr, PAGE_INUSE, next - addr);

			page_addr = page_address(pte_page(*pte));
			if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
				free_pagetable(pte_page(*pte), 0);

				spin_lock(&init_mm.page_table_lock);
				pte_clear(&init_mm, addr, pte);
				spin_unlock(&init_mm.page_table_lock);
			}
		}
	}

	/* Call free_pte_table() in remove_pmd_table(). */
	flush_tlb_all();
	if (direct)
		update_page_count(PG_LEVEL_4K, -pages);
}

static void __meminit
remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
		 bool direct)
{
	unsigned long next, pages = 0;
	pte_t *pte_base;
	pmd_t *pmd;
	void *page_addr;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_large(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE)) {
				if (!direct)
					free_pagetable(pmd_page(*pmd),
						       get_order(PMD_SIZE));

				spin_lock(&init_mm.page_table_lock);
				pmd_clear(pmd);
				spin_unlock(&init_mm.page_table_lock);
				pages++;
			} else {
				/* If here, we are freeing vmemmap pages. */
				memset((void *)addr, PAGE_INUSE, next - addr);

				page_addr = page_address(pmd_page(*pmd));
				if (!memchr_inv(page_addr, PAGE_INUSE,
						PMD_SIZE)) {
					free_pagetable(pmd_page(*pmd),
						       get_order(PMD_SIZE));

					spin_lock(&init_mm.page_table_lock);
					pmd_clear(pmd);
					spin_unlock(&init_mm.page_table_lock);
				}
			}

			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next, direct);
		free_pte_table(pte_base, pmd);
	}

	/* Call free_pmd_table() in remove_pud_table(). */
	if (direct)
		update_page_count(PG_LEVEL_2M, -pages);
}

static void __meminit
remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
		 bool direct)
{
	unsigned long next, pages = 0;
	pmd_t *pmd_base;
	pud_t *pud;
	void *page_addr;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_large(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE)) {
				if (!direct)
					free_pagetable(pud_page(*pud),
						       get_order(PUD_SIZE));

				spin_lock(&init_mm.page_table_lock);
				pud_clear(pud);
				spin_unlock(&init_mm.page_table_lock);
				pages++;
			} else {
				/* If here, we are freeing vmemmap pages. */
				memset((void *)addr, PAGE_INUSE, next - addr);

				page_addr = page_address(pud_page(*pud));
				if (!memchr_inv(page_addr, PAGE_INUSE,
						PUD_SIZE)) {
					free_pagetable(pud_page(*pud),
						       get_order(PUD_SIZE));

					spin_lock(&init_mm.page_table_lock);
					pud_clear(pud);
					spin_unlock(&init_mm.page_table_lock);
				}
			}

			continue;
		}

		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
		remove_pmd_table(pmd_base, addr, next, direct);
		free_pmd_table(pmd_base, pud);
	}

	if (direct)
		update_page_count(PG_LEVEL_1G, -pages);
}

/* start and end are both virtual addresses. */
static void __meminit
remove_pagetable(unsigned long start, unsigned long end, bool direct)
{
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	bool pgd_changed = false;

	for (; start < end; start = next) {
		next = pgd_addr_end(start, end);

		pgd = pgd_offset_k(start);
		if (!pgd_present(*pgd))
			continue;

		pud = (pud_t *)pgd_page_vaddr(*pgd);
		remove_pud_table(pud, start, next, direct);
		if (free_pud_table(pud, pgd))
			pgd_changed = true;
	}

	if (pgd_changed)
		sync_global_pgds(start, end - 1);

	flush_tlb_all();
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int __ref arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));
	ret = __remove_pages(zone, start_pfn, nr_pages);
	WARN_ON_ONCE(ret);

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_vsyscall;

static void __init register_page_bootmem_info(void)
{
#ifdef CONFIG_NUMA
	int i;

	for_each_online_node(i)
		register_page_bootmem_info_node(NODE_DATA(i));
#endif
}

void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;
	unsigned long absent_pages;

	pci_iommu_alloc();

	/* clear_bss() already cleared the empty_zero_page */

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
	register_page_bootmem_info();
	totalram_pages = free_all_bootmem();

	absent_pages = absent_pages_in_range(0, max_pfn);
	reservedpages = max_pfn - totalram_pages - absent_pages;
	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
			 VSYSCALL_END - VSYSCALL_START, KCORE_OTHER);

	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
			 "%ldk absent, %ldk reserved, %ldk data, %ldk init)\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		max_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		absent_pages << (PAGE_SHIFT-10),
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

int kernel_set_to_readonly;

void set_kernel_text_rw(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = PFN_ALIGN(__stop___ex_table);

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read write\n",
		 start, end);

	/*
	 * Make the kernel identity mapping for text RW. Kernel text
	 * mapping will always be RO. Refer to the comment in
	 * static_protections() in pageattr.c
	 */
	set_memory_rw(start, (end - start) >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = PFN_ALIGN(__stop___ex_table);

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read only\n",
		 start, end);

	/*
	 * Set the kernel identity mapping for text RO.
	 */
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long rodata_start = PFN_ALIGN(__start_rodata);
	unsigned long end = (unsigned long) &__end_rodata_hpage_align;
	unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
	unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
	unsigned long all_end = PFN_ALIGN(&_end);

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       (end - start) >> 10);
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);

	kernel_set_to_readonly = 1;

	/*
	 * The rodata/data/bss/brk section (but not the kernel text!)
	 * should also be not-executable.
	 */
	set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT);

	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
	set_memory_rw(start, (end-start) >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: again\n");
	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif

	free_init_pages("unused kernel memory",
			(unsigned long) __va(__pa_symbol(text_end)),
			(unsigned long) __va(__pa_symbol(rodata_start)));

	free_init_pages("unused kernel memory",
			(unsigned long) __va(__pa_symbol(rodata_end)),
			(unsigned long) __va(__pa_symbol(_sdata)));
}

#endif

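/*
 * Walk the kernel page table for addr level by level and report whether
 * it resolves to a valid pfn; 1GB and 2MB large pages are handled.
 */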
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_large(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}

/*
 * A pseudo VMA to allow ptrace access for the vsyscall page. This only
 * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
 * not need special handling anymore:
 */
static struct vm_area_struct gate_vma = {
	.vm_start	= VSYSCALL_START,
	.vm_end		= VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
	.vm_page_prot	= PAGE_READONLY_EXEC,
	.vm_flags	= VM_READ | VM_EXEC
};

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
#ifdef CONFIG_IA32_EMULATION
	if (!mm || mm->context.ia32_compat)
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(mm);

	if (!vma)
		return 0;

	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/*
 * Use this when you have no reliable mm, typically from interrupt
 * context. It is less reliable than using a task's mm and may give
 * false positives.
 */
int in_gate_area_no_mm(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
		return "[vdso]";
	if (vma == &gate_vma)
		return "[vsyscall]";
	return NULL;
}

#ifdef CONFIG_X86_UV
unsigned long memory_block_size_bytes(void)
{
	if (is_uv_system()) {
		printk(KERN_INFO "UV: memory block size 2GB\n");
		return 2UL * 1024 * 1024 * 1024;
	}
	return MIN_MEMORY_BLOCK_SIZE;
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
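/*
 * Track the most recently populated range and its node so contiguous
 * allocations can be coalesced into a single debug printout
 * (see vmemmap_populate_print_last()).
 */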
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

int __meminit
vmemmap_populate(struct page *start_page, unsigned long size, int node)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + size);
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	for (; addr < end; addr = next) {
		void *p = NULL;

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		if (!cpu_has_pse) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			pmd = vmemmap_pmd_populate(pud, addr, node);

			if (!pmd)
				return -ENOMEM;

			p = vmemmap_pte_populate(pmd, addr, node);

			if (!p)
				return -ENOMEM;

			addr_end = addr + PAGE_SIZE;
			p_end = p + PAGE_SIZE;
		} else {
			next = pmd_addr_end(addr, end);

			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd)) {
				pte_t entry;

				p = vmemmap_alloc_block_buf(PMD_SIZE, node);
				if (!p)
					return -ENOMEM;

				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
						PAGE_KERNEL_LARGE);
				set_pmd(pmd, __pmd(pte_val(entry)));

				/* check to see if we have contiguous blocks */
				if (p_end != p || node_start != node) {
					if (p_start)
						printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
						       addr_start, addr_end-1, p_start, p_end-1, node_start);
					addr_start = addr;
					node_start = node;
					p_start = p;
				}

				addr_end = addr + PMD_SIZE;
				p_end = p + PMD_SIZE;
			} else
				vmemmap_verify((pte_t *)pmd, node, addr, next);
		}
	}
	sync_global_pgds((unsigned long)start_page, end - 1);
	return 0;
}

#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long size)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + size);
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned int nr_pages;
	struct page *page;

	for (; addr < end; addr = next) {
		pte_t *pte = NULL;

		pgd = pgd_offset_k(addr);
		if (pgd_none(*pgd)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);

		pud = pud_offset(pgd, addr);
		if (pud_none(*pud)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);

		if (!cpu_has_pse) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd))
				continue;
			get_page_bootmem(section_nr, pmd_page(*pmd),
					 MIX_SECTION_INFO);

			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte))
				continue;
			get_page_bootmem(section_nr, pte_page(*pte),
					 SECTION_INFO);
		} else {
			next = pmd_addr_end(addr, end);

			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd))
				continue;

			nr_pages = 1 << (get_order(PMD_SIZE));
			page = pmd_page(*pmd);
			while (nr_pages--)
				get_page_bootmem(section_nr, page++,
						 SECTION_INFO);
		}
	}
}
#endif

void __meminit vmemmap_populate_print_last(void)
{
	if (p_start) {
		printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
		       addr_start, addr_end-1, p_start, p_end-1, node_start);
		p_start = NULL;
		p_end = NULL;
		node_start = 0;
	}
}
#endif