/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/memremap.h>
#include <linux/nmi.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/cacheflush.h>
#include <asm/init.h>
#include <asm/setup.h>

#include "mm_internal.h"

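/*
 * Note (added for orientation): ident_pmd_init(), ident_pud_init() and
 * kernel_ident_mapping_init() below build a 1:1 ("identity") mapping of a
 * physical range into a caller-supplied page table, allocating any missing
 * intermediate tables through the alloc_pgt_page() callback carried in
 * struct x86_mapping_info.  Entries that are already present are reused
 * rather than overwritten.
 */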
static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
			   unsigned long addr, unsigned long end)
{
	addr &= PMD_MASK;
	for (; addr < end; addr += PMD_SIZE) {
		pmd_t *pmd = pmd_page + pmd_index(addr);

		if (!pmd_present(*pmd))
			set_pmd(pmd, __pmd(addr | pmd_flag));
	}
}
static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next) {
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		next = (addr & PUD_MASK) + PUD_SIZE;
		if (next > end)
			next = end;

		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, 0);
			ident_pmd_init(info->pmd_flag, pmd, addr, next);
			continue;
		}
		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
		if (!pmd)
			return -ENOMEM;
		ident_pmd_init(info->pmd_flag, pmd, addr, next);
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	}

	return 0;
}

int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
			      unsigned long addr, unsigned long end)
{
	unsigned long next;
	int result;
	int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;

	for (; addr < end; addr = next) {
		pgd_t *pgd = pgd_page + pgd_index(addr) + off;
		pud_t *pud;

		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
		if (next > end)
			next = end;

		if (pgd_present(*pgd)) {
			pud = pud_offset(pgd, 0);
			result = ident_pud_init(info, pud, addr, next);
			if (result)
				return result;
			continue;
		}

		pud = (pud_t *)info->alloc_pgt_page(info->context);
		if (!pud)
			return -ENOMEM;
		result = ident_pud_init(info, pud, addr, next);
		if (result)
			return result;
		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
	}

	return 0;
}

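/*
 * Illustrative sketch (not part of this file): callers such as the kexec
 * and hibernation code are expected to fill in a struct x86_mapping_info
 * whose alloc_pgt_page() callback returns zeroed pages for the new tables.
 * The callback and context names below are hypothetical; this only shows
 * the calling convention implied by the code above.
 *
 *	struct x86_mapping_info info = {
 *		.alloc_pgt_page	= my_alloc_pgt_page,	// hypothetical helper
 *		.context	= my_context,		// hypothetical cookie
 *		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
 *	};
 *	ret = kernel_ident_mapping_init(&info, pgd, mstart, mend);
 */
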
/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

pteval_t __supported_pte_mask __read_mostly = ~0;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

int force_personality32;

/*
 * noexec32=on|off
 * Control the non-executable heap for 32-bit processes.
 * To control the stack too, use noexec=off
 *
 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off	PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
	if (!strcmp(str, "on"))
		force_personality32 &= ~READ_IMPLIES_EXEC;
	else if (!strcmp(str, "off"))
		force_personality32 |= READ_IMPLIES_EXEC;
	return 1;
}
__setup("noexec32=", nonx32_setup);

/*
 * When memory is added or removed, make sure all the processes' MMs have
 * suitable PGD entries in the local PGD-level page.
 */
void sync_global_pgds(unsigned long start, unsigned long end, int removed)
{
	unsigned long address;

	for (address = start; address <= end; address += PGDIR_SIZE) {
		const pgd_t *pgd_ref = pgd_offset_k(address);
		struct page *page;

		/*
		 * When it is called after memory hot remove, pgd_none()
		 * returns true. In this case (removed == 1), we must clear
		 * the PGD entries in the local PGD level page.
		 */
		if (pgd_none(*pgd_ref) && !removed)
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			/* the pgt_lock is only needed for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
				BUG_ON(pgd_page_vaddr(*pgd)
				       != pgd_page_vaddr(*pgd_ref));

			if (removed) {
				if (pgd_none(*pgd_ref) && !pgd_none(*pgd))
					pgd_clear(pgd);
			} else {
				if (pgd_none(*pgd))
					set_pgd(pgd, *pgd_ref);
			}

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}

/*
 * NOTE: This function is marked __ref because it calls the __init function
 * alloc_bootmem_pages(). It is safe to do so ONLY when after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);

	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
		panic("set_pte_phys: cannot allocate page data %s\n",
			after_bootmem ? "after bootmem" : "");
	}

	pr_debug("spp_getpage %p\n", ptr);

	return ptr;
}

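/*
 * The fill_*() helpers below walk one level of the kernel page tables for
 * a virtual address, allocating the next-level table with spp_getpage()
 * when it is missing, and return a pointer to the entry covering @vaddr.
 * They back set_pte_vaddr()/set_pte_vaddr_pud() and the
 * populate_extra_{pmd,pte}() helpers further down.
 */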
static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
{
	if (pgd_none(*pgd)) {
		pud_t *pud = (pud_t *)spp_getpage();
		pgd_populate(&init_mm, pgd, pud);
		if (pud != pud_offset(pgd, 0))
			printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
			       pud, pud_offset(pgd, 0));
	}
	return pud_offset(pgd, vaddr);
}

static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
	if (pud_none(*pud)) {
		pmd_t *pmd = (pmd_t *) spp_getpage();
		pud_populate(&init_mm, pud, pmd);
		if (pmd != pmd_offset(pud, 0))
			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
			       pmd, pmd_offset(pud, 0));
	}
	return pmd_offset(pud, vaddr);
}

static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) spp_getpage();
		pmd_populate_kernel(&init_mm, pmd, pte);
		if (pte != pte_offset_kernel(pmd, 0))
			printk(KERN_ERR "PAGETABLE BUG #02!\n");
	}
	return pte_offset_kernel(pmd, vaddr);
}

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pud = pud_page + pud_index(vaddr);
	pmd = fill_pmd(pud, vaddr);
	pte = fill_pte(pmd, vaddr);

	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	pud_t *pud_page;

	pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR
			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud_page = (pud_t*)pgd_page_vaddr(*pgd);
	set_pte_vaddr_pud(pud_page, vaddr, pteval);
}

pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	pgd_t *pgd;
	pud_t *pud;

	pgd = pgd_offset_k(vaddr);
	pud = fill_pud(pgd, vaddr);
	return fill_pmd(pud, vaddr);
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return fill_pte(pmd, vaddr);
}

/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
					enum page_cache_mode cache)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pgprot_t prot;

	pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) |
		pgprot_val(pgprot_4k_2_large(cachemode2pgprot(cache)));
	BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
	for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
		pgd = pgd_offset_k((unsigned long)__va(phys));
		if (pgd_none(*pgd)) {
			pud = (pud_t *) spp_getpage();
			set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pud = pud_offset(pgd, (unsigned long)__va(phys));
		if (pud_none(*pud)) {
			pmd = (pmd_t *) spp_getpage();
			set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pmd = pmd_offset(pud, phys);
		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
	}
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC);
}

/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 *
 * phys_base holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _brk_end.  _brk_end
 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
	unsigned long vaddr = __START_KERNEL_map;
	unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
	unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt;

	/*
	 * Native path, max_pfn_mapped is not set yet.
	 * Xen has valid max_pfn_mapped set in
	 *	arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
	 */
	if (max_pfn_mapped)
		vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);

	for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > end)
			set_pmd(pmd, __pmd(0));
	}
}

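/*
 * phys_pte_init() creates 4k mappings in one PTE page for the range
 * [addr, end), reusing entries that were already set up (e.g. by Xen) and
 * clearing entries for holes that are neither E820_RAM nor
 * E820_RESERVED_KERN.  Like the other phys_*_init() helpers below, it
 * returns the end of the last range it mapped so the caller can track
 * progress.
 */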
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
	      pgprot_t prot)
{
	unsigned long pages = 0, next;
	unsigned long last_map_addr = end;
	int i;

	pte_t *pte = pte_page + pte_index(addr);

	for (i = pte_index(addr); i < PTRS_PER_PTE; i++, addr = next, pte++) {
		next = (addr & PAGE_MASK) + PAGE_SIZE;
		if (addr >= end) {
			if (!after_bootmem &&
			    !e820_any_mapped(addr & PAGE_MASK, next, E820_RAM) &&
			    !e820_any_mapped(addr & PAGE_MASK, next, E820_RESERVED_KERN))
				set_pte(pte, __pte(0));
			continue;
		}

		/*
		 * We will re-use the existing mapping.
		 * Xen for example has some special requirements, like mapping
		 * pagetable pages as RO. So assume that whoever pre-set up
		 * these mappings knew what they were doing.
		 */
		if (pte_val(*pte)) {
			if (!after_bootmem)
				pages++;
			continue;
		}

		if (0)
			printk("   pte=%p addr=%lx pte=%016lx\n",
			       pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
		pages++;
		set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot));
		last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
	}

	update_page_count(PG_LEVEL_4K, pages);

	return last_map_addr;
}

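/*
 * phys_pmd_init() does the same one level up: it maps [address, end) with
 * 2M pages when page_size_mask allows PG_LEVEL_2M, and otherwise allocates
 * a PTE page and descends into phys_pte_init().  Pre-existing mappings are
 * kept, or re-created with the same protection bits, per the Intel TLB
 * application note cited in the comment inside.
 */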
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
	      unsigned long page_size_mask, pgprot_t prot)
{
	unsigned long pages = 0, next;
	unsigned long last_map_addr = end;

	int i = pmd_index(address);

	for (; i < PTRS_PER_PMD; i++, address = next) {
		pmd_t *pmd = pmd_page + pmd_index(address);
		pte_t *pte;
		pgprot_t new_prot = prot;

		next = (address & PMD_MASK) + PMD_SIZE;
		if (address >= end) {
			if (!after_bootmem &&
			    !e820_any_mapped(address & PMD_MASK, next, E820_RAM) &&
			    !e820_any_mapped(address & PMD_MASK, next, E820_RESERVED_KERN))
				set_pmd(pmd, __pmd(0));
			continue;
		}

		if (pmd_val(*pmd)) {
			if (!pmd_large(*pmd)) {
				spin_lock(&init_mm.page_table_lock);
				pte = (pte_t *)pmd_page_vaddr(*pmd);
				last_map_addr = phys_pte_init(pte, address,
								end, prot);
				spin_unlock(&init_mm.page_table_lock);
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_2M mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the large page mapping but
			 * use the same existing protection bits except for
			 * large page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_2M)) {
				if (!after_bootmem)
					pages++;
				last_map_addr = next;
				continue;
			}
			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
		}

		if (page_size_mask & (1<<PG_LEVEL_2M)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte((pte_t *)pmd,
				pfn_pte((address & PMD_MASK) >> PAGE_SHIFT,
					__pgprot(pgprot_val(prot) | _PAGE_PSE)));
			spin_unlock(&init_mm.page_table_lock);
			last_map_addr = next;
			continue;
		}

		pte = alloc_low_page();
		last_map_addr = phys_pte_init(pte, address, end, new_prot);

		spin_lock(&init_mm.page_table_lock);
		pmd_populate_kernel(&init_mm, pmd, pte);
		spin_unlock(&init_mm.page_table_lock);
	}
	update_page_count(PG_LEVEL_2M, pages);
	return last_map_addr;
}

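/*
 * phys_pud_init() is the PUD-level counterpart: it uses 1G pages when
 * page_size_mask allows PG_LEVEL_1G, otherwise it allocates a PMD page
 * and descends into phys_pmd_init().
 */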
static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
	      unsigned long page_size_mask)
{
	unsigned long pages = 0, next;
	unsigned long last_map_addr = end;
	int i = pud_index(addr);

	for (; i < PTRS_PER_PUD; i++, addr = next) {
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;
		pgprot_t prot = PAGE_KERNEL;

		next = (addr & PUD_MASK) + PUD_SIZE;
		if (addr >= end) {
			if (!after_bootmem &&
			    !e820_any_mapped(addr & PUD_MASK, next, E820_RAM) &&
			    !e820_any_mapped(addr & PUD_MASK, next, E820_RESERVED_KERN))
				set_pud(pud, __pud(0));
			continue;
		}

		if (pud_val(*pud)) {
			if (!pud_large(*pud)) {
				pmd = pmd_offset(pud, 0);
				last_map_addr = phys_pmd_init(pmd, addr, end,
							 page_size_mask, prot);
				__flush_tlb_all();
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_1G mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the gbpage mapping but use
			 * the same existing protection bits except for large
			 * page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_1G)) {
				if (!after_bootmem)
					pages++;
				last_map_addr = next;
				continue;
			}
			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
		}

		if (page_size_mask & (1<<PG_LEVEL_1G)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte((pte_t *)pud,
				pfn_pte((addr & PUD_MASK) >> PAGE_SHIFT,
					PAGE_KERNEL_LARGE));
			spin_unlock(&init_mm.page_table_lock);
			last_map_addr = next;
			continue;
		}

		pmd = alloc_low_page();
		last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
					      prot);

		spin_lock(&init_mm.page_table_lock);
		pud_populate(&init_mm, pud, pmd);
		spin_unlock(&init_mm.page_table_lock);
	}
	__flush_tlb_all();

	update_page_count(PG_LEVEL_1G, pages);

	return last_map_addr;
}

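/*
 * kernel_physical_mapping_init() creates the kernel direct mapping for the
 * physical range [start, end), using the largest page sizes permitted by
 * page_size_mask, and returns the last address it mapped.  Newly installed
 * PGD entries are propagated to every mm on the pgd_list via
 * sync_global_pgds().  It is typically reached through
 * init_memory_mapping() in arch/x86/mm/init.c; that caller is mentioned
 * here only for orientation.
 */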
unsigned long __meminit
kernel_physical_mapping_init(unsigned long start,
			     unsigned long end,
			     unsigned long page_size_mask)
{
	bool pgd_changed = false;
	unsigned long next, last_map_addr = end;
	unsigned long addr;

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);
	addr = start;

	for (; start < end; start = next) {
		pgd_t *pgd = pgd_offset_k(start);
		pud_t *pud;

		next = (start & PGDIR_MASK) + PGDIR_SIZE;

		if (pgd_val(*pgd)) {
			pud = (pud_t *)pgd_page_vaddr(*pgd);
			last_map_addr = phys_pud_init(pud, __pa(start),
						 __pa(end), page_size_mask);
			continue;
		}

		pud = alloc_low_page();
		last_map_addr = phys_pud_init(pud, __pa(start), __pa(end),
						 page_size_mask);

		spin_lock(&init_mm.page_table_lock);
		pgd_populate(&init_mm, pgd, pud);
		spin_unlock(&init_mm.page_table_lock);
		pgd_changed = true;
	}

	if (pgd_changed)
		sync_global_pgds(addr, end - 1, 0);

	__flush_tlb_all();

	return last_map_addr;
}

#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
}
#endif

void __init paging_init(void)
{
	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	/*
	 * Clear the default setting for node 0.
	 * Note: don't use nodes_clear here; that really clears when NUMA
	 * support is not compiled in, and later node_set_state()
	 * will not set it back.
	 */
	node_clear_state(0, N_MEMORY);
	if (N_MEMORY != N_NORMAL_MEMORY)
		node_clear_state(0, N_NORMAL_MEMORY);

	zone_sizes_init();
}

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory
 * need updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
	unsigned long end_pfn = PFN_UP(start + size);

	if (end_pfn > max_pfn) {
		max_pfn = end_pfn;
		max_low_pfn = end_pfn;
		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
	}
}

/*
 * Memory is always added to the NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones +
		zone_for_memory(nid, start, size, ZONE_NORMAL, for_device);
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	init_memory_mapping(start, start + size);

	ret = __add_pages(nid, zone, start_pfn, nr_pages);
	WARN_ON_ONCE(ret);

	/* update max_pfn, max_low_pfn and high_memory */
	update_end_of_memory_vars(start, size);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

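/*
 * The helpers below implement the teardown side used by memory hot-remove
 * and vmemmap_free().  free_pagetable() returns a page either to its
 * struct vmem_altmap, to the bootmem-aware freeing path, or to the page
 * allocator.  PAGE_INUSE (0xFD) is the poison value written into partially
 * freed vmemmap pages, so a page is only released once every byte in it
 * carries that value.
 */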
#define PAGE_INUSE 0xFD

static void __meminit free_pagetable(struct page *page, int order)
{
	unsigned long magic;
	unsigned int nr_pages = 1 << order;
	struct vmem_altmap *altmap = to_vmem_altmap((unsigned long) page);

	if (altmap) {
		vmem_altmap_free(altmap, nr_pages);
		return;
	}

	/* bootmem page has reserved flag */
	if (PageReserved(page)) {
		__ClearPageReserved(page);

		magic = (unsigned long)page->lru.next;
		if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
			while (nr_pages--)
				put_page_bootmem(page++);
		} else
			while (nr_pages--)
				free_reserved_page(page++);
	} else
		free_pages((unsigned long)page_address(page), order);
}

static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (pte_val(*pte))
			return;
	}

	/* free a pte table */
	free_pagetable(pmd_page(*pmd), 0);
	spin_lock(&init_mm.page_table_lock);
	pmd_clear(pmd);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (pmd_val(*pmd))
			return;
	}

	/* free a pmd table */
	free_pagetable(pud_page(*pud), 0);
	spin_lock(&init_mm.page_table_lock);
	pud_clear(pud);
	spin_unlock(&init_mm.page_table_lock);
}

/* Return true if pgd is changed, otherwise return false. */
static bool __meminit free_pud_table(pud_t *pud_start, pgd_t *pgd)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (pud_val(*pud))
			return false;
	}

	/* free a pud table */
	free_pagetable(pgd_page(*pgd), 0);
	spin_lock(&init_mm.page_table_lock);
	pgd_clear(pgd);
	spin_unlock(&init_mm.page_table_lock);

	return true;
}

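/*
 * remove_{pte,pmd,pud}_table() and remove_pagetable() walk and unmap a
 * range one level at a time.  For the direct mapping (direct == true) only
 * the page-table entries are cleared and the direct-map page counts are
 * adjusted; for vmemmap ranges the backing pages themselves are freed once
 * they are wholly poisoned with PAGE_INUSE.
 */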
static void __meminit
remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
		 bool direct)
{
	unsigned long next, pages = 0;
	pte_t *pte;
	void *page_addr;
	phys_addr_t phys_addr;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		/*
		 * We mapped [0,1G) memory as identity mapping when
		 * initializing, in arch/x86/kernel/head_64.S. These
		 * pagetables cannot be removed.
		 */
		phys_addr = pte_val(*pte) + (addr & PAGE_MASK);
		if (phys_addr < (phys_addr_t)0x40000000)
			return;

		if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) {
			/*
			 * Do not free direct mapping pages since they were
			 * freed when offlining, or simply not in use.
			 */
			if (!direct)
				free_pagetable(pte_page(*pte), 0);

			spin_lock(&init_mm.page_table_lock);
			pte_clear(&init_mm, addr, pte);
			spin_unlock(&init_mm.page_table_lock);

			/* For non-direct mapping, pages means nothing. */
			pages++;
		} else {
			/*
			 * If we are here, we are freeing vmemmap pages since
			 * direct mapped memory ranges to be freed are aligned.
			 *
			 * If we are not removing the whole page, it means
			 * other page structs in this page are being used and
			 * we cannot remove them. So fill the unused page_structs
			 * with 0xFD, and remove the page when it is wholly
			 * filled with 0xFD.
			 */
			memset((void *)addr, PAGE_INUSE, next - addr);

			page_addr = page_address(pte_page(*pte));
			if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
				free_pagetable(pte_page(*pte), 0);

				spin_lock(&init_mm.page_table_lock);
				pte_clear(&init_mm, addr, pte);
				spin_unlock(&init_mm.page_table_lock);
			}
		}
	}

	/* Call free_pte_table() in remove_pmd_table(). */
	flush_tlb_all();
	if (direct)
		update_page_count(PG_LEVEL_4K, -pages);
}

static void __meminit
remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
		 bool direct)
{
	unsigned long next, pages = 0;
	pte_t *pte_base;
	pmd_t *pmd;
	void *page_addr;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_large(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE)) {
				if (!direct)
					free_pagetable(pmd_page(*pmd),
						       get_order(PMD_SIZE));

				spin_lock(&init_mm.page_table_lock);
				pmd_clear(pmd);
				spin_unlock(&init_mm.page_table_lock);
				pages++;
			} else {
				/* If here, we are freeing vmemmap pages. */
				memset((void *)addr, PAGE_INUSE, next - addr);

				page_addr = page_address(pmd_page(*pmd));
				if (!memchr_inv(page_addr, PAGE_INUSE,
						PMD_SIZE)) {
					free_pagetable(pmd_page(*pmd),
						       get_order(PMD_SIZE));

					spin_lock(&init_mm.page_table_lock);
					pmd_clear(pmd);
					spin_unlock(&init_mm.page_table_lock);
				}
			}

			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next, direct);
		free_pte_table(pte_base, pmd);
	}

	/* Call free_pmd_table() in remove_pud_table(). */
	if (direct)
		update_page_count(PG_LEVEL_2M, -pages);
}

static void __meminit
remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
		 bool direct)
{
	unsigned long next, pages = 0;
	pmd_t *pmd_base;
	pud_t *pud;
	void *page_addr;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_large(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE)) {
				if (!direct)
					free_pagetable(pud_page(*pud),
						       get_order(PUD_SIZE));

				spin_lock(&init_mm.page_table_lock);
				pud_clear(pud);
				spin_unlock(&init_mm.page_table_lock);
				pages++;
			} else {
				/* If here, we are freeing vmemmap pages. */
				memset((void *)addr, PAGE_INUSE, next - addr);

				page_addr = page_address(pud_page(*pud));
				if (!memchr_inv(page_addr, PAGE_INUSE,
						PUD_SIZE)) {
					free_pagetable(pud_page(*pud),
						       get_order(PUD_SIZE));

					spin_lock(&init_mm.page_table_lock);
					pud_clear(pud);
					spin_unlock(&init_mm.page_table_lock);
				}
			}

			continue;
		}

		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
		remove_pmd_table(pmd_base, addr, next, direct);
		free_pmd_table(pmd_base, pud);
	}

	if (direct)
		update_page_count(PG_LEVEL_1G, -pages);
}

/* start and end are both virtual addresses. */
static void __meminit
remove_pagetable(unsigned long start, unsigned long end, bool direct)
{
	unsigned long next;
	unsigned long addr;
	pgd_t *pgd;
	pud_t *pud;
	bool pgd_changed = false;

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		pud = (pud_t *)pgd_page_vaddr(*pgd);
		remove_pud_table(pud, addr, next, direct);
		if (free_pud_table(pud, pgd))
			pgd_changed = true;
	}

	if (pgd_changed)
		sync_global_pgds(start, end - 1, 1);

	flush_tlb_all();
}

void __ref vmemmap_free(unsigned long start, unsigned long end)
{
	remove_pagetable(start, end, false);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void __meminit
kernel_physical_mapping_remove(unsigned long start, unsigned long end)
{
	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	remove_pagetable(start, end, true);
}

int __ref arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page = pfn_to_page(start_pfn);
	struct vmem_altmap *altmap;
	struct zone *zone;
	int ret;

	/* With altmap the first mapped page is offset from @start */
	altmap = to_vmem_altmap((unsigned long) page);
	if (altmap)
		page += vmem_altmap_offset(altmap);
	zone = page_zone(page);
	ret = __remove_pages(zone, start_pfn, nr_pages);
	WARN_ON_ONCE(ret);
	kernel_physical_mapping_remove(start, start + size);

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_vsyscall;

static void __init register_page_bootmem_info(void)
{
#ifdef CONFIG_NUMA
	int i;

	for_each_online_node(i)
		register_page_bootmem_info_node(NODE_DATA(i));
#endif
}

void __init mem_init(void)
{
	pci_iommu_alloc();

	/* clear_bss() has already cleared the empty_zero_page */

	register_page_bootmem_info();

	/* this will put all memory onto the freelists */
	free_all_bootmem();
	after_bootmem = 1;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR,
			 PAGE_SIZE, KCORE_OTHER);

	mem_init_print_info(NULL);
}

const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

int kernel_set_to_readonly;

void set_kernel_text_rw(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = PFN_ALIGN(__stop___ex_table);

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read write\n",
		 start, end);

	/*
	 * Make the kernel identity mapping for text RW. Kernel text
	 * mapping will always be RO. Refer to the comment in
	 * static_protections() in pageattr.c
	 */
	set_memory_rw(start, (end - start) >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = PFN_ALIGN(__stop___ex_table);

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read only\n",
		 start, end);

	/*
	 * Set the kernel identity mapping for text RO.
	 */
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long rodata_start = PFN_ALIGN(__start_rodata);
	unsigned long end = (unsigned long) &__end_rodata_hpage_align;
	unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
	unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
	unsigned long all_end;

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       (end - start) >> 10);
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);

	kernel_set_to_readonly = 1;

	/*
	 * The rodata/data/bss/brk section (but not the kernel text!)
	 * should also be not-executable.
	 *
	 * We align all_end to PMD_SIZE because the existing mapping
	 * is a full PMD. If we would align _brk_end to PAGE_SIZE we
	 * split the PMD and the remainder between _brk_end and the end
	 * of the PMD will remain mapped executable.
	 *
	 * Any PMD which was setup after the one which covers _brk_end
	 * has been zapped already via cleanup_highmap().
	 */
	all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
	set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);

	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
	set_memory_rw(start, (end-start) >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: again\n");
	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif

	free_init_pages("unused kernel",
			(unsigned long) __va(__pa_symbol(text_end)),
			(unsigned long) __va(__pa_symbol(rodata_start)));
	free_init_pages("unused kernel",
			(unsigned long) __va(__pa_symbol(rodata_end)),
			(unsigned long) __va(__pa_symbol(_sdata)));

	debug_checkwx();
}

4e4eee0e 1167
14a62c34
TG
1168int kern_addr_valid(unsigned long addr)
1169{
1da177e4 1170 unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
14a62c34
TG
1171 pgd_t *pgd;
1172 pud_t *pud;
1173 pmd_t *pmd;
1174 pte_t *pte;
1da177e4
LT
1175
1176 if (above != 0 && above != -1UL)
14a62c34
TG
1177 return 0;
1178
1da177e4
LT
1179 pgd = pgd_offset_k(addr);
1180 if (pgd_none(*pgd))
1181 return 0;
1182
1183 pud = pud_offset(pgd, addr);
1184 if (pud_none(*pud))
14a62c34 1185 return 0;
1da177e4 1186
0ee364eb
MG
1187 if (pud_large(*pud))
1188 return pfn_valid(pud_pfn(*pud));
1189
1da177e4
LT
1190 pmd = pmd_offset(pud, addr);
1191 if (pmd_none(*pmd))
1192 return 0;
14a62c34 1193
1da177e4
LT
1194 if (pmd_large(*pmd))
1195 return pfn_valid(pmd_pfn(*pmd));
1196
1197 pte = pte_offset_kernel(pmd, addr);
1198 if (pte_none(*pte))
1199 return 0;
14a62c34 1200
1da177e4
LT
1201 return pfn_valid(pte_pfn(*pte));
1202}
1203
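/*
 * probe_memory_block_size() picks the hotplug section size reported by
 * memory_block_size_bytes(): 2GB on large-memory systems (64GB of RAM or
 * more), MIN_MEMORY_BLOCK_SIZE when less than 64GB is installed, and
 * otherwise the largest power of two up to 2GB that evenly divides the
 * end of memory.
 */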
static unsigned long probe_memory_block_size(void)
{
	/* start from 2g */
	unsigned long bz = 1UL<<31;

	if (totalram_pages >= (64ULL << (30 - PAGE_SHIFT))) {
		pr_info("Using 2GB memory block size for large-memory system\n");
		return 2UL * 1024 * 1024 * 1024;
	}

	/* less than 64g installed */
	if ((max_pfn << PAGE_SHIFT) < (16UL << 32))
		return MIN_MEMORY_BLOCK_SIZE;

	/* get the tail size */
	while (bz > MIN_MEMORY_BLOCK_SIZE) {
		if (!((max_pfn << PAGE_SHIFT) & (bz - 1)))
			break;
		bz >>= 1;
	}

	printk(KERN_DEBUG "memory block size : %ldMB\n", bz >> 20);

	return bz;
}

static unsigned long memory_block_size_probed;
unsigned long memory_block_size_bytes(void)
{
	if (!memory_block_size_probed)
		memory_block_size_probed = probe_memory_block_size();

	return memory_block_size_probed;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

static int __meminit vmemmap_populate_hugepages(unsigned long start,
		unsigned long end, int node, struct vmem_altmap *altmap)
{
	unsigned long addr;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	for (addr = start; addr < end; addr = next) {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p;

			p = __vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
			if (p) {
				pte_t entry;

				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
						PAGE_KERNEL_LARGE);
				set_pmd(pmd, __pmd(pte_val(entry)));

				/* check to see if we have contiguous blocks */
				if (p_end != p || node_start != node) {
					if (p_start)
						pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
						       addr_start, addr_end-1, p_start, p_end-1, node_start);
					addr_start = addr;
					node_start = node;
					p_start = p;
				}

				addr_end = addr + PMD_SIZE;
				p_end = p + PMD_SIZE;
				continue;
			} else if (altmap)
				return -ENOMEM; /* no fallback */
		} else if (pmd_large(*pmd)) {
			vmemmap_verify((pte_t *)pmd, node, addr, next);
			continue;
		}
		pr_warn_once("vmemmap: falling back to regular page backing\n");
		if (vmemmap_populate_basepages(addr, next, node))
			return -ENOMEM;
	}
	return 0;
}

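/*
 * vmemmap_populate() chooses the backing for the virtual memmap: PMD-sized
 * huge pages when the CPU has PSE, otherwise base pages.  A struct
 * vmem_altmap (device-provided backing, e.g. for ZONE_DEVICE ranges)
 * requires the huge-page path and fails with -ENOMEM when PSE is
 * unavailable.
 */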
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	struct vmem_altmap *altmap = to_vmem_altmap(start);
	int err;

	if (cpu_has_pse)
		err = vmemmap_populate_hugepages(start, end, node, altmap);
	else if (altmap) {
		pr_err_once("%s: no cpu support for altmap allocations\n",
				__func__);
		err = -ENOMEM;
	} else
		err = vmemmap_populate_basepages(start, end, node);
	if (!err)
		sync_global_pgds(start, end - 1, 0);
	return err;
}

#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long size)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + size);
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned int nr_pages;
	struct page *page;

	for (; addr < end; addr = next) {
		pte_t *pte = NULL;

		pgd = pgd_offset_k(addr);
		if (pgd_none(*pgd)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);

		pud = pud_offset(pgd, addr);
		if (pud_none(*pud)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);

		if (!cpu_has_pse) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd))
				continue;
			get_page_bootmem(section_nr, pmd_page(*pmd),
					 MIX_SECTION_INFO);

			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte))
				continue;
			get_page_bootmem(section_nr, pte_page(*pte),
					 SECTION_INFO);
		} else {
			next = pmd_addr_end(addr, end);

			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd))
				continue;

			nr_pages = 1 << (get_order(PMD_SIZE));
			page = pmd_page(*pmd);
			while (nr_pages--)
				get_page_bootmem(section_nr, page++,
						 SECTION_INFO);
		}
	}
}
#endif

void __meminit vmemmap_populate_print_last(void)
{
	if (p_start) {
		pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
			addr_start, addr_end-1, p_start, p_end-1, node_start);
		p_start = NULL;
		p_end = NULL;
		node_start = 0;
	}
}
#endif