/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/nmi.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/cacheflush.h>
#include <asm/init.h>
#include <asm/uv/uv.h>
#include <asm/setup.h>

#include "mm_internal.h"

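/*
 * Fill one PMD-level page with 2MB identity-mapping entries for the
 * range [addr, end), leaving any entries that are already present
 * untouched.
 */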
static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
			   unsigned long addr, unsigned long end)
{
	addr &= PMD_MASK;
	for (; addr < end; addr += PMD_SIZE) {
		pmd_t *pmd = pmd_page + pmd_index(addr);

		if (!pmd_present(*pmd))
			set_pmd(pmd, __pmd(addr | pmd_flag));
	}
}
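
/*
 * Populate the PUD-level entries for [addr, end), allocating PMD pages
 * via the caller-supplied info->alloc_pgt_page() callback as needed and
 * reusing any PMD tables that are already present.
 */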
static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next) {
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		next = (addr & PUD_MASK) + PUD_SIZE;
		if (next > end)
			next = end;

		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, 0);
			ident_pmd_init(info->pmd_flag, pmd, addr, next);
			continue;
		}
		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
		if (!pmd)
			return -ENOMEM;
		ident_pmd_init(info->pmd_flag, pmd, addr, next);
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	}

	return 0;
}

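/*
 * Build an identity (1:1 virtual-to-physical) mapping for [addr, end)
 * under pgd_page. When info->kernel_mapping is set, the entries are
 * placed in the __PAGE_OFFSET-based kernel half of the PGD instead of
 * the identity half.
 */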
int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
			      unsigned long addr, unsigned long end)
{
	unsigned long next;
	int result;
	int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;

	for (; addr < end; addr = next) {
		pgd_t *pgd = pgd_page + pgd_index(addr) + off;
		pud_t *pud;

		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
		if (next > end)
			next = end;

		if (pgd_present(*pgd)) {
			pud = pud_offset(pgd, 0);
			result = ident_pud_init(info, pud, addr, next);
			if (result)
				return result;
			continue;
		}

		pud = (pud_t *)info->alloc_pgt_page(info->context);
		if (!pud)
			return -ENOMEM;
		result = ident_pud_init(info, pud, addr, next);
		if (result)
			return result;
		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
	}

	return 0;
}

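/*
 * Illustrative use (a sketch, not taken from this file): a caller such
 * as the kexec code supplies its own page-table allocator and flags,
 * roughly like so:
 *
 *	struct x86_mapping_info info = {
 *		.alloc_pgt_page	= alloc_pgt_page,
 *		.context	= image,
 *		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
 *	};
 *	result = kernel_ident_mapping_init(&info, level4p, mstart, mend);
 */
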
static int __init parse_direct_gbpages_off(char *arg)
{
	direct_gbpages = 0;
	return 0;
}
early_param("nogbpages", parse_direct_gbpages_off);

static int __init parse_direct_gbpages_on(char *arg)
{
	direct_gbpages = 1;
	return 0;
}
early_param("gbpages", parse_direct_gbpages_on);

/*
 * NOTE: pagetable_init() allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */

pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

int force_personality32;

/*
 * noexec32=on|off
 * Control the non-executable heap for 32-bit processes.
 * To control the stack too, use noexec=off.
 *
 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off	PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
	if (!strcmp(str, "on"))
		force_personality32 &= ~READ_IMPLIES_EXEC;
	else if (!strcmp(str, "off"))
		force_personality32 |= READ_IMPLIES_EXEC;
	return 1;
}
__setup("noexec32=", nonx32_setup);

/*
 * When memory is added or removed, make sure all the process MMs have
 * suitable PGD entries in the local PGD level page.
 */
void sync_global_pgds(unsigned long start, unsigned long end)
{
	unsigned long address;

	for (address = start; address <= end; address += PGDIR_SIZE) {
		const pgd_t *pgd_ref = pgd_offset_k(address);
		struct page *page;

		if (pgd_none(*pgd_ref))
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			/* the pgt_lock is only used by Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);
			else
				BUG_ON(pgd_page_vaddr(*pgd)
				       != pgd_page_vaddr(*pgd_ref));

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}

/*
 * NOTE: This function is marked __ref because it calls the __init function
 * alloc_bootmem_pages(). That is safe ONLY while after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);

	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
		panic("set_pte_phys: cannot allocate page data %s\n",
			after_bootmem ? "after bootmem" : "");
	}

	pr_debug("spp_getpage %p\n", ptr);

	return ptr;
}

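/*
 * The fill_*() helpers walk one level of the kernel page tables for
 * vaddr, allocating the next-level table with spp_getpage() if it is
 * missing, and return a pointer to the entry at that level.
 */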
static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
{
	if (pgd_none(*pgd)) {
		pud_t *pud = (pud_t *)spp_getpage();
		pgd_populate(&init_mm, pgd, pud);
		if (pud != pud_offset(pgd, 0))
			printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
			       pud, pud_offset(pgd, 0));
	}
	return pud_offset(pgd, vaddr);
}

static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
	if (pud_none(*pud)) {
		pmd_t *pmd = (pmd_t *) spp_getpage();
		pud_populate(&init_mm, pud, pmd);
		if (pmd != pmd_offset(pud, 0))
			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
			       pmd, pmd_offset(pud, 0));
	}
	return pmd_offset(pud, vaddr);
}

static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) spp_getpage();
		pmd_populate_kernel(&init_mm, pmd, pte);
		if (pte != pte_offset_kernel(pmd, 0))
			printk(KERN_ERR "PAGETABLE BUG #02!\n");
	}
	return pte_offset_kernel(pmd, vaddr);
}

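/*
 * Install a single kernel PTE for vaddr, starting the walk from an
 * already-known PUD page; used by set_pte_vaddr() below.
 */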
void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pud = pud_page + pud_index(vaddr);
	pmd = fill_pmd(pud, vaddr);
	pte = fill_pte(pmd, vaddr);

	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	pud_t *pud_page;

	pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR
			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud_page = (pud_t*)pgd_page_vaddr(*pgd);
	set_pte_vaddr_pud(pud_page, vaddr, pteval);
}

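/*
 * Early boot helpers: walk down to (and, if needed, allocate) the PMD
 * or PTE covering vaddr, for code that sets up additional kernel
 * mappings before the normal allocators are available.
 */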
pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	pgd_t *pgd;
	pud_t *pud;

	pgd = pgd_offset_k(vaddr);
	pud = fill_pud(pgd, vaddr);
	return fill_pmd(pud, vaddr);
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return fill_pte(pmd, vaddr);
}

/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
						pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
	for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
		pgd = pgd_offset_k((unsigned long)__va(phys));
		if (pgd_none(*pgd)) {
			pud = (pud_t *) spp_getpage();
			set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pud = pud_offset(pgd, (unsigned long)__va(phys));
		if (pud_none(*pud)) {
			pmd = (pmd_t *) spp_getpage();
			set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pmd = pmd_offset(pud, phys);
		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
	}
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, PAGE_KERNEL_LARGE);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, PAGE_KERNEL_LARGE_NOCACHE);
}

/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 *
 * phys_base holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _brk_end. _brk_end
 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
	unsigned long vaddr = __START_KERNEL_map;
	unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
	unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt;

	/*
	 * Native path, max_pfn_mapped is not set yet.
	 * Xen has valid max_pfn_mapped set in
	 * arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
	 */
	if (max_pfn_mapped)
		vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);

	for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > end)
			set_pmd(pmd, __pmd(0));
	}
}

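/*
 * Set up 4K PTE-level mappings for [addr, end) within one PTE page.
 * Returns the last virtual address that was mapped.
 */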
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
	      pgprot_t prot)
{
	unsigned long pages = 0, next;
	unsigned long last_map_addr = end;
	int i;

	pte_t *pte = pte_page + pte_index(addr);

	for (i = pte_index(addr); i < PTRS_PER_PTE; i++, addr = next, pte++) {
		next = (addr & PAGE_MASK) + PAGE_SIZE;
		if (addr >= end) {
			if (!after_bootmem &&
			    !e820_any_mapped(addr & PAGE_MASK, next, E820_RAM) &&
			    !e820_any_mapped(addr & PAGE_MASK, next, E820_RESERVED_KERN))
				set_pte(pte, __pte(0));
			continue;
		}

		/*
		 * We will re-use the existing mapping.
		 * Xen for example has some special requirements, like mapping
		 * pagetable pages as RO. So assume whoever pre-set up
		 * these mappings knew what they were doing.
		 */
		if (pte_val(*pte)) {
			if (!after_bootmem)
				pages++;
			continue;
		}

		if (0)
			printk(" pte=%p addr=%lx pte=%016lx\n",
			       pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
		pages++;
		set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot));
		last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
	}

	update_page_count(PG_LEVEL_4K, pages);

	return last_map_addr;
}

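/*
 * Set up the PMD level for [address, end), using 2MB pages when
 * page_size_mask allows it and falling back to phys_pte_init()
 * otherwise. Returns the last virtual address that was mapped.
 */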
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
	      unsigned long page_size_mask, pgprot_t prot)
{
	unsigned long pages = 0, next;
	unsigned long last_map_addr = end;

	int i = pmd_index(address);

	for (; i < PTRS_PER_PMD; i++, address = next) {
		pmd_t *pmd = pmd_page + pmd_index(address);
		pte_t *pte;
		pgprot_t new_prot = prot;

		next = (address & PMD_MASK) + PMD_SIZE;
		if (address >= end) {
			if (!after_bootmem &&
			    !e820_any_mapped(address & PMD_MASK, next, E820_RAM) &&
			    !e820_any_mapped(address & PMD_MASK, next, E820_RESERVED_KERN))
				set_pmd(pmd, __pmd(0));
			continue;
		}

		if (pmd_val(*pmd)) {
			if (!pmd_large(*pmd)) {
				spin_lock(&init_mm.page_table_lock);
				pte = (pte_t *)pmd_page_vaddr(*pmd);
				last_map_addr = phys_pte_init(pte, address,
								end, prot);
				spin_unlock(&init_mm.page_table_lock);
				continue;
			}
			/*
			 * If we are OK with the existing PG_LEVEL_2M mapping,
			 * we simply keep using it.
			 *
			 * Otherwise, we will split the large page mapping but
			 * use the same existing protection bits except for
			 * large page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_2M)) {
				if (!after_bootmem)
					pages++;
				last_map_addr = next;
				continue;
			}
			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
		}

		if (page_size_mask & (1<<PG_LEVEL_2M)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte((pte_t *)pmd,
				pfn_pte((address & PMD_MASK) >> PAGE_SHIFT,
					__pgprot(pgprot_val(prot) | _PAGE_PSE)));
			spin_unlock(&init_mm.page_table_lock);
			last_map_addr = next;
			continue;
		}

		pte = alloc_low_page();
		last_map_addr = phys_pte_init(pte, address, end, new_prot);

		spin_lock(&init_mm.page_table_lock);
		pmd_populate_kernel(&init_mm, pmd, pte);
		spin_unlock(&init_mm.page_table_lock);
	}
	update_page_count(PG_LEVEL_2M, pages);
	return last_map_addr;
}

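/*
 * Set up the PUD level for [addr, end), using 1GB pages when
 * page_size_mask allows it and falling back to phys_pmd_init()
 * otherwise. Returns the last virtual address that was mapped.
 */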
static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
	      unsigned long page_size_mask)
{
	unsigned long pages = 0, next;
	unsigned long last_map_addr = end;
	int i = pud_index(addr);

	for (; i < PTRS_PER_PUD; i++, addr = next) {
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;
		pgprot_t prot = PAGE_KERNEL;

		next = (addr & PUD_MASK) + PUD_SIZE;
		if (addr >= end) {
			if (!after_bootmem &&
			    !e820_any_mapped(addr & PUD_MASK, next, E820_RAM) &&
			    !e820_any_mapped(addr & PUD_MASK, next, E820_RESERVED_KERN))
				set_pud(pud, __pud(0));
			continue;
		}

		if (pud_val(*pud)) {
			if (!pud_large(*pud)) {
				pmd = pmd_offset(pud, 0);
				last_map_addr = phys_pmd_init(pmd, addr, end,
							 page_size_mask, prot);
				__flush_tlb_all();
				continue;
			}
			/*
			 * If we are OK with the existing PG_LEVEL_1G mapping,
			 * we simply keep using it.
			 *
			 * Otherwise, we will split the gbpage mapping but use
			 * the same existing protection bits except for large
			 * page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_1G)) {
				if (!after_bootmem)
					pages++;
				last_map_addr = next;
				continue;
			}
			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
		}

		if (page_size_mask & (1<<PG_LEVEL_1G)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte((pte_t *)pud,
				pfn_pte((addr & PUD_MASK) >> PAGE_SHIFT,
					PAGE_KERNEL_LARGE));
			spin_unlock(&init_mm.page_table_lock);
			last_map_addr = next;
			continue;
		}

		pmd = alloc_low_page();
		last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
					      prot);

		spin_lock(&init_mm.page_table_lock);
		pud_populate(&init_mm, pud, pmd);
		spin_unlock(&init_mm.page_table_lock);
	}
	__flush_tlb_all();

	update_page_count(PG_LEVEL_1G, pages);

	return last_map_addr;
}

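/*
 * Create the kernel direct mapping for the physical range [start, end),
 * walking the kernel PGD and building the lower levels with the
 * phys_*_init() helpers above. Returns the last physical address that
 * was mapped.
 */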
unsigned long __meminit
kernel_physical_mapping_init(unsigned long start,
			     unsigned long end,
			     unsigned long page_size_mask)
{
	bool pgd_changed = false;
	unsigned long next, last_map_addr = end;
	unsigned long addr;

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);
	addr = start;

	for (; start < end; start = next) {
		pgd_t *pgd = pgd_offset_k(start);
		pud_t *pud;

		next = (start & PGDIR_MASK) + PGDIR_SIZE;

		if (pgd_val(*pgd)) {
			pud = (pud_t *)pgd_page_vaddr(*pgd);
			last_map_addr = phys_pud_init(pud, __pa(start),
						 __pa(end), page_size_mask);
			continue;
		}

		pud = alloc_low_page();
		last_map_addr = phys_pud_init(pud, __pa(start), __pa(end),
						 page_size_mask);

		spin_lock(&init_mm.page_table_lock);
		pgd_populate(&init_mm, pgd, pud);
		spin_unlock(&init_mm.page_table_lock);
		pgd_changed = true;
	}

	if (pgd_changed)
		sync_global_pgds(addr, end - 1);

	__flush_tlb_all();

	return last_map_addr;
}

#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
}
#endif

void __init paging_init(void)
{
	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	/*
	 * Clear the default memory-state setting for node 0.
	 * Note: don't use nodes_clear() here; that really does clear the
	 * state when NUMA support is not compiled in, and a later
	 * node_set_state() will not set it back.
	 */
	node_clear_state(0, N_MEMORY);
	if (N_MEMORY != N_NORMAL_MEMORY)
		node_clear_state(0, N_NORMAL_MEMORY);

	zone_sizes_init();
}

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 * updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
	unsigned long end_pfn = PFN_UP(start + size);

	if (end_pfn > max_pfn) {
		max_pfn = end_pfn;
		max_low_pfn = end_pfn;
		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
	}
}

/*
 * Memory is always added to the NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	init_memory_mapping(start, start + size);

	ret = __add_pages(nid, zone, start_pfn, nr_pages);
	WARN_ON_ONCE(ret);

	/* update max_pfn, max_low_pfn and high_memory */
	update_end_of_memory_vars(start, size);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

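/*
 * PAGE_INUSE (0xFD) is the poison written over the unused parts of a
 * partially-freed vmemmap page; the page itself is only freed once it
 * is entirely filled with this pattern. free_pagetable() returns a
 * page to the allocator that matches how it was allocated: bootmem
 * pages carry PageReserved and are released page by page, everything
 * else goes back via free_pages().
 */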
#define PAGE_INUSE 0xFD

static void __meminit free_pagetable(struct page *page, int order)
{
	unsigned long magic;
	unsigned int nr_pages = 1 << order;

	/* bootmem page has reserved flag */
	if (PageReserved(page)) {
		__ClearPageReserved(page);

		magic = (unsigned long)page->lru.next;
		if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
			while (nr_pages--)
				put_page_bootmem(page++);
		} else
			while (nr_pages--)
				free_reserved_page(page++);
	} else
		free_pages((unsigned long)page_address(page), order);
}

static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (pte_val(*pte))
			return;
	}

	/* free a pte table */
	free_pagetable(pmd_page(*pmd), 0);
	spin_lock(&init_mm.page_table_lock);
	pmd_clear(pmd);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (pmd_val(*pmd))
			return;
	}

	/* free a pmd table */
	free_pagetable(pud_page(*pud), 0);
	spin_lock(&init_mm.page_table_lock);
	pud_clear(pud);
	spin_unlock(&init_mm.page_table_lock);
}

/* Return true if pgd is changed, otherwise return false. */
static bool __meminit free_pud_table(pud_t *pud_start, pgd_t *pgd)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (pud_val(*pud))
			return false;
	}

	/* free a pud table */
	free_pagetable(pgd_page(*pgd), 0);
	spin_lock(&init_mm.page_table_lock);
	pgd_clear(pgd);
	spin_unlock(&init_mm.page_table_lock);

	return true;
}

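/*
 * Tear down the PTE-level mappings for [addr, end). For the direct
 * mapping (direct == true) only the page-table entries are cleared;
 * for vmemmap, the mapped pages themselves are freed as well.
 */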
static void __meminit
remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
		 bool direct)
{
	unsigned long next, pages = 0;
	pte_t *pte;
	void *page_addr;
	phys_addr_t phys_addr;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		/*
		 * We mapped [0,1G) memory as identity mapping when
		 * initializing, in arch/x86/kernel/head_64.S. These
		 * pagetables cannot be removed.
		 */
		phys_addr = pte_val(*pte) + (addr & PAGE_MASK);
		if (phys_addr < (phys_addr_t)0x40000000)
			return;

		if (IS_ALIGNED(addr, PAGE_SIZE) &&
		    IS_ALIGNED(next, PAGE_SIZE)) {
			/*
			 * Do not free direct mapping pages since they were
			 * freed when offlining, or simply not in use.
			 */
			if (!direct)
				free_pagetable(pte_page(*pte), 0);

			spin_lock(&init_mm.page_table_lock);
			pte_clear(&init_mm, addr, pte);
			spin_unlock(&init_mm.page_table_lock);

			/* For non-direct mapping, the pages count means nothing. */
			pages++;
		} else {
			/*
			 * If we are here, we are freeing vmemmap pages since
			 * direct mapped memory ranges to be freed are aligned.
			 *
			 * If we are not removing the whole page, it means
			 * other page structs in this page are being used and
			 * we cannot remove them. So fill the unused page_structs
			 * with 0xFD, and remove the page when it is wholly
			 * filled with 0xFD.
			 */
			memset((void *)addr, PAGE_INUSE, next - addr);

			page_addr = page_address(pte_page(*pte));
			if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
				free_pagetable(pte_page(*pte), 0);

				spin_lock(&init_mm.page_table_lock);
				pte_clear(&init_mm, addr, pte);
				spin_unlock(&init_mm.page_table_lock);
			}
		}
	}

	/* Call free_pte_table() in remove_pmd_table(). */
	flush_tlb_all();
	if (direct)
		update_page_count(PG_LEVEL_4K, -pages);
}

static void __meminit
remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
		 bool direct)
{
	unsigned long next, pages = 0;
	pte_t *pte_base;
	pmd_t *pmd;
	void *page_addr;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_large(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE)) {
				if (!direct)
					free_pagetable(pmd_page(*pmd),
						       get_order(PMD_SIZE));

				spin_lock(&init_mm.page_table_lock);
				pmd_clear(pmd);
				spin_unlock(&init_mm.page_table_lock);
				pages++;
			} else {
				/* If here, we are freeing vmemmap pages. */
				memset((void *)addr, PAGE_INUSE, next - addr);

				page_addr = page_address(pmd_page(*pmd));
				if (!memchr_inv(page_addr, PAGE_INUSE,
						PMD_SIZE)) {
					free_pagetable(pmd_page(*pmd),
						       get_order(PMD_SIZE));

					spin_lock(&init_mm.page_table_lock);
					pmd_clear(pmd);
					spin_unlock(&init_mm.page_table_lock);
				}
			}

			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next, direct);
		free_pte_table(pte_base, pmd);
	}

	/* Call free_pmd_table() in remove_pud_table(). */
	if (direct)
		update_page_count(PG_LEVEL_2M, -pages);
}

static void __meminit
remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
		 bool direct)
{
	unsigned long next, pages = 0;
	pmd_t *pmd_base;
	pud_t *pud;
	void *page_addr;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_large(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE)) {
				if (!direct)
					free_pagetable(pud_page(*pud),
						       get_order(PUD_SIZE));

				spin_lock(&init_mm.page_table_lock);
				pud_clear(pud);
				spin_unlock(&init_mm.page_table_lock);
				pages++;
			} else {
				/* If here, we are freeing vmemmap pages. */
				memset((void *)addr, PAGE_INUSE, next - addr);

				page_addr = page_address(pud_page(*pud));
				if (!memchr_inv(page_addr, PAGE_INUSE,
						PUD_SIZE)) {
					free_pagetable(pud_page(*pud),
						       get_order(PUD_SIZE));

					spin_lock(&init_mm.page_table_lock);
					pud_clear(pud);
					spin_unlock(&init_mm.page_table_lock);
				}
			}

			continue;
		}

		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
		remove_pmd_table(pmd_base, addr, next, direct);
		free_pmd_table(pmd_base, pud);
	}

	if (direct)
		update_page_count(PG_LEVEL_1G, -pages);
}

/* start and end are both virtual addresses. */
static void __meminit
remove_pagetable(unsigned long start, unsigned long end, bool direct)
{
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	bool pgd_changed = false;

	for (; start < end; start = next) {
		next = pgd_addr_end(start, end);

		pgd = pgd_offset_k(start);
		if (!pgd_present(*pgd))
			continue;

		pud = (pud_t *)pgd_page_vaddr(*pgd);
		remove_pud_table(pud, start, next, direct);
		if (free_pud_table(pud, pgd))
			pgd_changed = true;
	}

	if (pgd_changed)
		sync_global_pgds(start, end - 1);

	flush_tlb_all();
}

void __ref vmemmap_free(unsigned long start, unsigned long end)
{
	remove_pagetable(start, end, false);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void __meminit
kernel_physical_mapping_remove(unsigned long start, unsigned long end)
{
	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	remove_pagetable(start, end, true);
}

int __ref arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));
	kernel_physical_mapping_remove(start, start + size);
	ret = __remove_pages(zone, start_pfn, nr_pages);
	WARN_ON_ONCE(ret);

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_vsyscall;

static void __init register_page_bootmem_info(void)
{
#ifdef CONFIG_NUMA
	int i;

	for_each_online_node(i)
		register_page_bootmem_info_node(NODE_DATA(i));
#endif
}

void __init mem_init(void)
{
	pci_iommu_alloc();

	/* clear_bss() already cleared the empty_zero_page */

	register_page_bootmem_info();

	/* this will put all memory onto the freelists */
	free_all_bootmem();
	after_bootmem = 1;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR,
		   PAGE_SIZE, KCORE_OTHER);

	mem_init_print_info(NULL);
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

int kernel_set_to_readonly;

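/*
 * set_kernel_text_rw()/set_kernel_text_ro() toggle write protection on
 * the kernel *identity* mapping of the text range; the kernel text
 * mapping itself always stays RO once kernel_set_to_readonly is set
 * (see mark_rodata_ro() below).
 */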
void set_kernel_text_rw(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = PFN_ALIGN(__stop___ex_table);

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read write\n",
		 start, end);

	/*
	 * Make the kernel identity mapping for text RW. Kernel text
	 * mapping will always be RO. Refer to the comment in
	 * static_protections() in pageattr.c
	 */
	set_memory_rw(start, (end - start) >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = PFN_ALIGN(__stop___ex_table);

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read only\n",
		 start, end);

	/*
	 * Set the kernel identity mapping for text RO.
	 */
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long rodata_start = PFN_ALIGN(__start_rodata);
	unsigned long end = (unsigned long) &__end_rodata_hpage_align;
	unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
	unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
	unsigned long all_end = PFN_ALIGN(&_end);

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       (end - start) >> 10);
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);

	kernel_set_to_readonly = 1;

	/*
	 * The rodata/data/bss/brk section (but not the kernel text!)
	 * should also be not-executable.
	 */
	set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT);

	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
	set_memory_rw(start, (end-start) >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: again\n");
	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif

	free_init_pages("unused kernel",
			(unsigned long) __va(__pa_symbol(text_end)),
			(unsigned long) __va(__pa_symbol(rodata_start)));
	free_init_pages("unused kernel",
			(unsigned long) __va(__pa_symbol(rodata_end)),
			(unsigned long) __va(__pa_symbol(_sdata)));
}

#endif

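/*
 * kern_addr_valid() reports whether a kernel virtual address is backed
 * by a valid pfn, walking the page tables by hand and handling 1GB and
 * 2MB large pages.
 */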
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_large(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}

/*
 * A pseudo VMA to allow ptrace access for the vsyscall page. This only
 * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
 * not need special handling anymore:
 */
static struct vm_area_struct gate_vma = {
	.vm_start	= VSYSCALL_ADDR,
	.vm_end		= VSYSCALL_ADDR + PAGE_SIZE,
	.vm_page_prot	= PAGE_READONLY_EXEC,
	.vm_flags	= VM_READ | VM_EXEC
};

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
#ifdef CONFIG_IA32_EMULATION
	if (!mm || mm->context.ia32_compat)
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(mm);

	if (!vma)
		return 0;

	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/*
 * Use this when you have no reliable mm, typically from interrupt
 * context. It is less reliable than using a task's mm and may give
 * false positives.
 */
int in_gate_area_no_mm(unsigned long addr)
{
	return (addr & PAGE_MASK) == VSYSCALL_ADDR;
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma == &gate_vma)
		return "[vsyscall]";
	return NULL;
}

#ifdef CONFIG_X86_UV
unsigned long memory_block_size_bytes(void)
{
	if (is_uv_system()) {
		printk(KERN_INFO "UV: memory block size 2GB\n");
		return 2UL * 1024 * 1024 * 1024;
	}
	return MIN_MEMORY_BLOCK_SIZE;
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

static int __meminit vmemmap_populate_hugepages(unsigned long start,
						unsigned long end, int node)
{
	unsigned long addr;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	for (addr = start; addr < end; addr = next) {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (p) {
				pte_t entry;

				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
						PAGE_KERNEL_LARGE);
				set_pmd(pmd, __pmd(pte_val(entry)));

				/* check to see if we have contiguous blocks */
				if (p_end != p || node_start != node) {
					if (p_start)
						printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
						       addr_start, addr_end-1, p_start, p_end-1, node_start);
					addr_start = addr;
					node_start = node;
					p_start = p;
				}

				addr_end = addr + PMD_SIZE;
				p_end = p + PMD_SIZE;
				continue;
			}
		} else if (pmd_large(*pmd)) {
			vmemmap_verify((pte_t *)pmd, node, addr, next);
			continue;
		}
		pr_warn_once("vmemmap: falling back to regular page backing\n");
		if (vmemmap_populate_basepages(addr, next, node))
			return -ENOMEM;
	}
	return 0;
}

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	int err;

	if (cpu_has_pse)
		err = vmemmap_populate_hugepages(start, end, node);
	else
		err = vmemmap_populate_basepages(start, end, node);
	if (!err)
		sync_global_pgds(start, end - 1);
	return err;
}

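/*
 * Record bootmem info for the pages backing a section's memmap: take a
 * get_page_bootmem() reference on each page-table page on the walk and
 * on the pages that actually hold the memmap, so hot-remove (see
 * free_pagetable() above) can tell how they were allocated.
 */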
#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long size)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + size);
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned int nr_pages;
	struct page *page;

	for (; addr < end; addr = next) {
		pte_t *pte = NULL;

		pgd = pgd_offset_k(addr);
		if (pgd_none(*pgd)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);

		pud = pud_offset(pgd, addr);
		if (pud_none(*pud)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);

		if (!cpu_has_pse) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd))
				continue;
			get_page_bootmem(section_nr, pmd_page(*pmd),
					 MIX_SECTION_INFO);

			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte))
				continue;
			get_page_bootmem(section_nr, pte_page(*pte),
					 SECTION_INFO);
		} else {
			next = pmd_addr_end(addr, end);

			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd))
				continue;

			nr_pages = 1 << (get_order(PMD_SIZE));
			page = pmd_page(*pmd);
			while (nr_pages--)
				get_page_bootmem(section_nr, page++,
						 SECTION_INFO);
		}
	}
}
#endif

void __meminit vmemmap_populate_print_last(void)
{
	if (p_start) {
		printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
		       addr_start, addr_end-1, p_start, p_end-1, node_start);
		p_start = NULL;
		p_end = NULL;
		node_start = 0;
	}
}
#endif