/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <linux/nmi.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/cacheflush.h>

/*
 * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
 * The direct mapping extends to max_pfn_mapped, so that we can directly access
 * apertures, ACPI and other tables without having to play with fixmaps.
 */
unsigned long max_pfn_mapped;

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

int direct_gbpages __meminitdata
#ifdef CONFIG_DIRECT_GBPAGES
				= 1
#endif
;

static int __init parse_direct_gbpages_off(char *arg)
{
	direct_gbpages = 0;
	return 0;
}
early_param("nogbpages", parse_direct_gbpages_off);

static int __init parse_direct_gbpages_on(char *arg)
{
	direct_gbpages = 1;
	return 0;
}
early_param("gbpages", parse_direct_gbpages_on);

/*
 * NOTE: pagetable_init() allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

void show_mem(void)
{
	long i, total = 0, reserved = 0;
	long shared = 0, cached = 0;
	struct page *page;
	pg_data_t *pgdat;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			/*
			 * This loop can take a while with 256 GB and
			 * 4k pages so defer the NMI watchdog:
			 */
			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
				touch_nmi_watchdog();

			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;

			page = pfn_to_page(pgdat->node_start_pfn + i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk(KERN_INFO "%ld pages of RAM\n", total);
	printk(KERN_INFO "%ld reserved pages\n", reserved);
	printk(KERN_INFO "%ld pages shared\n", shared);
	printk(KERN_INFO "%ld pages swap cached\n", cached);
}

int after_bootmem;

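/*
 * Allocate a zeroed page for early page-table pieces: from the bootmem
 * allocator while we are still booting, from the page allocator once
 * after_bootmem is set.
 */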
static __init void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);

	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
		panic("set_pte_phys: cannot allocate page data %s\n",
			after_bootmem ? "after bootmem" : "");
	}

	pr_debug("spp_getpage %p\n", ptr);

	return ptr;
}

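/*
 * Install one kernel PTE at vaddr, allocating any missing intermediate
 * page-table levels from spp_getpage().
 */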
void
set_pte_vaddr(unsigned long vaddr, pte_t new_pte)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(new_pte));

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR
			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		pud_populate(&init_mm, pud, pmd);
		if (pmd != pmd_offset(pud, 0)) {
			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
				pmd, pmd_offset(pud, 0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		pmd_populate_kernel(&init_mm, pmd, pte);
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk(KERN_ERR "PAGETABLE BUG #02!\n");
			return;
		}
	}

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) && pte_val(new_pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 *
 * phys_addr holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _end.  _end is
 * rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
	unsigned long vaddr = __START_KERNEL_map;
	unsigned long end = round_up((unsigned long)_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt;
	pmd_t *last_pmd = pmd + PTRS_PER_PMD;

	for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > end)
			set_pmd(pmd, __pmd(0));
	}
}

static unsigned long __initdata table_start;
static unsigned long __meminitdata table_end;
static unsigned long __meminitdata table_top;

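/*
 * Allocate a zeroed page for building the direct mapping. Before bootmem
 * is up, pages come from the range reserved by find_early_table_space()
 * and are temporarily mapped with early_ioremap(); the physical address
 * is returned through *phys.
 */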
static __meminit void *alloc_low_page(unsigned long *phys)
{
	unsigned long pfn = table_end++;
	void *adr;

	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC);
		*phys = __pa(adr);

		return adr;
	}

	if (pfn >= table_top)
		panic("alloc_low_page: ran out of memory");

	adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
	memset(adr, 0, PAGE_SIZE);
	*phys = pfn * PAGE_SIZE;
	return adr;
}

static __meminit void unmap_low_page(void *adr)
{
	if (after_bootmem)
		return;

	early_iounmap(adr, PAGE_SIZE);
}

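/*
 * Create 4k PTE-level mappings for the range addr..end. Entries that
 * are already populated are left untouched; with !after_bootmem the
 * tail of the PTE page is explicitly cleared.
 */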
static void __meminit
phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end)
{
	unsigned pages = 0;
	int i;
	pte_t *pte = pte_page + pte_index(addr);

	for (i = pte_index(addr); i < PTRS_PER_PTE; i++, addr += PAGE_SIZE, pte++) {

		if (addr >= end) {
			if (!after_bootmem) {
				for (; i < PTRS_PER_PTE; i++, pte++)
					set_pte(pte, __pte(0));
			}
			break;
		}

		if (pte_val(*pte))
			continue;

		if (0)
			printk("   pte=%p addr=%lx pte=%016lx\n",
			       pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
		set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL));
		pages++;
	}
	update_page_count(PG_LEVEL_4K, pages);
}

static void __meminit
phys_pte_update(pmd_t *pmd, unsigned long address, unsigned long end)
{
	pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);

	phys_pte_init(pte, address, end);
}

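/*
 * Create PMD-level mappings for address..end: 2MB pages when the CPU
 * has PSE, otherwise a PTE page filled in by phys_pte_init(). Returns
 * the address the mapping has reached.
 */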
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
{
	unsigned long pages = 0;

	int i = pmd_index(address);

	for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
		unsigned long pte_phys;
		pmd_t *pmd = pmd_page + pmd_index(address);
		pte_t *pte;

		if (address >= end) {
			if (!after_bootmem) {
				for (; i < PTRS_PER_PMD; i++, pmd++)
					set_pmd(pmd, __pmd(0));
			}
			break;
		}

		if (pmd_val(*pmd)) {
			phys_pte_update(pmd, address, end);
			continue;
		}

		if (cpu_has_pse) {
			pages++;
			set_pte((pte_t *)pmd,
				pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
			continue;
		}

		pte = alloc_low_page(&pte_phys);
		phys_pte_init(pte, address, end);
		unmap_low_page(pte);

		pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
	}
	update_page_count(PG_LEVEL_2M, pages);
	return address;
}

static unsigned long __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, 0);
	unsigned long last_map_addr;

	spin_lock(&init_mm.page_table_lock);
	last_map_addr = phys_pmd_init(pmd, address, end);
	spin_unlock(&init_mm.page_table_lock);
	__flush_tlb_all();
	return last_map_addr;
}

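/*
 * Create PUD-level mappings for addr..end: 1GB pages when direct_gbpages
 * is enabled, otherwise PMD pages built by phys_pmd_init(). Returns the
 * last address mapped.
 */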
static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
{
	unsigned long pages = 0;
	unsigned long last_map_addr = end;
	int i = pud_index(addr);

	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
		unsigned long pmd_phys;
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		if (addr >= end)
			break;

		if (!after_bootmem &&
				!e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		if (pud_val(*pud)) {
			if (!pud_large(*pud))
				last_map_addr = phys_pmd_update(pud, addr, end);
			continue;
		}

		if (direct_gbpages) {
			pages++;
			set_pte((pte_t *)pud,
				pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
			last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
			continue;
		}

		pmd = alloc_low_page(&pmd_phys);

		spin_lock(&init_mm.page_table_lock);
		last_map_addr = phys_pmd_init(pmd, addr, end);
		unmap_low_page(pmd);
		pud_populate(&init_mm, pud, __va(pmd_phys));
		spin_unlock(&init_mm.page_table_lock);
	}
	__flush_tlb_all();
	update_page_count(PG_LEVEL_1G, pages);

	return last_map_addr;
}

static unsigned long __meminit
phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;

	pud = (pud_t *)pgd_page_vaddr(*pgd);

	return phys_pud_init(pud, addr, end);
}

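/*
 * Work out the worst-case amount of memory the kernel page tables need
 * to map up to end, and find room for them in the e820 map; the chosen
 * range is recorded in table_start/table_top.
 */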
static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE);
	if (!direct_gbpages) {
		unsigned long pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
		tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
	}
	if (!cpu_has_pse) {
		unsigned long ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
		tables += round_up(ptes * sizeof(pte_t), PAGE_SIZE);
	}

	/*
	 * RED-PEN putting page tables only on node 0 could
	 * cause a hotspot and fill up ZONE_DMA. The page tables
	 * need roughly 0.5KB per GB.
	 */
	start = 0x8000;
	table_start = find_e820_area(start, end, tables, PAGE_SIZE);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;
	table_top = table_start + (tables >> PAGE_SHIFT);

	printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, table_start << PAGE_SHIFT, table_top << PAGE_SHIFT);
}

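/*
 * Keep 1GB pages for the direct mapping only if they were requested
 * and the CPU actually supports them.
 */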
static void __init init_gbpages(void)
{
	if (direct_gbpages && cpu_has_gbpages)
		printk(KERN_INFO "Using GB pages for direct mapping\n");
	else
		direct_gbpages = 0;
}

#ifdef CONFIG_MEMTEST

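/*
 * Fill the region with a test value chosen by pattern, read it back,
 * and reserve any ranges that do not hold the value as "BAD RAM".
 */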
static void __init memtest(unsigned long start_phys, unsigned long size,
				 unsigned pattern)
{
	unsigned long i;
	unsigned long *start;
	unsigned long start_bad;
	unsigned long last_bad;
	unsigned long val;
	unsigned long start_phys_aligned;
	unsigned long count;
	unsigned long incr;

	switch (pattern) {
	case 0:
		val = 0UL;
		break;
	case 1:
		val = -1UL;
		break;
	case 2:
		val = 0x5555555555555555UL;
		break;
	case 3:
		val = 0xaaaaaaaaaaaaaaaaUL;
		break;
	default:
		return;
	}

	incr = sizeof(unsigned long);
	start_phys_aligned = ALIGN(start_phys, incr);
	count = (size - (start_phys_aligned - start_phys))/incr;
	start = __va(start_phys_aligned);
	start_bad = 0;
	last_bad = 0;

	for (i = 0; i < count; i++)
		start[i] = val;
	for (i = 0; i < count; i++, start++, start_phys_aligned += incr) {
		if (*start != val) {
			if (start_phys_aligned == last_bad + incr) {
				last_bad += incr;
			} else {
				if (start_bad) {
					printk(KERN_CONT "\n  %016lx bad mem addr %016lx - %016lx reserved",
						val, start_bad, last_bad + incr);
					reserve_early(start_bad, last_bad - start_bad, "BAD RAM");
				}
				start_bad = last_bad = start_phys_aligned;
			}
		}
	}
	if (start_bad) {
		printk(KERN_CONT "\n  %016lx bad mem addr %016lx - %016lx reserved",
			val, start_bad, last_bad + incr);
		reserve_early(start_bad, last_bad - start_bad, "BAD RAM");
	}
}

/* default is disabled */
static int memtest_pattern __initdata;

static int __init parse_memtest(char *arg)
{
	if (arg)
		memtest_pattern = simple_strtoul(arg, NULL, 0);
	return 0;
}

early_param("memtest", parse_memtest);

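/*
 * Run the requested number of test patterns ("memtest=N" on the command
 * line) over every free e820 RAM area between start and end.
 */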
static void __init early_memtest(unsigned long start, unsigned long end)
{
	u64 t_start, t_size;
	unsigned pattern;

	if (!memtest_pattern)
		return;

	printk(KERN_INFO "early_memtest: pattern num %d", memtest_pattern);
	for (pattern = 0; pattern < memtest_pattern; pattern++) {
		t_start = start;
		t_size = 0;
		while (t_start < end) {
			t_start = find_e820_area_size(t_start, &t_size, 1);

			/* done ? */
			if (t_start >= end)
				break;
			if (t_start + t_size > end)
				t_size = end - t_start;

			printk(KERN_CONT "\n  %016llx - %016llx pattern %d",
				(unsigned long long)t_start,
				(unsigned long long)t_start + t_size, pattern);

			memtest(t_start, t_size, pattern);

			t_start += t_size;
		}
	}
	printk(KERN_CONT "\n");
}
#else
static void __init early_memtest(unsigned long start, unsigned long end)
{
}
#endif

/*
 * Set up the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
unsigned long __init_refok init_memory_mapping(unsigned long start, unsigned long end)
{
	unsigned long next, last_map_addr = end;
	unsigned long start_phys = start, end_phys = end;

	printk(KERN_INFO "init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 *
	 * Later we should allocate these tables in the local node of the
	 * memory mapped. Unfortunately this is done currently before the
	 * nodes are discovered.
	 */
	if (!after_bootmem) {
		init_gbpages();
		find_early_table_space(end);
	}

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		pgd_t *pgd = pgd_offset_k(start);
		unsigned long pud_phys;
		pud_t *pud;

		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;

		if (pgd_val(*pgd)) {
			last_map_addr = phys_pud_update(pgd, __pa(start), __pa(end));
			continue;
		}

		if (after_bootmem)
			pud = pud_offset(pgd, start & PGDIR_MASK);
		else
			pud = alloc_low_page(&pud_phys);

		last_map_addr = phys_pud_init(pud, __pa(start), __pa(next));
		unmap_low_page(pud);
		if (!after_bootmem)
			pgd_populate(&init_mm, pgd_offset_k(start),
				     __va(pud_phys));
	}

	if (!after_bootmem)
		mmu_cr4_features = read_cr4();
	__flush_tlb_all();

	if (!after_bootmem)
		reserve_early(table_start << PAGE_SHIFT,
				 table_end << PAGE_SHIFT, "PGTABLE");

	if (!after_bootmem)
		early_memtest(start_phys, end_phys);

	return last_map_addr >> PAGE_SHIFT;
}

#ifndef CONFIG_NUMA
void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long bootmap_size, bootmap;

	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size,
				 PAGE_SIZE);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
	/* don't touch min_low_pfn */
	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
					 0, end_pfn);
	e820_register_active_regions(0, start_pfn, end_pfn);
	free_bootmem_with_active_regions(0, end_pfn);
	early_res_to_bootmem(0, end_pfn<<PAGE_SHIFT);
	reserve_bootmem(bootmap, bootmap_size, BOOTMEM_DEFAULT);
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
	max_zone_pfns[ZONE_NORMAL] = max_pfn;

	memory_present(0, 0, max_pfn);
	sparse_init();
	free_area_init_nodes(max_zone_pfns);
}
#endif

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Memory is always added to the NORMAL zone. This means you will never
 * get additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
	unsigned long last_mapped_pfn, start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	last_mapped_pfn = init_memory_mapping(start, start + size-1);
	if (last_mapped_pfn > max_pfn_mapped)
		max_pfn_mapped = last_mapped_pfn;

	ret = __add_pages(zone, start_pfn, nr_pages);
	WARN_ON(ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area contains BIOS code and data regions used by X and dosemu and similar
 * apps. Access has to be given to non-kernel-RAM areas as well; these
 * contain the PCI MMIO resources as well as potential BIOS/ACPI data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr <= 256)
		return 1;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
			 kcore_modules, kcore_vsyscall;

void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;

	pci_iommu_alloc();

	/* clear_bss() already cleared the empty_zero_page */

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif
	reservedpages = max_pfn - totalram_pages -
					absent_pages_in_range(0, max_pfn);
	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
		   VSYSCALL_END - VSYSCALL_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
			 "%ldk reserved, %ldk data, %ldk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		max_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);

	cpa_init();
}

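/*
 * Free and poison the pages in [begin, end). With CONFIG_DEBUG_PAGEALLOC
 * the pages are kept but marked not-present instead, so any stray access
 * to freed init memory faults immediately.
 */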
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr = begin;

	if (addr >= end)
		return;

	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
		begin, PAGE_ALIGN(end));
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);

	for (; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & ~(PAGE_SIZE-1)),
			POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
#endif
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_stext), end = PFN_ALIGN(__end_rodata);

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       (end - start) >> 10);
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);

	/*
	 * The rodata section (but not the kernel text!) should also be
	 * not-executable.
	 */
	start = ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
	set_memory_nx(start, (end - start) >> PAGE_SHIFT);

	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
	set_memory_rw(start, (end-start) >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: again\n");
	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif
}

#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif

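/*
 * Reserve a physical range with the bootmem allocator. On NUMA the
 * reservation is made node-locally when the range sits in one node,
 * otherwise it falls back to the generic path; reservations below the
 * DMA limit also update dma_reserve.
 */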
int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
				   int flags)
{
#ifdef CONFIG_NUMA
	int nid, next_nid;
	int ret;
#endif
	unsigned long pfn = phys >> PAGE_SHIFT;

	if (pfn >= max_pfn) {
		/*
		 * This can happen with kdump kernels when accessing
		 * firmware tables:
		 */
		if (pfn < max_pfn_mapped)
			return -EFAULT;

		printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %lu\n",
				phys, len);
		return -EFAULT;
	}

	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	nid = phys_to_nid(phys);
	next_nid = phys_to_nid(phys + len - 1);
	if (nid == next_nid)
		ret = reserve_bootmem_node(NODE_DATA(nid), phys, len, flags);
	else
		ret = reserve_bootmem(phys, len, flags);

	if (ret != 0)
		return ret;

#else
	reserve_bootmem(phys, len, BOOTMEM_DEFAULT);
#endif

	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
		dma_reserve += len / PAGE_SIZE;
		set_dma_reserve(dma_reserve);
	}

	return 0;
}

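/*
 * Walk the kernel page tables to check whether addr is backed by a
 * present mapping; non-canonical addresses are rejected up front.
 */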
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}

/*
 * A pseudo VMA to allow ptrace access for the vsyscall page. This only
 * covers the 64-bit vsyscall page now. 32-bit has a real VMA now and does
 * not need special handling anymore:
 */
static struct vm_area_struct gate_vma = {
	.vm_start	= VSYSCALL_START,
	.vm_end		= VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
	.vm_page_prot	= PAGE_READONLY_EXEC,
	.vm_flags	= VM_READ | VM_EXEC
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);

	if (!vma)
		return 0;

	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/*
 * Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives:
 */
int in_gate_area_no_task(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
		return "[vdso]";
	if (vma == &gate_vma)
		return "[vsyscall]";
	return NULL;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
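/*
 * Track the address/page range of the last PMD block allocated so the
 * debug output below can coalesce contiguous allocations into one line.
 */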
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

int __meminit
vmemmap_populate(struct page *start_page, unsigned long size, int node)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + size);
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	for (; addr < end; addr = next) {
		void *p = NULL;

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		if (!cpu_has_pse) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			pmd = vmemmap_pmd_populate(pud, addr, node);

			if (!pmd)
				return -ENOMEM;

			p = vmemmap_pte_populate(pmd, addr, node);

			if (!p)
				return -ENOMEM;

			addr_end = addr + PAGE_SIZE;
			p_end = p + PAGE_SIZE;
		} else {
			next = pmd_addr_end(addr, end);

			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd)) {
				pte_t entry;

				p = vmemmap_alloc_block(PMD_SIZE, node);
				if (!p)
					return -ENOMEM;

				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
						PAGE_KERNEL_LARGE);
				set_pmd(pmd, __pmd(pte_val(entry)));

				/* check to see if we have contiguous blocks */
				if (p_end != p || node_start != node) {
					if (p_start)
						printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
						       addr_start, addr_end-1, p_start, p_end-1, node_start);
					addr_start = addr;
					node_start = node;
					p_start = p;
				}

				addr_end = addr + PMD_SIZE;
				p_end = p + PMD_SIZE;
			} else
				vmemmap_verify((pte_t *)pmd, node, addr, next);
		}

	}
	return 0;
}

void __meminit vmemmap_populate_print_last(void)
{
	if (p_start) {
		printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
			addr_start, addr_end-1, p_start, p_end-1, node_start);
		p_start = NULL;
		p_end = NULL;
		node_start = 0;
	}
}
#endif