/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

struct dma_mapping_ops* dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously in
 * physical space so we can cache the location of the first one and move
 * around without checking the pgd every time.
 */

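/*
 * Print a summary of memory usage for debugging: free areas, free swap,
 * and counts of reserved, shared and swap-cached pages per online node.
 */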
void show_mem(void)
{
	long i, total = 0, reserved = 0;
	long shared = 0, cached = 0;
	pg_data_t *pgdat;
	struct page *page;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pfn_to_page(pgdat->node_start_pfn + i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk(KERN_INFO "%ld pages of RAM\n", total);
	printk(KERN_INFO "%ld reserved pages\n", reserved);
	printk(KERN_INFO "%ld pages shared\n", shared);
	printk(KERN_INFO "%ld pages swap cached\n", cached);
}

int after_bootmem;

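/*
 * spp_getpage() returns a zeroed page for the early page table helpers:
 * from the bootmem allocator before mem_init() has run, and from the
 * normal page allocator afterwards.
 */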
static __init void *spp_getpage(void)
{
	void *ptr;
	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);
	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
		panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem?"after bootmem":"");

	Dprintk("spp_getpage %p\n", ptr);
	return ptr;
}

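/*
 * Install a single kernel mapping of 'phys' at 'vaddr' with protection
 * 'prot', allocating intermediate pmd/pte pages as needed and flushing
 * the one affected TLB entry. Used by __set_fixmap() at boot.
 */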
static __init void set_pte_phys(unsigned long vaddr,
				unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pud, 0)) {
			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk("PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/* NOTE: this is meant to be run only at boot */
void __init
__set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk("Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, prot);
}

unsigned long __initdata table_start, table_end;

extern pmd_t temp_boot_pmds[];

static struct temp_map {
	pmd_t *pmd;
	void *address;
	int allocated;
} temp_mappings[] __initdata = {
	{ &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
	{ &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
	{}
};

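/*
 * Allocate one zeroed page for the early page tables. Before bootmem is
 * up the page is taken from the area found by find_early_table_space()
 * and accessed through one of the temporary 2MB boot mappings above;
 * afterwards it comes straight from the page allocator.
 */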
static __meminit void *alloc_low_page(int *index, unsigned long *phys)
{
	struct temp_map *ti;
	int i;
	unsigned long pfn = table_end++, paddr;
	void *adr;

	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC);
		*phys = __pa(adr);
		return adr;
	}

	if (pfn >= end_pfn)
		panic("alloc_low_page: ran out of memory");
	for (i = 0; temp_mappings[i].allocated; i++) {
		if (!temp_mappings[i].pmd)
			panic("alloc_low_page: ran out of temp mappings");
	}
	ti = &temp_mappings[i];
	paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
	set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
	ti->allocated = 1;
	__flush_tlb();
	adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
	memset(adr, 0, PAGE_SIZE);
	*index = i;
	*phys = pfn * PAGE_SIZE;
	return adr;
}

static __meminit void unmap_low_page(int i)
{
	struct temp_map *ti;

	if (after_bootmem)
		return;

	ti = &temp_mappings[i];
	set_pmd(ti->pmd, __pmd(0));
	ti->allocated = 0;
}

/* Must run before zap_low_mappings */
__init void *early_ioremap(unsigned long addr, unsigned long size)
{
	unsigned long map = round_down(addr, LARGE_PAGE_SIZE);

	/* actually usually some more */
	if (size >= LARGE_PAGE_SIZE) {
		return NULL;
	}
	set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
	map += LARGE_PAGE_SIZE;
	set_pmd(temp_mappings[1].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
	__flush_tlb();
	return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
}

/* To avoid virtual aliases later */
__init void early_iounmap(void *addr, unsigned long size)
{
	if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) != temp_mappings[0].address)
		printk("early_iounmap: bad address %p\n", addr);
	set_pmd(temp_mappings[0].pmd, __pmd(0));
	set_pmd(temp_mappings[1].pmd, __pmd(0));
	__flush_tlb();
}

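/*
 * Fill one page of pmd entries with 2MB kernel mappings, starting at
 * 'address' and stopping at 'end'. At boot time, entries beyond 'end'
 * are cleared so no stale boot-time mappings are left behind.
 */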
static void __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
{
	int i = pmd_index(address);

	for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
		unsigned long entry;
		pmd_t *pmd = pmd_page + pmd_index(address);

		if (address >= end) {
			if (!after_bootmem)
				for (; i < PTRS_PER_PMD; i++, pmd++)
					set_pmd(pmd, __pmd(0));
			break;
		}

		if (pmd_val(*pmd))
			continue;

		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
		entry &= __supported_pte_mask;
		set_pmd(pmd, __pmd(entry));
	}
}

static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud,0);
	spin_lock(&init_mm.page_table_lock);
	phys_pmd_init(pmd, address, end);
	spin_unlock(&init_mm.page_table_lock);
	__flush_tlb_all();
}

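/*
 * Fill one page of pud entries covering [addr, end), allocating a pmd
 * page for each entry and populating it with phys_pmd_init(). Ranges
 * with no RAM according to the e820 map are left unmapped at boot.
 */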
static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
{
	int i = pud_index(addr);

	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
		int map;
		unsigned long pmd_phys;
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		if (addr >= end)
			break;

		if (!after_bootmem && !e820_any_mapped(addr,addr+PUD_SIZE,0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		if (pud_val(*pud)) {
			phys_pmd_update(pud, addr, end);
			continue;
		}

		pmd = alloc_low_page(&map, &pmd_phys);
		spin_lock(&init_mm.page_table_lock);
		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
		phys_pmd_init(pmd, addr, end);
		spin_unlock(&init_mm.page_table_lock);
		unmap_low_page(map);
	}
	__flush_tlb();
}

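/*
 * Estimate the worst-case number of pud and pmd pages needed to map
 * memory up to 'end' and claim a region for them from the e820 map,
 * starting at 0x8000. table_start/table_end track the allocation.
 */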
static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	/* RED-PEN putting page tables only on node 0 could
	   cause a hotspot and fill up ZONE_DMA. The page tables
	   need roughly 0.5KB per GB. */
	start = 0x8000;
	table_start = find_e820_area(start, end, tables);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;

	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, table_start << PAGE_SHIFT,
		(table_start << PAGE_SHIFT) + tables);
}

/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from the
   physical memory. To access them they are temporarily mapped. */
void __meminit init_memory_mapping(unsigned long start, unsigned long end)
{
	unsigned long next;

	Dprintk("init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables in the local node of the memory
	 * mapped. Unfortunately this is done currently before the nodes are
	 * discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(end);

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		int map;
		unsigned long pud_phys;
		pgd_t *pgd = pgd_offset_k(start);
		pud_t *pud;

		if (after_bootmem)
			pud = pud_offset(pgd, start & PGDIR_MASK);
		else
			pud = alloc_low_page(&map, &pud_phys);

		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		phys_pud_init(pud, __pa(start), __pa(next));
		if (!after_bootmem)
			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(map);
	}

	if (!after_bootmem)
		asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
	__flush_tlb_all();
}

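/*
 * Remove the identity mapping of low memory that was used during early
 * boot. The boot CPU clears the pgd entry directly; secondary CPUs just
 * switch to init_level4_pgt, which does not contain the low mappings.
 */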
void __cpuinit zap_low_mappings(int cpu)
{
	if (cpu == 0) {
		pgd_t *pgd = pgd_offset_k(0UL);
		pgd_clear(pgd);
	} else {
		/*
		 * For APs, zap the low identity mappings by changing cr3
		 * to init_level4_pgt and doing a local TLB flush.
		 */
		asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
	}
	__flush_tlb_all();
}

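/*
 * Non-NUMA zone setup: register the memory range with sparsemem and set
 * the upper limits of the DMA, DMA32 and NORMAL zones. The NUMA build
 * provides its own paging_init().
 */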
#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = {MAX_DMA_PFN,
		MAX_DMA32_PFN,
		end_pfn};
	memory_present(0, 0, end_pfn);
	sparse_init();
	free_area_init_nodes(max_zone_pfns);
}
#endif

/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
	unsigned long end = address + size;

	BUG_ON(address & ~LARGE_PAGE_MASK);
	BUG_ON(size & ~LARGE_PAGE_MASK);

	for (; address < end; address += LARGE_PAGE_SIZE) {
		pgd_t *pgd = pgd_offset_k(address);
		pud_t *pud;
		pmd_t *pmd;
		if (pgd_none(*pgd))
			continue;
		pud = pud_offset(pgd, address);
		if (pud_none(*pud))
			continue;
		pmd = pmd_offset(pud, address);
		if (!pmd || pmd_none(*pmd))
			continue;
		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
			/* Could handle this, but it should not happen currently. */
			printk(KERN_ERR
			       "clear_kernel_mapping: mapping has been split. will leak memory\n");
			pmd_ERROR(*pmd);
		}
		set_pmd(pmd, __pmd(0));
	}
	__flush_tlb_all();
}

/*
 * Memory hotplug specific functions
 */
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * XXX: memory_add_physaddr_to_nid() is supposed to find the node id for a
 * physical address handed in through the sysfs probe interface. When ACPI
 * notifies a hot-add event, the node id can be determined by searching the
 * DSDT, but the probe interface does not carry a node id, so return node 0
 * for now.
 */
#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
#endif

/*
 * Memory is added always to NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	ret = __add_pages(zone, start_pfn, nr_pages);
	if (ret)
		goto error;

	init_memory_mapping(start, (start + size -1));

	return ret;
error:
	printk("%s: Problem encountered in __add_pages!\n", __func__);
	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);

#else /* CONFIG_MEMORY_HOTPLUG */
/*
 * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
 * just online the pages.
 */
int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
{
	int err = -EIO;
	unsigned long pfn;
	unsigned long total = 0, mem = 0;
	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		if (pfn_valid(pfn)) {
			online_page(pfn_to_page(pfn));
			err = 0;
			mem++;
		}
		total++;
	}
	if (!err) {
		z->spanned_pages += total;
		z->present_pages += mem;
		z->zone_pgdat->node_spanned_pages += total;
		z->zone_pgdat->node_present_pages += mem;
	}
	return err;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
			 kcore_vsyscall;

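/*
 * mem_init() hands the remaining bootmem pages over to the page
 * allocator, registers the regions exported through /proc/kcore and
 * prints the usual memory summary. After this point after_bootmem is
 * set and the early allocators above are no longer used.
 */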
void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;

	pci_iommu_alloc();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif
	reservedpages = end_pfn - totalram_pages -
			absent_pages_in_range(0, end_pfn);

	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
		   VSYSCALL_END - VSYSCALL_START);

	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		end_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);

#ifdef CONFIG_SMP
	/*
	 * Sync boot_level4_pgt mappings with the init_level4_pgt
	 * except for the low identity mappings which are already zapped
	 * in init_level4_pgt. This sync-up is essential for AP bringup.
	 */
	memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
#endif
}

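/*
 * Free the pages between 'begin' and 'end' back to the page allocator,
 * poisoning them first so stale users of init memory are caught.
 */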
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	if (begin >= end)
		return;

	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & ~(PAGE_SIZE-1)),
		       POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
}

void free_initmem(void)
{
	memset(__initdata_begin, POISON_FREE_INITDATA,
	       __initdata_end - __initdata_begin);
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_DEBUG_RODATA

void mark_rodata_ro(void)
{
	unsigned long addr = (unsigned long)__start_rodata;

	for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
		change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);

	printk("Write protecting the kernel read-only data: %luk\n",
	       (__end_rodata - __start_rodata) >> 10);

	/*
	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif

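/*
 * Reserve a physical range in the bootmem allocator (per node on NUMA)
 * and account for the part that falls inside the DMA zone (below
 * MAX_DMA_PFN) so zone sizing can subtract it via set_dma_reserve().
 */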
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	int nid = phys_to_nid(phys);
	reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
	reserve_bootmem(phys, len);
#endif
	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
		dma_reserve += len / PAGE_SIZE;
		set_dma_reserve(dma_reserve);
	}
}

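/*
 * Walk the kernel page tables to check whether a kernel virtual address
 * is mapped and backed by a valid page. Non-canonical addresses are
 * rejected up front; 2MB mappings are handled via pmd_large().
 */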
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;
	return pfn_valid(pte_pfn(*pte));
}

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
	{ 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
	  proc_dointvec },
	{ 0, }
};

static ctl_table debug_root_table2[] = {
	{ .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
	  .child = debug_table2 },
	{ 0 },
};

static __init int x8664_sysctl_init(void)
{
	register_sysctl_table(debug_root_table2, 1);
	return 0;
}
__initcall(x8664_sysctl_init);
#endif

/* A pseudo VMA to allow ptrace access to the vsyscall page. This only
   covers the 64-bit vsyscall page now. 32-bit has a real VMA now and does
   not need special handling anymore. */

static struct vm_area_struct gate_vma = {
	.vm_start = VSYSCALL_START,
	.vm_end = VSYSCALL_END,
	.vm_page_prot = PAGE_READONLY
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);
	if (!vma)
		return 0;
	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/* Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}