/* Copyright 2002,2003 Andi Kleen, SuSE Labs */

/* vsyscall handling for 32bit processes. Map a stub page into it
   on demand because 32bit cannot reach the kernel's fixmaps */
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/stringify.h>
#include <linux/security.h>

#include <asm/proto.h>
#include <asm/tlbflush.h>
#include <asm/ia32_unistd.h>
17 /* 32bit VDSOs mapped into user space. */
18 asm(".section \".init.data\",\"aw\"\n"
19 "syscall32_syscall:\n"
20 ".incbin \"arch/x86_64/ia32/vsyscall-syscall.so\"\n"
21 "syscall32_syscall_end:\n"
22 "syscall32_sysenter:\n"
23 ".incbin \"arch/x86_64/ia32/vsyscall-sysenter.so\"\n"
24 "syscall32_sysenter_end:\n"
/* Bounds of the embedded VDSO blobs defined in the asm block above. */
extern unsigned char syscall32_syscall[], syscall32_syscall_end[];
extern unsigned char syscall32_sysenter[], syscall32_sysenter_end[];
extern int sysctl_vsyscall32;

/* Backing storage for the 32-bit vsyscall stub; one zeroed page is
 * allocated and filled by init_syscall32() at boot.
 * NOTE(review): this definition was dropped by the mangled extraction
 * (original lines 30-31) but is required by syscall32_nopage() and
 * init_syscall32() below — confirm against upstream. */
char *syscall32_page;

/* -1 = undecided; set by syscall32_cpu_init() based on the CPU vendor. */
static int use_sysenter = -1;
35 syscall32_nopage(struct vm_area_struct
*vma
, unsigned long adr
, int *type
)
37 struct page
*p
= virt_to_page(adr
- vma
->vm_start
+ syscall32_page
);
/* Prevent VMA merging: a non-NULL ->close makes this VMA unmergeable,
   which keeps the vsyscall mapping a distinct VMA. Intentionally empty. */
static void syscall32_vma_close(struct vm_area_struct *vma)
{
}
47 static struct vm_operations_struct syscall32_vm_ops
= {
48 .close
= syscall32_vma_close
,
49 .nopage
= syscall32_nopage
,
54 /* Setup a VMA at program startup for the vsyscall page */
55 int syscall32_setup_pages(struct linux_binprm
*bprm
, int exstack
)
57 int npages
= (VSYSCALL32_END
- VSYSCALL32_BASE
) >> PAGE_SHIFT
;
58 struct vm_area_struct
*vma
;
59 struct mm_struct
*mm
= current
->mm
;
62 vma
= kmem_cache_alloc(vm_area_cachep
, SLAB_KERNEL
);
65 if (security_vm_enough_memory(npages
)) {
66 kmem_cache_free(vm_area_cachep
, vma
);
70 memset(vma
, 0, sizeof(struct vm_area_struct
));
71 /* Could randomize here */
72 vma
->vm_start
= VSYSCALL32_BASE
;
73 vma
->vm_end
= VSYSCALL32_END
;
74 /* MAYWRITE to allow gdb to COW and set breakpoints */
75 vma
->vm_flags
= VM_READ
|VM_EXEC
|VM_MAYREAD
|VM_MAYEXEC
|VM_MAYEXEC
|VM_MAYWRITE
;
76 vma
->vm_flags
|= mm
->def_flags
;
77 vma
->vm_page_prot
= protection_map
[vma
->vm_flags
& 7];
78 vma
->vm_ops
= &syscall32_vm_ops
;
81 down_write(&mm
->mmap_sem
);
82 if ((ret
= insert_vm_struct(mm
, vma
))) {
83 up_write(&mm
->mmap_sem
);
84 kmem_cache_free(vm_area_cachep
, vma
);
87 mm
->total_vm
+= npages
;
88 up_write(&mm
->mmap_sem
);
92 static int __init
init_syscall32(void)
94 syscall32_page
= (void *)get_zeroed_page(GFP_KERNEL
);
96 panic("Cannot allocate syscall32 page");
97 if (use_sysenter
> 0) {
98 memcpy(syscall32_page
, syscall32_sysenter
,
99 syscall32_sysenter_end
- syscall32_sysenter
);
101 memcpy(syscall32_page
, syscall32_syscall
,
102 syscall32_syscall_end
- syscall32_syscall
);
107 __initcall(init_syscall32
);
109 /* May not be __init: called during resume */
110 void syscall32_cpu_init(void)
112 if (use_sysenter
< 0)
113 use_sysenter
= (boot_cpu_data
.x86_vendor
== X86_VENDOR_INTEL
);
115 /* Load these always in case some future AMD CPU supports
116 SYSENTER from compat mode too. */
117 checking_wrmsrl(MSR_IA32_SYSENTER_CS
, (u64
)__KERNEL_CS
);
118 checking_wrmsrl(MSR_IA32_SYSENTER_ESP
, 0ULL);
119 checking_wrmsrl(MSR_IA32_SYSENTER_EIP
, (u64
)ia32_sysenter_target
);
121 wrmsrl(MSR_CSTAR
, ia32_cstar_target
);