/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
#include <asm/hpet.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

void __init init_vdso_image(const struct vdso_image *image)
{
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of address space.
 * To save memory make sure it is still in the same PTE as the stack
 * top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
#ifdef CONFIG_X86_32
	return 0;
#else
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address.  It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
#endif
}

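/*
 * Worked example (illustrative numbers, not taken from the original
 * source): with PAGE_SIZE = 4 KiB, PMD_SIZE = 2 MiB, a stack top of
 * 0x7ffd10001234 and len = 3 pages:
 *
 *	start = PAGE_ALIGN(0x7ffd10001234)	  -> 0x7ffd10002000
 *	end   = next PMD boundary above start+len -> 0x7ffd10200000
 *	end  -= len				  -> 0x7ffd101fd000
 *
 * addr is then start plus a page-aligned random offset of at most
 * end - start, so the vdso stays inside the 2 MiB region that already
 * maps the stack top.
 */
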
static int vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}

static const struct vm_special_mapping text_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
};

static int vvar_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	long sym_offset;
	int ret = -EFAULT;

	if (!image)
		return VM_FAULT_SIGBUS;

	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;

	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping.  This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;

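	/*
	 * Note: sym_vvar_start is negative because the vvar area is mapped
	 * below the vdso text, so sym_offset is a (negative) text-relative
	 * offset for every real vvar page.  Assuming the layout from
	 * vdso-layout.lds.S, where the vvar page comes first, a fault at
	 * pgoff 0 yields sym_offset == image->sym_vvar_page.
	 */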
	if (sym_offset == image->sym_vvar_page) {
		ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
				    __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
	} else if (sym_offset == image->sym_hpet_page) {
#ifdef CONFIG_HPET_TIMER
		if (hpet_address && vclock_was_used(VCLOCK_HPET)) {
			ret = vm_insert_pfn_prot(
				vma,
				(unsigned long)vmf->virtual_address,
				hpet_address >> PAGE_SHIFT,
				pgprot_noncached(PAGE_READONLY));
		}
#endif
	} else if (sym_offset == image->sym_pvclock_page) {
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_pvti_cpu0_va();
		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
			ret = vm_insert_pfn(
				vma,
				(unsigned long)vmf->virtual_address,
				__pa(pvti) >> PAGE_SHIFT);
		}
	}

	if (ret == 0 || ret == -EBUSY)	/* -EBUSY: PTE was already present */
		return VM_FAULT_NOPAGE;

	return VM_FAULT_SIGBUS;
}

static int map_vdso(const struct vdso_image *image, bool calculate_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr, text_start;
	int ret = 0;
	static const struct vm_special_mapping vvar_mapping = {
		.name = "[vvar]",
		.fault = vvar_fault,
	};

	if (calculate_addr) {
		addr = vdso_addr(current->mm->start_stack,
				 image->size - image->sym_vvar_start);
	} else {
		addr = 0;
	}

	down_write(&mm->mmap_sem);

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;
	current->mm->context.vdso = (void __user *)text_start;
	current->mm->context.vdso_image = image;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &text_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	return ret;
}

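/*
 * Illustrative layout of the two mappings created above (a sketch;
 * sym_vvar_start is negative, so the vvar VMA precedes the vdso text):
 *
 *	addr                          text_start = addr - sym_vvar_start
 *	|--- [vvar], -sym_vvar_start bytes ---|--- [vdso], image->size ---|
 */
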
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, false);
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso(&vdso_image_64, true);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;

		return map_vdso(&vdso_image_x32, true);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);

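/*
 * Usage: booting with "vdso=0" on the kernel command line disables the
 * 64-bit vDSO mapping; any nonzero value leaves it enabled.
 */
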
static void vgetcpu_cpu_init(void *arg)
{
	int cpu = smp_processor_id();
	struct desc_struct d = { };
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	if (static_cpu_has(X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store cpu number in limit so that it can be loaded
	 * quickly in user space in vgetcpu. (12 bits for the CPU
	 * and 8 bits for the node)
	 */
	d.limit0 = cpu | ((node & 0xf) << 12);
	d.limit = node >> 4;
	d.type = 5;		/* RO data, expand down, accessed */
	d.dpl = 3;		/* Visible to user code */
	d.s = 1;		/* Not a system segment */
	d.p = 1;		/* Present */
	d.d = 1;		/* 32-bit */

	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}

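/*
 * User-side decode, for reference (a sketch of what the vgetcpu fast
 * path in asm/vgtod.h does with the descriptor installed above;
 * __getcpu() reads the segment limit with LSL, or uses RDTSCP when
 * available):
 *
 *	unsigned int p = __getcpu();
 *	cpu  = p & 0xfff;	// low 12 bits
 *	node = p >> 12;		// next 8 bits
 */
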
static int
vgetcpu_cpu_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
	long cpu = (long)arg;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
		smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);

	return NOTIFY_DONE;
}

static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	cpu_notifier_register_begin();

	on_each_cpu(vgetcpu_cpu_init, NULL, 1);
	/* notifier priority > KVM */
	__hotcpu_notifier(vgetcpu_cpu_notifier, 30);

	cpu_notifier_register_done();

	return 0;
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */