/*
 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright 2003 Andi Kleen, SuSE Labs.
 *
 * Thanks to hpa@transmeta.com for some useful hint.
 * Special thanks to Ingo Molnar for his early experience with
 * a different vsyscall implementation for Linux/IA32 and for the name.
 *
 * vsyscall 1 is located at -10Mbyte, vsyscall 2 is located
 * at virtual address -10Mbyte+1024bytes etc... There are at max 4
 * vsyscalls. One vsyscall can reserve more than 1 slot to avoid
 * jumping out of line if necessary. We cannot add more with this
 * mechanism because older kernels won't return -ENOSYS.
 * If we want more than four we need a vDSO.
 *
 * Note: the concept clashes with user mode linux. If you use UML and
 * want per guest time just set the kernel.vsyscall64 sysctl to 0.
 */
2b7d0390 | 20 | /* Disable profiling for userspace code: */ |
2ed84eeb | 21 | #define DISABLE_BRANCH_PROFILING |
1f0d69a9 | 22 | |
1da177e4 LT |
23 | #include <linux/time.h> |
24 | #include <linux/init.h> | |
25 | #include <linux/kernel.h> | |
26 | #include <linux/timer.h> | |
27 | #include <linux/seqlock.h> | |
28 | #include <linux/jiffies.h> | |
29 | #include <linux/sysctl.h> | |
7460ed28 | 30 | #include <linux/clocksource.h> |
c08c8205 | 31 | #include <linux/getcpu.h> |
8c131af1 AK |
32 | #include <linux/cpu.h> |
33 | #include <linux/smp.h> | |
34 | #include <linux/notifier.h> | |
1da177e4 LT |
35 | |
36 | #include <asm/vsyscall.h> | |
37 | #include <asm/pgtable.h> | |
38 | #include <asm/page.h> | |
7460ed28 | 39 | #include <asm/unistd.h> |
1da177e4 LT |
40 | #include <asm/fixmap.h> |
41 | #include <asm/errno.h> | |
42 | #include <asm/io.h> | |
c08c8205 VP |
43 | #include <asm/segment.h> |
44 | #include <asm/desc.h> | |
45 | #include <asm/topology.h> | |
2aae950b | 46 | #include <asm/vgtod.h> |
1da177e4 | 47 | |
/* Place a function into the fixed vsyscall slot 'nr' section; 'notrace'
 * keeps ftrace instrumentation out of user-mapped code. */
#define __vsyscall(nr) \
	__attribute__ ((unused, __section__(".vsyscall_" #nr))) notrace
/* Registers the 'syscall' instruction clobbers (rcx/r11) plus memory. */
#define __syscall_clobber "r11","cx","memory"

/*
 * vsyscall_gtod_data contains data that is :
 * - readonly from vsyscalls
 * - written by timer interrupt or sysctl (/proc/sys/kernel/vsyscall64)
 * Try to keep this structure as small as possible to avoid cache line ping pongs
 */
int __vgetcpu_mode __section_vgetcpu_mode;

struct vsyscall_gtod_data __vsyscall_gtod_data __section_vsyscall_gtod_data =
{
	.lock = SEQLOCK_UNLOCKED,
	.sysctl_enabled = 1,
};
1da177e4 | 65 | |
2c622148 TB |
66 | void update_vsyscall_tz(void) |
67 | { | |
68 | unsigned long flags; | |
69 | ||
70 | write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags); | |
71 | /* sys_tz has changed */ | |
72 | vsyscall_gtod_data.sys_tz = sys_tz; | |
73 | write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); | |
74 | } | |
75 | ||
/*
 * Snapshot the current timekeeping state (clocksource parameters and
 * wall time) into the user-visible vsyscall_gtod_data, so the
 * userspace vgettimeofday()/vtime() fast paths can read it via the
 * seqlock without entering the kernel.  Called from the timekeeping
 * core with the freshly computed multiplier.
 */
void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
		     u32 mult)
{
	unsigned long flags;

	write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
	/* copy vsyscall data */
	vsyscall_gtod_data.clock.vread = clock->vread;
	vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
	vsyscall_gtod_data.clock.mask = clock->mask;
	vsyscall_gtod_data.clock.mult = mult;
	vsyscall_gtod_data.clock.shift = clock->shift;
	vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
	vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
	vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic;
	vsyscall_gtod_data.wall_time_coarse = __current_kernel_time();
	write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}
94 | ||
/* RED-PEN may want to readd seq locking, but then the variable should be
 * write-once.
 */
/* Copy the cached timezone out of the user-mapped gtod data (read-only
 * fast path; no locking, see RED-PEN note above). */
static __always_inline void do_get_tz(struct timezone * tz)
{
	*tz = __vsyscall_gtod_data.sys_tz;
}
102 | ||
2c8bc944 | 103 | static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz) |
1da177e4 LT |
104 | { |
105 | int ret; | |
ce28b986 | 106 | asm volatile("syscall" |
1da177e4 | 107 | : "=a" (ret) |
7460ed28 JS |
108 | : "0" (__NR_gettimeofday),"D" (tv),"S" (tz) |
109 | : __syscall_clobber ); | |
1da177e4 LT |
110 | return ret; |
111 | } | |
112 | ||
2c8bc944 | 113 | static __always_inline long time_syscall(long *t) |
1da177e4 LT |
114 | { |
115 | long secs; | |
ce28b986 | 116 | asm volatile("syscall" |
1da177e4 LT |
117 | : "=a" (secs) |
118 | : "0" (__NR_time),"D" (t) : __syscall_clobber); | |
119 | return secs; | |
120 | } | |
121 | ||
/* Lockless gettimeofday fast path: read a consistent snapshot of the
 * gtod data under the seqlock read side, then convert the clocksource
 * delta to a timeval outside the retry loop. */
static __always_inline void do_vgettimeofday(struct timeval * tv)
{
	cycle_t now, base, mask, cycle_delta;
	unsigned seq;
	unsigned long mult, shift, nsec;
	cycle_t (*vread)(void);
	do {
		seq = read_seqbegin(&__vsyscall_gtod_data.lock);

		vread = __vsyscall_gtod_data.clock.vread;
		/* Fast path unusable (sysctl off, or clocksource not
		 * readable from userspace): fall back to the syscall. */
		if (unlikely(!__vsyscall_gtod_data.sysctl_enabled || !vread)) {
			gettimeofday(tv,NULL);
			return;
		}

		now = vread();
		base = __vsyscall_gtod_data.clock.cycle_last;
		mask = __vsyscall_gtod_data.clock.mask;
		mult = __vsyscall_gtod_data.clock.mult;
		shift = __vsyscall_gtod_data.clock.shift;

		tv->tv_sec = __vsyscall_gtod_data.wall_time_sec;
		nsec = __vsyscall_gtod_data.wall_time_nsec;
	} while (read_seqretry(&__vsyscall_gtod_data.lock, seq));

	/* calculate interval: */
	cycle_delta = (now - base) & mask;
	/* convert to nsecs: */
	nsec += (cycle_delta * mult) >> shift;

	/* Normalize: carry whole seconds out of the nanosecond count. */
	while (nsec >= NSEC_PER_SEC) {
		tv->tv_sec += 1;
		nsec -= NSEC_PER_SEC;
	}
	tv->tv_usec = nsec / NSEC_PER_USEC;
}
158 | ||
2e8ad43e | 159 | int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz) |
1da177e4 | 160 | { |
1da177e4 LT |
161 | if (tv) |
162 | do_vgettimeofday(tv); | |
163 | if (tz) | |
164 | do_get_tz(tz); | |
165 | return 0; | |
166 | } | |
167 | ||
168 | /* This will break when the xtime seconds get inaccurate, but that is | |
169 | * unlikely */ | |
2e8ad43e | 170 | time_t __vsyscall(1) vtime(time_t *t) |
1da177e4 | 171 | { |
d0aff6e6 | 172 | struct timeval tv; |
272a3713 | 173 | time_t result; |
7460ed28 | 174 | if (unlikely(!__vsyscall_gtod_data.sysctl_enabled)) |
1da177e4 | 175 | return time_syscall(t); |
d0aff6e6 | 176 | |
c80544dc | 177 | vgettimeofday(&tv, NULL); |
d0aff6e6 | 178 | result = tv.tv_sec; |
272a3713 ED |
179 | if (t) |
180 | *t = result; | |
181 | return result; | |
1da177e4 LT |
182 | } |
183 | ||
/* Fast way to get current CPU and node.
   This helps to do per node and per CPU caches in user space.
   The result is not guaranteed without CPU affinity, but usually
   works out because the scheduler tries to keep a thread on the same
   CPU.

   tcache must point to a two element sized long array.
   All arguments can be NULL. */
long __vsyscall(2)
vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
{
	unsigned int p;		/* packed (node << 12) | cpu value */
	unsigned long j = 0;	/* jiffies stamp for the cache */

	/* Fast cache - only recompute value once per jiffies and avoid
	   relatively costly rdtscp/cpuid otherwise.
	   This works because the scheduler usually keeps the process
	   on the same CPU and this syscall doesn't guarantee its
	   results anyways.
	   We do this here because otherwise user space would do it on
	   its own in a likely inferior way (no access to jiffies).
	   If you don't like it pass NULL. */
	if (tcache && tcache->blob[0] == (j = __jiffies)) {
		p = tcache->blob[1];
	} else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
		/* Load per CPU data from RDTSCP */
		native_read_tscp(&p);
	} else {
		/* Load per CPU data from GDT */
		asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
	}
	if (tcache) {
		tcache->blob[0] = j;
		tcache->blob[1] = p;
	}
	/* Unpack: low 12 bits are the CPU, the rest is the node
	   (matches the encoding written by vsyscall_set_cpu()). */
	if (cpu)
		*cpu = p & 0xfff;
	if (node)
		*node = p >> 12;
	return 0;
}
225 | ||
/* Unused vsyscall slot 3: always reports "no such syscall" so old
 * binaries probing it get a sane error. */
static long __vsyscall(3) venosys_1(void)
{
	return -ENOSYS;
}
230 | ||
#ifdef CONFIG_SYSCTL
/* /proc/sys/kernel/vsyscall64: toggles the userspace fast paths above
 * (0 forces the real syscalls, e.g. for UML per-guest time). */
static ctl_table kernel_table2[] = {
	{ .procname = "vsyscall64",
	  .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
	  .mode = 0644,
	  .proc_handler = proc_dointvec },
	{}
};

/* Root table hooking the entry under the "kernel" sysctl directory. */
static ctl_table kernel_root_table2[] = {
	{ .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555,
	  .child = kernel_table2 },
	{}
};
#endif
246 | ||
/* Assume __initcall executes before all user space. Hopefully kmod
   doesn't violate that. We'll find out if it does. */
/* Publish this CPU's number and NUMA node where vgetcpu() can read
 * them cheaply from userspace: the TSC_AUX MSR (for RDTSCP) and the
 * limit field of a per-CPU GDT entry (for the LSL fallback). */
static void __cpuinit vsyscall_set_cpu(int cpu)
{
	unsigned long d;
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/* Store cpu number in limit so that it can be loaded quickly
	   in user space in vgetcpu.
	   12 bits for the CPU and 8 bits for the node. */
	/* The node is split across the descriptor's two limit fields:
	   low 4 bits into limit[15:12], upper bits into limit[19:16]. */
	d = 0x0f40000000000ULL;
	d |= cpu;
	d |= (node & 0xf) << 12;
	d |= (node >> 4) << 48;
	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}
268 | ||
/* Per-CPU helper invoked via on_each_cpu()/smp_call_function_single():
 * set up the vgetcpu state for the CPU it runs on. */
static void __cpuinit cpu_vsyscall_init(void *arg)
{
	/* preemption should be already off */
	vsyscall_set_cpu(raw_smp_processor_id());
}
274 | ||
275 | static int __cpuinit | |
276 | cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg) | |
277 | { | |
278 | long cpu = (long)arg; | |
8bb78442 | 279 | if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) |
8691e5a8 | 280 | smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1); |
8c131af1 AK |
281 | return NOTIFY_DONE; |
282 | } | |
283 | ||
/* Map the vsyscall page (linked into the kernel image at __vsyscall_0)
 * into the fixmap so userspace sees it at the fixed VSYSCALL address. */
void __init map_vsyscall(void)
{
	extern char __vsyscall_0;
	unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);

	/* Note that VSYSCALL_MAPPED_PAGES must agree with the code below. */
	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
}
292 | ||
/* Late-boot initialization: sanity-check that the vsyscall entry
 * points landed at their fixed link addresses, register the
 * vsyscall64 sysctl, and set up vgetcpu state on every CPU, current
 * and future (via the hotplug notifier). */
static int __init vsyscall_init(void)
{
	BUG_ON(((unsigned long) &vgettimeofday !=
		VSYSCALL_ADDR(__NR_vgettimeofday)));
	BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
	BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
	BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu));
#ifdef CONFIG_SYSCTL
	register_sysctl_table(kernel_root_table2);
#endif
	on_each_cpu(cpu_vsyscall_init, NULL, 1);
	hotcpu_notifier(cpu_vsyscall_notifier, 0);
	return 0;
}

__initcall(vsyscall_init);