/*
 * linux/arch/x86_64/kernel/vsyscall.c
 *
 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright 2003 Andi Kleen, SuSE Labs.
 *
 * Thanks to hpa@transmeta.com for some useful hints.
 * Special thanks to Ingo Molnar for his early experience with
 * a different vsyscall implementation for Linux/IA32 and for the name.
 *
 * vsyscall 1 is located at -10Mbyte, vsyscall 2 is located
 * at virtual address -10Mbyte+1024bytes etc... There are at most 4
 * vsyscalls. One vsyscall can reserve more than 1 slot to avoid
 * jumping out of line if necessary. We cannot add more with this
 * mechanism because older kernels won't return -ENOSYS.
 * If we want more than four we need a vDSO.
 *
 * Note: the concept clashes with user mode linux. If you use UML and
 * want per guest time just set the kernel.vsyscall64 sysctl to 0.
 */
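
/*
 * Illustrative sketch, not part of the original file: user space can
 * call a vsyscall directly through its fixed address.  The -10MB base
 * and the 1024-byte slot size follow the layout described above; the
 * function-pointer cast is an assumption about how a raw caller would
 * use it (glibc normally wraps this behind gettimeofday()).
 *
 *	#define VSYSCALL_BASE	0xffffffffff600000UL	/- -10MB -/
 *	#define VSYSCALL_SLOT	1024
 *	typedef int (*vgtod_fn)(struct timeval *, struct timezone *);
 *	vgtod_fn vgtod = (vgtod_fn)(VSYSCALL_BASE + 0 * VSYSCALL_SLOT);
 *	struct timeval tv;
 *	vgtod(&tv, NULL);	/- slot 0 is vgettimeofday below -/
 */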

#include <linux/time.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/seqlock.h>
#include <linux/jiffies.h>
#include <linux/sysctl.h>
#include <linux/getcpu.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/notifier.h>

#include <asm/vsyscall.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/fixmap.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <asm/segment.h>
#include <asm/desc.h>
#include <asm/topology.h>

#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
#define __syscall_clobber "r11","rcx","memory"

int __sysctl_vsyscall __section_sysctl_vsyscall = 1;
seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED;
int __vgetcpu_mode __section_vgetcpu_mode;

#include <asm/unistd.h>

/* Fold an overflowed tv_usec into tv_sec. */
static __always_inline void timeval_normalize(struct timeval * tv)
{
	time_t __sec;

	__sec = tv->tv_usec / 1000000;
	if (__sec) {
		tv->tv_usec %= 1000000;
		tv->tv_sec += __sec;
	}
}

static __always_inline void do_vgettimeofday(struct timeval * tv)
{
	long sequence, t;
	unsigned long sec, usec;

	do {
		sequence = read_seqbegin(&__xtime_lock);

		sec = __xtime.tv_sec;
		usec = __xtime.tv_nsec / 1000;

		if (__vxtime.mode != VXTIME_HPET) {
			t = get_cycles_sync();
			if (t < __vxtime.last_tsc)
				t = __vxtime.last_tsc;
			usec += ((t - __vxtime.last_tsc) *
				 __vxtime.tsc_quot) >> 32;
			/* See comment in x86_64 do_gettimeofday. */
		} else {
			usec += ((readl((void __iomem *)
				   fix_to_virt(VSYSCALL_HPET) + 0xf0) -
				  __vxtime.last) * __vxtime.quot) >> 32;
		}
	} while (read_seqretry(&__xtime_lock, sequence));

	tv->tv_sec = sec + usec / 1000000;
	tv->tv_usec = usec % 1000000;
}

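/*
 * Illustrative note, not in the original file: both branches above are
 * 32.32 fixed-point conversions.  Assuming the timer code sets up
 * tsc_quot as roughly (1000 << 32) / cpu_khz (microseconds per cycle,
 * scaled by 2^32), the TSC extrapolation amounts to:
 *
 *	cycles = t - __vxtime.last_tsc;
 *	usec  += (cycles * __vxtime.tsc_quot) >> 32;
 *
 * i.e. elapsed cycles times microseconds-per-cycle, with the >> 32
 * dropping the scale factor again.  The HPET branch is analogous,
 * using the counter delta and __vxtime.quot.
 */
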
/* RED-PEN may want to re-add seq locking, but then the variable should be write-once. */
static __always_inline void do_get_tz(struct timezone * tz)
{
	*tz = __sys_tz;
}

static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
{
	int ret;
	/* The "vsysc2" label marks the syscall insn so that
	   vsyscall_sysctl_change() below can patch it. */
	asm volatile("vsysc2: syscall"
		: "=a" (ret)
		: "0" (__NR_gettimeofday),"D" (tv),"S" (tz) : __syscall_clobber );
	return ret;
}

static __always_inline long time_syscall(long *t)
{
	long secs;
	/* "vsysc1" is likewise patched by vsyscall_sysctl_change(). */
	asm volatile("vsysc1: syscall"
		: "=a" (secs)
		: "0" (__NR_time),"D" (t) : __syscall_clobber);
	return secs;
}

int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
{
	if (!__sysctl_vsyscall)
		return gettimeofday(tv,tz);
	if (tv)
		do_vgettimeofday(tv);
	if (tz)
		do_get_tz(tz);
	return 0;
}

/* This will break when the xtime seconds get inaccurate, but that is
 * unlikely */
time_t __vsyscall(1) vtime(time_t *t)
{
	if (!__sysctl_vsyscall)
		return time_syscall(t);
	else if (t)
		*t = __xtime.tv_sec;
	return __xtime.tv_sec;
}

/* Fast way to get the current CPU and node.
   This helps user space keep per-node and per-CPU caches.
   The result is not guaranteed without CPU affinity, but it usually
   works out because the scheduler tries to keep a thread on the same
   CPU.

   tcache must point to an array of two unsigned longs.
   All arguments can be NULL. */
long __vsyscall(2)
vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
{
	unsigned int dummy, p;
	unsigned long j = 0;

	/* Fast cache - only recompute the value once per jiffy and
	   avoid the relatively costly rdtscp/cpuid otherwise.
	   This works because the scheduler usually keeps the process
	   on the same CPU and this syscall doesn't guarantee its
	   results anyway.
	   We do this here because otherwise user space would do it on
	   its own in a likely inferior way (no access to jiffies).
	   If you don't like it pass NULL. */
	if (tcache && tcache->blob[0] == (j = __jiffies)) {
		p = tcache->blob[1];
	} else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
		/* Load per CPU data from RDTSCP */
		rdtscp(dummy, dummy, p);
	} else {
		/* Load per CPU data from GDT */
		asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
	}
	if (tcache) {
		tcache->blob[0] = j;
		tcache->blob[1] = p;
	}
	/* The value encodes the CPU in the low 12 bits and the node
	   above them; see vsyscall_set_cpu() below. */
	if (cpu)
		*cpu = p & 0xfff;
	if (node)
		*node = p >> 12;
	return 0;
}

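/*
 * Illustrative usage sketch, not in the original file: a user-space
 * caller passing a cache blob so that repeated calls within one jiffy
 * skip the RDTSCP/LSL path entirely.
 *
 *	struct getcpu_cache cache = { 0 };
 *	unsigned cpu, node;
 *	vgetcpu(&cpu, &node, &cache);	/- slot 2 in the vsyscall page -/
 *	vgetcpu(&cpu, &node, &cache);	/- likely served from the cache -/
 */
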
long __vsyscall(3) venosys_1(void)
{
	return -ENOSYS;
}

#ifdef CONFIG_SYSCTL

/* 0x050f is the two-byte "syscall" opcode (0f 05) read as a
   little-endian u16; 0x9090 is two one-byte NOPs. */
#define SYSCALL 0x050f
#define NOP2 0x9090

/*
 * NOP out syscall in vsyscall page when not needed.
 */
static int vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	extern u16 vsysc1, vsysc2;
	u16 __iomem *map1;
	u16 __iomem *map2;
	int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
	if (!write)
		return ret;
	/* gcc has some trouble with __va(__pa()), so just do it this
	   way. */
	map1 = ioremap(__pa_symbol(&vsysc1), 2);
	if (!map1)
		return -ENOMEM;
	map2 = ioremap(__pa_symbol(&vsysc2), 2);
	if (!map2) {
		ret = -ENOMEM;
		goto out;
	}
	if (!sysctl_vsyscall) {
		writew(SYSCALL, map1);
		writew(SYSCALL, map2);
	} else {
		writew(NOP2, map1);
		writew(NOP2, map2);
	}
	iounmap(map2);
out:
	iounmap(map1);
	return ret;
}

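/*
 * Illustrative note, not in the original file: when vsyscall64 is 0,
 * vgettimeofday()/vtime() above take their syscall branches, so the
 * "syscall" opcodes at vsysc1/vsysc2 must be intact; when it is
 * nonzero those branches are never reached and the opcodes are
 * replaced with NOPs.
 */
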
static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen,
				void __user *oldval, size_t __user *oldlenp,
				void __user *newval, size_t newlen)
{
	return -ENOSYS;
}

static ctl_table kernel_table2[] = {
	{ .ctl_name = 99, .procname = "vsyscall64",
	  .data = &sysctl_vsyscall, .maxlen = sizeof(int), .mode = 0644,
	  .strategy = vsyscall_sysctl_nostrat,
	  .proc_handler = vsyscall_sysctl_change },
	{}
};

static ctl_table kernel_root_table2[] = {
	{ .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555,
	  .child = kernel_table2 },
	{}
};

#endif

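/*
 * Illustrative note, not in the original file: the tables above expose
 * the knob as /proc/sys/kernel/vsyscall64.  For example (assuming a
 * root shell):
 *
 *	echo 0 > /proc/sys/kernel/vsyscall64	# force the syscall path
 *	echo 1 > /proc/sys/kernel/vsyscall64	# use the fast vsyscall
 */
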
/* Assume __initcall executes before all user space. Hopefully kmod
   doesn't violate that. We'll find out if it does. */
static void __cpuinit vsyscall_set_cpu(int cpu)
{
	unsigned long *d;
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node[cpu];
#endif
	if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/* Store the cpu number in the segment limit so that it can be
	   loaded quickly in user space in vgetcpu.
	   12 bits for the CPU and 8 bits for the node. */
	d = (unsigned long *)(cpu_gdt(cpu) + GDT_ENTRY_PER_CPU);
	*d = 0x0f40000000000ULL;
	*d |= cpu;
	*d |= (node & 0xf) << 12;
	*d |= (node >> 4) << 48;
}
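
/*
 * Illustrative note, not in the original file: in an x86 segment
 * descriptor, limit bits 15..0 live in descriptor bits 15..0 and
 * limit bits 19..16 in descriptor bits 51..48.  The writes above thus
 * build a 20-bit limit of (node << 12) | cpu:
 *
 *	bits  0..11  cpu
 *	bits 12..15  node & 0xf	(descriptor bits 12..15)
 *	bits 16..19  node >> 4	(descriptor bits 48..51)
 *
 * which is exactly what the LSL instruction in vgetcpu() reads back.
 */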

static void __cpuinit cpu_vsyscall_init(void *arg)
{
	/* preemption should be already off */
	vsyscall_set_cpu(raw_smp_processor_id());
}

static int __cpuinit
cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
	long cpu = (long)arg;
	if (action == CPU_ONLINE)
		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
	return NOTIFY_DONE;
}

static void __init map_vsyscall(void)
{
	extern char __vsyscall_0;
	unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);

	/* Note that VSYSCALL_MAPPED_PAGES must agree with the code below. */
	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
}

static int __init vsyscall_init(void)
{
	BUG_ON(((unsigned long) &vgettimeofday !=
		VSYSCALL_ADDR(__NR_vgettimeofday)));
	BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
	BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
	BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu));
	map_vsyscall();
#ifdef CONFIG_SYSCTL
	register_sysctl_table(kernel_root_table2);
#endif
	on_each_cpu(cpu_vsyscall_init, NULL, 0, 1);
	hotcpu_notifier(cpu_vsyscall_notifier, 0);
	return 0;
}

__initcall(vsyscall_init);