/*
 * linux/arch/x86_64/kernel/vsyscall.c
 *
 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright 2003 Andi Kleen, SuSE Labs.
 *
 * Thanks to hpa@transmeta.com for some useful hints.
 * Special thanks to Ingo Molnar for his early experience with
 * a different vsyscall implementation for Linux/IA32 and for the name.
 *
 * vsyscall 1 is located at -10Mbyte, vsyscall 2 is located
 * at virtual address -10Mbyte+1024bytes etc... There are at most 4
 * vsyscalls. One vsyscall can reserve more than 1 slot to avoid
 * jumping out of line if necessary. We cannot add more with this
 * mechanism because older kernels won't return -ENOSYS.
 * If we want more than four we need a vDSO.
 *
 * Note: the concept clashes with user mode linux. If you use UML and
 * want per guest time just set the kernel.vsyscall64 sysctl to 0.
 */

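/*
 * Illustrative address arithmetic for the layout described above (a
 * sketch; the authoritative constants live in asm/vsyscall.h). Slot N
 * sits at -10Mbyte + N*1024 in the negative half of the address space:
 *
 *	slot 0 (vgettimeofday)	0xffffffffff600000
 *	slot 1 (vtime)		0xffffffffff600400
 *	slot 2 (vgetcpu)	0xffffffffff600800
 */
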
#include <linux/time.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/seqlock.h>
#include <linux/jiffies.h>
#include <linux/sysctl.h>
#include <linux/getcpu.h>

#include <asm/vsyscall.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/fixmap.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <asm/segment.h>
#include <asm/desc.h>
#include <asm/topology.h>

#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))

int __sysctl_vsyscall __section_sysctl_vsyscall = 1;
seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED;
int __vgetcpu_mode __section_vgetcpu_mode;

#include <asm/unistd.h>

static __always_inline void timeval_normalize(struct timeval * tv)
{
	time_t __sec;

	__sec = tv->tv_usec / 1000000;
	if (__sec) {
		tv->tv_usec %= 1000000;
		tv->tv_sec += __sec;
	}
}

static __always_inline void do_vgettimeofday(struct timeval * tv)
{
	long sequence, t;
	unsigned long sec, usec;

	do {
		sequence = read_seqbegin(&__xtime_lock);

		sec = __xtime.tv_sec;
		usec = (__xtime.tv_nsec / 1000) +
			(__jiffies - __wall_jiffies) * (1000000 / HZ);

		if (__vxtime.mode != VXTIME_HPET) {
			t = get_cycles_sync();
			if (t < __vxtime.last_tsc)
				t = __vxtime.last_tsc;
			usec += ((t - __vxtime.last_tsc) *
				 __vxtime.tsc_quot) >> 32;
			/* See comment in x86_64 do_gettimeofday. */
		} else {
			usec += ((readl((void __iomem *)
				   fix_to_virt(VSYSCALL_HPET) + 0xf0) -
				  __vxtime.last) * __vxtime.quot) >> 32;
		}
	} while (read_seqretry(&__xtime_lock, sequence));

	tv->tv_sec = sec + usec / 1000000;
	tv->tv_usec = usec % 1000000;
}
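
/*
 * A worked example of the 32.32 fixed-point interpolation above
 * (illustrative numbers, not taken from this file): tsc_quot is
 * assumed to hold (microseconds per TSC cycle) << 32. On a 2 GHz TSC
 * one cycle is 0.0005 us, so tsc_quot ~= 0.0005 * 2^32 ~= 2147483;
 * after 4000000 cycles (2 ms) the code adds
 * (4000000 * 2147483) >> 32 ~= 2000 us, as expected.
 */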

/* RED-PEN may want to re-add seq locking, but then the variable should be write-once. */
static __always_inline void do_get_tz(struct timezone * tz)
{
	*tz = __sys_tz;
}

static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
{
	int ret;
	asm volatile("vsysc2: syscall"
		: "=a" (ret)
		: "0" (__NR_gettimeofday),"D" (tv),"S" (tz) : __syscall_clobber );
	return ret;
}

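/*
 * The inline asm above follows the x86-64 syscall convention: the
 * system call number is loaded into %rax (the "0"/"=a" constraints)
 * and the first two arguments into %rdi ("D") and %rsi ("S"); the
 * syscall instruction clobbers %rcx and %r11, which __syscall_clobber
 * is expected to name. The vsysc1/vsysc2 labels let the sysctl
 * handler below find and patch these instructions.
 */
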
static __always_inline long time_syscall(long *t)
{
	long secs;
	asm volatile("vsysc1: syscall"
		: "=a" (secs)
		: "0" (__NR_time),"D" (t) : __syscall_clobber);
	return secs;
}

int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
{
	if (!__sysctl_vsyscall)
		return gettimeofday(tv,tz);
	if (tv)
		do_vgettimeofday(tv);
	if (tz)
		do_get_tz(tz);
	return 0;
}

/* This will break when the xtime seconds get inaccurate, but that is
 * unlikely */
time_t __vsyscall(1) vtime(time_t *t)
{
	if (!__sysctl_vsyscall)
		return time_syscall(t);
	else if (t)
		*t = __xtime.tv_sec;
	return __xtime.tv_sec;
}

/* Fast way to get current CPU and node.
   This helps to do per node and per CPU caches in user space.
   The result is not guaranteed without CPU affinity, but usually
   works out because the scheduler tries to keep a thread on the same
   CPU.

   tcache must point to an array of two longs.
   All arguments can be NULL. */
long __vsyscall(2)
vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
{
	unsigned int dummy, p;
	unsigned long j = 0;

	/* Fast cache - only recompute value once per jiffy and avoid
	   the relatively costly rdtscp/cpuid otherwise.
	   This works because the scheduler usually keeps the process
	   on the same CPU and this syscall doesn't guarantee its
	   results anyway.
	   We do this here because otherwise user space would do it on
	   its own in a likely inferior way (no access to jiffies).
	   If you don't like it pass NULL. */
	if (tcache && tcache->t0 == (j = __jiffies)) {
		p = tcache->t1;
	} else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
		/* Load per CPU data from RDTSCP */
		rdtscp(dummy, dummy, p);
	} else {
		/* Load per CPU data from GDT */
		asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
	}
	if (tcache) {
		tcache->t0 = j;
		tcache->t1 = p;
	}
	if (cpu)
		*cpu = p & 0xfff;
	if (node)
		*node = p >> 12;
	return 0;
}

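/*
 * A sketch of a user-space caller (illustrative only; the function
 * pointer type and the slot address are spelled out by hand here):
 *
 *	typedef long (*vgetcpu_t)(unsigned *, unsigned *,
 *				  struct getcpu_cache *);
 *	vgetcpu_t vgetcpu_p = (vgetcpu_t)0xffffffffff600800UL; // slot 2
 *	unsigned cpu, node;
 *	struct getcpu_cache cache = { 0 };
 *	vgetcpu_p(&cpu, &node, &cache);	// cache may also be NULL
 */
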
long __vsyscall(3) venosys_1(void)
{
	return -ENOSYS;
}

#ifdef CONFIG_SYSCTL

#define SYSCALL 0x050f
#define NOP2 0x9090

/*
 * NOP out syscall in vsyscall page when not needed.
 */
static int vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	extern u16 vsysc1, vsysc2;
	u16 __iomem *map1;
	u16 __iomem *map2;
	int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
	if (!write)
		return ret;
	/* gcc has some trouble with __va(__pa()), so just do it this
	   way. */
	map1 = ioremap(__pa_symbol(&vsysc1), 2);
	if (!map1)
		return -ENOMEM;
	map2 = ioremap(__pa_symbol(&vsysc2), 2);
	if (!map2) {
		ret = -ENOMEM;
		goto out;
	}
	if (!sysctl_vsyscall) {
		writew(SYSCALL, map1);
		writew(SYSCALL, map2);
	} else {
		writew(NOP2, map1);
		writew(NOP2, map2);
	}
	iounmap(map2);
out:
	iounmap(map1);
	return ret;
}

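/*
 * SYSCALL and NOP2 above are little-endian u16 instruction encodings:
 * 0x050f is the byte sequence 0f 05 (the syscall instruction) and
 * 0x9090 is two one-byte NOPs. Writing one or the other over the
 * vsysc1/vsysc2 labels thus installs a real syscall when the sysctl
 * is off and replaces it with NOPs when the fast path is enabled.
 */
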
static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen,
				void __user *oldval, size_t __user *oldlenp,
				void __user *newval, size_t newlen,
				void **context)
{
	return -ENOSYS;
}

static ctl_table kernel_table2[] = {
	{ .ctl_name = 99, .procname = "vsyscall64",
	  .data = &sysctl_vsyscall, .maxlen = sizeof(int), .mode = 0644,
	  .strategy = vsyscall_sysctl_nostrat,
	  .proc_handler = vsyscall_sysctl_change },
	{ 0, }
};

static ctl_table kernel_root_table2[] = {
	{ .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555,
	  .child = kernel_table2 },
	{ 0 },
};

#endif

static void __cpuinit write_rdtscp_cb(void *info)
{
	write_rdtscp_aux((unsigned long)info);
}

void __cpuinit vsyscall_set_cpu(int cpu)
{
	unsigned long *d;
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node[cpu];
#endif
	if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP)) {
		void *info = (void *)((node << 12) | cpu);
		/* Can happen on a preemptible kernel */
		if (get_cpu() == cpu)
			write_rdtscp_cb(info);
#ifdef CONFIG_SMP
		else {
			/* the notifier is unfortunately not executed on the
			   target CPU */
			smp_call_function_single(cpu,write_rdtscp_cb,info,0,1);
		}
#endif
		put_cpu();
	}

	/* Store cpu number in limit so that it can be loaded quickly
	   in user space in vgetcpu.
	   12 bits for the CPU and 8 bits for the node. */
	d = (unsigned long *)(cpu_gdt(cpu) + GDT_ENTRY_PER_CPU);
	*d = 0x0f40000000000ULL;
	*d |= cpu;
	*d |= (node & 0xf) << 12;
	*d |= (node >> 4) << 48;
}

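/*
 * Worked example for the limit encoding above (illustrative values):
 * with cpu = 5 and node = 0x13, the 20-bit segment limit becomes
 * 0x13005 - cpu in bits 0-11, the low node nibble in bits 12-15 and
 * the high nibble (0x13 >> 4 = 1) in descriptor bits 48-51. The "lsl"
 * in vgetcpu then yields p = 0x13005, so p & 0xfff = 5 recovers the
 * CPU and p >> 12 = 0x13 the node.
 */
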
static void __init map_vsyscall(void)
{
	extern char __vsyscall_0;
	unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);

	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
}

static int __init vsyscall_init(void)
{
	BUG_ON(((unsigned long) &vgettimeofday !=
		VSYSCALL_ADDR(__NR_vgettimeofday)));
	BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
	BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
	BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu));
	map_vsyscall();
#ifdef CONFIG_SYSCTL
	register_sysctl_table(kernel_root_table2, 0);
#endif
	return 0;
}

__initcall(vsyscall_init);