]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - arch/x86/vdso/vclock_gettime.c
kernel-time: fix s/then/than/ spelling errors
[mirror_ubuntu-zesty-kernel.git] / arch / x86 / vdso / vclock_gettime.c
CommitLineData
2aae950b
AK
1/*
2 * Copyright 2006 Andi Kleen, SUSE Labs.
3 * Subject to the GNU Public License, v.2
4 *
f144a6b4 5 * Fast user context implementation of clock_gettime, gettimeofday, and time.
2aae950b
AK
6 *
7 * The code should have no internal unresolved relocations.
8 * Check with readelf after changing.
2aae950b
AK
9 */
10
2b7d0390 11/* Disable profiling for userspace code: */
2ed84eeb 12#define DISABLE_BRANCH_PROFILING
2b7d0390 13
2aae950b
AK
14#include <linux/kernel.h>
15#include <linux/posix-timers.h>
16#include <linux/time.h>
17#include <linux/string.h>
18#include <asm/vsyscall.h>
98d0ac38 19#include <asm/fixmap.h>
2aae950b
AK
20#include <asm/vgtod.h>
21#include <asm/timex.h>
22#include <asm/hpet.h>
23#include <asm/unistd.h>
24#include <asm/io.h>
2aae950b 25
8c49d9a7 26#define gtod (&VVAR(vsyscall_gtod_data))
2aae950b 27
/*
 * Read the TSC from user context, clamped so the returned value never
 * appears to go backwards relative to the last cycle value recorded by
 * the kernel timekeeper (cycle_last).
 */
notrace static cycle_t vread_tsc(void)
{
	cycle_t ret;
	u64 last;

	/*
	 * Empirically, a fence (of type that depends on the CPU)
	 * before rdtsc is enough to ensure that rdtsc is ordered
	 * with respect to loads. The various CPU manuals are unclear
	 * as to whether rdtsc can be reordered with later loads,
	 * but no one has ever seen it happen.
	 */
	rdtsc_barrier();
	ret = (cycle_t)vget_cycles();

	/* Most recent cycle count published by the kernel's timekeeper. */
	last = VVAR(vsyscall_gtod_data).clock.cycle_last;

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead. I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
	asm volatile ("");
	return last;
}
59
60static notrace cycle_t vread_hpet(void)
61{
62 return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
63}
64
/*
 * Fall back to the real clock_gettime() syscall when the vDSO fast path
 * cannot service the request.  The "memory" clobber tells the compiler
 * that the kernel may write through *ts.
 */
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;
	/* x86-64 syscall ABI: nr in rax ("=a"/"0"), arg1 in rdi ("D"), arg2 in rsi ("S") */
	asm("syscall" : "=a" (ret) :
	    "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
	return ret;
}
72
/*
 * Fall back to the real gettimeofday() syscall when the vDSO fast path
 * cannot service the request.  The "memory" clobber tells the compiler
 * that the kernel may write through *tv and *tz.
 */
notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
	long ret;

	/* x86-64 syscall ABI: nr in rax ("=a"/"0"), arg1 in rdi ("D"), arg2 in rsi ("S") */
	asm("syscall" : "=a" (ret) :
	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
	return ret;
}
81
82
/*
 * Nanoseconds elapsed since gtod->clock.cycle_last, computed from the
 * active vclock (TSC or HPET) using the mult/shift factors published by
 * the kernel timekeeper.  Returns 0 when no vclock is usable, which the
 * callers below treat as "take the syscall fallback".  Called from
 * within the gtod->seq read loops so a torn update is retried there.
 */
notrace static inline long vgetns(void)
{
	long v;
	cycles_t cycles;
	if (gtod->clock.vclock_mode == VCLOCK_TSC)
		cycles = vread_tsc();
	else if (gtod->clock.vclock_mode == VCLOCK_HPET)
		cycles = vread_hpet();
	else
		return 0;
	/* Mask to the clocksource width, then scale cycles -> ns. */
	v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask;
	return (v * gtod->clock.mult) >> gtod->clock.shift;
}
96
/*
 * Fill *ts with CLOCK_REALTIME.  The seqcount loop retries whenever the
 * kernel updated the timekeeping data concurrently, so the sec/nsec pair
 * and the cycle delta are read consistently.  Returns the vclock mode
 * that was active (VCLOCK_NONE means the caller must use the syscall).
 */
notrace static noinline int do_realtime(struct timespec *ts)
{
	unsigned long seq, ns;
	int mode;

	do {
		seq = read_seqcount_begin(&gtod->seq);
		mode = gtod->clock.vclock_mode;
		ts->tv_sec = gtod->wall_time_sec;
		ts->tv_nsec = gtod->wall_time_nsec;
		ns = vgetns();
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));

	/* Add the ns elapsed since the last kernel tick; normalizes tv_nsec. */
	timespec_add_ns(ts, ns);
	return mode;
}
113
/*
 * Fill *ts with CLOCK_MONOTONIC: wall time (plus cycles since the last
 * tick) shifted by the wall_to_monotonic offset, all read under the
 * seqcount retry loop for consistency.  Returns the active vclock mode
 * (VCLOCK_NONE means the caller must use the syscall).
 */
notrace static noinline int do_monotonic(struct timespec *ts)
{
	unsigned long seq, ns, secs;
	int mode;

	do {
		seq = read_seqcount_begin(&gtod->seq);
		mode = gtod->clock.vclock_mode;
		secs = gtod->wall_time_sec;
		ns = gtod->wall_time_nsec + vgetns();
		secs += gtod->wall_to_monotonic.tv_sec;
		ns += gtod->wall_to_monotonic.tv_nsec;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));

	/* wall_time_nsec, vgetns(), and wall_to_monotonic.tv_nsec
	 * are all guaranteed to be nonnegative.
	 */
	while (ns >= NSEC_PER_SEC) {
		ns -= NSEC_PER_SEC;
		++secs;
	}
	ts->tv_sec = secs;
	ts->tv_nsec = ns;

	return mode;
}
140
/*
 * Fill *ts with CLOCK_REALTIME_COARSE: the tick-granularity wall time
 * snapshot, read consistently under the seqcount loop.  No cycle read
 * is needed, so this never falls back to the syscall; returns 0.
 */
notrace static noinline int do_realtime_coarse(struct timespec *ts)
{
	unsigned long seq;
	do {
		seq = read_seqcount_begin(&gtod->seq);
		ts->tv_sec = gtod->wall_time_coarse.tv_sec;
		ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
	return 0;
}
151
/*
 * Fill *ts with CLOCK_MONOTONIC_COARSE: the coarse wall time snapshot
 * shifted by wall_to_monotonic, read under the seqcount loop.  Always
 * succeeds without a syscall; returns 0.
 */
notrace static noinline int do_monotonic_coarse(struct timespec *ts)
{
	unsigned long seq, ns, secs;
	do {
		seq = read_seqcount_begin(&gtod->seq);
		secs = gtod->wall_time_coarse.tv_sec;
		ns = gtod->wall_time_coarse.tv_nsec;
		secs += gtod->wall_to_monotonic.tv_sec;
		ns += gtod->wall_to_monotonic.tv_nsec;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));

	/* wall_time_nsec and wall_to_monotonic.tv_nsec are
	 * guaranteed to be between 0 and NSEC_PER_SEC.
	 */
	if (ns >= NSEC_PER_SEC) {
		ns -= NSEC_PER_SEC;
		++secs;
	}
	ts->tv_sec = secs;
	ts->tv_nsec = ns;

	return 0;
}
175
23adec55 176notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
2aae950b 177{
a939e817
JS
178 int ret = VCLOCK_NONE;
179
0d7b8547
AL
180 switch (clock) {
181 case CLOCK_REALTIME:
a939e817 182 ret = do_realtime(ts);
0d7b8547
AL
183 break;
184 case CLOCK_MONOTONIC:
a939e817 185 ret = do_monotonic(ts);
0d7b8547
AL
186 break;
187 case CLOCK_REALTIME_COARSE:
188 return do_realtime_coarse(ts);
189 case CLOCK_MONOTONIC_COARSE:
190 return do_monotonic_coarse(ts);
191 }
192
a939e817
JS
193 if (ret == VCLOCK_NONE)
194 return vdso_fallback_gettime(clock, ts);
195 return 0;
2aae950b
AK
196}
197int clock_gettime(clockid_t, struct timespec *)
198 __attribute__((weak, alias("__vdso_clock_gettime")));
199
/*
 * vDSO entry point for gettimeofday(2).  Fills *tv by treating it as a
 * struct timespec for do_realtime() (the BUILD_BUG_ON proves the field
 * offsets and sizes line up), then converts the nanoseconds in tv_usec
 * down to microseconds.  The timezone, when requested, is copied from
 * the kernel's sys_tz snapshot.  Falls back to the real syscall when no
 * vclock is usable; returns 0 on the fast path.
 */
notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
	long ret = VCLOCK_NONE;

	if (likely(tv != NULL)) {
		/* Guarantee the timeval-as-timespec aliasing below is safe. */
		BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
			     offsetof(struct timespec, tv_nsec) ||
			     sizeof(*tv) != sizeof(struct timespec));
		ret = do_realtime((struct timespec *)tv);
		/* do_realtime() stored nanoseconds; timeval wants microseconds. */
		tv->tv_usec /= 1000;
	}
	if (unlikely(tz != NULL)) {
		/* Avoid memcpy. Some old compilers fail to inline it */
		tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
		tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
	}

	if (ret == VCLOCK_NONE)
		return vdso_fallback_gtod(tv, tz);
	return 0;
}
int gettimeofday(struct timeval *, struct timezone *)
	__attribute__((weak, alias("__vdso_gettimeofday")));
f144a6b4 223
0d7b8547
AL
224/*
225 * This will break when the xtime seconds get inaccurate, but that is
226 * unlikely
227 */
f144a6b4
AL
228notrace time_t __vdso_time(time_t *t)
229{
973aa818 230 /* This is atomic on x86_64 so we don't need any locks. */
0d7b8547 231 time_t result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);
f144a6b4
AL
232
233 if (t)
234 *t = result;
235 return result;
236}
237int time(time_t *t)
238 __attribute__((weak, alias("__vdso_time")));