x86, vdso: Use asm volatile in __getcpu
arch/x86/include/asm/vgtod.h
#ifndef _ASM_X86_VGTOD_H
#define _ASM_X86_VGTOD_H

#include <linux/compiler.h>
#include <linux/clocksource.h>

#ifdef BUILD_VDSO32_64
typedef u64 gtod_long_t;
#else
typedef unsigned long gtod_long_t;
#endif
/*
 * vsyscall_gtod_data is accessed by 32-bit and 64-bit code at the same
 * time, so be careful when modifying this structure.
 */
struct vsyscall_gtod_data {
	unsigned seq;

	int vclock_mode;
	cycle_t cycle_last;
	cycle_t mask;
	u32 mult;
	u32 shift;

	/* open coded 'struct timespec' */
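	/* ('snsec' = shifted nanoseconds, i.e. nsec left-shifted by 'shift') */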
	u64 wall_time_snsec;
	gtod_long_t wall_time_sec;
	gtod_long_t monotonic_time_sec;
	u64 monotonic_time_snsec;
	gtod_long_t wall_time_coarse_sec;
	gtod_long_t wall_time_coarse_nsec;
	gtod_long_t monotonic_time_coarse_sec;
	gtod_long_t monotonic_time_coarse_nsec;

	int tz_minuteswest;
	int tz_dsttime;
};
extern struct vsyscall_gtod_data vsyscall_gtod_data;

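/*
 * Readers bracket their loads with gtod_read_begin()/gtod_read_retry():
 * begin spins while seq is odd (an update is in progress), and the whole
 * read is retried if a writer ran concurrently.  A sketch of the pattern
 * (roughly what the vDSO gettimeofday code does):
 *
 *	do {
 *		seq = gtod_read_begin(gtod);
 *		ts->tv_sec = gtod->wall_time_sec;
 *		ns = gtod->wall_time_snsec;
 *	} while (unlikely(gtod_read_retry(gtod, seq)));
 */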
static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s)
{
	unsigned ret;

repeat:
	ret = ACCESS_ONCE(s->seq);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	smp_rmb();
	return ret;
}

static inline int gtod_read_retry(const struct vsyscall_gtod_data *s,
				  unsigned start)
{
	smp_rmb();
	return unlikely(s->seq != start);
}

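/*
 * The writer side makes seq odd before touching the data and even again
 * afterwards; the smp_wmb()s keep the data stores inside that window.
 * A sketch of an update:
 *
 *	gtod_write_begin(&vsyscall_gtod_data);
 *	vsyscall_gtod_data.wall_time_sec = ...;
 *	gtod_write_end(&vsyscall_gtod_data);
 */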
static inline void gtod_write_begin(struct vsyscall_gtod_data *s)
{
	++s->seq;
	smp_wmb();
}

static inline void gtod_write_end(struct vsyscall_gtod_data *s)
{
	smp_wmb();
	++s->seq;
}

#ifdef CONFIG_X86_64

#define VGETCPU_CPU_MASK 0xfff

static inline unsigned int __getcpu(void)
{
	unsigned int p;

	/*
	 * Load per CPU data from GDT.  LSL is faster than RDTSCP and
	 * works on all CPUs.  This is volatile so that it orders
	 * correctly wrt barrier() and to keep gcc from cleverly
	 * hoisting it out of the calling function.
	 */
	asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));

	return p;
}
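
/*
 * The segment limit read by LSL encodes both the CPU and the NUMA node;
 * callers split them apart, e.g. the vDSO getcpu code does roughly:
 *
 *	unsigned int p = __getcpu();
 *	cpu  = p & VGETCPU_CPU_MASK;	(low 12 bits)
 *	node = p >> 12;			(remaining bits)
 */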

#endif /* CONFIG_X86_64 */

#endif /* _ASM_X86_VGTOD_H */