#ifndef _ASM_X86_VGTOD_H
#define _ASM_X86_VGTOD_H

#include <linux/compiler.h>
#include <linux/clocksource.h>

#ifdef BUILD_VDSO32_64
typedef u64 gtod_long_t;
#else
typedef unsigned long gtod_long_t;
#endif
/*
 * vsyscall_gtod_data will be accessed by 32-bit and 64-bit code at the
 * same time, so be careful when modifying this structure.
 */
struct vsyscall_gtod_data {
	unsigned seq;		/* seqcount; odd while an update is in flight */

	int vclock_mode;	/* VCLOCK_* clocksource mode for the vDSO */
	u64 cycle_last;
	u64 mask;
	u32 mult;
	u32 shift;

	/* open coded 'struct timespec' */
	u64 wall_time_snsec;
	gtod_long_t wall_time_sec;
	gtod_long_t monotonic_time_sec;
	u64 monotonic_time_snsec;
	gtod_long_t wall_time_coarse_sec;
	gtod_long_t wall_time_coarse_nsec;
	gtod_long_t monotonic_time_coarse_sec;
	gtod_long_t monotonic_time_coarse_nsec;

	int tz_minuteswest;
	int tz_dsttime;
};
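
/*
 * How a reader is expected to consume this snapshot (a sketch mirroring
 * the vDSO reader, not code from this file): the *_snsec fields hold
 * nanoseconds left-shifted by 'shift', so the clocksource delta is
 * folded in before shifting back; 'cycles' comes from the clocksource
 * selected by vclock_mode:
 *
 *	sec  = gtod->wall_time_sec;
 *	ns   = gtod->wall_time_snsec;
 *	ns  += ((cycles - gtod->cycle_last) & gtod->mask) * gtod->mult;
 *	ns >>= gtod->shift;
 */
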
extern struct vsyscall_gtod_data vsyscall_gtod_data;

/* Bitmask of VCLOCK_* modes that have ever been used. */
extern int vclocks_used;
static inline bool vclock_was_used(int vclock)
{
	return READ_ONCE(vclocks_used) & (1 << vclock);
}
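
/*
 * Illustrative use, assuming the VCLOCK_* constants from
 * <asm/clocksource.h>: skip vclock-specific work unless that mode was
 * ever exposed to userspace:
 *
 *	if (vclock_was_used(VCLOCK_TSC))
 *		... tear down / fix up TSC-based vDSO timing ...
 */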

static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s)
{
	unsigned ret;

repeat:
	ret = ACCESS_ONCE(s->seq);
	if (unlikely(ret & 1)) {
		/* A writer holds the seqcount; wait for it to finish. */
		cpu_relax();
		goto repeat;
	}
	smp_rmb();	/* Order the seq read before the data reads. */
	return ret;
}

static inline int gtod_read_retry(const struct vsyscall_gtod_data *s,
				  unsigned start)
{
	smp_rmb();
	return unlikely(s->seq != start);
}
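
/*
 * Typical reader pairing, as a sketch (the vDSO's vclock_gettime.c uses
 * this shape; 'gtod' stands for &vsyscall_gtod_data and the copied
 * fields are placeholders):
 *
 *	unsigned seq;
 *
 *	do {
 *		seq = gtod_read_begin(gtod);
 *		sec = gtod->wall_time_sec;
 *		ns  = gtod->wall_time_snsec;
 *	} while (unlikely(gtod_read_retry(gtod, seq)));
 */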

static inline void gtod_write_begin(struct vsyscall_gtod_data *s)
{
	++s->seq;	/* Make the count odd: readers will spin/retry. */
	smp_wmb();	/* Order the bump before the data updates. */
}

static inline void gtod_write_end(struct vsyscall_gtod_data *s)
{
	smp_wmb();	/* Order the data updates before the bump. */
	++s->seq;	/* Back to even: the data is consistent again. */
}
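
/*
 * Matching writer pairing, as a sketch (the timekeeping side brackets
 * its updates like this; 'vdata' and the stores shown are illustrative):
 *
 *	gtod_write_begin(vdata);
 *	vdata->vclock_mode = ...;
 *	vdata->cycle_last  = ...;
 *	...
 *	gtod_write_end(vdata);
 */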

#ifdef CONFIG_X86_64

#define VGETCPU_CPU_MASK 0xfff

static inline unsigned int __getcpu(void)
{
	unsigned int p;

	/*
	 * Load per CPU data from GDT. LSL is faster than RDTSCP and
	 * works on all CPUs. This is volatile so that it orders
	 * correctly wrt barrier() and to keep gcc from cleverly
	 * hoisting it out of the calling function.
	 *
	 * If RDPID is available, use it.
	 */
	alternative_io ("lsl %[seg],%[p]",
			".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
			X86_FEATURE_RDPID,
			[p] "=a" (p), [seg] "r" (__PER_CPU_SEG));

	return p;
}
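
/*
 * The segment limit read by LSL (or the value returned by RDPID) packs
 * the CPU number into the low 12 bits and the NUMA node above them; a
 * caller splits it the way the vDSO's vgetcpu does (sketch):
 *
 *	unsigned int p    = __getcpu();
 *	unsigned int cpu  = p & VGETCPU_CPU_MASK;
 *	unsigned int node = p >> 12;
 */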

#endif /* CONFIG_X86_64 */

#endif /* _ASM_X86_VGTOD_H */