/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_VGTOD_H
#define _ASM_X86_VGTOD_H

#include <linux/compiler.h>
#include <linux/clocksource.h>

#ifdef BUILD_VDSO32_64
typedef u64 gtod_long_t;
#else
typedef unsigned long gtod_long_t;
#endif
/*
 * vsyscall_gtod_data will be accessed by 32 and 64 bit code at the same time
 * so be careful when modifying this structure.
 */
struct vsyscall_gtod_data {
	unsigned seq;

	int vclock_mode;
	u64 cycle_last;
	u64 mask;
	u32 mult;
	u32 shift;

	/* open coded 'struct timespec' */
	u64 wall_time_snsec;
	gtod_long_t wall_time_sec;
	gtod_long_t monotonic_time_sec;
	u64 monotonic_time_snsec;
	gtod_long_t wall_time_coarse_sec;
	gtod_long_t wall_time_coarse_nsec;
	gtod_long_t monotonic_time_coarse_sec;
	gtod_long_t monotonic_time_coarse_nsec;

	int tz_minuteswest;
	int tz_dsttime;
};
extern struct vsyscall_gtod_data vsyscall_gtod_data;

extern int vclocks_used;
static inline bool vclock_was_used(int vclock)
{
	return READ_ONCE(vclocks_used) & (1 << vclock);
}
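
/*
 * Illustrative use (a sketch, not part of this header): the timekeeping
 * code records each vclock mode it has ever enabled as a bit in
 * vclocks_used, so a caller can test, assuming the VCLOCK_TSC mode from
 * the x86 clocksource definitions pulled in above:
 *
 *	if (vclock_was_used(VCLOCK_TSC))
 *		// tear down TSC-related vDSO state
 */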

/*
 * Open-coded seqcount read side: spin while the sequence count is odd
 * (an update is in flight), then order the data reads that follow.
 */
static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s)
{
	unsigned ret;

repeat:
	ret = READ_ONCE(s->seq);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	smp_rmb();
	return ret;
}

/* Pairs with gtod_read_begin(); returns true if the reader must retry. */
static inline int gtod_read_retry(const struct vsyscall_gtod_data *s,
				  unsigned start)
{
	smp_rmb();
	return unlikely(s->seq != start);
}
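
/*
 * Typical read-side pattern (an illustrative sketch, not an API defined
 * here): snapshot the fields between begin/retry and loop until a
 * consistent view is observed.
 *
 *	unsigned seq;
 *	gtod_long_t sec;
 *	do {
 *		seq = gtod_read_begin(&vsyscall_gtod_data);
 *		sec = vsyscall_gtod_data.wall_time_sec;
 *	} while (gtod_read_retry(&vsyscall_gtod_data, seq));
 */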

/* Write side: bump seq to odd, then publish the updates after a barrier. */
static inline void gtod_write_begin(struct vsyscall_gtod_data *s)
{
	++s->seq;
	smp_wmb();
}

/* Order the updates, then bump seq back to even to release readers. */
static inline void gtod_write_end(struct vsyscall_gtod_data *s)
{
	smp_wmb();
	++s->seq;
}
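
/*
 * Corresponding write-side pattern (illustrative sketch): the single
 * writer, the timekeeping update path, brackets its stores so that any
 * reader racing with it retries.
 *
 *	gtod_write_begin(&vsyscall_gtod_data);
 *	vsyscall_gtod_data.wall_time_sec = new_sec;	// new_sec: hypothetical
 *	gtod_write_end(&vsyscall_gtod_data);
 */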

#ifdef CONFIG_X86_64

#define VGETCPU_CPU_MASK 0xfff

static inline unsigned int __getcpu(void)
{
	unsigned int p;

	/*
	 * Load per CPU data from GDT. LSL is faster than RDTSCP and
	 * works on all CPUs. This is volatile so that it orders
	 * correctly wrt barrier() and to keep gcc from cleverly
	 * hoisting it out of the calling function.
	 *
	 * If RDPID is available, use it.
	 */
	alternative_io ("lsl %[seg],%[p]",
			".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
			X86_FEATURE_RDPID,
			[p] "=a" (p), [seg] "r" (__PER_CPU_SEG));

	return p;
}
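
/*
 * Illustrative decode (a sketch based on how the vDSO getcpu consumers
 * use this value): the GDT segment limit read by LSL packs the CPU
 * number in the low 12 bits, with the NUMA node above them.
 *
 *	unsigned int p = __getcpu();
 *	unsigned int cpu = p & VGETCPU_CPU_MASK;
 *	unsigned int node = p >> 12;
 */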

#endif /* CONFIG_X86_64 */

#endif /* _ASM_X86_VGTOD_H */