]>
Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
1965aae3 PA |
2 | #ifndef _ASM_X86_VGTOD_H |
3 | #define _ASM_X86_VGTOD_H | |
2aae950b | 4 | |
7c03156f | 5 | #include <linux/compiler.h> |
2aae950b AK |
6 | #include <linux/clocksource.h> |
7 | ||
7c03156f SS |
8 | #ifdef BUILD_VDSO32_64 |
9 | typedef u64 gtod_long_t; | |
10 | #else | |
11 | typedef unsigned long gtod_long_t; | |
12 | #endif | |
13 | /* | |
14 | * vsyscall_gtod_data will be accessed by 32 and 64 bit code at the same time | |
15 | * so be careful when modifying this structure. | |
16 | */ | |
/*
 * Snapshot of timekeeping state exported to the vDSO/vsyscall page.
 * The layout is shared between 32-bit and 64-bit readers, so fields
 * must not be reordered or resized (see the comment above).
 */
struct vsyscall_gtod_data {
	unsigned seq;			/* seqcount: odd while an update is in progress */

	int vclock_mode;		/* which vclock the reader should use */
	u64 cycle_last;			/* clocksource cycle value at last update */
	u64 mask;			/* clocksource counter mask */
	u32 mult;			/* cycles -> shifted-ns multiplier */
	u32 shift;			/* right shift applied after mult */

	/* open coded 'struct timespec' */
	u64 wall_time_snsec;		/* presumably ns << shift ("shifted ns") — confirm against vclock_gettime */
	gtod_long_t wall_time_sec;
	gtod_long_t monotonic_time_sec;
	u64 monotonic_time_snsec;	/* same shifted-ns encoding as wall_time_snsec */
	gtod_long_t wall_time_coarse_sec;
	gtod_long_t wall_time_coarse_nsec;	/* coarse clocks store plain ns, not shifted */
	gtod_long_t monotonic_time_coarse_sec;
	gtod_long_t monotonic_time_coarse_nsec;

	int tz_minuteswest;		/* timezone info for gettimeofday() */
	int tz_dsttime;
};
2aae950b AK |
39 | extern struct vsyscall_gtod_data vsyscall_gtod_data; |
40 | ||
bd902c53 AL |
41 | extern int vclocks_used; |
42 | static inline bool vclock_was_used(int vclock) | |
43 | { | |
44 | return READ_ONCE(vclocks_used) & (1 << vclock); | |
45 | } | |
46 | ||
7c03156f SS |
47 | static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s) |
48 | { | |
49 | unsigned ret; | |
50 | ||
51 | repeat: | |
6aa7de05 | 52 | ret = READ_ONCE(s->seq); |
7c03156f SS |
53 | if (unlikely(ret & 1)) { |
54 | cpu_relax(); | |
55 | goto repeat; | |
56 | } | |
57 | smp_rmb(); | |
58 | return ret; | |
59 | } | |
60 | ||
/*
 * Close a reader-side critical section opened by gtod_read_begin().
 * Returns nonzero if the sequence number changed — i.e. a writer ran
 * concurrently and the data just read may be torn — in which case the
 * caller must retry. The read barrier orders the data reads before
 * the final sequence re-check.
 */
static inline int gtod_read_retry(const struct vsyscall_gtod_data *s,
					unsigned start)
{
	smp_rmb();
	return unlikely(s->seq != start);
}
67 | ||
/*
 * Open the writer-side critical section: bump the sequence count to an
 * odd value so concurrent readers spin/retry, then issue a write
 * barrier so the increment is visible before any of the data updates
 * that follow.
 */
static inline void gtod_write_begin(struct vsyscall_gtod_data *s)
{
	++s->seq;	/* now odd: signals "update in progress" to readers */
	smp_wmb();
}
73 | ||
/*
 * Close the writer-side critical section: a write barrier orders all
 * preceding data updates before the sequence increment that makes the
 * count even again, letting readers' retry check succeed.
 */
static inline void gtod_write_end(struct vsyscall_gtod_data *s)
{
	smp_wmb();
	++s->seq;	/* back to even: update complete */
}
79 | ||
e76b027e AL |
80 | #ifdef CONFIG_X86_64 |
81 | ||
82 | #define VGETCPU_CPU_MASK 0xfff | |
83 | ||
/*
 * Fast userspace-visible read of the current CPU/node encoding stored
 * in the per-CPU GDT segment limit. Callers presumably extract the
 * CPU number with VGETCPU_CPU_MASK — confirm against the vgetcpu
 * implementation.
 */
static inline unsigned int __getcpu(void)
{
	unsigned int p;

	/*
	 * Load per CPU data from GDT. LSL is faster than RDTSCP and
	 * works on all CPUs. This is volatile so that it orders
	 * correctly wrt barrier() and to keep gcc from cleverly
	 * hoisting it out of the calling function.
	 *
	 * If RDPID is available, use it.
	 */
	/* Patched at boot: LSL is replaced by RDPID when the CPU has it */
	alternative_io ("lsl %[p],%[seg]",
			".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
			X86_FEATURE_RDPID,
			[p] "=a" (p), [seg] "r" (__PER_CPU_SEG));

	return p;
}
103 | ||
104 | #endif /* CONFIG_X86_64 */ | |
105 | ||
1965aae3 | 106 | #endif /* _ASM_X86_VGTOD_H */ |