/*
 * vgtod.h: data shared between the kernel timekeeping code and the x86
 * vDSO/vsyscall gettimeofday fast path.
 */
1 | #ifndef _ASM_X86_VGTOD_H |
2 | #define _ASM_X86_VGTOD_H | |
2aae950b | 3 | |
7c03156f | 4 | #include <linux/compiler.h> |
2aae950b AK |
5 | #include <linux/clocksource.h> |
6 | ||
#ifdef BUILD_VDSO32_64
/*
 * 32-bit vDSO code built against a 64-bit kernel: the "long" fields of
 * the shared data structure are 64 bits wide on the kernel side, so the
 * 32-bit reader must use a 64-bit type to match the layout.
 */
typedef u64 gtod_long_t;
#else
/* Native build: kernel and vDSO agree on the width of unsigned long. */
typedef unsigned long gtod_long_t;
#endif
/*
 * vsyscall_gtod_data will be accessed by 32 and 64 bit code at the same time
 * so be careful when modifying this structure.
 */
struct vsyscall_gtod_data {
	unsigned seq;			/* seqcount: odd while an update is in progress */

	int vclock_mode;		/* which vDSO clocksource readout to use */
	cycle_t	cycle_last;		/* clocksource value at the last update */
	cycle_t	mask;			/* clocksource counter wrap mask */
	u32	mult;			/* cycles -> shifted-ns multiplier */
	u32	shift;			/* right shift applied after mult */

	/* open coded 'struct timespec' */
	u64		wall_time_snsec;	/* wall-clock shifted ns (pairs with shift) */
	gtod_long_t	wall_time_sec;
	gtod_long_t	monotonic_time_sec;
	u64		monotonic_time_snsec;	/* monotonic shifted ns */
	gtod_long_t	wall_time_coarse_sec;	/* coarse clocks: tick-granularity snapshot */
	gtod_long_t	wall_time_coarse_nsec;
	gtod_long_t	monotonic_time_coarse_sec;
	gtod_long_t	monotonic_time_coarse_nsec;

	int		tz_minuteswest;		/* timezone: minutes west of Greenwich */
	int		tz_dsttime;		/* timezone: DST correction type */
};
/* Single instance, defined by the kernel and mapped into the vDSO. */
extern struct vsyscall_gtod_data vsyscall_gtod_data;
7c03156f SS |
40 | static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s) |
41 | { | |
42 | unsigned ret; | |
43 | ||
44 | repeat: | |
45 | ret = ACCESS_ONCE(s->seq); | |
46 | if (unlikely(ret & 1)) { | |
47 | cpu_relax(); | |
48 | goto repeat; | |
49 | } | |
50 | smp_rmb(); | |
51 | return ret; | |
52 | } | |
53 | ||
/*
 * Reader-side exit of the seqcount protocol: returns non-zero if the
 * sequence counter changed since gtod_read_begin(), meaning the snapshot
 * just read may be torn and must be retried. The smp_rmb() orders the
 * preceding data reads before the re-read of seq.
 */
static inline int gtod_read_retry(const struct vsyscall_gtod_data *s,
					unsigned start)
{
	smp_rmb();
	return unlikely(s->seq != start);
}
60 | ||
/*
 * Writer-side entry: bump seq to an odd value so concurrent readers see
 * an update in progress; the smp_wmb() orders the increment before the
 * data stores that follow.
 */
static inline void gtod_write_begin(struct vsyscall_gtod_data *s)
{
	++s->seq;
	smp_wmb();
}
66 | ||
/*
 * Writer-side exit: the smp_wmb() orders all preceding data stores before
 * the final increment, which makes seq even again and publishes the
 * update to readers.
 */
static inline void gtod_write_end(struct vsyscall_gtod_data *s)
{
	smp_wmb();
	++s->seq;
}
72 | ||
#ifdef CONFIG_X86_64

/* Low 12 bits of the __getcpu() result hold the CPU number; the
 * remaining bits presumably encode the node — confirm against the code
 * that programs the per-CPU segment limit. */
#define VGETCPU_CPU_MASK 0xfff

/*
 * Fetch the CPU/node encoding that the kernel stashed in the limit field
 * of the per-CPU GDT segment, via the LSL (load segment limit)
 * instruction. Callers extract the CPU number with VGETCPU_CPU_MASK.
 */
static inline unsigned int __getcpu(void)
{
	unsigned int p;

	/*
	 * Load per CPU data from GDT. LSL is faster than RDTSCP and
	 * works on all CPUs. This is volatile so that it orders
	 * correctly wrt barrier() and to keep gcc from cleverly
	 * hoisting it out of the calling function.
	 */
	asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));

	return p;
}

#endif /* CONFIG_X86_64 */
93 | ||
1965aae3 | 94 | #endif /* _ASM_X86_VGTOD_H */ |