]>
Commit | Line | Data |
---|---|---|
2272b0e0 | 1 | /* |
2f0798a3 | 2 | * x86 TSC related functions |
2272b0e0 | 3 | */ |
2f0798a3 TG |
4 | #ifndef _ASM_X86_TSC_H |
5 | #define _ASM_X86_TSC_H | |
2272b0e0 AS |
6 | |
7 | #include <asm/processor.h> | |
8 | ||
2f0798a3 TG |
9 | #define NS_SCALE 10 /* 2^10, carefully chosen */ |
10 | #define US_SCALE 32 /* 2^32, arbitralrily chosen */ | |
11 | ||
2272b0e0 AS |
12 | /* |
13 | * Standard way to access the cycle counter. | |
14 | */ | |
15 | typedef unsigned long long cycles_t; | |
16 | ||
17 | extern unsigned int cpu_khz; | |
18 | extern unsigned int tsc_khz; | |
73018a66 GOC |
19 | /* flag for disabling the tsc */ |
20 | extern int tsc_disable; | |
21 | ||
22 | extern void disable_TSC(void); | |
2272b0e0 AS |
23 | |
24 | static inline cycles_t get_cycles(void) | |
25 | { | |
26 | unsigned long long ret = 0; | |
27 | ||
28 | #ifndef CONFIG_X86_TSC | |
29 | if (!cpu_has_tsc) | |
30 | return 0; | |
31 | #endif | |
32 | ||
33 | #if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC) | |
34 | rdtscll(ret); | |
35 | #endif | |
36 | return ret; | |
37 | } | |
38 | ||
/*
 * Like get_cycles, but make sure the CPU is synchronized.
 *
 * Returns the TSC value when RDTSCP was available and used; returns 0
 * after serializing with CPUID otherwise, in which case the caller is
 * expected to read the TSC itself (see get_cycles_sync() /
 * vget_cycles_sync() below).
 */
static __always_inline cycles_t __get_cycles_sync(void)
{
	unsigned long long ret;
	unsigned eax, edx;

	/*
	 * Use RDTSCP if possible; it is guaranteed to be synchronous
	 * and doesn't cause a VMEXIT on Hypervisors
	 */
	/*
	 * 0x0f,0x01,0xf9 are the RDTSCP opcode bytes; when the CPU lacks
	 * X86_FEATURE_RDTSCP this is patched to a NOP and eax/edx keep
	 * their preseeded 0 inputs ("a" (0U), "d" (0U)), so ret ends up 0.
	 * ecx is clobbered because RDTSCP also writes the TSC_AUX MSR
	 * value into it -- NOTE(review): assumption from the instruction,
	 * not visible in this file.
	 */
	alternative_io(ASM_NOP3, ".byte 0x0f,0x01,0xf9", X86_FEATURE_RDTSCP,
		       ASM_OUTPUT2("=a" (eax), "=d" (edx)),
		       "a" (0U), "d" (0U) : "ecx", "memory");
	ret = (((unsigned long long)edx) << 32) | ((unsigned long long)eax);
	if (ret)
		return ret;

	/*
	 * Don't do an additional sync on CPUs where we know
	 * RDTSC is already synchronous:
	 */
	/* CPUID leaf 1 ("0" (1)) is used purely as a serializing barrier. */
	alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC,
		       "=a" (eax), "0" (1) : "ebx","ecx","edx","memory");

	/* 0 tells the caller: CPU is now synchronized, read the TSC yourself. */
	return 0;
}
65 | ||
66 | static __always_inline cycles_t get_cycles_sync(void) | |
67 | { | |
68 | unsigned long long ret; | |
69 | ret = __get_cycles_sync(); | |
70 | if (!ret) | |
71 | rdtscll(ret); | |
2272b0e0 AS |
72 | return ret; |
73 | } | |
74 | ||
#ifdef CONFIG_PARAVIRT
/*
 * For paravirt guests, some functionalities are executed through function
 * pointers in the various pvops structures.  Those function pointers live
 * inside the kernel and cannot be accessed by user space, so we provide a
 * copy of get_cycles_sync() that forces the use of native_read_tsc().
 * Ideally, the guest should set up its own clock and vread.
 */
static __always_inline long long vget_cycles_sync(void)
{
	unsigned long long cycles = __get_cycles_sync();

	/* 0 means __get_cycles_sync() only serialized; read the TSC now. */
	if (!cycles)
		cycles = native_read_tsc();
	return cycles;
}
#else
# define vget_cycles_sync() get_cycles_sync()
#endif
95 | ||
2272b0e0 | 96 | extern void tsc_init(void); |
5a90cf20 | 97 | extern void mark_tsc_unstable(char *reason); |
2272b0e0 AS |
98 | extern int unsynchronized_tsc(void); |
99 | extern void init_tsc_clocksource(void); | |
d7e28ffe | 100 | int check_tsc_unstable(void); |
2272b0e0 AS |
101 | |
102 | /* | |
103 | * Boot-time check whether the TSCs are synchronized across | |
104 | * all CPUs/cores: | |
105 | */ | |
106 | extern void check_tsc_sync_source(int cpu); | |
107 | extern void check_tsc_sync_target(void); | |
108 | ||
d371698e | 109 | extern void tsc_calibrate(void); |
80ca9c98 | 110 | extern int notsc_setup(char *); |
d371698e | 111 | |
2272b0e0 | 112 | #endif |