#ifndef _ASM_X86_MSR_H
#define _ASM_X86_MSR_H

#include "msr-index.h"

#ifndef __ASSEMBLY__

#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/cpumask.h>
#include <uapi/asm/msr.h>

struct msr {
	union {
		struct {
			u32 l;
			u32 h;
		};
		u64 q;
	};
};

struct msr_info {
	u32 msr_no;
	struct msr reg;
	struct msr *msrs;
	int err;
};

struct msr_regs_info {
	u32 *regs;
	int err;
};

struct saved_msr {
	bool valid;
	struct msr_info info;
};

struct saved_msrs {
	unsigned int num;
	struct saved_msr *array;
};

/*
 * Both i386 and x86_64 return the 64-bit value in edx:eax, but gcc's "A"
 * constraint has different meanings. For i386, "A" means exactly
 * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead,
 * it means rax *or* rdx.
 */
#ifdef CONFIG_X86_64
/* Using 64-bit values saves one instruction clearing the high half of low */
#define DECLARE_ARGS(val, low, high)	unsigned long low, high
#define EAX_EDX_VAL(val, low, high)	((low) | (high) << 32)
#define EAX_EDX_RET(val, low, high)	"=a" (low), "=d" (high)
#else
#define DECLARE_ARGS(val, low, high)	unsigned long long val
#define EAX_EDX_VAL(val, low, high)	(val)
#define EAX_EDX_RET(val, low, high)	"=A" (val)
#endif
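
/*
 * Illustrative expansion (editor's sketch, not part of this header):
 * on x86_64 a read helper effectively becomes
 *
 *	unsigned long low, high;
 *	asm volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
 *	return (low) | (high) << 32;
 *
 * where the shift-or is safe because both halves are declared 64-bit.
 * On i386 the "A" constraint makes the compiler treat edx:eax as one
 * 64-bit value, so no explicit recombination is needed.
 */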

#ifdef CONFIG_TRACEPOINTS
/*
 * Be very careful with includes. This header is prone to include loops.
 */
#include <asm/atomic.h>
#include <linux/tracepoint-defs.h>

extern struct tracepoint __tracepoint_read_msr;
extern struct tracepoint __tracepoint_write_msr;
extern struct tracepoint __tracepoint_rdpmc;
#define msr_tracepoint_active(t) static_key_false(&(t).key)
extern void do_trace_write_msr(unsigned msr, u64 val, int failed);
extern void do_trace_read_msr(unsigned msr, u64 val, int failed);
extern void do_trace_rdpmc(unsigned msr, u64 val, int failed);
#else
#define msr_tracepoint_active(t) false
static inline void do_trace_write_msr(unsigned msr, u64 val, int failed) {}
static inline void do_trace_read_msr(unsigned msr, u64 val, int failed) {}
static inline void do_trace_rdpmc(unsigned msr, u64 val, int failed) {}
#endif

static inline unsigned long long native_read_msr(unsigned int msr)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("1: rdmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_unsafe)
		     : EAX_EDX_RET(val, low, high) : "c" (msr));
	if (msr_tracepoint_active(__tracepoint_read_msr))
		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), 0);
	return EAX_EDX_VAL(val, low, high);
}

static inline unsigned long long native_read_msr_safe(unsigned int msr,
						      int *err)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("2: rdmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
		     : "c" (msr), [fault] "i" (-EIO));
	if (msr_tracepoint_active(__tracepoint_read_msr))
		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
	return EAX_EDX_VAL(val, low, high);
}
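
/*
 * Editor's note on how the _safe variants recover: the exception table
 * entry maps a faulting rdmsr/wrmsr at label 2 to the fixup code at
 * label 3, which stores -EIO in the error output and jumps back to the
 * normal exit at label 1. The xor that clears the error is only reached
 * on the non-faulting path. A sketch of the control flow:
 *
 *	2: rdmsr             <- may #GP on an invalid MSR
 *	   xor  err,err      <- err = 0, non-faulting path only
 *	1: ...               <- normal exit
 *	3: mov  -EIO,err     <- reached only via the exception table
 *	   jmp  1b
 */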

/* Can be uninlined because referenced by paravirt */
notrace static inline void native_write_msr(unsigned int msr,
					    unsigned low, unsigned high)
{
	asm volatile("1: wrmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
		     : : "c" (msr), "a"(low), "d" (high) : "memory");
	if (msr_tracepoint_active(__tracepoint_write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
}

/* Can be uninlined because referenced by paravirt */
notrace static inline int native_write_msr_safe(unsigned int msr,
						unsigned low, unsigned high)
{
	int err;

	asm volatile("2: wrmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=a" (err)
		     : "c" (msr), "0" (low), "d" (high),
		       [fault] "i" (-EIO)
		     : "memory");
	if (msr_tracepoint_active(__tracepoint_write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), err);
	return err;
}

extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);
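
/*
 * Editor's note (hedged): the regs[8] array mirrors the general-purpose
 * register file; judging from in-kernel callers, index 1 (ecx) holds the
 * MSR number and indices 0/2 (eax/edx) carry the value. An illustrative
 * use, modeled on the AMD K7 erratum code:
 *
 *	u32 gprs[8] = { 0 };
 *
 *	gprs[1] = MSR_K7_HWCR;
 *	if (!rdmsr_safe_regs(gprs))
 *		pr_info("HWCR: %x:%x\n", gprs[2], gprs[0]);
 */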

/**
 * rdtsc() - returns the current TSC without ordering constraints
 *
 * rdtsc() returns the result of RDTSC as a 64-bit integer. The
 * only ordering constraint it supplies is the ordering implied by
 * "asm volatile": it will put the RDTSC in the place you expect. The
 * CPU can and will speculatively execute that RDTSC, though, so the
 * results can be non-monotonic if compared on different CPUs.
 */
static __always_inline unsigned long long rdtsc(void)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));

	return EAX_EDX_VAL(val, low, high);
}

/**
 * rdtsc_ordered() - read the current TSC in program order
 *
 * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
 * It is ordered like a load to a global in-memory counter. It should
 * be impossible to observe non-monotonic rdtsc_ordered() behavior
 * across multiple CPUs as long as the TSC is synced.
 */
static __always_inline unsigned long long rdtsc_ordered(void)
{
	/*
	 * The RDTSC instruction is not ordered relative to memory
	 * access. The Intel SDM and the AMD APM are both vague on this
	 * point, but empirically an RDTSC instruction can be
	 * speculatively executed before prior loads. An RDTSC
	 * immediately after an appropriate barrier appears to be
	 * ordered as a normal load, that is, it provides the same
	 * ordering guarantees as reading from a global memory location
	 * that some other imaginary CPU is updating continuously with a
	 * time stamp.
	 */
	alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
		      "lfence", X86_FEATURE_LFENCE_RDTSC);
	return rdtsc();
}
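
/*
 * Hedged usage sketch (editor's example, not from this file): timing a
 * short region with ordered TSC reads, so neither read can be
 * speculated across the code being measured:
 *
 *	u64 t0, t1;
 *
 *	t0 = rdtsc_ordered();
 *	do_something();			// hypothetical workload
 *	t1 = rdtsc_ordered();
 *	pr_info("took %llu cycles\n", t1 - t0);
 */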

/* Deprecated, keep it for a cycle for easier merging: */
#define rdtscll(now)	do { (now) = rdtsc_ordered(); } while (0)

static inline unsigned long long native_read_pmc(int counter)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
	if (msr_tracepoint_active(__tracepoint_rdpmc))
		do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
	return EAX_EDX_VAL(val, low, high);
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
/*
 * Access to machine-specific registers (available on 586 and better only)
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection), which allows gcc to optimize better
 */

#define rdmsr(msr, low, high)					\
do {								\
	u64 __val = native_read_msr((msr));			\
	(void)((low) = (u32)__val);				\
	(void)((high) = (u32)(__val >> 32));			\
} while (0)

static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
{
	native_write_msr(msr, low, high);
}

#define rdmsrl(msr, val)			\
	((val) = native_read_msr((msr)))

static inline void wrmsrl(unsigned msr, u64 val)
{
	native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
}
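
/*
 * Hedged usage sketch (editor's example): reading an MSR as a low/high
 * pair vs. as a single 64-bit value; MSR_EFER is just an illustrative
 * index from msr-index.h:
 *
 *	u32 lo, hi;
 *	u64 efer;
 *
 *	rdmsr(MSR_EFER, lo, hi);	// value split into lo/hi
 *	rdmsrl(MSR_EFER, efer);		// same value as one u64
 *	wrmsrl(MSR_EFER, efer);		// write it back unchanged
 */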

/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
{
	return native_write_msr_safe(msr, low, high);
}

/* rdmsr with exception handling */
#define rdmsr_safe(msr, low, high)				\
({								\
	int __err;						\
	u64 __val = native_read_msr_safe((msr), &__err);	\
	(*low) = (u32)__val;					\
	(*high) = (u32)(__val >> 32);				\
	__err;							\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = native_read_msr_safe(msr, &err);
	return err;
}
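
/*
 * Hedged usage sketch (editor's example): the _safe variants return 0
 * on success and a negative errno (-EIO here) if the access faults, so
 * callers can probe MSRs that may not exist on the running CPU:
 *
 *	u64 val;
 *
 *	if (rdmsrl_safe(MSR_AMD64_TSC_RATIO, &val))	// illustrative MSR
 *		pr_warn("TSC ratio MSR not available\n");
 */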

#define rdpmc(counter, low, high)			\
do {							\
	u64 _l = native_read_pmc((counter));		\
	(low)  = (u32)_l;				\
	(high) = (u32)(_l >> 32);			\
} while (0)

#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))

#endif	/* !CONFIG_PARAVIRT */

/*
 * 64-bit version of wrmsr_safe():
 */
static inline int wrmsrl_safe(u32 msr, u64 val)
{
	return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
}

#define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high))

#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)

struct msr *msrs_alloc(void);
void msrs_free(struct msr *msrs);
int msr_set_bit(u32 msr, u8 bit);
int msr_clear_bit(u32 msr, u8 bit);
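
/*
 * Editor's note (hedged): msr_set_bit()/msr_clear_bit() do a
 * read-modify-write of a single MSR bit; as implemented in
 * arch/x86/lib/msr.c they appear to return a negative value on a
 * faulting access and a non-negative value otherwise, e.g.
 * (illustrative MSR and bit):
 *
 *	if (msr_set_bit(MSR_IA32_MISC_ENABLE, 0) < 0)
 *		pr_warn("could not set MSR bit\n");
 */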

#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
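
/*
 * Hedged usage sketch (editor's example): the *_on_cpu() helpers run
 * the MSR access on a specific CPU (via a cross-CPU call on SMP
 * kernels), which matters for per-CPU MSRs. Reading a per-core value
 * from CPU 2, with MSR_IA32_THERM_STATUS as an illustrative index:
 *
 *	u64 val;
 *
 *	if (rdmsrl_safe_on_cpu(2, MSR_IA32_THERM_STATUS, &val))
 *		pr_warn("CPU2: thermal status MSR read failed\n");
 */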
#else  /*  CONFIG_SMP  */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
	return 0;
}
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
	return 0;
}
static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	rdmsrl(msr_no, *q);
	return 0;
}
static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	wrmsrl(msr_no, q);
	return 0;
}
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
}
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
				    u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	return rdmsrl_safe(msr_no, q);
}
static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	return wrmsrl_safe(msr_no, q);
}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return rdmsr_safe_regs(regs);
}
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return wrmsr_safe_regs(regs);
}
#endif	/* CONFIG_SMP */
#endif	/* __ASSEMBLY__ */
#endif	/* _ASM_X86_MSR_H */