#include <asm/cpumask.h>
#include <uapi/asm/msr.h>

struct msr_regs_info {
	u32 *regs;
	int err;
};

static inline unsigned long long native_read_tscp(unsigned int *aux)
{
	unsigned long low, high;
	asm volatile(".byte 0x0f,0x01,0xf9"
		     : "=a" (low), "=d" (high), "=c" (*aux));
	return low | ((u64)high << 32);
}

/*
 * Both i386 and x86_64 return the 64-bit value in edx:eax, but gcc's "A"
 * constraint has different meanings. For i386, "A" means exactly
 * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead,
 * it means rax *or* rdx.
 */
#ifdef CONFIG_X86_64
/* Using 64-bit values saves one instruction clearing the high half of low */
#define DECLARE_ARGS(val, low, high)	unsigned long low, high
#define EAX_EDX_VAL(val, low, high)	((low) | (high) << 32)
#define EAX_EDX_RET(val, low, high)	"=a" (low), "=d" (high)
#else
#define DECLARE_ARGS(val, low, high)	unsigned long long val
#define EAX_EDX_VAL(val, low, high)	(val)
#define EAX_EDX_RET(val, low, high)	"=A" (val)
#endif /* CONFIG_X86_64 */

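/*
 * Illustrative expansion (not part of the original header), assuming
 * CONFIG_X86_64: a helper built from these macros reads edx:eax into two
 * 64-bit temporaries and recombines them, e.g.
 *
 *	DECLARE_ARGS(val, low, high);		// unsigned long low, high;
 *	asm volatile("rdmsr"
 *		     : EAX_EDX_RET(val, low, high)	// "=a" (low), "=d" (high)
 *		     : "c" (msr));
 *	return EAX_EDX_VAL(val, low, high);	// (low) | (high) << 32
 *
 * On 32-bit the same code collapses to a single "=A" (val) output.
 */
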
#ifdef CONFIG_TRACEPOINTS
/*
 * Be very careful with includes. This header is prone to include loops.
 */
#include <asm/atomic.h>
#include <linux/tracepoint-defs.h>

extern struct tracepoint __tracepoint_read_msr;
extern struct tracepoint __tracepoint_write_msr;
extern struct tracepoint __tracepoint_rdpmc;
#define msr_tracepoint_active(t) static_key_false(&(t).key)
extern void do_trace_write_msr(unsigned msr, u64 val, int failed);
extern void do_trace_read_msr(unsigned msr, u64 val, int failed);
extern void do_trace_rdpmc(unsigned msr, u64 val, int failed);
#else
#define msr_tracepoint_active(t) false
static inline void do_trace_write_msr(unsigned msr, u64 val, int failed) {}
static inline void do_trace_read_msr(unsigned msr, u64 val, int failed) {}
static inline void do_trace_rdpmc(unsigned msr, u64 val, int failed) {}
#endif /* CONFIG_TRACEPOINTS */

static inline unsigned long long native_read_msr(unsigned int msr)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
	if (msr_tracepoint_active(__tracepoint_read_msr))
		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), 0);
	return EAX_EDX_VAL(val, low, high);
}

static inline unsigned long long native_read_msr_safe(unsigned int msr,
						       int *err)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("2: rdmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
		     : "c" (msr), [fault] "i" (-EIO));
	if (msr_tracepoint_active(__tracepoint_read_msr))
		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
	return EAX_EDX_VAL(val, low, high);
}

static inline void native_write_msr(unsigned int msr,
				    unsigned low, unsigned high)
{
	asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
	if (msr_tracepoint_active(__tracepoint_write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
}

/* Can be uninlined because referenced by paravirt */
notrace static inline int native_write_msr_safe(unsigned int msr,
						unsigned low, unsigned high)
{
	int err;

	asm volatile("2: wrmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=a" (err)
		     : "c" (msr), "0" (low), "d" (high),
		       [fault] "i" (-EIO)
		     : "memory");
	if (msr_tracepoint_active(__tracepoint_write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), err);
	return err;
}

extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);

/**
 * rdtsc() - returns the current TSC without ordering constraints
 *
 * rdtsc() returns the result of RDTSC as a 64-bit integer.  The
 * only ordering constraint it supplies is the ordering implied by
 * "asm volatile": it will put the RDTSC in the place you expect.  The
 * CPU can and will speculatively execute that RDTSC, though, so the
 * results can be non-monotonic if compared on different CPUs.
 */
static __always_inline unsigned long long rdtsc(void)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));

	return EAX_EDX_VAL(val, low, high);
}

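/*
 * Illustrative usage (not part of the original header): because rdtsc() is
 * unordered, back-to-back reads on one CPU are fine for rough cycle counts,
 * but comparisons across CPUs may appear to go backwards.  The helper name
 * below is hypothetical.
 *
 *	static u64 example_cycles_now(void)
 *	{
 *		return rdtsc();		// may be speculated past earlier loads
 *	}
 */
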
/**
 * rdtsc_ordered() - read the current TSC in program order
 *
 * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
 * It is ordered like a load to a global in-memory counter.  It should
 * be impossible to observe non-monotonic rdtsc() behavior
 * across multiple CPUs as long as the TSC is synced.
 */
static __always_inline unsigned long long rdtsc_ordered(void)
{
	/*
	 * The RDTSC instruction is not ordered relative to memory
	 * access.  The Intel SDM and the AMD APM are both vague on this
	 * point, but empirically an RDTSC instruction can be
	 * speculatively executed before prior loads.  An RDTSC
	 * immediately after an appropriate barrier appears to be
	 * ordered as a normal load, that is, it provides the same
	 * ordering guarantees as reading from a global memory location
	 * that some other imaginary CPU is updating continuously with a
	 * time stamp.
	 */
	alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
		      "lfence", X86_FEATURE_LFENCE_RDTSC);
	return rdtsc();
}

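/*
 * Illustrative usage (not part of the original header): timing a code
 * region with ordered TSC reads so the measurement is not distorted by
 * speculation.  The function being timed is hypothetical.
 *
 *	u64 start, cycles;
 *
 *	start = rdtsc_ordered();
 *	do_something();
 *	cycles = rdtsc_ordered() - start;
 */
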
/* Deprecated, keep it for a cycle for easier merging: */
#define rdtscll(now)	do { (now) = rdtsc_ordered(); } while (0)

static inline unsigned long long native_read_pmc(int counter)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
	if (msr_tracepoint_active(__tracepoint_rdpmc))
		do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
	return EAX_EDX_VAL(val, low, high);
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
/*
 * Access to machine-specific registers (available on 586 and better only)
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection), which allows gcc to optimize better
 */

#define rdmsr(msr, low, high)					\
do {								\
	u64 __val = native_read_msr((msr));			\
	(void)((low) = (u32)__val);				\
	(void)((high) = (u32)(__val >> 32));			\
} while (0)

static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
{
	native_write_msr(msr, low, high);
}

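/*
 * Illustrative usage (not part of the original header): rdmsr() writes
 * straight into the two l-values passed in, so no pointers are needed;
 * wrmsr() takes the two halves by value.  The MSR constant is only an
 * example.
 *
 *	u32 lo, hi;
 *
 *	rdmsr(MSR_IA32_APICBASE, lo, hi);
 *	wrmsr(MSR_IA32_APICBASE, lo, hi);
 */
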
#define rdmsrl(msr, val)			\
	((val) = native_read_msr((msr)))

static inline void wrmsrl(unsigned msr, u64 val)
{
	native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
}

/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
{
	return native_write_msr_safe(msr, low, high);
}

/* rdmsr with exception handling */
#define rdmsr_safe(msr, low, high)				\
({								\
	int __err;						\
	u64 __val = native_read_msr_safe((msr), &__err);	\
	(*low) = (u32)__val;					\
	(*high) = (u32)(__val >> 32);				\
	__err;							\
})

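/*
 * Illustrative usage (not part of the original header): the _safe variants
 * report a failed access instead of faulting, so callers can probe MSRs
 * that may not exist on the running CPU.  The MSR constant and error code
 * are only examples.
 *
 *	u32 lo, hi;
 *
 *	if (rdmsr_safe(MSR_EFER, &lo, &hi))
 *		return -ENODEV;		// MSR not readable here
 */
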
static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = native_read_msr_safe(msr, &err);
	return err;
}

#define rdpmc(counter, low, high)			\
do {							\
	u64 _l = native_read_pmc((counter));		\
	(low)  = (u32)_l;				\
	(high) = (u32)(_l >> 32);			\
} while (0)

#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))

#endif	/* !CONFIG_PARAVIRT */

/*
 * 64-bit version of wrmsr_safe():
 */
static inline int wrmsrl_safe(u32 msr, u64 val)
{
	return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
}

#define write_tsc(low, high)  wrmsr(MSR_IA32_TSC, (low), (high))

#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)

struct msr *msrs_alloc(void);
void msrs_free(struct msr *msrs);
int msr_set_bit(u32 msr, u8 bit);
int msr_clear_bit(u32 msr, u8 bit);

#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
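/*
 * Illustrative usage (not part of the original header): reading an MSR on
 * a specific CPU without migrating there.  The CPU number and MSR constant
 * are only examples.
 *
 *	u64 efer;
 *
 *	if (rdmsrl_safe_on_cpu(1, MSR_EFER, &efer))
 *		pr_warn("could not read EFER on CPU 1\n");
 */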
#else  /* CONFIG_SMP */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
	return 0;
}
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
	return 0;
}
static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	rdmsrl(msr_no, *q);
	return 0;
}
static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	wrmsrl(msr_no, q);
	return 0;
}
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
}
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
				    u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	return rdmsrl_safe(msr_no, q);
}
static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	return wrmsrl_safe(msr_no, q);
}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return rdmsr_safe_regs(regs);
}
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return wrmsr_safe_regs(regs);
}
#endif  /* CONFIG_SMP */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_MSR_H */