#ifndef _ASM_X86_MSR_H
#define _ASM_X86_MSR_H

#include "msr-index.h"

#ifndef __ASSEMBLY__

#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/cpumask.h>
#include <uapi/asm/msr.h>

struct msr {
	union {
		struct {
			u32 l;
			u32 h;
		};
		u64 q;
	};
};

struct msr_info {
	u32 msr_no;
	struct msr reg;
	struct msr *msrs;
	int err;
};

struct msr_regs_info {
	u32 *regs;
	int err;
};

struct saved_msr {
	bool valid;
	struct msr_info info;
};

struct saved_msrs {
	unsigned int num;
	struct saved_msr *array;
};

/*
 * Both i386 and x86_64 return a 64-bit value in edx:eax, but gcc's "A"
 * constraint has different meanings. For i386, "A" means exactly
 * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead,
 * it means rax *or* rdx.
 */
#ifdef CONFIG_X86_64
/* Using 64-bit values saves one instruction clearing the high half of low */
#define DECLARE_ARGS(val, low, high)	unsigned long low, high
#define EAX_EDX_VAL(val, low, high)	((low) | (high) << 32)
#define EAX_EDX_RET(val, low, high)	"=a" (low), "=d" (high)
#else
#define DECLARE_ARGS(val, low, high)	unsigned long long val
#define EAX_EDX_VAL(val, low, high)	(val)
#define EAX_EDX_RET(val, low, high)	"=A" (val)
#endif
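
/*
 * Illustrative sketch, not part of the original header: on 64-bit builds
 * the three macros above expand roughly as follows (names match the
 * helpers defined later in this file):
 *
 *	DECLARE_ARGS(val, low, high);		// unsigned long low, high;
 *	asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
 *	return EAX_EDX_VAL(val, low, high);	// (low) | (high) << 32
 *
 * On 32-bit builds the same pattern compiles to a single unsigned long long
 * bound to the "A" (edx:eax) constraint, so callers stay arch-independent.
 */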

#ifdef CONFIG_TRACEPOINTS
/*
 * Be very careful with includes. This header is prone to include loops.
 */
#include <asm/atomic.h>
#include <linux/tracepoint-defs.h>

extern struct tracepoint __tracepoint_read_msr;
extern struct tracepoint __tracepoint_write_msr;
extern struct tracepoint __tracepoint_rdpmc;
#define msr_tracepoint_active(t) static_key_false(&(t).key)
extern void do_trace_write_msr(unsigned msr, u64 val, int failed);
extern void do_trace_read_msr(unsigned msr, u64 val, int failed);
extern void do_trace_rdpmc(unsigned msr, u64 val, int failed);
#else
#define msr_tracepoint_active(t) false
static inline void do_trace_write_msr(unsigned msr, u64 val, int failed) {}
static inline void do_trace_read_msr(unsigned msr, u64 val, int failed) {}
static inline void do_trace_rdpmc(unsigned msr, u64 val, int failed) {}
#endif

static inline unsigned long long native_read_msr(unsigned int msr)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("1: rdmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_unsafe)
		     : EAX_EDX_RET(val, low, high) : "c" (msr));
	if (msr_tracepoint_active(__tracepoint_read_msr))
		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), 0);
	return EAX_EDX_VAL(val, low, high);
}

static inline unsigned long long native_read_msr_safe(unsigned int msr,
						      int *err)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("2: rdmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err]\n\t"
		     "xorl %%eax, %%eax\n\t"
		     "xorl %%edx, %%edx\n\t"
		     "jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
		     : "c" (msr), [fault] "i" (-EIO));
	if (msr_tracepoint_active(__tracepoint_read_msr))
		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
	return EAX_EDX_VAL(val, low, high);
}

/* Can be uninlined because referenced by paravirt */
notrace static inline void native_write_msr(unsigned int msr,
					    unsigned low, unsigned high)
{
	asm volatile("1: wrmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
		     : : "c" (msr), "a"(low), "d" (high) : "memory");
	if (msr_tracepoint_active(__tracepoint_write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
}

/* Can be uninlined because referenced by paravirt */
notrace static inline int native_write_msr_safe(unsigned int msr,
						unsigned low, unsigned high)
{
	int err;
	asm volatile("2: wrmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=a" (err)
		     : "c" (msr), "0" (low), "d" (high),
		       [fault] "i" (-EIO)
		     : "memory");
	if (msr_tracepoint_active(__tracepoint_write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), err);
	return err;
}

extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);

/**
 * rdtsc() - returns the current TSC without ordering constraints
 *
 * rdtsc() returns the result of RDTSC as a 64-bit integer. The
 * only ordering constraint it supplies is the ordering implied by
 * "asm volatile": it will put the RDTSC in the place you expect. The
 * CPU can and will speculatively execute that RDTSC, though, so the
 * results can be non-monotonic if compared on different CPUs.
 */
static __always_inline unsigned long long rdtsc(void)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));

	return EAX_EDX_VAL(val, low, high);
}
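
/*
 * Illustrative example, not part of the original header: a plain rdtsc()
 * delta is only meaningful on a single CPU and may be skewed by
 * speculation, since nothing anchors the reads to the code between them:
 *
 *	u64 t0 = rdtsc();
 *	do_work();			// hypothetical workload
 *	u64 cycles = rdtsc() - t0;	// rough cycle count, same CPU only
 *
 * Use rdtsc_ordered() below when ordering actually matters.
 */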

/**
 * rdtsc_ordered() - read the current TSC in program order
 *
 * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
 * It is ordered like a load to a global in-memory counter. It should
 * be impossible to observe non-monotonic rdtsc_ordered() behavior
 * across multiple CPUs as long as the TSC is synced.
 */
static __always_inline unsigned long long rdtsc_ordered(void)
{
	/*
	 * The RDTSC instruction is not ordered relative to memory
	 * access. The Intel SDM and the AMD APM are both vague on this
	 * point, but empirically an RDTSC instruction can be
	 * speculatively executed before prior loads. An RDTSC
	 * immediately after an appropriate barrier appears to be
	 * ordered as a normal load, that is, it provides the same
	 * ordering guarantees as reading from a global memory location
	 * that some other imaginary CPU is updating continuously with a
	 * time stamp.
	 */
	alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
			  "lfence", X86_FEATURE_LFENCE_RDTSC);
	return rdtsc();
}
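
/*
 * Illustrative example, not part of the original header: rdtsc_ordered()
 * reads like a load from a continuously updated global counter, so its
 * timestamps can be compared across CPUs (provided the TSCs are synced):
 *
 *	u64 start = rdtsc_ordered();
 *	do_work();			// hypothetical workload
 *	u64 delta = rdtsc_ordered() - start;
 */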

/* Deprecated, keep it for a cycle for easier merging: */
#define rdtscll(now)	do { (now) = rdtsc_ordered(); } while (0)

static inline unsigned long long native_read_pmc(int counter)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
	if (msr_tracepoint_active(__tracepoint_rdpmc))
		do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
	return EAX_EDX_VAL(val, low, high);
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
/*
 * Access to machine-specific registers (available on 586 and better only).
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection), which allows gcc to optimize better.
 */

#define rdmsr(msr, low, high)					\
do {								\
	u64 __val = native_read_msr((msr));			\
	(void)((low) = (u32)__val);				\
	(void)((high) = (u32)(__val >> 32));			\
} while (0)

static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
{
	native_write_msr(msr, low, high);
}

#define rdmsrl(msr, val)			\
	((val) = native_read_msr((msr)))

static inline void wrmsrl(unsigned msr, u64 val)
{
	native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
}
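
/*
 * Illustrative example, not part of the original header: rdmsr()/rdmsrl()
 * assign straight into the caller's variables, as the note above says, so
 * no pointers are involved (the MSR chosen here is only for illustration):
 *
 *	u32 lo, hi;
 *	u64 misc;
 *
 *	rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);	// lo/hi written in place
 *	rdmsrl(MSR_IA32_MISC_ENABLE, misc);	// full 64-bit value
 *	wrmsrl(MSR_IA32_MISC_ENABLE, misc);	// write it back unchanged
 */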

/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
{
	return native_write_msr_safe(msr, low, high);
}

/* rdmsr with exception handling */
#define rdmsr_safe(msr, low, high)				\
({								\
	int __err;						\
	u64 __val = native_read_msr_safe((msr), &__err);	\
	(*low) = (u32)__val;					\
	(*high) = (u32)(__val >> 32);				\
	__err;							\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = native_read_msr_safe(msr, &err);
	return err;
}
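
/*
 * Illustrative example, not part of the original header: the *_safe
 * variants fix up the #GP raised by a non-existent MSR and report -EIO
 * instead, so probing code can key off the return value (the MSR chosen
 * here is only for illustration):
 *
 *	u64 val;
 *
 *	if (rdmsrl_safe(MSR_IA32_MISC_ENABLE, &val))
 *		return -ENODEV;		// MSR not readable on this CPU
 */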

#define rdpmc(counter, low, high)			\
do {							\
	u64 _l = native_read_pmc((counter));		\
	(low)  = (u32)_l;				\
	(high) = (u32)(_l >> 32);			\
} while (0)

#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))

#endif	/* !CONFIG_PARAVIRT */

/*
 * 64-bit version of wrmsr_safe():
 */
static inline int wrmsrl_safe(u32 msr, u64 val)
{
	return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
}

#define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high))

#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)

struct msr *msrs_alloc(void);
void msrs_free(struct msr *msrs);
int msr_set_bit(u32 msr, u8 bit);
int msr_clear_bit(u32 msr, u8 bit);

#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
#else  /* CONFIG_SMP */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
	return 0;
}
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
	return 0;
}
static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	rdmsrl(msr_no, *q);
	return 0;
}
static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	wrmsrl(msr_no, q);
	return 0;
}
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
}
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
				    u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	return rdmsrl_safe(msr_no, q);
}
static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	return wrmsrl_safe(msr_no, q);
}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return rdmsr_safe_regs(regs);
}
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return wrmsr_safe_regs(regs);
}
#endif  /* CONFIG_SMP */
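
/*
 * Illustrative example, not part of the original header: the *_on_cpu()
 * helpers perform the access on a specific CPU (cross-CPU on SMP, a plain
 * local access on UP builds as above), e.g.:
 *
 *	u64 val;
 *
 *	if (rdmsrl_safe_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &val))
 *		pr_warn("CPU %u: MSR read failed\n", cpu);
 *
 * The MSR chosen here is only for illustration.
 */
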
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_MSR_H */