#ifndef _ASM_X86_SYSTEM_H
#define _ASM_X86_SYSTEM_H

#include <asm/asm.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>
#include <asm/nops.h>

#include <linux/kernel.h>
#include <linux/irqflags.h>

/* entries in ARCH_DLINFO: */
#ifdef CONFIG_IA32_EMULATION
# define AT_VECTOR_SIZE_ARCH 2
#else
# define AT_VECTOR_SIZE_ARCH 1
#endif

struct task_struct; /* one of the stranger aspects of C forward declarations */
struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next);

#ifdef CONFIG_X86_32

#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary							\
	"movl "__percpu_arg([current_task])",%%ebx\n\t"			\
	"movl %P[task_canary](%%ebx),%%ebx\n\t"				\
	"movl %%ebx,"__percpu_arg([stack_canary])"\n\t"
#define __switch_canary_oparam						\
	, [stack_canary] "=m" (per_cpu_var(stack_canary))
#define __switch_canary_iparam						\
	, [current_task] "m" (per_cpu_var(current_task))		\
	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else	/* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif	/* CC_STACKPROTECTOR */

/*
 * Saving eflags is important. It switches not only IOPL between tasks,
 * but also protects other tasks from NT leaking through sysenter etc.
 */
#define switch_to(prev, next, last)					\
do {									\
	/*								\
	 * Context-switching clobbers all registers, so we clobber	\
	 * them explicitly, via unused output variables.		\
	 * (EAX and EBP are not listed because EBP is saved/restored	\
	 *  explicitly for wchan access and EAX is the return value of	\
	 *  __switch_to())						\
	 */								\
	unsigned long ebx, ecx, edx, esi, edi;				\
									\
	asm volatile("pushfl\n\t"		/* save    flags */	\
		     "pushl %%ebp\n\t"		/* save    EBP   */	\
		     "movl %%esp,%[prev_sp]\n\t"  /* save    ESP   */	\
		     "movl %[next_sp],%%esp\n\t"  /* restore ESP   */	\
		     "movl $1f,%[prev_ip]\n\t"	/* save    EIP   */	\
		     "pushl %[next_ip]\n\t"	/* restore EIP   */	\
		     "jmp __switch_to\n"	/* regparm call  */	\
		     "1:\t"						\
		     __switch_canary					\
		     "popl %%ebp\n\t"		/* restore EBP   */	\
		     "popfl\n"			/* restore flags */	\
									\
		     /* output parameters */				\
		     : [prev_sp] "=m" (prev->thread.sp),		\
		       [prev_ip] "=m" (prev->thread.ip),		\
		       "=a" (last),					\
									\
		       /* clobbered output registers: */		\
		       "=b" (ebx), "=c" (ecx), "=d" (edx),		\
		       "=S" (esi), "=D" (edi)				\
									\
		       __switch_canary_oparam				\
									\
		       /* input parameters: */				\
		     : [next_sp] "m" (next->thread.sp),			\
		       [next_ip] "m" (next->thread.ip),			\
									\
		       /* regparm parameters for __switch_to(): */	\
		       [prev] "a" (prev),				\
		       [next] "d" (next)				\
									\
		       __switch_canary_iparam				\
									\
		     : /* reloaded segment registers */			\
		       "memory");					\
} while (0)
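
/*
 * Usage sketch (not part of this header): the scheduler's context_switch()
 * invokes this as switch_to(prev, next, prev).  Execution resumes on the
 * new task's stack, and "last" (the value __switch_to() returns in EAX)
 * tells the resumed code which task actually handed the CPU over to it.
 */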

/*
 * Disable hlt during certain critical I/O operations.
 */
#define HAVE_DISABLE_HLT
#else
#define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"

#define __EXTRA_CLOBBER  \
	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
	  "r12", "r13", "r14", "r15"

#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary						  \
	"movq %P[task_canary](%%rsi),%%r8\n\t"			  \
	"movq %%r8,"__percpu_arg([gs_canary])"\n\t"
#define __switch_canary_oparam					  \
	, [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary))
#define __switch_canary_iparam					  \
	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else	/* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif	/* CC_STACKPROTECTOR */

/* Save and restore flags to clear and handle a leaking NT flag */
#define switch_to(prev, next, last) \
	asm volatile(SAVE_CONTEXT					  \
	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
	     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
	     "call __switch_to\n\t"					  \
	     ".globl thread_return\n"					  \
	     "thread_return:\n\t"					  \
	     "movq "__percpu_arg([current_task])",%%rsi\n\t"		  \
	     __switch_canary						  \
	     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
	     LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t"	  \
	     "movq %%rax,%%rdi\n\t"					  \
	     "jc ret_from_fork\n\t"					  \
	     RESTORE_CONTEXT						  \
	     : "=a" (last)						  \
	       __switch_canary_oparam					  \
	     : [next] "S" (next), [prev] "D" (prev),			  \
	       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
	       [ti_flags] "i" (offsetof(struct thread_info, flags)),	  \
	       [tif_fork] "i" (TIF_FORK),				  \
	       [thread_info] "i" (offsetof(struct task_struct, stack)),  \
	       [current_task] "m" (per_cpu_var(current_task))		  \
	       __switch_canary_iparam					  \
	     : "memory", "cc" __EXTRA_CLOBBER)
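
/*
 * Descriptive note: the "btr %[tif_fork]" / "jc ret_from_fork" pair makes a
 * freshly forked child (which has TIF_FORK set in its thread_info flags)
 * detour through ret_from_fork the first time it is switched in; every
 * other task falls through to RESTORE_CONTEXT.  As on 32-bit, the previous
 * task is returned by __switch_to() in %rax and ends up in "last".
 */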
#endif

#ifdef __KERNEL__
#define _set_base(addr, base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %%dl,%2\n\t" \
	"movb %%dh,%3" \
	:"=&d" (__pr) \
	:"m" (*((addr)+2)), \
	 "m" (*((addr)+4)), \
	 "m" (*((addr)+7)), \
	 "0" (base) \
	); } while (0)

#define _set_limit(addr, limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %2,%%dh\n\t" \
	"andb $0xf0,%%dh\n\t" \
	"orb %%dh,%%dl\n\t" \
	"movb %%dl,%2" \
	:"=&d" (__lr) \
	:"m" (*(addr)), \
	 "m" (*((addr)+6)), \
	 "0" (limit) \
	); } while (0)

#define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base))
#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))
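
/*
 * Note: _set_base()/_set_limit() patch a segment descriptor in place.  In a
 * descriptor the base is split across bytes 2-4 and 7 and the limit across
 * bytes 0-1 plus the low nibble of byte 6, which is why the asm above pokes
 * exactly those offsets.
 */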

extern void native_load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong.
 */
#define loadsegment(seg, value)			\
	asm volatile("\n"			\
		     "1:\t"			\
		     "movl %k0,%%" #seg "\n"	\
		     "2:\n"			\
		     ".section .fixup,\"ax\"\n"	\
		     "3:\t"			\
		     "movl %k1, %%" #seg "\n\t"	\
		     "jmp 2b\n"			\
		     ".previous\n"		\
		     _ASM_EXTABLE(1b,3b)	\
		     : :"r" (value), "r" (0) : "memory")
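
/*
 * Usage sketch (illustrative): loadsegment(fs, 0) or
 * loadsegment(ds, __USER_DS).  If the selector turns out to be invalid,
 * the exception fixup loads the null selector instead of oopsing.
 */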

/*
 * Save a segment register away
 */
#define savesegment(seg, value)				\
	asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
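
/*
 * Usage sketch (illustrative): savesegment(gs, gsindex) reads the current
 * %gs selector into a variable, e.g. so the context-switch code can decide
 * whether it needs to be reloaded for the next task.
 */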

/*
 * x86_32 user gs accessors.
 */
#ifdef CONFIG_X86_32
#ifdef CONFIG_X86_32_LAZY_GS
#define get_user_gs(regs)	(u16)({unsigned long v; savesegment(gs, v); v;})
#define set_user_gs(regs, v)	loadsegment(gs, (unsigned long)(v))
#define task_user_gs(tsk)	((tsk)->thread.gs)
#define lazy_save_gs(v)		savesegment(gs, (v))
#define lazy_load_gs(v)		loadsegment(gs, (v))
#else	/* X86_32_LAZY_GS */
#define get_user_gs(regs)	(u16)((regs)->gs)
#define set_user_gs(regs, v)	do { (regs)->gs = (v); } while (0)
#define task_user_gs(tsk)	(task_pt_regs(tsk)->gs)
#define lazy_save_gs(v)		do { } while (0)
#define lazy_load_gs(v)		do { } while (0)
#endif	/* X86_32_LAZY_GS */
#endif	/* X86_32 */
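
/*
 * Descriptive note (assumption about the config): with X86_32_LAZY_GS the
 * user %gs is not saved into pt_regs on every kernel entry; it stays in the
 * register/thread struct until a context switch, so the accessors touch the
 * register directly.  Without it, %gs is saved eagerly and the accessors
 * simply read or write pt_regs.
 */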

static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
	return __limit + 1;
}
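
/*
 * Note: lsll fetches the segment limit for the given selector; the +1
 * turns the inclusive limit into a size in bytes.
 */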

static inline void native_clts(void)
{
	asm volatile("clts");
}

/*
 * Volatile isn't enough to prevent the compiler from reordering the
 * read/write functions for the control registers and messing everything up.
 * A memory clobber would solve the problem, but would prevent reordering of
 * all loads/stores around it, which can hurt performance. The solution is to
 * use a variable and mimic reads and writes to it to enforce serialization.
 */
static unsigned long __force_order;
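
/*
 * Descriptive sketch: the read accessors below list __force_order as an
 * output ("=m") and the write accessors list it as an input ("m"), so the
 * compiler sees an artificial dependency between them and will not reorder,
 * say, a write_cr3() against a neighbouring read_cr3(), without paying for
 * a full "memory" clobber.
 */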

static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr0(unsigned long val)
{
	asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr2(unsigned long val)
{
	asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr3(void)
{
	unsigned long val;
	asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr3(unsigned long val)
{
	asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
	asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline unsigned long native_read_cr4_safe(void)
{
	unsigned long val;
	/* This could fault if %cr4 does not exist. On x86_64, CR4 always
	 * exists, so this never faults. */
#ifdef CONFIG_X86_32
	asm volatile("1: mov %%cr4, %0\n"
		     "2:\n"
		     _ASM_EXTABLE(1b, 2b)
		     : "=r" (val), "=m" (__force_order) : "0" (0));
#else
	val = native_read_cr4();
#endif
	return val;
}

static inline void native_write_cr4(unsigned long val)
{
	asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
}

#ifdef CONFIG_X86_64
static inline unsigned long native_read_cr8(void)
{
	unsigned long cr8;
	asm volatile("movq %%cr8,%0" : "=r" (cr8));
	return cr8;
}

static inline void native_write_cr8(unsigned long val)
{
	asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
}
#endif

static inline void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define read_cr0()	(native_read_cr0())
#define write_cr0(x)	(native_write_cr0(x))
#define read_cr2()	(native_read_cr2())
#define write_cr2(x)	(native_write_cr2(x))
#define read_cr3()	(native_read_cr3())
#define write_cr3(x)	(native_write_cr3(x))
#define read_cr4()	(native_read_cr4())
#define read_cr4_safe()	(native_read_cr4_safe())
#define write_cr4(x)	(native_write_cr4(x))
#define wbinvd()	(native_wbinvd())
#ifdef CONFIG_X86_64
#define read_cr8()	(native_read_cr8())
#define write_cr8(x)	(native_write_cr8(x))
#define load_gs_index	native_load_gs_index
#endif

/* Clear the 'TS' bit */
#define clts()		(native_clts())

#endif /* CONFIG_PARAVIRT */

#define stts() write_cr0(read_cr0() | X86_CR0_TS)
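
/*
 * Usage sketch: clts()/stts() drive lazy FPU switching.  stts() sets CR0.TS
 * so the next FPU/SSE instruction raises #NM, and the trap handler does
 * clts() before restoring the task's FPU state.
 */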

#endif /* __KERNEL__ */

static inline void clflush(volatile void *__p)
{
	asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
}
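
/*
 * Usage sketch: clflush() flushes a single cache line.  Callers such as
 * clflush_cache_range() walk a region line by line and issue mb() before
 * and after the loop, since clflush is only guaranteed to be ordered by
 * mfence, not by ordinary stores.
 */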

#define nop() asm volatile ("nop")

void disable_hlt(void);
void enable_hlt(void);

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);

void stop_this_cpu(void *dummy);

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#ifdef CONFIG_X86_32
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif
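
/*
 * Illustrative pairing (not from this file): a producer/consumer handshake
 * uses wmb() on the writer side and rmb() on the reader side:
 *
 *	writer:				reader:
 *
 *	buf->data = ...;		if (buf->ready) {
 *	wmb();					rmb();
 *	buf->ready = 1;				use(buf->data);
 *					}
 */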

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier. All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads. This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies. See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends(). However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b". Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb()	rmb()
#else
# define smp_rmb()	barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb()	wmb()
#else
# define smp_wmb()	barrier()
#endif
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
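
/*
 * Usage sketch: set_mb() stores a value and acts as a full barrier, e.g.
 * set_mb(current->state, TASK_INTERRUPTIBLE) before re-checking a sleep
 * condition, so the state change is visible before the check.
 */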
470 | ||
fde1b3fa AK |
471 | /* |
472 | * Stop RDTSC speculation. This is needed when you need to use RDTSC | |
473 | * (or get_cycles or vread that possibly accesses the TSC) in a defined | |
474 | * code region. | |
475 | * | |
476 | * (Could use an alternative three way for this if there was one.) | |
477 | */ | |
478 | static inline void rdtsc_barrier(void) | |
479 | { | |
480 | alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC); | |
481 | alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); | |
482 | } | |
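
/*
 * Usage sketch: callers bracket the TSC read, e.g.
 *
 *	rdtsc_barrier();
 *	t = get_cycles();
 *	rdtsc_barrier();
 *
 * so the rdtsc cannot be speculated into or out of the timed region.
 */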

#endif /* _ASM_X86_SYSTEM_H */