#ifndef _ASM_X86_SYSTEM_H
#define _ASM_X86_SYSTEM_H

#include <asm/asm.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>
#include <asm/nops.h>

#include <linux/kernel.h>
#include <linux/irqflags.h>

/* entries in ARCH_DLINFO: */
#ifdef CONFIG_IA32_EMULATION
# define AT_VECTOR_SIZE_ARCH 2
#else
# define AT_VECTOR_SIZE_ARCH 1
#endif

struct task_struct; /* one of the stranger aspects of C forward declarations */
struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next);
struct tss_struct;
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss);

#ifdef CONFIG_X86_32

#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary						\
	"movl %P[task_canary](%[next]), %%ebx\n\t"		\
	"movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
#define __switch_canary_oparam					\
	, [stack_canary] "=m" (per_cpu_var(stack_canary.canary))
#define __switch_canary_iparam					\
	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else	/* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif	/* CC_STACKPROTECTOR */

/*
 * Saving eflags is important. Not only does it switch IOPL between tasks,
 * it also protects other tasks from NT leaking through sysenter etc.
 */
#define switch_to(prev, next, last)					\
do {									\
	/*								\
	 * Context-switching clobbers all registers, so we clobber	\
	 * them explicitly, via unused output variables.		\
	 * (EAX and EBP are not listed because EBP is saved/restored	\
	 *  explicitly for wchan access and EAX is the return value of	\
	 *  __switch_to())						\
	 */								\
	unsigned long ebx, ecx, edx, esi, edi;				\
									\
	asm volatile("pushfl\n\t"		/* save    flags */	\
		     "pushl %%ebp\n\t"		/* save    EBP   */	\
		     "movl %%esp,%[prev_sp]\n\t"  /* save    ESP   */	\
		     "movl %[next_sp],%%esp\n\t"  /* restore ESP   */	\
		     "movl $1f,%[prev_ip]\n\t"	/* save    EIP   */	\
		     "pushl %[next_ip]\n\t"	/* restore EIP   */	\
		     __switch_canary					\
		     "jmp __switch_to\n"	/* regparm call  */	\
		     "1:\t"						\
		     "popl %%ebp\n\t"		/* restore EBP   */	\
		     "popfl\n"			/* restore flags */	\
									\
		     /* output parameters */				\
		     : [prev_sp] "=m" (prev->thread.sp),		\
		       [prev_ip] "=m" (prev->thread.ip),		\
		       "=a" (last),					\
									\
		       /* clobbered output registers: */		\
		       "=b" (ebx), "=c" (ecx), "=d" (edx),		\
		       "=S" (esi), "=D" (edi)				\
									\
		       __switch_canary_oparam				\
									\
		       /* input parameters: */				\
		     : [next_sp]  "m" (next->thread.sp),		\
		       [next_ip]  "m" (next->thread.ip),		\
									\
		       /* regparm parameters for __switch_to(): */	\
		       [prev]     "a" (prev),				\
		       [next]     "d" (next)				\
									\
		       __switch_canary_iparam				\
									\
		     : /* reloaded segment registers */			\
			"memory");					\
} while (0)

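/*
 * Usage sketch (illustrative, not part of this header): the scheduler's
 * context_switch() is effectively the only caller, roughly
 *
 *	switch_to(prev, next, prev);
 *
 * where the "last" argument is overwritten with the task this CPU actually
 * switched away from once the original task is scheduled back in; that may
 * no longer be the same task as the local "prev".
 */
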
/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
#else
#define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"

#define __EXTRA_CLOBBER  \
	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
	  "r12", "r13", "r14", "r15"

#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary						  \
	"movq %P[task_canary](%%rsi),%%r8\n\t"			  \
	"movq %%r8,"__percpu_arg([gs_canary])"\n\t"
#define __switch_canary_oparam					  \
	, [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary))
#define __switch_canary_iparam					  \
	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else	/* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif	/* CC_STACKPROTECTOR */

/* Save/restore flags to clear and handle a leaking NT flag */
#define switch_to(prev, next, last) \
	asm volatile(SAVE_CONTEXT					  \
	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
	     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
	     "call __switch_to\n\t"					  \
	     "movq "__percpu_arg([current_task])",%%rsi\n\t"		  \
	     __switch_canary						  \
	     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
	     "movq %%rax,%%rdi\n\t"					  \
	     "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t"		  \
	     "jnz ret_from_fork\n\t"					  \
	     RESTORE_CONTEXT						  \
	     : "=a" (last)						  \
	       __switch_canary_oparam					  \
	     : [next] "S" (next), [prev] "D" (prev),			  \
	       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
	       [ti_flags] "i" (offsetof(struct thread_info, flags)),	  \
	       [_tif_fork] "i" (_TIF_FORK),				  \
	       [thread_info] "i" (offsetof(struct task_struct, stack)),  \
	       [current_task] "m" (per_cpu_var(current_task))		  \
	       __switch_canary_iparam					  \
	     : "memory", "cc" __EXTRA_CLOBBER)
#endif

#ifdef __KERNEL__

extern void native_load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg, value)			\
	asm volatile("\n"			\
		     "1:\t"			\
		     "movl %k0,%%" #seg "\n"	\
		     "2:\n"			\
		     ".section .fixup,\"ax\"\n"	\
		     "3:\t"			\
		     "movl %k1, %%" #seg "\n\t"	\
		     "jmp 2b\n"			\
		     ".previous\n"		\
		     _ASM_EXTABLE(1b,3b)	\
		     : :"r" (value), "r" (0) : "memory")

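/*
 * Usage sketch (illustrative; __USER_DS comes from <asm/segment.h>):
 * reload a data segment register, falling back to the NULL selector via
 * the fixup above if the selector turns out to be invalid.
 *
 *	loadsegment(ds, __USER_DS);
 */
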
/*
 * Save a segment register away
 */
#define savesegment(seg, value)				\
	asm("mov %%" #seg ",%0":"=r" (value) : : "memory")

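/*
 * Usage sketch (illustrative): snapshot the current %fs selector into a
 * local variable, e.g. while saving user segment state.
 *
 *	unsigned int fsindex;
 *	savesegment(fs, fsindex);
 */
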
/*
 * x86_32 user gs accessors.
 */
#ifdef CONFIG_X86_32
#ifdef CONFIG_X86_32_LAZY_GS
#define get_user_gs(regs)	(u16)({unsigned long v; savesegment(gs, v); v;})
#define set_user_gs(regs, v)	loadsegment(gs, (unsigned long)(v))
#define task_user_gs(tsk)	((tsk)->thread.gs)
#define lazy_save_gs(v)		savesegment(gs, (v))
#define lazy_load_gs(v)		loadsegment(gs, (v))
#else	/* X86_32_LAZY_GS */
#define get_user_gs(regs)	(u16)((regs)->gs)
#define set_user_gs(regs, v)	do { (regs)->gs = (v); } while (0)
#define task_user_gs(tsk)	(task_pt_regs(tsk)->gs)
#define lazy_save_gs(v)		do { } while (0)
#define lazy_load_gs(v)		do { } while (0)
#endif	/* X86_32_LAZY_GS */
#endif	/* X86_32 */

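/*
 * Usage sketch (illustrative): ptrace-style access to a task's user %gs,
 * written the same way whether or not the lazy-GS flavour is configured.
 *
 *	struct pt_regs *regs = task_pt_regs(task);
 *	u16 gs = get_user_gs(regs);
 *	set_user_gs(regs, gs);
 */
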
static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
	return __limit + 1;
}

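/*
 * Usage sketch (illustrative): query the byte limit of the segment a
 * selector refers to, e.g. the current code segment.
 *
 *	unsigned long seg, limit;
 *	savesegment(cs, seg);
 *	limit = get_limit(seg);
 */
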
static inline void native_clts(void)
{
	asm volatile("clts");
}

/*
 * Volatile isn't enough to prevent the compiler from reordering the
 * read/write functions for the control registers and messing everything up.
 * A memory clobber would solve the problem, but would prevent reordering of
 * all loads/stores around it, which can hurt performance. The solution is to
 * use a variable and mimic reads and writes to it to enforce serialization.
 */
static unsigned long __force_order;

static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr0(unsigned long val)
{
	asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr2(unsigned long val)
{
	asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr3(void)
{
	unsigned long val;
	asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr3(unsigned long val)
{
	asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
	asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline unsigned long native_read_cr4_safe(void)
{
	unsigned long val;
	/* This could fault if %cr4 does not exist. On x86_64, %cr4 always
	 * exists, so it will never fail. */
#ifdef CONFIG_X86_32
	asm volatile("1: mov %%cr4, %0\n"
		     "2:\n"
		     _ASM_EXTABLE(1b, 2b)
		     : "=r" (val), "=m" (__force_order) : "0" (0));
#else
	val = native_read_cr4();
#endif
	return val;
}

static inline void native_write_cr4(unsigned long val)
{
	asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
}

#ifdef CONFIG_X86_64
static inline unsigned long native_read_cr8(void)
{
	unsigned long cr8;
	asm volatile("movq %%cr8,%0" : "=r" (cr8));
	return cr8;
}

static inline void native_write_cr8(unsigned long val)
{
	asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
}
#endif

static inline void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define read_cr0()	(native_read_cr0())
#define write_cr0(x)	(native_write_cr0(x))
#define read_cr2()	(native_read_cr2())
#define write_cr2(x)	(native_write_cr2(x))
#define read_cr3()	(native_read_cr3())
#define write_cr3(x)	(native_write_cr3(x))
#define read_cr4()	(native_read_cr4())
#define read_cr4_safe()	(native_read_cr4_safe())
#define write_cr4(x)	(native_write_cr4(x))
#define wbinvd()	(native_wbinvd())
#ifdef CONFIG_X86_64
#define read_cr8()	(native_read_cr8())
#define write_cr8(x)	(native_write_cr8(x))
#define load_gs_index	native_load_gs_index
#endif

/* Clear the 'TS' bit */
#define clts()		(native_clts())

#endif /* CONFIG_PARAVIRT */

#define stts() write_cr0(read_cr0() | X86_CR0_TS)

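/*
 * Usage sketch (illustrative): lazy FPU context switching sets TS so the
 * next FP instruction traps, and clears it once the FPU state has been
 * restored for the current task:
 *
 *	stts();		set CR0.TS, next FP use raises #NM
 *	...
 *	clts();		FPU state restored, FP instructions allowed again
 */
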
#endif /* __KERNEL__ */

static inline void clflush(volatile void *__p)
{
	asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
}
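
/*
 * Usage sketch (illustrative, "vaddr" being any mapped address): flush one
 * cache line and fence around it, since clflush is only ordered against
 * other accesses by mfence.
 *
 *	mb();
 *	clflush(vaddr);
 *	mb();
 */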

#define nop() asm volatile ("nop")

void disable_hlt(void);
void enable_hlt(void);

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);

void stop_this_cpu(void *dummy);

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#ifdef CONFIG_X86_32
/*
 * Some non-Intel clones support out-of-order stores. wmb() ceases to be a
 * nop for these.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb()	rmb()
#else
# define smp_rmb()	barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb()	wmb()
#else
# define smp_wmb()	barrier()
#endif
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
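
/*
 * Pairing sketch (illustrative): smp_wmb() on the producer side pairs with
 * smp_rmb() on the consumer side, in the same style as the examples above:
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	data = 42;			if (flag) {
 *	smp_wmb();				smp_rmb();
 *	flag = 1;				use(data);
 *					}
 * </programlisting>
 */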

/*
 * Stop RDTSC speculation. This is needed when you need to use RDTSC
 * (or get_cycles or vread that possibly accesses the TSC) in a defined
 * code region.
 *
 * (Could use a three-way alternative for this if there was one.)
 */
static inline void rdtsc_barrier(void)
{
	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}
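
/*
 * Usage sketch (illustrative; get_cycles() comes from <asm/tsc.h>): bracket
 * a timestamp read so that surrounding instructions are not speculated
 * across it.
 *
 *	rdtsc_barrier();
 *	t = get_cycles();
 *	rdtsc_barrier();
 */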

#endif /* _ASM_X86_SYSTEM_H */