#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <linux/bitops.h> /* for LOCK_PREFIX */

#ifdef __KERNEL__

struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));

#define switch_to(prev,next,last) do {					\
	unsigned long esi,edi;						\
	asm volatile("pushl %%ebp\n\t"					\
		     "movl %%esp,%0\n\t"	/* save ESP */		\
		     "movl %5,%%esp\n\t"	/* restore ESP */	\
		     "movl $1f,%1\n\t"		/* save EIP */		\
		     "pushl %6\n\t"		/* restore EIP */	\
		     "jmp __switch_to\n"				\
		     "1:\t"						\
		     "popl %%ebp\n\t"					\
		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),	\
		      "=a" (last),"=S" (esi),"=D" (edi)			\
		     :"m" (next->thread.esp),"m" (next->thread.eip),	\
		      "2" (prev), "d" (next));				\
} while (0)

#define _set_base(addr,base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %%dl,%2\n\t" \
	"movb %%dh,%3" \
	:"=&d" (__pr) \
	:"m" (*((addr)+2)), \
	 "m" (*((addr)+4)), \
	 "m" (*((addr)+7)), \
	 "0" (base) \
	); } while(0)

#define _set_limit(addr,limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %2,%%dh\n\t" \
	"andb $0xf0,%%dh\n\t" \
	"orb %%dh,%%dl\n\t" \
	"movb %%dl,%2" \
	:"=&d" (__lr) \
	:"m" (*(addr)), \
	 "m" (*((addr)+6)), \
	 "0" (limit) \
	); } while(0)

#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )

static inline unsigned long _get_base(char * addr)
{
	unsigned long __base;
	__asm__("movb %3,%%dh\n\t"
		"movb %2,%%dl\n\t"
		"shll $16,%%edx\n\t"
		"movw %1,%%dx"
		:"=&d" (__base)
		:"m" (*((addr)+2)),
		 "m" (*((addr)+4)),
		 "m" (*((addr)+7)));
	return __base;
}

#define get_base(ldt) _get_base( ((char *)&(ldt)) )

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value)			\
	asm volatile("\n"			\
		"1:\t"				\
		"mov %0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"pushl $0\n\t"			\
		"popl %%" #seg "\n\t"		\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		".align 4\n\t"			\
		".long 1b,3b\n"			\
		".previous"			\
		: :"rm" (value))

/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
	asm volatile("mov %%" #seg ",%0":"=rm" (value))
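
/*
 * Example usage (an illustrative sketch, not something this header defines;
 * "saved_fs" is an invented name). A caller typically saves a segment
 * register before reloading it:
 *
 *	unsigned int saved_fs;
 *
 *	savesegment(fs, saved_fs);	   remember the current %fs selector
 *	loadsegment(fs, 0);		   load the null selector
 *	...				   work that must not rely on old %fs
 *	loadsegment(fs, saved_fs);	   restore; a faulting load falls back
 *					   to the null selector via the .fixup
 *					   entry above
 */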

/*
 * Clear and set 'TS' bit respectively
 */
#define clts() __asm__ __volatile__ ("clts")
#define read_cr0() ({ \
	unsigned int __dummy; \
	__asm__ __volatile__( \
		"movl %%cr0,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr0(x) \
	__asm__ __volatile__("movl %0,%%cr0": :"r" (x));

#define read_cr2() ({ \
	unsigned int __dummy; \
	__asm__ __volatile__( \
		"movl %%cr2,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr2(x) \
	__asm__ __volatile__("movl %0,%%cr2": :"r" (x));

#define read_cr3() ({ \
	unsigned int __dummy; \
	__asm__ ( \
		"movl %%cr3,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr3(x) \
	__asm__ __volatile__("movl %0,%%cr3": :"r" (x));

#define read_cr4() ({ \
	unsigned int __dummy; \
	__asm__( \
		"movl %%cr4,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr4(x) \
	__asm__ __volatile__("movl %0,%%cr4": :"r" (x));
#define stts() write_cr0(8 | read_cr0())

#endif	/* __KERNEL__ */

#define wbinvd() \
	__asm__ __volatile__ ("wbinvd": : :"memory");

static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__("lsll %1,%0"
		:"=r" (__limit):"r" (segment));
	return __limit+1;
}

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))


/*
 * The semantics of CMPXCHG8B are a bit strange, which is why
 * there is a loop and the loading of %%eax and %%edx has to
 * be inside it. This inlines well in most cases, the cached
 * cost is around ~38 cycles. (in the future we might want
 * to do a SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
 * might have an implicit FPU-save as a cost, so it's not
 * clear which path to go.)
 *
 * cmpxchg8b must be used with the lock prefix here to allow
 * the instruction to be executed atomically, see page 3-102
 * of the instruction set reference 24319102.pdf. We need
 * the reader side to see the coherent 64bit value.
 */
static inline void __set_64bit (unsigned long long * ptr,
		unsigned int low, unsigned int high)
{
	__asm__ __volatile__ (
		"\n1:\t"
		"movl (%0), %%eax\n\t"
		"movl 4(%0), %%edx\n\t"
		"lock cmpxchg8b (%0)\n\t"
		"jnz 1b"
		: /* no outputs */
		:	"D"(ptr),
			"b"(low),
			"c"(high)
		:	"ax","dx","memory");
}

static inline void __set_64bit_constant (unsigned long long *ptr,
					 unsigned long long value)
{
	__set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
}
#define ll_low(x)	*(((unsigned int*)&(x))+0)
#define ll_high(x)	*(((unsigned int*)&(x))+1)

static inline void __set_64bit_var (unsigned long long *ptr,
				    unsigned long long value)
{
	__set_64bit(ptr,ll_low(value), ll_high(value));
}

#define set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit_constant(ptr, value) : \
 __set_64bit_var(ptr, value) )

#define _set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
 __set_64bit(ptr, ll_low(value), ll_high(value)) )
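
/*
 * Example usage (illustrative sketch; the variable name is invented).
 * set_64bit() publishes the whole 64-bit value atomically, so a concurrent
 * 64-bit reader never observes a half-updated pair of 32-bit words:
 *
 *	static unsigned long long shared_stamp;
 *
 *	set_64bit(&shared_stamp, 0x0000000500000001ULL);
 */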

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has side effects, so the volatile attribute is necessary,
 *	   but strictly speaking the constraints are not quite right:
 *	   *ptr is really an output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
		case 1:
			__asm__ __volatile__("xchgb %b0,%1"
				:"=q" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 2:
			__asm__ __volatile__("xchgw %w0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 4:
			__asm__ __volatile__("xchgl %0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
	}
	return x;
}
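
/*
 * Example usage (illustrative sketch; "pending" is an invented flag).
 * xchg() returns the previous value, so the caller can tell whether it was
 * the one that performed the 0 -> 1 transition:
 *
 *	static int pending;
 *
 *	if (xchg(&pending, 1) == 0) {
 *		only one caller at a time reaches this point
 *	}
 */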

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#endif

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}

#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
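
/*
 * Example usage (illustrative sketch; "counter" is an invented variable).
 * The canonical lock-free update loop retries until the value observed as
 * "old" is still current when the swap is attempted:
 *
 *	static unsigned long counter;
 *	unsigned long old, new;
 *
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */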

#ifdef __KERNEL__
struct alt_instr {
	__u8 *instr;		/* original instruction */
	__u8 *replacement;
	__u8  cpuid;		/* cpuid bit set for replacement */
	__u8  instrlen;		/* length of original instruction */
	__u8  replacementlen;	/* length of new instruction, <= instrlen */
	__u8  pad;
};
#endif

/*
 * Alternative instructions for different CPU types or capabilities.
 *
 * This allows the use of optimized instructions even on generic binary
 * kernels.
 *
 * The length of oldinstr must be greater than or equal to the length of
 * newinstr. It can be padded with nops as needed.
 *
 * For non-barrier-like inlines please define new variants
 * without volatile and memory clobber.
 */
#define alternative(oldinstr, newinstr, feature)			\
	asm volatile ("661:\n\t" oldinstr "\n662:\n"			\
		      ".section .altinstructions,\"a\"\n"		\
		      "  .align 4\n"					\
		      "  .long 661b\n"		/* label */		\
		      "  .long 663f\n"		/* new instruction */	\
		      "  .byte %c0\n"		/* feature bit */	\
		      "  .byte 662b-661b\n"	/* sourcelen */		\
		      "  .byte 664f-663f\n"	/* replacementlen */	\
		      ".previous\n"					\
		      ".section .altinstr_replacement,\"ax\"\n"	\
		      "663:\n\t" newinstr "\n664:\n"	/* replacement */ \
		      ".previous" :: "i" (feature) : "memory")

/*
 * Alternative inline assembly with input.
 *
 * Peculiarities:
 * No memory clobber here.
 * Argument numbers start with 1.
 * It is best to use constraints that are fixed in size (like "r").
 * If you use variable-sized constraints like "m" or "g" in the
 * replacement, make sure to pad to the worst-case length.
 */
#define alternative_input(oldinstr, newinstr, feature, input...)	\
	asm volatile ("661:\n\t" oldinstr "\n662:\n"			\
		      ".section .altinstructions,\"a\"\n"		\
		      "  .align 4\n"					\
		      "  .long 661b\n"		/* label */		\
		      "  .long 663f\n"		/* new instruction */	\
		      "  .byte %c0\n"		/* feature bit */	\
		      "  .byte 662b-661b\n"	/* sourcelen */		\
		      "  .byte 664f-663f\n"	/* replacementlen */	\
		      ".previous\n"					\
		      ".section .altinstr_replacement,\"ax\"\n"	\
		      "663:\n\t" newinstr "\n664:\n"	/* replacement */ \
		      ".previous" :: "i" (feature), ##input)

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */


/*
 * Actually only lfence would be needed for mb() because all stores done
 * by the kernel should be already ordered. But keep a full barrier for now.
 */

#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while(0)

#ifdef CONFIG_X86_OOSTORE
/* Actually there are no OOO store capable CPUs for now that do SSE,
   but make it already a possibility. */
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define wmb()	__asm__ __volatile__ ("": : :"memory")
#endif

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif

#define set_wmb(var, value) do { var = value; wmb(); } while (0)
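
/*
 * Example pairing (illustrative sketch; "data" and "ready" are invented).
 * A producer/consumer hand-off uses smp_wmb()/smp_rmb() as a pair:
 *
 *	producer:			consumer:
 *		data = 42;			while (!ready)
 *		smp_wmb();				cpu_relax();
 *		ready = 1;			smp_rmb();
 *						use(data);
 *
 * The smp_wmb() orders the data store before the flag store, and the
 * paired smp_rmb() orders the flag read before the data read.
 */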

/* interrupt control.. */
#define local_save_flags(x)	do { typecheck(unsigned long,x); __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */); } while (0)
#define local_irq_restore(x)	do { typecheck(unsigned long,x); __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc"); } while (0)
#define local_irq_disable()	__asm__ __volatile__("cli": : :"memory")
#define local_irq_enable()	__asm__ __volatile__("sti": : :"memory")
/* used in the idle loop; sti takes one instruction cycle to complete */
#define safe_halt()		__asm__ __volatile__("sti; hlt": : :"memory")
/* used when interrupts are already enabled or to shut down the processor */
#define halt()			__asm__ __volatile__("hlt": : :"memory")

#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	!(flags & (1<<9));		\
})

/* For spinlocks etc */
#define local_irq_save(x)	__asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
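
/*
 * Example usage (illustrative sketch) of the usual save/restore pattern:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		interrupts now off on this CPU
 *	...				touch data shared with an IRQ handler
 *	local_irq_restore(flags);	back to the caller's previous state
 *
 * Restoring the saved flags word, rather than unconditionally re-enabling,
 * lets the pair nest correctly inside already-disabled sections.
 */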

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

extern int es7000_plat;
void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);

#endif