#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <linux/bitops.h> /* for LOCK_PREFIX */

#ifdef __KERNEL__

struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));

/*
 * Saving eflags is important. It switches not only IOPL between tasks,
 * it also protects other tasks from NT leaking through sysenter etc.
 */
#define switch_to(prev,next,last) do {				\
	unsigned long esi,edi;					\
	asm volatile("pushfl\n\t"	/* Save flags */	\
		     "pushl %%ebp\n\t"				\
		     "movl %%esp,%0\n\t"	/* save ESP */	\
		     "movl %5,%%esp\n\t"	/* restore ESP */	\
		     "movl $1f,%1\n\t"		/* save EIP */	\
		     "pushl %6\n\t"		/* restore EIP */	\
		     "jmp __switch_to\n"			\
		     "1:\t"					\
		     "popl %%ebp\n\t"				\
		     "popfl"					\
		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),	\
		      "=a" (last),"=S" (esi),"=D" (edi)		\
		     :"m" (next->thread.esp),"m" (next->thread.eip),	\
		      "2" (prev), "d" (next));			\
} while (0)
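
/*
 * Illustrative sketch only (not part of the original header): switch_to()
 * is meant to be invoked from the scheduler's context-switch path.  In a
 * hypothetical caller, "prev" is the outgoing task, "next" the incoming
 * one, and the third argument receives the task we actually switched from:
 *
 *	struct task_struct *prev = current, *last;
 *	switch_to(prev, next, last);
 *	// when this task runs again, "last" names the task that ran before it
 */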

#define _set_base(addr,base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %%dl,%2\n\t" \
	"movb %%dh,%3" \
	:"=&d" (__pr) \
	:"m" (*((addr)+2)), \
	 "m" (*((addr)+4)), \
	 "m" (*((addr)+7)), \
	 "0" (base) \
	); } while(0)

#define _set_limit(addr,limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %2,%%dh\n\t" \
	"andb $0xf0,%%dh\n\t" \
	"orb %%dh,%%dl\n\t" \
	"movb %%dl,%2" \
	:"=&d" (__lr) \
	:"m" (*(addr)), \
	 "m" (*((addr)+6)), \
	 "0" (limit) \
	); } while(0)

#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value)			\
	asm volatile("\n"			\
		"1:\t"				\
		"mov %0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"pushl $0\n\t"			\
		"popl %%" #seg "\n\t"		\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		".align 4\n\t"			\
		".long 1b,3b\n"			\
		".previous"			\
		: :"rm" (value))

/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
	asm volatile("mov %%" #seg ",%0":"=rm" (value))
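
/*
 * Illustrative sketch only (not from the original header): because the
 * segment name is pasted in with the # operator, both macros take a bare
 * register name.  A hypothetical caller that saves and then clears %gs
 * might do:
 *
 *	unsigned int saved_gs;
 *	savesegment(gs, saved_gs);	// stash the current selector
 *	loadsegment(gs, 0);		// load the null selector; a faulting
 *					// value falls back to 0 via .fixup
 */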

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define read_cr0() ({ \
	unsigned int __dummy; \
	__asm__ __volatile__( \
		"movl %%cr0,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr0(x) \
	__asm__ __volatile__("movl %0,%%cr0": :"r" (x))

#define read_cr2() ({ \
	unsigned int __dummy; \
	__asm__ __volatile__( \
		"movl %%cr2,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr2(x) \
	__asm__ __volatile__("movl %0,%%cr2": :"r" (x))

#define read_cr3() ({ \
	unsigned int __dummy; \
	__asm__ ( \
		"movl %%cr3,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr3(x) \
	__asm__ __volatile__("movl %0,%%cr3": :"r" (x))

#define read_cr4() ({ \
	unsigned int __dummy; \
	__asm__( \
		"movl %%cr4,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define read_cr4_safe() ({ \
	unsigned int __dummy; \
	/* This could fault if %cr4 does not exist */ \
	__asm__("1: movl %%cr4, %0	\n" \
		"2:			\n" \
		".section __ex_table,\"a\"	\n" \
		".long 1b,2b		\n" \
		".previous		\n" \
		: "=r" (__dummy): "0" (0)); \
	__dummy; \
})
#define write_cr4(x) \
	__asm__ __volatile__("movl %0,%%cr4": :"r" (x))

#define wbinvd() \
	__asm__ __volatile__ ("wbinvd": : :"memory")

/* Clear the 'TS' bit */
#define clts() __asm__ __volatile__ ("clts")
#endif	/* CONFIG_PARAVIRT */

/* Set the 'TS' bit */
#define stts() write_cr0(8 | read_cr0())

#endif	/* __KERNEL__ */

static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__("lsll %1,%0"
		:"=r" (__limit):"r" (segment));
	return __limit+1;
}

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))


#ifdef CONFIG_X86_CMPXCHG64

/*
 * The semantics of CMPXCHG8B are a bit strange, this is why
 * there is a loop and the loading of %%eax and %%edx has to
 * be inside. This inlines well in most cases, the cached
 * cost is around ~38 cycles. (in the future we might want
 * to do a SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
 * might have an implicit FPU-save as a cost, so it's not
 * clear which path to go.)
 *
 * cmpxchg8b must be used with the lock prefix here to allow
 * the instruction to be executed atomically, see page 3-102
 * of the instruction set reference 24319102.pdf. We need
 * the reader side to see the coherent 64bit value.
 */
static inline void __set_64bit (unsigned long long * ptr,
		unsigned int low, unsigned int high)
{
	__asm__ __volatile__ (
		"\n1:\t"
		"movl (%0), %%eax\n\t"
		"movl 4(%0), %%edx\n\t"
		"lock cmpxchg8b (%0)\n\t"
		"jnz 1b"
		: /* no outputs */
		: "D"(ptr),
		  "b"(low),
		  "c"(high)
		: "ax","dx","memory");
}

static inline void __set_64bit_constant (unsigned long long *ptr,
					 unsigned long long value)
{
	__set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
}
#define ll_low(x)	*(((unsigned int*)&(x))+0)
#define ll_high(x)	*(((unsigned int*)&(x))+1)

static inline void __set_64bit_var (unsigned long long *ptr,
				    unsigned long long value)
{
	__set_64bit(ptr,ll_low(value), ll_high(value));
}

#define set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit_constant(ptr, value) : \
 __set_64bit_var(ptr, value) )

#define _set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
 __set_64bit(ptr, ll_low(value), ll_high(value)) )
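
/*
 * Illustrative sketch only (not from the original header): set_64bit()
 * lets readers see a coherent 64-bit value even on a 32-bit CPU.  A
 * hypothetical use that publishes a shared timestamp might look like:
 *
 *	static unsigned long long shared_stamp;
 *
 *	static void example_publish_stamp(unsigned long long now)
 *	{
 *		// performed as a single atomic store via lock cmpxchg8b
 *		set_64bit(&shared_stamp, now);
 *	}
 */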

#endif

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 *	   but generally the primitive is invalid, *ptr is output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__("xchgb %b0,%1"
			:"=q" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	case 2:
		__asm__ __volatile__("xchgw %w0,%1"
			:"=r" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	case 4:
		__asm__ __volatile__("xchgl %0,%1"
			:"=r" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	}
	return x;
}
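
/*
 * Illustrative sketch only (not from the original header): xchg() swaps a
 * value with memory atomically, which is the classic building block for a
 * test-and-set lock (see tas() above).  A hypothetical spin-style lock:
 *
 *	static volatile unsigned int example_lock;
 *
 *	static void example_acquire(void)
 *	{
 *		while (xchg(&example_lock, 1) != 0)
 *			cpu_relax();	// spin until the old value was 0
 *	}
 *
 *	static void example_release(void)
 *	{
 *		example_lock = 0;
 *	}
 */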

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
#define sync_cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
#endif

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}
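
/*
 * Illustrative sketch only (not from the original header): the usual
 * pattern is a read/modify/cmpxchg loop that retries until no other CPU
 * raced with the update.  A hypothetical saturating add might look like:
 *
 *	static unsigned int example_add_capped(unsigned int *v,
 *					       unsigned int inc,
 *					       unsigned int cap)
 *	{
 *		unsigned int old, new;
 *		do {
 *			old = *v;
 *			new = (old + inc > cap) ? cap : old + inc;
 *		} while (cmpxchg(v, old, new) != old);
 *		return new;
 *	}
 */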

/*
 * Always use locked operations when touching memory shared with a
 * hypervisor, since the system may be SMP even if the guest kernel
 * isn't.
 */
static inline unsigned long __sync_cmpxchg(volatile void *ptr,
					   unsigned long old,
					   unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__("lock; cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__("lock; cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__("lock; cmpxchgl %1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}

#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386.  It may be necessary
 * to simulate the cmpxchg on the 80386 CPU.  For that purpose we define
 * a function for each of the sizes we support.
 */

extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);

static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
					unsigned long new, int size)
{
	switch (size) {
	case 1:
		return cmpxchg_386_u8(ptr, old, new);
	case 2:
		return cmpxchg_386_u16(ptr, old, new);
	case 4:
		return cmpxchg_386_u32(ptr, old, new);
	}
	return old;
}

#define cmpxchg(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = __cmpxchg((ptr), (unsigned long)(o),		\
				  (unsigned long)(n), sizeof(*(ptr)));	\
	else								\
		__ret = cmpxchg_386((ptr), (unsigned long)(o),		\
				    (unsigned long)(n), sizeof(*(ptr))); \
	__ret;								\
})
#endif

#ifdef CONFIG_X86_CMPXCHG64

static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
					     unsigned long long new)
{
	unsigned long long prev;
	__asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
			     : "=A"(prev)
			     : "b"((unsigned long)new),
			       "c"((unsigned long)(new >> 32)),
			       "m"(*__xg(ptr)),
			       "0"(old)
			     : "memory");
	return prev;
}

#define cmpxchg64(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
					(unsigned long long)(n)))
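
/*
 * Illustrative sketch only (not from the original header): cmpxchg64()
 * follows the same retry pattern as cmpxchg(), just on a 64-bit quantity.
 * A hypothetical lock-free 64-bit counter increment might look like:
 *
 *	static void example_inc64(unsigned long long *v)
 *	{
 *		unsigned long long old;
 *		do {
 *			old = *v;
 *		} while (cmpxchg64(v, old, old + 1) != old);
 *	}
 */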

#endif

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPU's follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPU's to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */


/*
 * Actually only lfence would be needed for mb() because all stores done
 * by the kernel should be already ordered. But keep a full barrier for now.
 */

#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while(0)
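
/*
 * Illustrative sketch only (not from the original header): the first
 * <programlisting> above, written out as C with hypothetical globals.  On
 * i386 the dependency barrier compiles away; the pattern still marks where
 * one is required for portability (e.g. to Alpha).
 *
 *	static int a, b = 1;
 *	static int *p = &a;
 *
 *	// CPU 0: publish the new value, then the pointer to it
 *	static void example_writer(void)
 *	{
 *		b = 2;
 *		wmb();
 *		p = &b;
 *	}
 *
 *	// CPU 1: read the pointer, then the value it points to
 *	static int example_reader(void)
 *	{
 *		int *q = p;
 *		read_barrier_depends();
 *		return *q;
 *	}
 */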

#ifdef CONFIG_X86_OOSTORE
/* Actually there are no OOO store capable CPUs for now that do SSE,
   but make it already a possibility. */
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define wmb()	__asm__ __volatile__ ("": : :"memory")
#endif

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif

#include <linux/irqflags.h>

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);
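
/*
 * Illustrative sketch only (not from the original header): a hypothetical
 * driver would bracket the hlt-sensitive region like this:
 *
 *	disable_hlt();			// keep idle CPUs from executing hlt
 *	example_do_critical_io();	// hypothetical I/O that hlt would disturb
 *	enable_hlt();
 */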

extern int es7000_plat;
void cpu_idle_wait(void);

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible:
 */
static inline void sched_cacheflush(void)
{
	wbinvd();
}

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);

#endif