/* $Id: system.h,v 1.86 2001/10/30 04:57:10 davem Exp $ */

#ifndef __SPARC_SYSTEM_H
#define __SPARC_SYSTEM_H

#include <linux/kernel.h>
#include <linux/threads.h>	/* NR_CPUS */
#include <linux/thread_info.h>

#include <asm/page.h>
#include <asm/psr.h>
#include <asm/ptrace.h>
#include <asm/btfixup.h>

#ifndef __ASSEMBLY__

/*
 * Sparc (general) CPU types
 */
enum sparc_cpu {
	sun4        = 0x00,
	sun4c       = 0x01,
	sun4m       = 0x02,
	sun4d       = 0x03,
	sun4e       = 0x04,
	sun4u       = 0x05, /* V8 ploos ploos */
	sun_unknown = 0x06,
	ap1000      = 0x07, /* almost a sun4m */
};

/* Really, userland should not be looking at any of this... */
#ifdef __KERNEL__

extern enum sparc_cpu sparc_cpu_model;

#ifndef CONFIG_SUN4
#define ARCH_SUN4C_SUN4 (sparc_cpu_model==sun4c)
#define ARCH_SUN4 0
#else
#define ARCH_SUN4C_SUN4 1
#define ARCH_SUN4 1
#endif
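
/* Example (illustrative sketch, not part of the original header): code
 * that must special-case the sun4/sun4c MMU is expected to test these
 * macros rather than compare sparc_cpu_model directly, so that on a
 * CONFIG_SUN4 build the compiler can constant-fold the branch away:
 *
 *	if (ARCH_SUN4C_SUN4)
 *		setup_sun4_or_sun4c_mmu();	// hypothetical helper
 *	else
 *		setup_srmmu();			// hypothetical helper
 */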

#define SUN4M_NCPUS            4              /* Architectural limit of sun4m. */

extern struct thread_info *current_set[NR_CPUS];

extern unsigned long empty_bad_page;
extern unsigned long empty_bad_page_table;
extern unsigned long empty_zero_page;

extern void sun_do_break(void);
extern int serial_console;
extern int stop_a_enabled;

static __inline__ int con_is_present(void)
{
	return serial_console ? 0 : 1;
}

/* When a context switch happens we must flush all user windows so that
 * the windows of the current process are flushed onto its stack. This
 * way the windows are all clean for the next process and the stack
 * frames are up to date.
 */
extern void flush_user_windows(void);
extern void kill_user_windows(void);
extern void synchronize_user_stack(void);
extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
		   void *fpqueue, unsigned long *fpqdepth);
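
/* Usage sketch (illustrative, not part of the original header): before
 * the kernel reads or writes a user stack directly (signal delivery,
 * backtraces, and the like), any register windows the CPU still holds
 * for the task must be spilled to that stack first:
 *
 *	flush_user_windows();		// spill live user windows to the stack
 *	synchronize_user_stack();	// make sure the frames have landed
 *	// ...the task's user stack frames are now consistent in memory...
 */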

#ifdef CONFIG_SMP
#define SWITCH_ENTER(prv) \
	do { \
	if (test_tsk_thread_flag(prv, TIF_USEDFPU)) { \
		put_psr(get_psr() | PSR_EF); \
		fpsave(&(prv)->thread.float_regs[0], &(prv)->thread.fsr, \
		       &(prv)->thread.fpqueue[0], &(prv)->thread.fpqdepth); \
		clear_tsk_thread_flag(prv, TIF_USEDFPU); \
		(prv)->thread.kregs->psr &= ~PSR_EF; \
	} \
	} while(0)

#define SWITCH_DO_LAZY_FPU(next)	/* */
#else
#define SWITCH_ENTER(prv)		/* */
#define SWITCH_DO_LAZY_FPU(nxt) \
	do { \
	if (last_task_used_math != (nxt)) \
		(nxt)->thread.kregs->psr &= ~PSR_EF; \
	} while(0)
#endif

/*
 * Flush windows so that the VM switch which follows
 * does not pull the stack out from under us.
 *
 * SWITCH_ENTER and SWITCH_DO_LAZY_FPU do not work yet (e.g. SMP does not work)
 * XXX WTF is the above comment? Found in late teen 2.4.x.
 */
#define prepare_arch_switch(next) do { \
	__asm__ __volatile__( \
	".globl\tflush_patch_switch\nflush_patch_switch:\n\t" \
	"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
	"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
	"save %sp, -0x40, %sp\n\t" \
	"restore; restore; restore; restore; restore; restore; restore"); \
} while(0)

/* Much care has gone into this code, do not touch it.
 *
 * We need to load up regs l0/l1 for the newly forked child
 * case because the trap return path relies on those registers
 * holding certain values; gcc is told that they are clobbered.
 * Gcc needs registers for 3 values in and 1 value out, so we
 * clobber every non-fixed-usage register besides l2/l3/o4/o5.  -DaveM
 *
 * Hey Dave, that "do not touch" sign is too much of an incentive
 * - Anton & Pete
 */
#define switch_to(prev, next, last) do {	\
	SWITCH_ENTER(prev);			\
	SWITCH_DO_LAZY_FPU(next);		\
	cpu_set(smp_processor_id(), next->active_mm->cpu_vm_mask); \
	__asm__ __volatile__(			\
	"sethi	%%hi(here - 0x8), %%o7\n\t"	\
	"mov	%%g6, %%g3\n\t"			\
	"or	%%o7, %%lo(here - 0x8), %%o7\n\t" \
	"rd	%%psr, %%g4\n\t"		\
	"std	%%sp, [%%g6 + %4]\n\t"		\
	"rd	%%wim, %%g5\n\t"		\
	"wr	%%g4, 0x20, %%psr\n\t"		\
	"nop\n\t"				\
	"std	%%g4, [%%g6 + %3]\n\t"		\
	"ldd	[%2 + %3], %%g4\n\t"		\
	"mov	%2, %%g6\n\t"			\
	".globl	patchme_store_new_current\n"	\
	"patchme_store_new_current:\n\t"	\
	"st	%2, [%1]\n\t"			\
	"wr	%%g4, 0x20, %%psr\n\t"		\
	"nop\n\t"				\
	"nop\n\t"				\
	"nop\n\t"	/* LEON needs all 3 nops: load to %sp depends on CWP. */ \
	"ldd	[%%g6 + %4], %%sp\n\t"		\
	"wr	%%g5, 0x0, %%wim\n\t"		\
	"ldd	[%%sp + 0x00], %%l0\n\t"	\
	"ldd	[%%sp + 0x38], %%i6\n\t"	\
	"wr	%%g4, 0x0, %%psr\n\t"		\
	"nop\n\t"				\
	"nop\n\t"				\
	"jmpl	%%o7 + 0x8, %%g0\n\t"		\
	" ld	[%%g3 + %5], %0\n\t"		\
	"here:\n"				\
	: "=&r" (last)				\
	: "r" (&(current_set[hard_smp_processor_id()])), \
	  "r" (task_thread_info(next)),		\
	  "i" (TI_KPSR),			\
	  "i" (TI_KSP),				\
	  "i" (TI_TASK)				\
	: "g1", "g2", "g3", "g4", "g5", "g7",	\
	  "l0", "l1", "l3", "l4", "l5", "l6", "l7", \
	  "i0", "i1", "i2", "i3", "i4", "i5",	\
	  "o0", "o1", "o2", "o3", "o7");	\
	} while(0)

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

/*
 * Changing the IRQ level on the Sparc.
 */
extern void local_irq_restore(unsigned long);
extern unsigned long __local_irq_save(void);
extern void local_irq_enable(void);

static inline unsigned long getipl(void)
{
	unsigned long retval;

	__asm__ __volatile__("rd	%%psr, %0" : "=r" (retval));
	return retval;
}

#define local_save_flags(flags)	((flags) = getipl())
#define local_irq_save(flags)	((flags) = __local_irq_save())
#define local_irq_disable()	((void) __local_irq_save())
#define irqs_disabled()		((getipl() & PSR_PIL) != 0)
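
/* Usage sketch (illustrative, not part of the original header): since
 * local_irq_save() captures the old PSR interrupt level, the canonical
 * critical-section pattern restores that level instead of blindly
 * re-enabling interrupts:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// mask interrupts, remember old level
 *	// ...touch data shared with interrupt handlers...
 *	local_irq_restore(flags);	// put the previous level back
 */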

/* XXX Change this if we ever use a PSO mode kernel. */
#define mb()	__asm__ __volatile__ ("" : : : "memory")
#define rmb()	mb()
#define wmb()	mb()
#define read_barrier_depends()	do { } while(0)
#define set_mb(__var, __value)  do { __var = __value; mb(); } while(0)
#define smp_mb()	__asm__ __volatile__("":::"memory")
#define smp_rmb()	__asm__ __volatile__("":::"memory")
#define smp_wmb()	__asm__ __volatile__("":::"memory")
#define smp_read_barrier_depends()	do { } while(0)
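
/* Example (illustrative sketch, not part of the original header):
 * barriers are used in matched pairs.  A writer publishing data behind
 * a flag orders its stores with smp_wmb(), and the reader orders its
 * loads with smp_rmb():
 *
 *	// writer:
 *	data = val;
 *	smp_wmb();	// store to data ordered before store to ready
 *	ready = 1;
 *
 *	// reader:
 *	while (!ready)
 *		;
 *	smp_rmb();	// load of ready ordered before load of data
 *	use(data);
 *
 * On this (non-PSO) sparc32 these expand to compiler barriers only, but
 * the pairing keeps such code correct on more weakly ordered machines.
 */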

#define nop() __asm__ __volatile__ ("nop")

/* This has special calling conventions */
#ifndef CONFIG_SMP
BTFIXUPDEF_CALL(void, ___xchg32, void)
#endif

static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
{
#ifdef CONFIG_SMP
	__asm__ __volatile__("swap [%2], %0"
			     : "=&r" (val)
			     : "0" (val), "r" (m)
			     : "memory");
	return val;
#else
	register unsigned long *ptr asm("g1");
	register unsigned long ret asm("g2");

	ptr = (unsigned long *) m;
	ret = val;

	/* Note: this is magic and the nop there is
	   really needed. */
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___f____xchg32\n\t"
	" nop\n\t"
	: "=&r" (ret)
	: "0" (ret), "r" (ptr)
	: "g3", "g4", "g7", "memory", "cc");

	return ret;
#endif
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
{
	switch (size) {
	case 4:
		return xchg_u32(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
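
/* Example (illustrative sketch, not part of the original header): tas()
 * returns the old value of the word, so it is enough to build a trivial
 * test-and-set spinlock:
 *
 *	static volatile unsigned int lock;	// hypothetical lock word
 *
 *	while (tas(&lock))	// old value != 0 => someone else holds it
 *		while (lock)	// spin reading until it looks free,
 *			;	// then retry the atomic exchange
 *	// ...critical section...
 *	lock = 0;		// release
 */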

extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* !(__SPARC_SYSTEM_H) */