#ifndef _M68KNOMMU_SYSTEM_H
#define _M68KNOMMU_SYSTEM_H

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/entry.h>

/*
 * switch_to(n) should switch tasks to task ptr, first checking that
 * ptr isn't the current task, in which case it does nothing. This
 * also clears the TS-flag if the task we switched to has used the
 * math co-processor most recently.
 */
/*
 * switch_to() saves the extra registers that are not saved
 * automatically by SAVE_SWITCH_STACK in resume(), i.e. d0-d5 and
 * a0-a1. Some of these are used by schedule() and its predecessors,
 * so we might see unexpected behavior when a task returns with
 * unexpected register values.
 *
 * syscall stores these registers itself and none of them are used
 * by syscall after the function in the syscall has been called.
 *
 * Beware that resume now expects *next to be in d1 and the offset of
 * tss to be in a1. This saves a few instructions as we no longer have
 * to push them onto the stack and read them back right after.
 *
 * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
 *
 * Changed 96/09/19 by Andreas Schwab
 * pass prev in a0, next in a1, offset of tss in d1, and whether
 * the mm structures are shared in d2 (to avoid atc flushing).
 */
asmlinkage void resume(void);
#define switch_to(prev,next,last)				\
{								\
	void *_last;						\
	__asm__ __volatile__(					\
		"movel	%1, %%a0\n\t"				\
		"movel	%2, %%a1\n\t"				\
		"jbsr	resume\n\t"				\
		"movel	%%d1, %0\n\t"				\
		: "=d" (_last)					\
		: "d" (prev), "d" (next)			\
		: "cc", "d0", "d1", "d2", "d3", "d4", "d5", "a0", "a1"); \
	(last) = _last;						\
}
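
/*
 * A minimal illustrative sketch, NOT part of the original header:
 * the calling pattern the scheduler core uses with switch_to(). In
 * the kernel, prev/next are struct task_struct pointers; they are
 * left opaque here, and the __example_* name is hypothetical.
 */
static inline void *__example_context_switch(void *prev, void *next)
{
	void *last;

	switch_to(prev, next, last);	/* returns here when "prev" is scheduled again */
	return last;			/* the task that ran just before us */
}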

#ifdef CONFIG_COLDFIRE
#define local_irq_enable() __asm__ __volatile__ (		\
	"move %/sr,%%d0\n\t"					\
	"andi.l #0xf8ff,%%d0\n\t"				\
	"move %%d0,%/sr\n"					\
	: /* no outputs */					\
	:							\
	: "cc", "%d0", "memory")
#define local_irq_disable() __asm__ __volatile__ (		\
	"move %/sr,%%d0\n\t"					\
	"ori.l #0x0700,%%d0\n\t"				\
	"move %%d0,%/sr\n"					\
	: /* no outputs */					\
	:							\
	: "cc", "%d0", "memory")
/* For spinlocks etc */
#define local_irq_save(x) __asm__ __volatile__ (		\
	"movew %%sr,%0\n\t"					\
	"movew #0x0700,%%d0\n\t"				\
	"or.l %0,%%d0\n\t"					\
	"movew %%d0,%/sr"					\
	: "=d" (x)						\
	:							\
	: "cc", "%d0", "memory")
#else

/* portable version */ /* FIXME - see entry.h */
#define ALLOWINT 0xf8ff

#define local_irq_enable() asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory")
#define local_irq_disable() asm volatile ("oriw #0x0700,%%sr": : : "memory")
#endif

#define local_save_flags(x) asm volatile ("movew %%sr,%0":"=d" (x) : : "memory")
#define local_irq_restore(x) asm volatile ("movew %0,%%sr": :"d" (x) : "memory")

/* For spinlocks etc */
#ifndef local_irq_save
#define local_irq_save(x) do { local_save_flags(x); local_irq_disable(); } while (0)
#endif

#define irqs_disabled()				\
({						\
	unsigned long flags;			\
	local_save_flags(flags);		\
	((flags & 0x0700) == 0x0700);		\
})
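
/*
 * A minimal illustrative sketch, NOT part of the original header:
 * the usual short critical section built from the primitives above.
 * local_irq_save() masks interrupts and remembers the previous SR,
 * so this is safe whether or not the caller already had interrupts
 * disabled. The __example_* name is hypothetical.
 */
static inline void __example_protected_update(volatile unsigned long *counter)
{
	unsigned long flags;

	local_irq_save(flags);		/* save SR, raise IPL to 7 */
	(*counter)++;			/* non-atomic RMW, now interrupt-safe */
	local_irq_restore(flags);	/* restore SR and the old IPL */
}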

#define iret() __asm__ __volatile__ ("rte": : :"memory", "sp", "cc")

/*
 * Force strict CPU ordering.
 * Not really required on m68k...
 */
#define nop()	asm volatile ("nop"::)
#define mb()	asm volatile ("" : : :"memory")
#define rmb()	asm volatile ("" : : :"memory")
#define wmb()	asm volatile ("" : : :"memory")
/* m68k has no speculative data-dependency reordering */
#define read_barrier_depends()	do { } while (0)
#define set_rmb(var, value)	do { xchg(&var, value); } while (0)
#define set_mb(var, value)	set_rmb(var, value)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#endif

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

#ifndef CONFIG_RMW_INSNS
/* No CAS instructions available: emulate atomic exchange with interrupts off. */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long tmp, flags;

	local_irq_save(flags);

	switch (size) {
	case 1:
		__asm__ __volatile__
			("moveb %2,%0\n\t"
			 "moveb %1,%2"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 2:
		__asm__ __volatile__
			("movew %2,%0\n\t"
			 "movew %1,%2"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 4:
		__asm__ __volatile__
			("movel %2,%0\n\t"
			 "movel %1,%2"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	}
	local_irq_restore(flags);
	return tmp;
}
#else
/* CAS is available: retry the compare-and-swap until it succeeds. */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__
			("moveb %2,%0\n\t"
			 "1:\n\t"
			 "casb %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 2:
		__asm__ __volatile__
			("movew %2,%0\n\t"
			 "1:\n\t"
			 "casw %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 4:
		__asm__ __volatile__
			("movel %2,%0\n\t"
			 "1:\n\t"
			 "casl %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	}
	return x;
}
#endif
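
/*
 * A minimal illustrative sketch, NOT part of the original header:
 * tas() above is an atomic test-and-set built on xchg(), which is
 * enough for a simple busy-wait lock. The __example_* names are
 * hypothetical.
 */
static inline void __example_tas_lock(volatile int *lock)
{
	while (tas(lock))	/* old value != 0 means someone else holds it */
		;		/* spin until the holder releases the lock */
}

static inline void __example_tas_unlock(volatile int *lock)
{
	*lock = 0;		/* a plain store releases the lock on this UP port */
}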

/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */
#define __HAVE_ARCH_CMPXCHG	1

static __inline__ unsigned long
cmpxchg(volatile int *p, int old, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);	/* no CAS needed: mask interrupts instead */
	if ((prev = *p) == old)
		*p = new;
	local_irq_restore(flags);
	return prev;
}
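
/*
 * A minimal illustrative sketch, NOT part of the original header:
 * the canonical cmpxchg() retry loop. The update only succeeds when
 * the returned value equals the snapshot, meaning nobody modified *p
 * in between; otherwise we reread and retry. The __example_* name is
 * hypothetical.
 */
static inline void __example_cmpxchg_add(volatile int *p, int inc)
{
	int old;

	do {
		old = *p;	/* snapshot the current value */
	} while (cmpxchg(p, old, old + inc) != old);
}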


#ifdef CONFIG_M68332
#define HARD_RESET_NOW() ({		\
	local_irq_disable();		\
	asm("				\
	movew	#0x0000, 0xfffa6a;	\
	reset;				\
	/*movew #0x1557, 0xfffa44;*/	\
	/*movew #0x0155, 0xfffa46;*/	\
	moveal	#0, %a0;		\
	movec	%a0, %vbr;		\
	moveal	0, %sp;			\
	moveal	4, %a0;			\
	jmp	(%a0);			\
	");				\
})
#endif

#if defined(CONFIG_M68328) || defined(CONFIG_M68EZ328) || \
	defined(CONFIG_M68360) || defined(CONFIG_M68VZ328)
#define HARD_RESET_NOW() ({		\
	local_irq_disable();		\
	asm("				\
	moveal	#0x10c00000, %a0;	\
	moveb	#0, 0xFFFFF300;		\
	moveal	0(%a0), %sp;		\
	moveal	4(%a0), %a0;		\
	jmp	(%a0);			\
	");				\
})
#endif

#ifdef CONFIG_COLDFIRE
#if defined(CONFIG_M5272) && defined(CONFIG_NETtel)
/*
 * Need to account for the broken early mask of 5272 silicon. So don't
 * jump through the original start address. Jump straight into the
 * known start of the FLASH code.
 */
#define HARD_RESET_NOW() ({		\
	asm("				\
	movew	#0x2700, %sr;		\
	jmp	0xf0000400;		\
	");				\
})
#elif defined(CONFIG_NETtel) || defined(CONFIG_eLIA) || \
	defined(CONFIG_DISKtel) || defined(CONFIG_SECUREEDGEMP3) || \
	defined(CONFIG_CLEOPATRA)
#define HARD_RESET_NOW() ({		\
	asm("				\
	movew	#0x2700, %sr;		\
	moveal	#0x10000044, %a0;	\
	movel	#0xffffffff, (%a0);	\
	moveal	#0x10000001, %a0;	\
	moveb	#0x00, (%a0);		\
	moveal	#0xf0000004, %a0;	\
	moveal	(%a0), %a0;		\
	jmp	(%a0);			\
	");				\
})
#elif defined(CONFIG_M5272)
/*
 * Retrieve the boot address in flash using CSBR0 and CSOR0,
 * find the reset vector at flash_address + 4 (e.g. 0x400),
 * remap it to the flash's current location (e.g. 0xf0000400)
 * and jump there.
 */
#define HARD_RESET_NOW() ({		\
	asm("				\
	movew	#0x2700, %%sr;		\
	move.l	%0+0x40,%%d0;		\
	and.l	%0+0x44,%%d0;		\
	andi.l	#0xfffff000,%%d0;	\
	mov.l	%%d0,%%a0;		\
	or.l	4(%%a0),%%d0;		\
	mov.l	%%d0,%%a0;		\
	jmp	(%%a0);"		\
	: /* No output */		\
	: "o" (*(char *)MCF_MBAR));	\
})
#elif defined(CONFIG_M528x)
/*
 * The MCF528x has a bit (SOFTRST) in the Reset Control Register (RCR)
 * that, when set, resets the MCF528x.
 */
#define HARD_RESET_NOW()		\
({					\
	volatile unsigned char *reset;	\
	asm("move.w #0x2700, %sr");	\
	reset = (volatile unsigned char *)(MCF_IPSBAR + 0x110000); \
	while (1)			\
		*reset |= (0x01 << 7);	\
})
#elif defined(CONFIG_M523x)
#define HARD_RESET_NOW() ({		\
	asm("				\
	movew	#0x2700, %sr;		\
	movel	#0x01000000, %sp;	\
	moveal	#0x40110000, %a0;	\
	moveb	#0x80, (%a0);		\
	");				\
})
#elif defined(CONFIG_M520x)
/*
 * The MCF5208 has a bit (SOFTRST) in the Reset Control Register (RCR)
 * that, when set, resets the MCF5208.
 */
#define HARD_RESET_NOW()		\
({					\
	volatile unsigned char *reset;	\
	asm("move.w #0x2700, %sr");	\
	reset = (volatile unsigned char *)(MCF_IPSBAR + 0xA0000); \
	while (1)			\
		*reset |= 0x80;		\
})
#else
#define HARD_RESET_NOW() ({		\
	asm("				\
	movew	#0x2700, %sr;		\
	moveal	#0x4, %a0;		\
	moveal	(%a0), %a0;		\
	jmp	(%a0);			\
	");				\
})
#endif
#endif

#define arch_align_stack(x) (x)

#endif /* _M68KNOMMU_SYSTEM_H */