]>
Commit | Line | Data |
---|---|---|
14be4252 GU |
1 | /* -*- mode: asm -*- |
2 | * | |
3 | * linux/arch/m68k/kernel/entry.S | |
4 | * | |
5 | * Copyright (C) 1991, 1992 Linus Torvalds | |
6 | * | |
7 | * This file is subject to the terms and conditions of the GNU General Public | |
8 | * License. See the file README.legal in the main directory of this archive | |
9 | * for more details. | |
10 | * | |
11 | * Linux/m68k support by Hamish Macdonald | |
12 | * | |
13 | * 68060 fixes by Jesper Skov | |
14 | * | |
15 | */ | |
16 | ||
17 | /* | |
18 | * entry.S contains the system-call and fault low-level handling routines. | |
19 | * This also contains the timer-interrupt handler, as well as all interrupts | |
20 | * and faults that can result in a task-switch. | |
21 | * | |
22 | * NOTE: This code handles signal-recognition, which happens every time | |
23 | * after a timer-interrupt and after each system call. | |
24 | * | |
25 | */ | |
26 | ||
27 | /* | |
28 | * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so | |
29 | * all pointers that used to be 'current' are now entry | |
30 | * number 0 in the 'current_set' list. | |
31 | * | |
32 | * 6/05/00 RZ: addedd writeback completion after return from sighandler | |
33 | * for 68040 | |
34 | */ | |
35 | ||
36 | #include <linux/linkage.h> | |
37 | #include <asm/errno.h> | |
38 | #include <asm/setup.h> | |
39 | #include <asm/segment.h> | |
40 | #include <asm/traps.h> | |
41 | #include <asm/unistd.h> | |
42 | #include <asm/asm-offsets.h> | |
43 | #include <asm/entry.h> | |
44 | ||
45 | .globl system_call, buserr, trap, resume | |
46 | .globl sys_call_table | |
20ecc91c | 47 | .globl __sys_fork, __sys_clone, __sys_vfork |
09f90f66 | 48 | .globl bad_interrupt |
14be4252 GU |
49 | .globl auto_irqhandler_fixup |
50 | .globl user_irqvec_fixup | |
51 | ||
52 | .text | |
| fork() syscall stub: save the extra (switch-stack) registers so the
| child's thread state can be built from them, call the C handler, then
| simply discard the 24-byte frame (the parent's registers are unchanged).
ENTRY(__sys_fork)
	SAVE_SWITCH_STACK
	jbsr	sys_fork
	lea	%sp@(24),%sp		| pop switch-stack frame without restoring
	rts
58 | ||
| clone() syscall stub: like __sys_fork, but the C handler also gets a
| pointer to the register frame that sits just above the switch stack.
ENTRY(__sys_clone)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)	| arg: frame above the switch stack (pt_regs — TODO confirm)
	jbsr	m68k_clone
	lea	%sp@(28),%sp		| pop argument (4) + switch-stack frame (24)
	rts
65 | ||
| vfork() syscall stub: identical shape to __sys_fork, different C handler.
ENTRY(__sys_vfork)
	SAVE_SWITCH_STACK
	jbsr	sys_vfork
	lea	%sp@(24),%sp		| pop switch-stack frame without restoring
	rts
71 | ||
| sigreturn() stub: unlike the fork-family stubs the switch stack IS
| restored here, since do_sigreturn may rewrite the saved registers.
ENTRY(sys_sigreturn)
	SAVE_SWITCH_STACK
	jbsr	do_sigreturn
	RESTORE_SWITCH_STACK
	rts
77 | ||
| rt_sigreturn() stub: same pattern as sys_sigreturn for the RT variant.
ENTRY(sys_rt_sigreturn)
	SAVE_SWITCH_STACK
	jbsr	do_rt_sigreturn
	RESTORE_SWITCH_STACK
	rts
83 | ||
| Bus-error exception entry: build a full interrupt register frame and
| hand its address to the C handler buserr_c.
ENTRY(buserr)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	jbsr	buserr_c
	addql	#4,%sp			| pop argument
	jra	ret_from_exception
91 | ||
| Generic trap/exception entry: same shape as buserr, dispatched to trap_c.
ENTRY(trap)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	jbsr	trap_c
	addql	#4,%sp			| pop argument
	jra	ret_from_exception
99 | ||
| After a fork we jump here directly from resume,
| so that %d1 contains the previous task
| schedule_tail now used regardless of CONFIG_SMP
ENTRY(ret_from_fork)
	movel	%d1,%sp@-		| arg: previous task (left in d1 by resume)
	jsr	schedule_tail
	addql	#4,%sp			| pop argument
	jra	ret_from_exception
108 | ||
533e6903 AV |
| First code run in a new kernel thread: finish the context switch,
| then invoke the thread's payload function.
ENTRY(ret_from_kernel_thread)
	| a3 contains the kernel thread payload, d7 - its argument
	movel	%d1,%sp@-		| arg: previous task (from resume)
	jsr	schedule_tail
	movel	%d7,(%sp)		| reuse the stack slot for the payload's argument
	jsr	%a3@			| call the payload function
	addql	#4,%sp			| pop argument
	jra	ret_from_exception	| payload returned: leave via the exception path
117 | ||
14be4252 GU |
118 | #if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU) |
119 | ||
#ifdef TRAP_DBG_INTERRUPT

.globl dbginterrupt
| Debug-interrupt exception (ColdFire/noMMU builds only): pass the saved
| register frame to the C handler dbginterrupt_c.
ENTRY(dbginterrupt)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		/* stack frame pointer argument */
	jsr	dbginterrupt_c
	addql	#4,%sp			/* pop argument */
	jra	ret_from_exception
#endif
131 | ||
| Reschedule path (ColdFire/noMMU): record the exception frame pointer,
| then tail-call schedule with ret_from_exception as its return address.
ENTRY(reschedule)
	/* save top of frame */
	pea	%sp@
	jbsr	set_esp0
	addql	#4,%sp			/* pop argument */
	pea	ret_from_exception	/* schedule "returns" to ret_from_exception */
	jmp	schedule
139 | ||
| Userspace trampoline run after a signal handler returns: issue the
| sigreturn syscall (moveq works because the number fits a signed byte).
ENTRY(ret_from_user_signal)
	moveq	#__NR_sigreturn,%d0
	trap	#0
143 | ||
| RT-signal trampoline: movel rather than moveq — presumably
| __NR_rt_sigreturn exceeds moveq's 8-bit immediate range.
ENTRY(ret_from_user_rt_signal)
	movel	#__NR_rt_sigreturn,%d0
	trap	#0
147 | ||
66d857b0 | 148 | #else |
14be4252 GU |
149 | |
| Syscall entry with tracing enabled: let the tracer observe (and
| possibly rewrite) the call, then re-validate and redispatch it.
do_trace_entry:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
	subql	#4,%sp			| dummy return address
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp			| pop dummy return address
	movel	%sp@(PT_OFF_ORIG_D0),%d0 | reload (possibly rewritten) syscall number
	cmpl	#NR_syscalls,%d0
	jcs	syscall			| in range (unsigned): dispatch it
badsys:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)
	jra	ret_from_syscall
163 | ||
| Syscall exit with tracing enabled: report the result to the tracer.
do_trace_exit:
	subql	#4,%sp			| dummy return address
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp			| pop dummy return address
	jra	.Lret_from_exception
171 | ||
| Return path after signal delivery: notify the tracer if tracing,
| unwind the switch stack, and (on 68040) flush pending writebacks.
ENTRY(ret_from_signal)
	movel	%curptr@(TASK_STACK),%a1
	tstb	%a1@(TINFO_FLAGS+2)	| sign bit = trace flag (same test as system_call)
	jge	1f			| not tracing: skip the tracer call
	jbsr	syscall_trace
1:	RESTORE_SWITCH_STACK
	addql	#4,%sp
	/* on 68040 complete pending writebacks if any */
#ifdef CONFIG_M68040
	bfextu	%sp@(PT_OFF_FORMATVEC){#0,#4},%d0 | frame format nibble
	subql	#7,%d0			| bus error frame ?
	jbne	1f
	movel	%sp,%sp@-
	jbsr	berr_040cleanup
	addql	#4,%sp			| pop argument
1:
#endif
	jra	.Lret_from_exception
190 | ||
| System-call entry (trap #0): d0 holds the syscall number on entry.
| Validates the number, dispatches through sys_call_table, stores the
| return value into the saved d0 slot, then falls into the exit path.
ENTRY(system_call)
	SAVE_ALL_SYS

	GET_CURRENT(%d1)
	movel	%d1,%a1			| a1 = current (value produced by GET_CURRENT)

	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)

	| syscall trace?
	tstb	%a1@(TINFO_FLAGS+2)
	jmi	do_trace_entry		| sign bit set: tracer wants the entry hook
	cmpl	#NR_syscalls,%d0
	jcc	badsys			| out of range (unsigned) -> -ENOSYS
syscall:
	jbsr	@(sys_call_table,%d0:l:4)@(0)	| indirect call via table[d0]
	movel	%d0,%sp@(PT_OFF_D0)	| save the return value
ret_from_syscall:
	|oriw	#0x0700,%sr
	movel	%curptr@(TASK_STACK),%a1
	movew	%a1@(TINFO_FLAGS+2),%d0	| any exit work pending?
	jne	syscall_exit_work
1:	RESTORE_ALL
214 | ||
| Slow syscall-exit path: d0 holds the pending-work flag bits.  Each
| shift moves the next flag of interest into C/N so it can be branched on.
syscall_exit_work:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1b			| if so, skip resched, signals
	lslw	#1,%d0			| next flag -> carry, following one -> sign
	jcs	do_trace_exit
	jmi	do_delayed_trace
	lslw	#8,%d0			| expose the low flag byte
	jne	do_signal_return
	pea	resume_userspace	| schedule "returns" to resume_userspace
	jra	schedule
225 | ||
226 | ||
| Common exception-return path: when heading back to user mode, lower
| the interrupt mask and check for pending work before RESTORE_ALL.
ENTRY(ret_from_exception)
.Lret_from_exception:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1f			| if so, skip resched, signals
	| only allow interrupts when we are really the last one on the
	| kernel stack, otherwise stack overflow can occur during
	| heavy interrupt load
	andw	#ALLOWINT,%sr

resume_userspace:
	movel	%curptr@(TASK_STACK),%a1
	moveb	%a1@(TINFO_FLAGS+3),%d0	| low flag byte only
	jne	exit_work
1:	RESTORE_ALL
241 | ||
| Pending-work dispatch on return to user mode: d0 holds the low flag
| byte loaded at resume_userspace.
exit_work:
	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
	lslb	#1,%d0			| shift next flag into sign/zero
	jne	do_signal_return
	pea	resume_userspace	| schedule "returns" to resume_userspace
	jra	schedule
249 | ||
250 | ||
| Deliver pending signals / notifications, then retry the userspace exit.
do_signal_return:
	|andw	#ALLOWINT,%sr
	subql	#4,%sp			| dummy return address
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)	| arg: frame above the switch stack
	bsrl	do_notify_resume
	addql	#4,%sp			| pop argument
	RESTORE_SWITCH_STACK
	addql	#4,%sp			| pop dummy return address
	jbra	resume_userspace
261 | ||
| Single-step trap was postponed: clear the trace bit in the saved SR
| and raise SIGTRAP on the current task via send_sig(LSIGTRAP, curptr, 1).
do_delayed_trace:
	bclr	#7,%sp@(PT_OFF_SR)	| clear trace bit in SR
	pea	1			| send SIGTRAP
	movel	%curptr,%sp@-
	pea	LSIGTRAP
	addql	#8,%sp			| pop two arguments ...
	addql	#4,%sp			| ... and the third
	jbra	resume_userspace
271 | ||
272 | ||
/* This is the main interrupt handler for autovector interrupts */

ENTRY(auto_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
					| put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
	subw	#VEC_SPUR,%d0		| rebase vector number to IRQ number

	movel	%sp,%sp@-
	movel	%d0,%sp@-		| put vector # on stack
auto_irqhandler_fixup = . + 2	| patch point inside the jsr below — TODO confirm it is the address operand
	jsr	do_IRQ			| process the IRQ
	addql	#8,%sp			| pop parameters off stack
	jra	ret_from_exception
14be4252 GU |
288 | |
/* Handler for user defined interrupt vectors */

ENTRY(user_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
					| put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
user_irqvec_fixup = . + 2		| patch point inside the subw below — TODO confirm it is the immediate
	subw	#VEC_USER,%d0

	movel	%sp,%sp@-
	movel	%d0,%sp@-		| put vector # on stack
	jsr	do_IRQ			| process the IRQ
	addql	#8,%sp			| pop parameters off stack
	jra	ret_from_exception
14be4252 GU |
304 | |
/* Handler for uninitialized and spurious interrupts */

ENTRY(bad_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)

	movel	%sp,%sp@-		| arg: saved register frame
	jsr	handle_badint
	addql	#4,%sp			| pop argument
	jra	ret_from_exception
14be4252 GU |
315 | |
| Context switch: save prev's (a0) privileged state, FP context and
| switch stack, swap curptr to next (a1), then restore next's state.
| Returns (rts) on next's kernel stack; prev is left in d1 for
| schedule_tail (see ret_from_fork / ret_from_kernel_thread).
resume:
	/*
	 * Beware - when entering resume, prev (the current task) is
	 * in a0, next (the new task) is in a1, so don't change these
	 * registers until their contents are no longer needed.
	 */

	/* save sr */
	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)

	/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
	movec	%sfc,%d0
	movew	%d0,%a0@(TASK_THREAD+THREAD_FS)

	/* save usp */
	/* it is better to use a movel here instead of a movew 8*) */
	movec	%usp,%d0
	movel	%d0,%a0@(TASK_THREAD+THREAD_USP)

	/* save non-scratch registers on stack */
	SAVE_SWITCH_STACK

	/* save current kernel stack pointer */
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP)

	/* save floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype		| no FPU hardware: skip the FP save entirely
	jeq	3f
#endif
	fsave	%a0@(TASK_THREAD+THREAD_FPSTATE)

#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3	| 68060? (bit test on cputype — TODO confirm bit meaning)
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f			| null frame: nothing further to save
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f			| null frame: nothing further to save
#endif
2:	fmovemx	%fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
	fmoveml	%fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3:
#endif /* CONFIG_M68KFPU_EMU_ONLY */
	/* Return previous task in %d1 */
	movel	%curptr,%d1

	/* switch to new task (a1 contains new task) */
	movel	%a1,%curptr

	/* restore floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype		| no FPU hardware: skip the FP restore
	jeq	4f
#endif
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f			| null frame: only frestore needed
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f			| null frame: only frestore needed
#endif
2:	fmovemx	%a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
	fmoveml	%a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
3:	frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
4:
#endif /* CONFIG_M68KFPU_EMU_ONLY */

	/* restore the kernel stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp

	/* restore non-scratch registers */
	RESTORE_SWITCH_STACK

	/* restore user stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_USP),%a0
	movel	%a0,%usp

	/* restore fs (sfc,%dfc) */
	movew	%a1@(TASK_THREAD+THREAD_FS),%a0
	movec	%a0,%sfc
	movec	%a0,%dfc

	/* restore status register */
	movew	%a1@(TASK_THREAD+THREAD_SR),%sr

	rts
422 | ||
423 | #endif /* CONFIG_MMU && !CONFIG_COLDFIRE */ |