]>
Commit | Line | Data |
---|---|---|
14be4252 GU |
1 | /* -*- mode: asm -*- |
2 | * | |
3 | * linux/arch/m68k/kernel/entry.S | |
4 | * | |
5 | * Copyright (C) 1991, 1992 Linus Torvalds | |
6 | * | |
7 | * This file is subject to the terms and conditions of the GNU General Public | |
8 | * License. See the file README.legal in the main directory of this archive | |
9 | * for more details. | |
10 | * | |
11 | * Linux/m68k support by Hamish Macdonald | |
12 | * | |
13 | * 68060 fixes by Jesper Skov | |
14 | * | |
15 | */ | |
16 | ||
17 | /* | |
18 | * entry.S contains the system-call and fault low-level handling routines. | |
19 | * This also contains the timer-interrupt handler, as well as all interrupts | |
20 | * and faults that can result in a task-switch. | |
21 | * | |
22 | * NOTE: This code handles signal-recognition, which happens every time | |
23 | * after a timer-interrupt and after each system call. | |
24 | * | |
25 | */ | |
26 | ||
27 | /* | |
28 | * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so | |
29 | * all pointers that used to be 'current' are now entry | |
30 | * number 0 in the 'current_set' list. | |
31 | * | |
32 | * 6/05/00 RZ: added writeback completion after return from sighandler | |
33 | * for 68040 | |
34 | */ | |
35 | ||
36 | #include <linux/linkage.h> | |
37 | #include <asm/errno.h> | |
38 | #include <asm/setup.h> | |
39 | #include <asm/segment.h> | |
40 | #include <asm/traps.h> | |
41 | #include <asm/unistd.h> | |
42 | #include <asm/asm-offsets.h> | |
43 | #include <asm/entry.h> | |
44 | ||
| Symbols exported from this file (or, for sys_call_table, referenced
| here and defined elsewhere in the architecture code).
.globl system_call, buserr, trap, resume
.globl sys_call_table
.globl sys_fork, sys_clone, sys_vfork
.globl ret_from_interrupt, bad_interrupt
.globl auto_irqhandler_fixup
.globl user_irqvec_fixup

.text
| sys_fork: system-call stub. Saves the remaining (non-scratch)
| registers so the C helper sees a complete register set, then calls
| m68k_fork() with a pointer to the saved pt_regs as its argument.
ENTRY(sys_fork)
	SAVE_SWITCH_STACK		| push switch-stack regs (macro, asm/entry.h)
	pea %sp@(SWITCH_STACK_SIZE)	| arg: pt_regs, just above the switch stack
	jbsr m68k_fork
	addql #4,%sp			| pop the argument
	RESTORE_SWITCH_STACK
	rts
60 | ||
| sys_clone: system-call stub; same shape as sys_fork, but dispatches
| to m68k_clone().
ENTRY(sys_clone)
	SAVE_SWITCH_STACK		| push switch-stack regs (macro, asm/entry.h)
	pea %sp@(SWITCH_STACK_SIZE)	| arg: pt_regs, just above the switch stack
	jbsr m68k_clone
	addql #4,%sp			| pop the argument
	RESTORE_SWITCH_STACK
	rts
68 | ||
| sys_vfork: system-call stub; same shape as sys_fork, but dispatches
| to m68k_vfork().
ENTRY(sys_vfork)
	SAVE_SWITCH_STACK		| push switch-stack regs (macro, asm/entry.h)
	pea %sp@(SWITCH_STACK_SIZE)	| arg: pt_regs, just above the switch stack
	jbsr m68k_vfork
	addql #4,%sp			| pop the argument
	RESTORE_SWITCH_STACK
	rts
76 | ||
| sys_sigreturn: save the switch stack and let the C handler
| do_sigreturn() restore the user context saved at signal delivery.
ENTRY(sys_sigreturn)
	SAVE_SWITCH_STACK
	jbsr do_sigreturn
	RESTORE_SWITCH_STACK
	rts
82 | ||
| sys_rt_sigreturn: as sys_sigreturn, for realtime signal frames
| (do_rt_sigreturn()).
ENTRY(sys_rt_sigreturn)
	SAVE_SWITCH_STACK
	jbsr do_rt_sigreturn
	RESTORE_SWITCH_STACK
	rts
88 | ||
| buserr: bus-error exception entry point. Builds a full exception
| frame and passes its address to the C handler buserr_c().
ENTRY(buserr)
	SAVE_ALL_INT			| save full exception frame (macro, asm/entry.h)
	GET_CURRENT(%d0)		| load 'current' (macro; uses %d0 as scratch)
	movel %sp,%sp@-			| stack frame pointer argument
	jbsr buserr_c
	addql #4,%sp			| pop the argument
	jra ret_from_exception
96 | ||
| trap: generic processor-exception entry point. Same shape as buserr,
| dispatching to the C handler trap_c().
ENTRY(trap)
	SAVE_ALL_INT			| save full exception frame (macro, asm/entry.h)
	GET_CURRENT(%d0)		| load 'current' (macro; uses %d0 as scratch)
	movel %sp,%sp@-			| stack frame pointer argument
	jbsr trap_c
	addql #4,%sp			| pop the argument
	jra ret_from_exception
104 | ||
| After a fork we jump here directly from resume,
| so that %d1 contains the previous task
| schedule_tail now used regardless of CONFIG_SMP
ENTRY(ret_from_fork)
	movel %d1,%sp@-			| arg: previous task (from resume)
	jsr schedule_tail
	addql #4,%sp			| pop the argument
	jra ret_from_exception
113 | ||
| ret_from_kernel_thread: first code run by a new kernel thread.
| Like ret_from_fork, but instead of returning to user space it calls
| the thread payload and then exits with the payload's return value.
ENTRY(ret_from_kernel_thread)
	| a3 contains the kernel thread payload, d7 - its argument
	movel %d1,%sp@-			| arg for schedule_tail: previous task
	jsr schedule_tail
	GET_CURRENT(%d0)
	movel %d7,(%sp)			| reuse the stack slot as the payload's argument
	jsr %a3@			| call payload(d7)
	addql #4,%sp
	movel %d0,(%sp)			| payload's return value becomes the exit code
	jra sys_exit
124 | ||
| The nommu/ColdFire half of this file: simplified return paths without
| the MMU work-flag machinery below.
#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)

#ifdef TRAP_DBG_INTERRUPT

| dbginterrupt: debug-interrupt entry; hands the frame pointer to the
| C handler dbginterrupt_c().
.globl dbginterrupt
ENTRY(dbginterrupt)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel %sp,%sp@- /* stack frame pointer argument */
	jsr dbginterrupt_c
	addql #4,%sp			| pop the argument
	jra ret_from_exception
#endif
138 | ||
| reschedule: record the current frame pointer via set_esp0(), then
| tail-call schedule() with ret_from_exception pushed as its return
| address, so the schedule() call returns straight into the exit path.
ENTRY(reschedule)
	/* save top of frame */
	pea %sp@
	jbsr set_esp0
	addql #4,%sp			| pop the argument
	pea ret_from_exception		| fake return address for schedule()
	jmp schedule
146 | ||
| User-mode trampolines: after a signal handler returns, issue the
| (rt_)sigreturn system call via trap #0.
ENTRY(ret_from_user_signal)
	moveq #__NR_sigreturn,%d0
	trap #0

ENTRY(ret_from_user_rt_signal)
	movel #__NR_rt_sigreturn,%d0	| movel: value presumably exceeds moveq's 8-bit range
	trap #0
154 | ||
#else

| do_trace_entry: syscall-entry tracing path. Reports the syscall to
| the tracer, then re-reads the (possibly tracer-modified) syscall
| number and re-validates it before dispatch.
do_trace_entry:
	movel #-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
	subql #4,%sp			| dummy return address slot
	SAVE_SWITCH_STACK
	jbsr syscall_trace
	RESTORE_SWITCH_STACK
	addql #4,%sp			| drop dummy return address
	movel %sp@(PT_OFF_ORIG_D0),%d0	| reload syscall number
	cmpl #NR_syscalls,%d0
	jcs syscall			| in range (unsigned) -> dispatch
badsys:
	movel #-ENOSYS,%sp@(PT_OFF_D0)	| invalid syscall number
	jra ret_from_syscall
170 | ||
| do_trace_exit: syscall-exit tracing path; reports completion to the
| tracer, then joins the common exception return path.
do_trace_exit:
	subql #4,%sp			| dummy return address slot
	SAVE_SWITCH_STACK
	jbsr syscall_trace
	RESTORE_SWITCH_STACK
	addql #4,%sp			| drop dummy return address
	jra .Lret_from_exception
178 | ||
| ret_from_signal: return path after signal delivery. Notifies the
| tracer if the trace flag is set, unwinds the switch stack, and (on
| 68040) completes any pending write-backs left in a bus-error frame.
ENTRY(ret_from_signal)
	movel %curptr@(TASK_STACK),%a1	| %a1 = thread_info
	tstb %a1@(TINFO_FLAGS+2)	| test trace-flag byte
	jge 1f				| msb clear -> not traced
	jbsr syscall_trace
1:	RESTORE_SWITCH_STACK
	addql #4,%sp
	/* on 68040 complete pending writebacks if any */
#ifdef CONFIG_M68040
	bfextu %sp@(PT_OFF_FORMATVEC){#0,#4},%d0	| frame-format nibble
	subql #7,%d0			| bus error frame ?
	jbne 1f
	movel %sp,%sp@-			| arg: frame pointer
	jbsr berr_040cleanup
	addql #4,%sp			| pop the argument
1:
#endif
	jra .Lret_from_exception
197 | ||
| system_call: trap #0 entry. Saves the frame, takes the trace path if
| the task is being traced, validates the syscall number in %d0 and
| dispatches through sys_call_table; falls through to the common
| syscall exit, which handles pending work flags before RESTORE_ALL.
ENTRY(system_call)
	SAVE_ALL_SYS

	GET_CURRENT(%d1)
	movel %d1,%a1			| %a1 = current

	| save top of frame
	movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)

	| syscall trace?
	tstb %a1@(TINFO_FLAGS+2)
	jmi do_trace_entry		| trace flag set (msb of that byte)
	cmpl #NR_syscalls,%d0
	jcc badsys			| out of range (unsigned compare)
syscall:
	jbsr @(sys_call_table,%d0:l:4)@(0)	| memory-indirect call: sys_call_table[d0]
	movel %d0,%sp@(PT_OFF_D0)	| save the return value
ret_from_syscall:
	|oriw #0x0700,%sr
	movel %curptr@(TASK_STACK),%a1	| %a1 = thread_info
	movew %a1@(TINFO_FLAGS+2),%d0	| pending work flags?
	jne syscall_exit_work
1:	RESTORE_ALL

| syscall_exit_work: %d0 holds the flag word; shifts move individual
| flags into the carry/sign bits to pick the handler.
syscall_exit_work:
	btst #5,%sp@(PT_OFF_SR)		| check if returning to kernel
	bnes 1b				| if so, skip resched, signals
	lslw #1,%d0			| flag -> carry / sign
	jcs do_trace_exit
	jmi do_delayed_trace
	lslw #8,%d0
	jne do_signal_return
	pea resume_userspace		| return address for schedule()
	jra schedule
232 | ||
233 | ||
| ret_from_exception: common exception/interrupt return path. When
| returning to user mode, re-enables interrupts and checks the work
| flags (signals, reschedule) before restoring the frame.
ENTRY(ret_from_exception)
.Lret_from_exception:
	btst #5,%sp@(PT_OFF_SR)		| check if returning to kernel
	bnes 1f				| if so, skip resched, signals
	| only allow interrupts when we are really the last one on the
	| kernel stack, otherwise stack overflow can occur during
	| heavy interrupt load
	andw #ALLOWINT,%sr

resume_userspace:
	movel %curptr@(TASK_STACK),%a1	| %a1 = thread_info
	moveb %a1@(TINFO_FLAGS+3),%d0	| low work-flag byte
	jne exit_work
1:	RESTORE_ALL

| exit_work: dispatch pending user-return work (signal vs. reschedule).
exit_work:
	| save top of frame
	movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
	lslb #1,%d0			| signal flag -> tested by jne
	jne do_signal_return
	pea resume_userspace		| return address for schedule()
	jra schedule
256 | ||
257 | ||
| do_signal_return: deliver pending signals / notifications by calling
| do_notify_resume() with a pointer to the saved registers, then
| recheck the work flags.
do_signal_return:
	|andw #ALLOWINT,%sr
	subql #4,%sp			| dummy return address
	SAVE_SWITCH_STACK
	pea %sp@(SWITCH_STACK_SIZE)	| arg: pt_regs above the switch stack
	bsrl do_notify_resume
	addql #4,%sp			| pop the argument
	RESTORE_SWITCH_STACK
	addql #4,%sp			| drop dummy return address
	jbra resume_userspace
268 | ||
| do_delayed_trace: single-step trap deferred to syscall exit. Clears
| the trace bit and delivers SIGTRAP via send_sig(LSIGTRAP, current, 1).
do_delayed_trace:
	bclr #7,%sp@(PT_OFF_SR)		| clear trace bit in SR
	pea 1				| send SIGTRAP
	movel %curptr,%sp@-		| arg: current task
	pea LSIGTRAP
	jbsr send_sig
	addql #8,%sp			| pop two arguments
	addql #4,%sp			| pop the third
	jbra resume_userspace
278 | ||
279 | ||
/* This is the main interrupt handler for autovector interrupts */

ENTRY(auto_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel %d0,%a1
	addqb #1,%a1@(TINFO_PREEMPT+1)	| bump interrupt nesting count byte
	| put exception # in d0
	bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
	subw #VEC_SPUR,%d0		| rebase vector number

	movel %sp,%sp@-			| arg: frame pointer
	movel %d0,%sp@-			| put vector # on stack
auto_irqhandler_fixup = . + 2		| patch point inside the jsr's operand
	jsr do_IRQ			| process the IRQ
	addql #8,%sp			| pop parameters off stack

| ret_from_interrupt: undo the nesting count; only the outermost
| interrupt takes the slow exit below.
ret_from_interrupt:
	movel %curptr@(TASK_STACK),%a1
	subqb #1,%a1@(TINFO_PREEMPT+1)	| drop nesting count
	jeq ret_from_last_interrupt	| outermost interrupt?
2:	RESTORE_ALL

	ALIGN
| ret_from_last_interrupt: if the interrupted context ran at an
| elevated interrupt level, just restore; otherwise run pending
| softirqs before the common exception return.
ret_from_last_interrupt:
	moveq #(~ALLOWINT>>8)&0xff,%d0	| mask of blocked-IPL bits in SR high byte
	andb %sp@(PT_OFF_SR),%d0
	jne 2b				| interrupted at raised IPL -> plain restore

	/* check if we need to do software interrupts */
	tstl irq_stat+CPUSTAT_SOFTIRQ_PENDING
	jeq .Lret_from_exception
	pea ret_from_exception		| return address for do_softirq()
	jra do_softirq
314 | ||
/* Handler for user defined interrupt vectors */

ENTRY(user_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel %d0,%a1
	addqb #1,%a1@(TINFO_PREEMPT+1)	| bump interrupt nesting count byte
	| put exception # in d0
	bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
user_irqvec_fixup = . + 2		| patch point inside the subw's immediate
	subw #VEC_USER,%d0		| rebase vector number

	movel %sp,%sp@-			| arg: frame pointer
	movel %d0,%sp@-			| put vector # on stack
	jsr do_IRQ			| process the IRQ
	addql #8,%sp			| pop parameters off stack

	movel %curptr@(TASK_STACK),%a1
	subqb #1,%a1@(TINFO_PREEMPT+1)	| drop nesting count
	jeq ret_from_last_interrupt	| outermost interrupt -> slow exit
	RESTORE_ALL
336 | ||
/* Handler for uninitialized and spurious interrupts */

ENTRY(bad_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel %d0,%a1
	addqb #1,%a1@(TINFO_PREEMPT+1)	| bump interrupt nesting count byte

	movel %sp,%sp@-			| arg: frame pointer
	jsr handle_badint
	addql #4,%sp			| pop the argument

	movel %curptr@(TASK_STACK),%a1
	subqb #1,%a1@(TINFO_PREEMPT+1)	| drop nesting count
	jeq ret_from_last_interrupt	| outermost interrupt -> slow exit
	RESTORE_ALL
353 | ||
354 | ||
| resume: the low-level context switch. Saves SR, fs (sfc/dfc), usp,
| the switch stack and the FPU state of prev (%a0), then loads the
| same state for next (%a1). Returns with %d1 = previous task so
| ret_from_fork/ret_from_kernel_thread can pass it to schedule_tail.
resume:
	/*
	 * Beware - when entering resume, prev (the current task) is
	 * in a0, next (the new task) is in a1,so don't change these
	 * registers until their contents are no longer needed.
	 */

	/* save sr */
	movew %sr,%a0@(TASK_THREAD+THREAD_SR)

	/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
	movec %sfc,%d0
	movew %d0,%a0@(TASK_THREAD+THREAD_FS)

	/* save usp */
	/* it is better to use a movel here instead of a movew 8*) */
	movec %usp,%d0
	movel %d0,%a0@(TASK_THREAD+THREAD_USP)

	/* save non-scratch registers on stack */
	SAVE_SWITCH_STACK

	/* save current kernel stack pointer */
	movel %sp,%a0@(TASK_THREAD+THREAD_KSP)

	/* save floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl m68k_fputype		| no FPU hardware -> skip FP save
	jeq 3f
#endif
	fsave %a0@(TASK_THREAD+THREAD_FPSTATE)

#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst #3,m68k_cputype+3		| running on a 68060?
	beqs 1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb %a0@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq 3f				| NULL frame -> nothing more to save
#if !defined(CPU_M68060_ONLY)
	jra 2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb %a0@(TASK_THREAD+THREAD_FPSTATE)	| non-060: status in first byte
	jeq 3f				| NULL frame -> nothing more to save
#endif
2:	fmovemx %fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
	fmoveml %fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3:
#endif /* CONFIG_M68KFPU_EMU_ONLY */
	/* Return previous task in %d1 */
	movel %curptr,%d1

	/* switch to new task (a1 contains new task) */
	movel %a1,%curptr

	/* restore floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl m68k_fputype		| no FPU hardware -> skip FP restore
	jeq 4f
#endif
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst #3,m68k_cputype+3		| running on a 68060?
	beqs 1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb %a1@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq 3f				| NULL frame -> frestore only
#if !defined(CPU_M68060_ONLY)
	jra 2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb %a1@(TASK_THREAD+THREAD_FPSTATE)	| non-060: status in first byte
	jeq 3f				| NULL frame -> frestore only
#endif
2:	fmovemx %a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
	fmoveml %a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
3:	frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
4:
#endif /* CONFIG_M68KFPU_EMU_ONLY */

	/* restore the kernel stack pointer */
	movel %a1@(TASK_THREAD+THREAD_KSP),%sp

	/* restore non-scratch registers */
	RESTORE_SWITCH_STACK

	/* restore user stack pointer */
	movel %a1@(TASK_THREAD+THREAD_USP),%a0
	movel %a0,%usp

	/* restore fs (sfc,%dfc) */
	movew %a1@(TASK_THREAD+THREAD_FS),%a0
	movec %a0,%sfc
	movec %a0,%dfc

	/* restore status register */
	movew %a1@(TASK_THREAD+THREAD_SR),%sr

	rts

#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */