]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - arch/m68knommu/platform/5307/entry.S
Linux-2.6.12-rc2
[mirror_ubuntu-artful-kernel.git] / arch / m68knommu / platform / 5307 / entry.S
1 /*
2 * linux/arch/m68knommu/platform/5307/entry.S
3 *
4 * Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
5 * Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
6 * Kenneth Albanowski <kjahds@kjahds.com>,
7 * Copyright (C) 2000 Lineo Inc. (www.lineo.com)
8 * Copyright (C) 2004 Macq Electronique SA. (www.macqel.com)
9 *
10 * Based on:
11 *
12 * linux/arch/m68k/kernel/entry.S
13 *
14 * Copyright (C) 1991, 1992 Linus Torvalds
15 *
16 * This file is subject to the terms and conditions of the GNU General Public
17 * License. See the file README.legal in the main directory of this archive
18 * for more details.
19 *
20 * Linux/m68k support by Hamish Macdonald
21 *
22 * 68060 fixes by Jesper Skov
23 * ColdFire support by Greg Ungerer (gerg@snapgear.com)
24 * 5307 fixes by David W. Miller
25 * linux 2.4 support David McCullough <davidm@snapgear.com>
26 * Bug, speed and maintainability fixes by Philippe De Muyter <phdm@macqel.be>
27 */
28
29 #include <linux/config.h>
30 #include <linux/sys.h>
31 #include <linux/linkage.h>
32 #include <asm/unistd.h>
33 #include <asm/thread_info.h>
34 #include <asm/errno.h>
35 #include <asm/setup.h>
36 #include <asm/segment.h>
37 #include <asm/asm-offsets.h>
38 #include <asm/entry.h>
39
40 .bss

/*
 * Software copies of the kernel and user stack pointers.  The entry
 * and exit code below swaps between the two stacks by hand (see
 * Lreturn), keeping the currently inactive pointer in these words.
 */
41

42 sw_ksp:
43 .long 0 /* saved kernel stack pointer */

45 sw_usp:
46 .long 0 /* saved user stack pointer */

47 .text

/* Entry points referenced from elsewhere in the kernel. */
50 .globl system_call
51 .globl resume
52 .globl ret_from_exception
53 .globl ret_from_signal
54 .globl sys_call_table
55 .globl ret_from_interrupt
56 .globl inthandler
57 .globl fasthandler
/*
 * System call entry point.  On entry d0 holds the syscall number;
 * SAVE_ALL (defined in asm/entry.h, not visible here) builds the
 * pt_regs frame on the kernel stack.  The d0 slot of the frame is
 * preloaded with -ENOSYS so that any early bail-out returns that
 * error to the caller.
 */
59 ENTRY(system_call)
60 SAVE_ALL
61 move #0x2000,%sr /* enable intrs again */

63 movel #-LENOSYS,%d2
64 movel %d2,PT_D0(%sp) /* default return value in d0 */
65 /* original D0 is in orig_d0 */
66 movel %d0,%d2 /* d2 = syscall number */

68 /* save top of frame */
69 pea %sp@
70 jbsr set_esp0
71 addql #4,%sp

/*
 * Range-check the syscall number, then fetch the handler address
 * from sys_call_table into d3.  jcc (carry clear == unsigned >=)
 * rejects out-of-range numbers; jeq rejects a NULL table entry
 * (the movel just set the condition codes).
 */
73 cmpl #NR_syscalls,%d2
74 jcc ret_from_exception
75 lea sys_call_table,%a0
76 lsll #2,%d2 /* movel %a0@(%d2:l:4),%d3 */
77 movel %a0@(%d2),%d3
78 jeq ret_from_exception
79 lsrl #2,%d2 /* restore d2 to the plain syscall number */

81 movel %sp,%d2 /* get thread_info pointer */
82 andl #-THREAD_SIZE,%d2 /* at start of kernel stack */
83 movel %d2,%a0
84 btst #TIF_SYSCALL_TRACE,%a0@(TI_FLAGS)
85 bnes 1f /* traced task: take the slow path below */

/* Fast path: call the handler and store its result in the frame. */
87 movel %d3,%a0
88 jbsr %a0@
89 movel %d0,%sp@(PT_D0) /* save the return value */
90 jra ret_from_exception
91 1:
/*
 * Traced path: notify the tracer before and after the call.
 * SAVE_SWITCH_STACK/RESTORE_SWITCH_STACK (asm/entry.h) preserve the
 * callee-saved registers around syscall_trace; the subql makes room
 * for a dummy return-address slot the switch-stack layout expects.
 */
92 subql #4,%sp
93 SAVE_SWITCH_STACK
94 jbsr syscall_trace
95 RESTORE_SWITCH_STACK
96 addql #4,%sp
97 movel %d3,%a0
98 jbsr %a0@
99 movel %d0,%sp@(PT_D0) /* save the return value */
100 subql #4,%sp /* dummy return address */
101 SAVE_SWITCH_STACK
102 jbsr syscall_trace
/* falls through into ret_from_signal below */
/*
 * Common return paths.  ret_from_signal unwinds the switch-stack
 * built by the traced-syscall/signal code and falls through into
 * ret_from_exception, which chooses between a kernel-mode and a
 * user-mode return based on the S (supervisor) bit of the saved
 * status register.
 */
104 ret_from_signal:
105 RESTORE_SWITCH_STACK
106 addql #4,%sp

108 ret_from_exception:
/*
 * Bit 5 of the byte at PT_SR is the SR supervisor bit (big-endian:
 * the byte access reads the high byte of the 16-bit SR).
 */
109 btst #5,%sp@(PT_SR) /* check if returning to kernel */
110 jeq Luser_return /* no (S clear): user return, check resched/signals */

112 Lkernel_return:
/* Restore d1-d5/a0-a2, pop d0, skip orig_d0, apply stack adjust, rte. */
113 moveml %sp@,%d1-%d5/%a0-%a2
114 lea %sp@(32),%sp /* space for 8 regs */
115 movel %sp@+,%d0
116 addql #4,%sp /* orig d0 */
117 addl %sp@+,%sp /* stk adj */
118 rte

120 Luser_return:
121 movel %sp,%d1 /* get thread_info pointer */
122 andl #-THREAD_SIZE,%d1 /* at base of kernel stack */
123 movel %d1,%a0
124 movel %a0@(TI_FLAGS),%d1 /* get thread_info->flags */
125 andl #_TIF_WORK_MASK,%d1
126 jne Lwork_to_do /* still work to do */

128 Lreturn:
/*
 * Final user-mode exit.  With interrupts masked, copy the 8-byte
 * exception frame (PC, then format/vector/SR word) from the kernel
 * stack onto the user stack, restore the registers, record the now
 * empty kernel stack in sw_ksp, point sp at the copied frame on the
 * user stack (sw_usp is adjusted by 8 to account for the two pushes
 * above, since a0 was not written back), and rte from there.
 */
129 move #0x2700,%sr /* disable intrs */
130 movel sw_usp,%a0 /* get usp */
131 movel %sp@(PT_PC),%a0@- /* copy exception program counter */
132 movel %sp@(PT_FORMATVEC),%a0@-/* copy exception format/vector/sr */
133 moveml %sp@,%d1-%d5/%a0-%a2
134 lea %sp@(32),%sp /* space for 8 regs */
135 movel %sp@+,%d0
136 addql #4,%sp /* orig d0 */
137 addl %sp@+,%sp /* stk adj */
138 addql #8,%sp /* remove exception */
139 movel %sp,sw_ksp /* save ksp */
140 subql #8,sw_usp /* set exception */
141 movel sw_usp,%sp /* restore usp */
142 rte

144 Lwork_to_do:
145 movel %a0@(TI_FLAGS),%d1 /* get thread_info->flags */
146 btst #TIF_NEED_RESCHED,%d1
147 jne reschedule

149 /* GERG: do we need something here for TRACEing?? */

151 Lsignal_return:
/*
 * Deliver pending signals: build a switch-stack (with a dummy
 * return-address slot) and call do_signal(0, regs) -- the zero
 * pushed last is the first C argument; presumably the oldset
 * pointer.  Then unwind and take the normal exit path.
 */
152 subql #4,%sp /* dummy return address */
153 SAVE_SWITCH_STACK
154 pea %sp@(SWITCH_STACK_SIZE)
155 clrl %sp@-
156 jsr do_signal
157 addql #8,%sp
158 RESTORE_SWITCH_STACK
159 addql #4,%sp
160 jmp Lreturn
161
162 /*
163 * This is the generic interrupt handler (for all hardware interrupt
164 * sources). It figures out the vector number and calls the appropriate
165 * interrupt service routine directly.
166 */
167 ENTRY(inthandler)
168 SAVE_ALL
169 moveq #-1,%d0
170 movel %d0,%sp@(PT_ORIG_D0) /* mark frame as not-a-syscall */
171 addql #1,local_irq_count /* track interrupt nesting depth */

/*
 * The low bits of the saved format/vector word hold vector*4; the
 * 0x03fc mask isolates that field, which doubles as a ready-made
 * 32-bit array offset for the per-vector kstat counter.
 */
173 movew %sp@(PT_FORMATVEC),%d0 /* put exception # in d0 */
174 andl #0x03fc,%d0 /* mask out vector only */

176 leal per_cpu__kstat+STAT_IRQ,%a0
177 addql #1,%a0@(%d0) /* bump per-vector irq statistic */

/*
 * irq_list entries are 16 bytes each: handler function at offset 0,
 * dev_id at offset 8 (as used below).
 */
179 lsrl #2,%d0 /* calculate real vector # */
180 movel %d0,%d1 /* calculate array offset */
181 lsll #4,%d1
182 lea irq_list,%a0
183 addl %d1,%a0 /* pointer to array struct */

/* Build the C argument list: handler(vector, dev_id, regs). */
185 movel %sp,%sp@- /* push regs arg onto stack */
186 movel %a0@(8),%sp@- /* push devid arg */
187 movel %d0,%sp@- /* push vector # on stack */

189 movel %a0@,%a0 /* get function to call */
190 jbsr %a0@ /* call vector handler */
191 lea %sp@(12),%sp /* pop parameters off stack */

193 bra ret_from_interrupt /* this was fallthrough */
194
195 /*
196 * This is the fast interrupt handler (for certain hardware interrupt
197 * sources). Unlike the normal interrupt handler it just uses the
198 * current stack (doesn't care if it is user or kernel). It also
199 * doesn't bother doing the bottom half handlers.
200 */
201 ENTRY(fasthandler)
/* SAVE_LOCAL/RESTORE_LOCAL (asm/entry.h) build/unwind a reduced frame. */
202 SAVE_LOCAL

/* Vector*4 field of the format/vector word, used directly as offset. */
204 movew %sp@(PT_FORMATVEC),%d0
205 andl #0x03fc,%d0 /* mask out vector only */

207 leal per_cpu__kstat+STAT_IRQ,%a0
208 addql #1,%a0@(%d0) /* bump per-vector irq statistic */

/* Build handler(vector, NULL, regs) -- fast handlers get no dev_id. */
210 movel %sp,%sp@- /* push regs arg onto stack */
211 clrl %sp@- /* push devid arg */
212 lsrl #2,%d0 /* calculate real vector # */
213 movel %d0,%sp@- /* push vector # on stack */

/* irq_list entries are 16 bytes; handler pointer is at offset 0. */
215 lsll #4,%d0 /* adjust for array offset */
216 lea irq_list,%a0
217 movel %a0@(%d0),%a0 /* get function to call */
218 jbsr %a0@ /* call vector handler */
219 lea %sp@(12),%sp /* pop parameters off stack */

221 RESTORE_LOCAL
222
/*
 * Exit path for the generic interrupt handler.  While interrupts are
 * still nested (local_irq_count != 0) just restore and rte.  At the
 * outermost level, inspect the interrupted context's interrupt
 * priority level (low 3 bits of the SR high byte -- the moveb reads
 * the high byte on this big-endian CPU): if it was running with a
 * raised IPL, restore directly; otherwise run any pending softirqs
 * first.  The pea/jmp pair calls do_softirq with ret_from_exception
 * as its return address.
 */
223 ENTRY(ret_from_interrupt)
224 subql #1,local_irq_count
225 jeq 2f
226 1:
227 RESTORE_ALL
228 2:
229 moveb %sp@(PT_SR),%d0
230 andl #0x7,%d0 /* d0 = interrupted context's IPL */
231 jhi 1b

233 /* check if we need to do software interrupts */
234 movel irq_stat+CPUSTAT_SOFTIRQ_PENDING,%d0
235 jeq ret_from_exception

237 pea ret_from_exception
238 jmp do_softirq
239
240 /*
241 * Beware - when entering resume, prev (the current task) is
242 * in a0, next (the new task) is in a1,so don't change these
243 * registers until their contents are no longer needed.
244 */
245 ENTRY(resume)
246 movel %a0, %d1 /* get prev thread in d1 */

/* Save the outgoing thread's status register, then mask interrupts. */
248 movew %sr,%d0 /* save thread status reg */
249 movew %d0,%a0@(TASK_THREAD+THREAD_SR)

251 oril #0x700,%d0 /* disable interrupts */
252 move %d0,%sr

/* Save the outgoing thread's user stack pointer from sw_usp. */
254 movel sw_usp,%d0 /* save usp */
255 movel %d0,%a0@(TASK_THREAD+THREAD_USP)

/*
 * Save the callee context on prev's kernel stack, switch kernel
 * stacks, and restore next's callee context.  SAVE_SWITCH_STACK /
 * RESTORE_SWITCH_STACK come from asm/entry.h.
 */
257 SAVE_SWITCH_STACK
258 movel %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack pointer */
259 movel %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */
260 RESTORE_SWITCH_STACK

262 movel %a1@(TASK_THREAD+THREAD_USP),%a0 /* restore thread user stack */
263 movel %a0, sw_usp

/* Restoring SR also re-enables next's saved interrupt level. */
265 movew %a1@(TASK_THREAD+THREAD_SR),%d0 /* restore thread status reg */
266 movew %d0, %sr
267 rts /* return into next's saved context */
268