/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <mach/entry-macro.S>

#include "entry-header.S"

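/*
 * Register aliases used throughout this file (defined with .req in
 * entry-header.S; listed here for reference):
 *
 *	scno	r7	@ syscall number
 *	tbl	r8	@ syscall table pointer
 *	why	r8	@ flag: != 0 means this entry was a real syscall
 *	tsk	r9	@ current thread_info
 */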
        .align  5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.
 */
ret_fast_syscall:
        disable_irq                             @ disable interrupts
        ldr     r1, [tsk, #TI_FLAGS]
        tst     r1, #_TIF_WORK_MASK
        bne     fast_work_pending

        /* perform architecture specific actions before user return */
        arch_ret_to_user r1, lr

        @ fast_restore_user_regs
        ldr     r1, [sp, #S_OFF + S_PSR]        @ get calling cpsr
        ldr     lr, [sp, #S_OFF + S_PC]!        @ get pc
        msr     spsr_cxsf, r1                   @ save in spsr_svc
        ldmdb   sp, {r1 - lr}^                  @ get calling r1 - lr
        mov     r0, r0                          @ nop: no banked register
                                                @ access directly after ldm {..}^
        add     sp, sp, #S_FRAME_SIZE - S_PC
        movs    pc, lr                          @ return & move spsr_svc into cpsr

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
        str     r0, [sp, #S_R0+S_OFF]!          @ returned r0
work_pending:
        tst     r1, #_TIF_NEED_RESCHED
        bne     work_resched
        tst     r1, #_TIF_SIGPENDING
        beq     no_work_pending
        mov     r0, sp                          @ 'regs'
        mov     r2, why                         @ 'syscall'
        bl      do_notify_resume
        b       ret_slow_syscall                @ Check work again

work_resched:
        bl      schedule
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
        disable_irq                             @ disable interrupts
        ldr     r1, [tsk, #TI_FLAGS]
        tst     r1, #_TIF_WORK_MASK
        bne     work_pending
no_work_pending:
        /* perform architecture specific actions before user return */
        arch_ret_to_user r1, lr

        @ slow_restore_user_regs
        ldr     r1, [sp, #S_PSR]                @ get calling cpsr
        ldr     lr, [sp, #S_PC]!                @ get pc
        msr     spsr_cxsf, r1                   @ save in spsr_svc
        ldmdb   sp, {r0 - lr}^                  @ get calling r0 - lr
        mov     r0, r0                          @ nop, as above
        add     sp, sp, #S_FRAME_SIZE - S_PC
        movs    pc, lr                          @ return & move spsr_svc into cpsr

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
        bl      schedule_tail
        get_thread_info tsk
        ldr     r1, [tsk, #TI_FLAGS]            @ check for syscall tracing
        mov     why, #1
        tst     r1, #_TIF_SYSCALL_TRACE         @ are we tracing syscalls?
        beq     ret_slow_syscall
        mov     r1, sp
        mov     r0, #1                          @ trace exit [IP = 1]
        bl      syscall_trace
        b       ret_slow_syscall


        .equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"
#undef CALL
#define CALL(x) .long x

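/*
 * calls.S is included twice: the first pass above redefines CALL() to
 * bump NR_syscalls for every entry, yielding the syscall count at
 * assembly time; the second pass (in sys_call_table below) redefines
 * CALL() to emit each handler's address as a table entry.
 */
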
#ifdef CONFIG_FTRACE
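/*
 * When the kernel is built with -pg, the compiler emits a call to
 * mcount at each function entry.  lr therefore points just past that
 * call; subtracting MCOUNT_INSN_SIZE recovers the address of the
 * mcount call site itself, which is what the tracer records.
 */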
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(mcount)
        stmdb   sp!, {r0-r3, lr}
        mov     r0, lr
        sub     r0, r0, #MCOUNT_INSN_SIZE

        .globl mcount_call
mcount_call:
        bl      ftrace_stub
        ldmia   sp!, {r0-r3, pc}

ENTRY(ftrace_caller)
        stmdb   sp!, {r0-r3, lr}
        ldr     r1, [fp, #-4]                   @ lr of instrumented routine
        mov     r0, lr
        sub     r0, r0, #MCOUNT_INSN_SIZE

        .globl ftrace_call
ftrace_call:
        bl      ftrace_stub
        ldmia   sp!, {r0-r3, pc}

#else

ENTRY(mcount)
        stmdb   sp!, {r0-r3, lr}
        ldr     r0, =ftrace_trace_function
        ldr     r2, [r0]
        adr     r0, ftrace_stub
        cmp     r0, r2                          @ tracer installed?
        bne     trace
        ldmia   sp!, {r0-r3, pc}

trace:
        ldr     r1, [fp, #-4]                   @ lr of instrumented routine
        mov     r0, lr
        sub     r0, r0, #MCOUNT_INSN_SIZE
        mov     lr, pc
        mov     pc, r2                          @ call the tracer
        ldmia   sp!, {r0-r3, pc}

#endif /* CONFIG_DYNAMIC_FTRACE */

        .globl ftrace_stub
ftrace_stub:
        mov     pc, lr

#endif /* CONFIG_FTRACE */

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

        /* If we're optimising for StrongARM the resulting code won't
           run on an ARM7 and we can save a couple of instructions.
                                                                --pb */
#ifdef CONFIG_CPU_ARM710
#define A710(code...) code
.Larm710bug:
        ldmia   sp, {r0 - lr}^                  @ Get calling r0 - lr
        mov     r0, r0
        add     sp, sp, #S_FRAME_SIZE
        subs    pc, lr, #4
#else
#define A710(code...)
#endif
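
/*
 * Background: some ARM710 cores can take a spurious SWI trap.  The
 * A710() code in the paths below re-reads the instruction at lr - 4;
 * if bits 27:24 of the fetched opcode are not 0xf, it was not really
 * an SWI, so .Larm710bug restores the caller's registers and retries
 * the faulting instruction with "subs pc, lr, #4".
 */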

        .align  5
ENTRY(vector_swi)
        sub     sp, sp, #S_FRAME_SIZE
        stmia   sp, {r0 - r12}                  @ Calling r0 - r12
        add     r8, sp, #S_PC
        stmdb   r8, {sp, lr}^                   @ Calling sp, lr
        mrs     r8, spsr                        @ called from non-FIQ mode, so ok.
        str     lr, [sp, #S_PC]                 @ Save calling PC
        str     r8, [sp, #S_PSR]                @ Save CPSR
        str     r0, [sp, #S_OLD_R0]             @ Save OLD_R0
        zero_fp

        /*
         * Get the system call number.
         */

#if defined(CONFIG_OABI_COMPAT)

        /*
         * If we have CONFIG_OABI_COMPAT then we need to look at the swi
         * value to determine if it is an EABI or an old ABI call.
         */
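
        /*
         * For reference, the two user-space conventions look like
         * (illustrative, not assembled here):
         *
         *      EABI:   mov     r7, #NR          @ syscall number in r7
         *              swi     #0               @ zero immediate
         *
         *      OABI:   swi     #(0x900000 + NR) @ number encoded in the
         *                                       @ instruction itself
         *
         * where 0x900000 is __NR_OABI_SYSCALL_BASE.
         */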
#ifdef CONFIG_ARM_THUMB
        tst     r8, #PSR_T_BIT
        movne   r10, #0                         @ no thumb OABI emulation
        ldreq   r10, [lr, #-4]                  @ get SWI instruction
#else
        ldr     r10, [lr, #-4]                  @ get SWI instruction
  A710( and     ip, r10, #0x0f000000            @ check for SWI         )
  A710( teq     ip, #0x0f000000                                         )
  A710( bne     .Larm710bug                                             )
#endif

#elif defined(CONFIG_AEABI)

        /*
         * Pure EABI user space always puts the syscall number into scno (r7).
         */
  A710( ldr     ip, [lr, #-4]                   @ get SWI instruction   )
  A710( and     ip, ip, #0x0f000000             @ check for SWI         )
  A710( teq     ip, #0x0f000000                                         )
  A710( bne     .Larm710bug                                             )

#elif defined(CONFIG_ARM_THUMB)

        /* Legacy ABI only, possibly thumb mode. */
        tst     r8, #PSR_T_BIT                  @ this is SPSR from save_user_regs
        addne   scno, r7, #__NR_SYSCALL_BASE    @ put OS number in
        ldreq   scno, [lr, #-4]

#else

        /* Legacy ABI only. */
        ldr     scno, [lr, #-4]                 @ get SWI instruction
  A710( and     ip, scno, #0x0f000000           @ check for SWI         )
  A710( teq     ip, #0x0f000000                                         )
  A710( bne     .Larm710bug                                             )

#endif

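/*
 * The alignment-trap setting can be changed at run time (e.g. via
 * /proc/cpu/alignment), so reload the CP15 control register from
 * cr_alignment on each kernel entry.
 */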
#ifdef CONFIG_ALIGNMENT_TRAP
        ldr     ip, __cr_alignment
        ldr     ip, [ip]
        mcr     p15, 0, ip, c1, c0              @ update control register
#endif
        enable_irq

        get_thread_info tsk
        adr     tbl, sys_call_table             @ load syscall table pointer
        ldr     ip, [tsk, #TI_FLAGS]            @ check for syscall tracing

#if defined(CONFIG_OABI_COMPAT)
        /*
         * If the swi argument is zero, this is an EABI call and we do nothing.
         *
         * If this is an old ABI call, get the syscall number into scno and
         * get the old ABI syscall table address.
         */
        bics    r10, r10, #0xff000000
        eorne   scno, r10, #__NR_OABI_SYSCALL_BASE @ strip the OABI base; bogus
                                                @ values fail the NR_syscalls
                                                @ range check below
        ldrne   tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
        bic     scno, scno, #0xff000000         @ mask off SWI op-code
        eor     scno, scno, #__NR_SYSCALL_BASE  @ check OS number
#endif

        stmdb   sp!, {r4, r5}                   @ push fifth and sixth args
        tst     ip, #_TIF_SYSCALL_TRACE         @ are we tracing syscalls?
        bne     __sys_trace

        cmp     scno, #NR_syscalls              @ check upper syscall limit
        adr     lr, ret_fast_syscall            @ return address
        ldrcc   pc, [tbl, scno, lsl #2]         @ call sys_* routine

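        /*
         * scno was out of range for the table: fall through to handle
         * ARM private syscalls (__ARM_NR_*) or fail with ENOSYS.
         */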
        add     r1, sp, #S_OFF
2:      mov     why, #0                         @ no longer a real syscall
        cmp     scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
        eor     r0, scno, #__NR_SYSCALL_BASE    @ put OS number back
        bcs     arm_syscall
        b       sys_ni_syscall                  @ not private func

/*
 * This is the really slow path.  We're going to be doing
 * context switches, and waiting for our parent to respond.
 */
__sys_trace:
        mov     r2, scno
        add     r1, sp, #S_OFF
        mov     r0, #0                          @ trace entry [IP = 0]
        bl      syscall_trace

        adr     lr, __sys_trace_return          @ return address
        mov     scno, r0                        @ syscall number (possibly new)
        add     r1, sp, #S_R0 + S_OFF           @ pointer to regs
        cmp     scno, #NR_syscalls              @ check upper syscall limit
        ldmccia r1, {r0 - r3}                   @ have to reload r0 - r3
        ldrcc   pc, [tbl, scno, lsl #2]         @ call sys_* routine
        b       2b

__sys_trace_return:
        str     r0, [sp, #S_R0 + S_OFF]!        @ save returned r0
        mov     r2, scno
        mov     r1, sp
        mov     r0, #1                          @ trace exit [IP = 1]
        bl      syscall_trace
        b       ret_slow_syscall

        .align  5
#ifdef CONFIG_ALIGNMENT_TRAP
        .type   __cr_alignment, #object
__cr_alignment:
        .word   cr_alignment
#endif
        .ltorg

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple of syscalls are obsolete and defined as sys_ni_syscall.
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif

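/*
 * Each line in calls.S expands through CALL() and, where needed, the
 * ABI()/OBSOLETE() macros above.  Illustrative examples (the real
 * entries live in calls.S):
 *
 *      CALL(sys_fork_wrapper)
 *      CALL(OBSOLETE(sys_time))                @ sys_ni_syscall under EABI
 *      CALL(ABI(sys_truncate64, sys_oabi_truncate64))
 */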
        .type   sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
        .type   sys_syscall, #function
sys_syscall:
        bic     scno, r0, #__NR_OABI_SYSCALL_BASE
        cmp     scno, #__NR_syscall - __NR_SYSCALL_BASE @ refuse recursive
                                                @ sys_syscall
        cmpne   scno, #NR_syscalls              @ check range
        stmloia sp, {r5, r6}                    @ shuffle args
        movlo   r0, r1
        movlo   r1, r2
        movlo   r2, r3
        movlo   r3, r4
        ldrlo   pc, [tbl, scno, lsl #2]
        b       sys_ni_syscall

sys_fork_wrapper:
        add     r0, sp, #S_OFF
        b       sys_fork

sys_vfork_wrapper:
        add     r0, sp, #S_OFF
        b       sys_vfork

sys_execve_wrapper:
        add     r3, sp, #S_OFF
        b       sys_execve

sys_clone_wrapper:
        add     ip, sp, #S_OFF
        str     ip, [sp, #4]
        b       sys_clone

sys_sigsuspend_wrapper:
        add     r3, sp, #S_OFF
        b       sys_sigsuspend

sys_rt_sigsuspend_wrapper:
        add     r2, sp, #S_OFF
        b       sys_rt_sigsuspend

sys_sigreturn_wrapper:
        add     r0, sp, #S_OFF
        b       sys_sigreturn

sys_rt_sigreturn_wrapper:
        add     r0, sp, #S_OFF
        b       sys_rt_sigreturn

sys_sigaltstack_wrapper:
        ldr     r2, [sp, #S_OFF + S_SP]
        b       do_sigaltstack

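/*
 * User space built against EABI headers passes sizeof(struct statfs64)
 * as 88 (the EABI-padded size), while the kernel's packed definition is
 * 84 bytes; fix up the size argument so the sanity check in
 * sys_(f)statfs64 passes.
 */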
sys_statfs64_wrapper:
        teq     r1, #88
        moveq   r1, #84
        b       sys_statfs64

sys_fstatfs64_wrapper:
        teq     r1, #88
        moveq   r1, #84
        b       sys_fstatfs64

/*
 * Note: off_4k (r5) is always in units of 4K.  If we can't do the
 * requested offset because it is not page-aligned, we return EINVAL.
 */
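/*
 * Example: with 8K pages (PAGE_SHIFT == 13) the shift is by one, so an
 * odd off_4k cannot be expressed as a whole number of pages and fails
 * with EINVAL; an even off_4k is halved (4K units -> 8K-page units) and
 * stored back in its argument slot before calling do_mmap2.
 */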
sys_mmap2:
#if PAGE_SHIFT > 12
        tst     r5, #PGOFF_MASK
        moveq   r5, r5, lsr #PAGE_SHIFT - 12
        streq   r5, [sp, #4]
        beq     do_mmap2
        mov     r0, #-EINVAL
        mov     pc, lr
#else
        str     r5, [sp, #4]
        b       do_mmap2
#endif

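/*
 * Prefetch abort helpers: on CPUs with an instruction fault address
 * register, pabort_ifar reads the faulting address from CP15 c6,c0,2
 * into r0 and falls through; pabort_noifar returns with r0 untouched.
 */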
ENTRY(pabort_ifar)
        mrc     p15, 0, r0, cr6, cr0, 2         @ read IFAR
ENTRY(pabort_noifar)
        mov     pc, lr

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences: EABI expects
 * 64-bit arguments in even/odd register pairs and doubleword-aligned
 * stack slots, while old ABI callers pack them into whichever registers
 * come next, so the wrappers below reshuffle the arguments.
 */

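/*
 * Example: pread64(fd, buf, count, pos).  OABI passes the 64-bit pos
 * packed in r3+r4; the EABI-built kernel expects it in the r4/r5 slots,
 * which vector_swi has already pushed to [sp] and [sp, #4].  Storing
 * {r3, r4} over those slots makes the call look native.
 */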
sys_oabi_pread64:
        stmia   sp, {r3, r4}
        b       sys_pread64

sys_oabi_pwrite64:
        stmia   sp, {r3, r4}
        b       sys_pwrite64

sys_oabi_truncate64:
        mov     r3, r2
        mov     r2, r1
        b       sys_truncate64

sys_oabi_ftruncate64:
        mov     r3, r2
        mov     r2, r1
        b       sys_ftruncate64

sys_oabi_readahead:
        str     r3, [sp]
        mov     r3, r2
        mov     r2, r1
        b       sys_readahead

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

        .type   sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

#endif