/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/assembler.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>
#include <asm/memory.h>
#ifdef CONFIG_AEABI
#include <asm/unistd-oabi.h>
#endif

	/* Total number of entries emitted into sys_call_table (checked by
	 * the syscall_table_end macro below). */
	.equ	NR_syscalls, __NR_syscalls
#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
	/* Default arch_ret_to_user is a no-op; machines that need extra
	 * work before returning to user space provide their own via
	 * <mach/entry-macro.S> under CONFIG_NEED_RET_TO_USER. */
	.macro	arch_ret_to_user, tmp1, tmp2
	.endm
#endif

#include "entry-header.S"

/* Register aliases for the saved user PSR/PC captured on SWI entry. */
saved_psr	.req	r8
#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
/* C calls may be made before the saved PC is consumed, so keep it in a
 * callee-preserved register (r9) and let TRACE() emit the extra copy. */
saved_pc	.req	r9
#define TRACE(x...) x
#else
/* No C calls intervene: lr still holds the calling PC, TRACE() expands
 * to nothing. */
saved_pc	.req	lr
#define TRACE(x...)
#endif
	.section .entry.text,"ax",%progbits
	.align	5
#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING) || \
	IS_ENABLED(CONFIG_DEBUG_RSEQ))
/*
 * This is the fast syscall return path.  We do as little as possible here,
 * such as avoiding writing r0 to the stack.  We only use this path if we
 * have tracing, context tracking and rseq debug disabled - the overheads
 * from those features make this path too inefficient.
 */
ret_fast_syscall:
__ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq_notrace			@ disable interrupts
	ldr	r2, [tsk, #TI_ADDR_LIMIT]	@ sanity-check addr_limit was
	cmp	r2, #TASK_SIZE			@ restored before user return
	blne	addr_limit_check_failed
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
	bne	fast_work_pending


	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

	/* Ok, we need to do extra processing, enter the slow path. */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
	/* fall through to work_pending */
#else
/*
 * The "replacement" ret_fast_syscall for when tracing, context tracking,
 * or rseq debug is enabled.  As we will need to call out to some C functions,
 * we save r0 first to avoid needing to save registers around each C function
 * call.
 */
ret_fast_syscall:
__ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
	/* do_rseq_syscall needs interrupts enabled. */
	mov	r0, sp				@ 'regs'
	bl	do_rseq_syscall
#endif
	disable_irq_notrace			@ disable interrupts
	ldr	r2, [tsk, #TI_ADDR_LIMIT]	@ sanity-check addr_limit was
	cmp	r2, #TASK_SIZE			@ restored before user return
	blne	addr_limit_check_failed
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
	beq	no_work_pending
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

	/* Slower path - fall through to work_pending */
#endif

	/* r1 = current thread_info flags at this point (set above). */
	tst	r1, #_TIF_SYSCALL_WORK
	bne	__sys_trace_return_nosave
slow_work_pending:
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_work_pending
	cmp	r0, #0
	beq	no_work_pending
	/* Negative return: restart the syscall via sys_restart_syscall. */
	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
	b	local_restart			@ ... and off we go
ENDPROC(ret_fast_syscall)
81783786 | 116 | |
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 * IRQs may be enabled here, so always disable them.  Note that we use the
 * "notrace" version to avoid calling into the tracing code unnecessarily.
 * do_work_pending() will update this state if necessary.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
	/* do_rseq_syscall needs interrupts enabled. */
	enable_irq_notrace			@ enable interrupts
	mov	r0, sp				@ 'regs'
	bl	do_rseq_syscall
#endif
	disable_irq_notrace			@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r2, [tsk, #TI_ADDR_LIMIT]	@ sanity-check addr_limit was
	cmp	r2, #TASK_SIZE			@ restored before user return
	blne	addr_limit_check_failed
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	slow_work_pending
no_work_pending:
	asm_trace_hardirqs_on save = 0

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr
	ct_user_enter save = 0

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 *
 * r4 = argument for a kernel thread (passed in r0 below)
 * r5 = kernel thread entry point, or zero for a user process
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cmp	r5, #0				@ kernel thread?
	movne	r0, r4				@ yes: r0 = thread argument,
	badrne	lr, 1f				@ return to 1f when it exits
	retne	r5				@ and jump to the thread fn
1:	get_thread_info tsk
	b	ret_slow_syscall
ENDPROC(ret_from_fork)
1da177e4 | 162 | |
/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	.align	5
ENTRY(vector_swi)
#ifdef CONFIG_CPU_V7M
	v7m_exception_entry
#else
	/* Build a pt_regs frame on the kernel stack by hand. */
	sub	sp, sp, #PT_REGS_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	saved_psr, spsr			@ called from non-FIQ mode, so ok.
 TRACE(	mov	saved_pc, lr		)
	str	saved_pc, [sp, #S_PC]		@ Save calling PC
	str	saved_psr, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
#endif
	zero_fp
	alignment_trap r10, ip, __cr_alignment
	asm_trace_hardirqs_on save=0
	enable_irq_notrace
	ct_user_exit save=0

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	saved_psr, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
 USER(	ldreq	r10, [saved_pc, #-4]	)	@ get SWI instruction
#else
 USER(	ldr	r10, [saved_pc, #-4]	)	@ get SWI instruction
#endif
 ARM_BE8(rev	r10, r10)			@ little endian instruction

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always put syscall number into scno (r7).
	 */
#elif defined(CONFIG_ARM_THUMB)
	/* Legacy ABI only, possibly thumb mode. */
	tst	saved_psr, #PSR_T_BIT		@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
 USER(	ldreq	scno, [saved_pc, #-4]	)

#else
	/* Legacy ABI only. */
 USER(	ldr	scno, [saved_pc, #-4]	)	@ get SWI instruction
#endif

	/* saved_psr and saved_pc are now dead */

	uaccess_disable tbl

	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif
	get_thread_info tsk
	/*
	 * Reload the registers that may have been corrupted on entry to
	 * the syscall assembly (by tracing or context tracking.)
	 */
 TRACE(	ldmia	sp, {r0 - r3}		)

local_restart:
	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	bne	__sys_trace

	invoke_syscall tbl, scno, r10, __ret_fast_syscall

	/* Out-of-range syscall number: hand private ARM calls to
	 * arm_syscall(), anything else to sys_ni_syscall(). */
	add	r1, sp, #S_OFF
2:	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	mov	why, #0				@ no longer a real syscall
	b	sys_ni_syscall			@ not private func

#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
	/*
	 * We failed to handle a fault trying to access the page
	 * containing the swi instruction, but we're not really in a
	 * position to return -EFAULT. Instead, return back to the
	 * instruction and re-enter the user fault handling path trying
	 * to page it in. This will likely result in sending SEGV to the
	 * current task.
	 */
9001:
	sub	lr, saved_pc, #4
	str	lr, [sp, #S_PC]
	get_thread_info tsk
	b	ret_fast_syscall
#endif
ENDPROC(vector_swi)

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	r1, scno
	add	r0, sp, #S_OFF
	bl	syscall_trace_enter
	mov	scno, r0			@ tracer may rewrite syscall nr
	invoke_syscall tbl, scno, r10, __sys_trace_return, reload=1
	cmp	scno, #-1			@ skip the syscall?
	bne	2b
	add	sp, sp, #S_OFF			@ restore stack

	/* Exit path when the syscall itself was skipped (scno == -1) or
	 * only _TIF_SYSCALL_WORK was pending: r0 is already in the frame. */
__sys_trace_return_nosave:
	enable_irq_notrace
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall
311 | ||
1da177e4 LT |
312 | .align 5 |
313 | #ifdef CONFIG_ALIGNMENT_TRAP | |
314 | .type __cr_alignment, #object | |
315 | __cr_alignment: | |
316 | .word cr_alignment | |
dd35afc2 NP |
317 | #endif |
318 | .ltorg | |
319 | ||
96a8fae0 RK |
320 | .macro syscall_table_start, sym |
321 | .equ __sys_nr, 0 | |
322 | .type \sym, #object | |
323 | ENTRY(\sym) | |
324 | .endm | |
325 | ||
326 | .macro syscall, nr, func | |
327 | .ifgt __sys_nr - \nr | |
328 | .error "Duplicated/unorded system call entry" | |
329 | .endif | |
330 | .rept \nr - __sys_nr | |
331 | .long sys_ni_syscall | |
332 | .endr | |
333 | .long \func | |
334 | .equ __sys_nr, \nr + 1 | |
335 | .endm | |
336 | ||
337 | .macro syscall_table_end, sym | |
338 | .ifgt __sys_nr - __NR_syscalls | |
339 | .error "System call table too big" | |
340 | .endif | |
341 | .rept __NR_syscalls - __sys_nr | |
342 | .long sys_ni_syscall | |
343 | .endr | |
344 | .size \sym, . - \sym | |
345 | .endm | |
346 | ||
#define NATIVE(nr, func) syscall nr, func

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple syscalls are obsolete and defined as sys_ni_syscall.
 */
	syscall_table_start sys_call_table
/* For the native table, COMPAT() entries use the native implementation. */
#define COMPAT(nr, native, compat) syscall nr, native
#ifdef CONFIG_AEABI
#include <calls-eabi.S>
#else
#include <calls-oabi.S>
#endif
#undef COMPAT
	syscall_table_end sys_call_table

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
		bic	scno, r0, #__NR_OABI_SYSCALL_BASE
		cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
		cmpne	scno, #NR_syscalls	@ check range
#ifdef CONFIG_CPU_SPECTRE
		/* Spectre-v1 hardening: clamp an out-of-range index to 0 and
		 * emit the CSDB barrier so no speculative out-of-bounds table
		 * load can occur below. */
		movhs	scno, #0
		csdb
#endif
		stmialo	sp, {r5, r6}		@ shuffle args
		movlo	r0, r1
		movlo	r1, r2
		movlo	r2, r3
		movlo	r3, r4
		ldrlo	pc, [tbl, scno, lsl #2]	@ tail-call the real syscall
		b	sys_ni_syscall
ENDPROC(sys_syscall)
1da177e4 | 384 | |
sys_sigreturn_wrapper:
		add	r0, sp, #S_OFF		@ r0 = pt_regs
		mov	why, #0		@ prevent syscall restart handling
		b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
		add	r0, sp, #S_OFF		@ r0 = pt_regs
		mov	why, #0		@ prevent syscall restart handling
		b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

/* NOTE(review): the 88 -> 84 rewrite below presumably normalises a padded
 * user-space sizeof(struct statfs64) to the kernel's expected size —
 * confirm against the statfs64 ABI before changing. */
sys_statfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
 * offset, we return EINVAL.
 */
sys_mmap2:
		str	r5, [sp, #4]		@ pass 5th arg on the stack
		b	sys_mmap_pgoff
ENDPROC(sys_mmap2)
#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */

/* OABI passes the 64-bit pos argument in r3/r4; EABI expects it in the
 * stack slots read by the C calling convention, so spill r3/r4 there. */
sys_oabi_pread64:
		stmia	sp, {r3, r4}
		b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
		stmia	sp, {r3, r4}
		b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

/* OABI packs the 64-bit length in r1/r2; shift it into the r2/r3 pair
 * the EABI entry point expects. */
sys_oabi_truncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
		str	r3, [sp]		@ count moves to the stack
		mov	r3, r2
		mov	r2, r1
		b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
	syscall_table_start sys_oabi_call_table
#define COMPAT(nr, native, compat) syscall nr, compat
#include <calls-oabi.S>
	syscall_table_end sys_oabi_call_table

#endif
463 |