/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */

#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#ifndef CONFIG_MULTI_IRQ_HANDLER
#include <mach/entry-macro.S>
#endif
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system_info.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>
#include <asm/probes.h>

/*
 * Interrupt handling.
 */
	.macro	irq_handler
#ifdef CONFIG_MULTI_IRQ_HANDLER
	ldr	r1, =handle_arch_irq
	mov	r0, sp
	badr	lr, 9997f
	ldr	pc, [r1]
#else
	arch_irq_handler_default
#endif
9997:
	.endm
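@ With CONFIG_MULTI_IRQ_HANDLER the machine-specific IRQ handler is not
@ known at build time, so the macro above calls it through the
@ handle_arch_irq pointer (set up by platform code), passing the
@ pt_regs pointer in r0 and returning to the local label 9997.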

	.macro	pabt_helper
	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
#ifdef MULTI_PABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
#else
	bl	CPU_PABORT_HANDLER
#endif
	.endm
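@ The "mov lr, pc; ldr pc, [ip, #...]" pair above is a call through a
@ function pointer: in ARM state pc reads as the mov's address + 8,
@ i.e. the instruction after the ldr, so lr holds a valid return
@ address when the handler runs.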

	.macro	dabt_helper

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - pt_regs
	@  r4 - aborted context pc
	@  r5 - aborted context psr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif
	.endm

#ifdef CONFIG_KPROBES
	.section	.kprobes.text, "ax", %progbits
#else
	.text
#endif

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm
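@ inv_entry builds a pt_regs frame for an exception taken from a mode
@ we cannot handle, recording which exception it was (BAD_*) in r1 so
@ that bad_mode can report it.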

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	b	bad_mode
ENDPROC(__und_invalid)

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif

	.macro	svc_entry, stack_hole=0, trace=1
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subeq	sp, sp, #4	)
	stmia	sp, {r1 - r12}

	ldmia	r0, {r3 - r5}
	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""      ""       ""
	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r2, r2, #4	)
	str	r3, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack

	mov	r3, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - sp_svc
	@  r3 - lr_svc
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r7, {r2 - r6}

	.if \trace
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	.endif
	.endm
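@ The SPFIX() code keeps the completed frame 64-bit aligned, as EABI
@ requires: if the exception-time sp would leave the frame only word
@ aligned, 4 bytes of padding are inserted, and r2 is compensated so
@ the saved sp_svc still reflects the original stack pointer.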

	.align	5
__dabt_svc:
	svc_entry
	mov	r2, sp
	dabt_helper
 THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__dabt_svc)

	.align	5
__irq_svc:
	svc_entry
	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif

	svc_exit r5, irq = 1			@ return from exception
 UNWIND(.fnend		)
ENDPROC(__irq_svc)

	.ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	reteq	r8				@ go again
	b	1b
#endif

__und_fault:
	@ Correct the PC such that it is pointing at the instruction
	@ which caused the fault.  If the faulting instruction was ARM,
	@ the PC will be pointing at the next instruction and we have
	@ to subtract 4.  Otherwise, it was Thumb, and the PC will be
	@ pointing at the second half of the Thumb instruction, so we
	@ have to subtract 2.
	ldr	r2, [r0, #S_PC]
	sub	r2, r2, r1
	str	r2, [r0, #S_PC]
	b	do_undefinstr
ENDPROC(__und_fault)
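@ Worked example for the correction above: an undefined 32-bit ARM
@ instruction at 0x8000 leaves lr_und (and hence the saved PC) at
@ 0x8004; with r1 == 4 the saved regs->ARM_pc is rewound to 0x8000
@ before do_undefinstr runs.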

	.align	5
__und_svc:
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry MAX_STACK_SIZE
#else
	svc_entry
#endif
	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
#ifndef CONFIG_THUMB2_KERNEL
	ldr	r0, [r4, #-4]
#else
	mov	r1, #2
	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
	cmp	r0, #0xe800			@ 32-bit instruction if xx >= 0
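	@ (Thumb-2: first halfwords in 0xe800-0xffff begin a 32-bit
	@ encoding; anything lower is a complete 16-bit instruction.)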
	blo	__und_svc_fault
	ldrh	r9, [r4]			@ bottom 16 bits
	add	r4, r4, #2
	str	r4, [sp, #S_PC]
	orr	r0, r9, r0, lsl #16
#endif
	badr	r9, __und_svc_finish
	mov	r2, r4
	bl	call_fpe

	mov	r1, #4				@ PC correction to apply
__und_svc_fault:
	mov	r0, sp				@ struct pt_regs *regs
	bl	__und_fault

__und_svc_finish:
	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__und_svc)

	.align	5
__pabt_svc:
	svc_entry
	mov	r2, sp				@ regs
	pabt_helper
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__pabt_svc)

	.align	5
__fiq_svc:
	svc_entry trace=0
	mov	r0, sp				@ struct pt_regs *regs
	bl	handle_fiq_as_nmi
	svc_exit_via_fiq
 UNWIND(.fnend		)
ENDPROC(__fiq_svc)

	.align	5
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter

/*
 * Abort mode handlers
 */

@
@ Taking a FIQ in abort mode is similar to taking a FIQ in SVC mode
@ and reuses the same macros. However in abort mode we must also
@ save/restore lr_abt and spsr_abt to make nested aborts safe.
@
	.align 5
__fiq_abt:
	svc_entry trace=0

 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
	mov	r1, lr		@ Save lr_abt
	mrs	r2, spsr	@ Save spsr_abt, abort is now safe
 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
	stmfd	sp!, {r1 - r2}

	add	r0, sp, #8			@ struct pt_regs *regs
	bl	handle_fiq_as_nmi

	ldmfd	sp!, {r1 - r2}
 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
	mov	lr, r1		@ Restore lr_abt, abort is unsafe
	msr	spsr_cxsf, r2	@ Restore spsr_abt
 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )

	svc_exit_via_fiq
 UNWIND(.fnend		)
ENDPROC(__fiq_abt)

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so S_FRAME_SIZE
 * must be a multiple of 8 as well.
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

	.macro	usr_entry, trace=1
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)

 ATRAP(	mrc	p15, 0, r7, c1, c0, 0)
 ATRAP(	ldr	r8, .LCcralign)

	ldmia	r0, {r3 - r5}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""     ""        ""

	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack

 ATRAP(	ldr	r8, [r8, #0])

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r4 - r6}
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

	@ Enable the alignment trap while in kernel mode
 ATRAP(	teq	r8, r7)
 ATRAP( mcrne	p15, 0, r8, c1, c0, 0)

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp

	.if	\trace
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	ct_user_exit save = 0
	.endif
	.endm

	.macro	kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
    !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	cmp	r4, #TASK_SIZE
	blhs	kuser_cmpxchg64_fixup
#endif
#endif
	.endm

	.align	5
__dabt_usr:
	usr_entry
	kuser_cmpxchg_check
	mov	r2, sp
	dabt_helper
	b	ret_from_exception
 UNWIND(.fnend		)
ENDPROC(__dabt_usr)

	.align	5
__irq_usr:
	usr_entry
	kuser_cmpxchg_check
	irq_handler
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user_from_irq
 UNWIND(.fnend		)
ENDPROC(__irq_usr)

	.ltorg

	.align	5
__und_usr:
	usr_entry

	mov	r2, r4
	mov	r3, r5

	@ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
	@      faulting instruction depending on Thumb mode.
	@ r3 = regs->ARM_cpsr
	@
	@ The emulation code returns using r9 if it has emulated the
	@ instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	badr	r9, ret_from_exception

	@ IRQs must be enabled before attempting to read the instruction from
	@ user space since that could cause a page/translation fault if the
	@ page table was modified by another CPU.
	enable_irq

	tst	r3, #PSR_T_BIT			@ Thumb mode?
	bne	__und_usr_thumb
	sub	r4, r2, #4			@ ARM instr at LR - 4
1:	ldrt	r0, [r4]
 ARM_BE8(rev	r0, r0)				@ little endian instruction

	@ r0 = 32-bit ARM instruction which caused the exception
	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
	@ r4 = PC value for the faulting instruction
	@ lr = 32-bit undefined instruction function
	badr	lr, __und_usr_fault_32
	b	call_fpe

__und_usr_thumb:
	@ Thumb instruction
	sub	r4, r2, #2			@ First half of thumb instr at LR - 2
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
/*
 * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
 * can never be supported in a single kernel, this code is not applicable at
 * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
 * made about .arch directives.
 */
#if __LINUX_ARM_ARCH__ < 7
/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
#define NEED_CPU_ARCHITECTURE
	ldr	r5, .LCcpu_architecture
	ldr	r5, [r5]
	cmp	r5, #CPU_ARCH_ARMv7
	blo	__und_usr_fault_16		@ 16bit undefined instruction
/*
 * The following code won't get run unless the running CPU really is v7, so
 * coding round the lack of ldrht on older arches is pointless.  Temporarily
 * override the assembler target arch with the minimum required instead:
 */
	.arch	armv6t2
#endif
2:	ldrht	r5, [r4]
 ARM_BE8(rev16	r5, r5)				@ little endian instruction
	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
	blo	__und_usr_fault_16		@ 16bit undefined instruction
3:	ldrht	r0, [r2]
 ARM_BE8(rev16	r0, r0)				@ little endian instruction
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
	orr	r0, r0, r5, lsl #16
	badr	lr, __und_usr_fault_32
	@ r0 = the two 16-bit Thumb instructions which caused the exception
	@ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
	@ r4 = PC value for the first 16-bit Thumb instruction
	@ lr = 32bit undefined instruction function

#if __LINUX_ARM_ARCH__ < 7
/* If the target arch was overridden, change it back: */
#ifdef CONFIG_CPU_32v6K
	.arch	armv6k
#else
	.arch	armv6
#endif
#endif /* __LINUX_ARM_ARCH__ < 7 */
#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
	b	__und_usr_fault_16
#endif
 UNWIND(.fnend)
ENDPROC(__und_usr)

/*
 * The out of line fixup for the ldrt instructions above.
 */
	.pushsection .text.fixup, "ax"
	.align	2
4:	str	r4, [sp, #S_PC]			@ retry current instruction
	ret	r9
	.popsection
	.pushsection __ex_table,"a"
	.long	1b, 4b
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
	.long	2b, 4b
	.long	3b, 4b
#endif
	.popsection
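@ Each __ex_table entry pairs one of the user-access loads above (1b,
@ 2b, 3b) with the fixup at 4b: if the load faults, regs->ARM_pc is
@ rewound to the instruction being decoded (r4) and we return via r9,
@ so the undef exception is simply retaken once the page is in.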

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here. Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them.  Plus, there are
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
 *  r2  = PC value to resume execution after successful emulation
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure
 *  lr  = unrecognised instruction return address
 * IRQs enabled, FIQs enabled.
 */
@
@ Fall-through from Thumb-2 __und_usr
@
#ifdef CONFIG_NEON
	get_thread_info r10			@ get current thread
	adr	r6, .LCneon_thumb_opcodes
	b	2f
#endif
call_fpe:
	get_thread_info r10			@ get current thread
#ifdef CONFIG_NEON
	adr	r6, .LCneon_arm_opcodes
2:	ldr	r5, [r6], #4			@ mask value
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r5, #0				@ end mask?
	beq	1f
	and	r8, r0, r5
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
	reteq	lr
	and	r8, r0, #0x00000f00		@ mask out CP number
 THUMB(	lsr	r8, r8, #8		)
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
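	@ Computed jump into the table below. ARM: r8 still holds
	@ (CP# << 8), so "r8, lsr #6" is CP# * 4, and pc reads as
	@ '.' + 8, which is exactly the CP#0 slot. Thumb: r8 was
	@ already shifted down to the bare CP number, so it is scaled
	@ by 4 explicitly first.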
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsl	r8, r8, #2		)
 THUMB(	add	pc, r8			)
	nop

	ret.w	lr				@ CP#0
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
	ret.w	lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	ret.w	lr				@ CP#4
	ret.w	lr				@ CP#5
	ret.w	lr				@ CP#6
#endif
	ret.w	lr				@ CP#7
	ret.w	lr				@ CP#8
	ret.w	lr				@ CP#9
#ifdef CONFIG_VFP
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
#else
	ret.w	lr				@ CP#10 (VFP)
	ret.w	lr				@ CP#11 (VFP)
#endif
	ret.w	lr				@ CP#12
	ret.w	lr				@ CP#13
	ret.w	lr				@ CP#14 (Debug)
	ret.w	lr				@ CP#15 (Control)

#ifdef NEED_CPU_ARCHITECTURE
	.align	2
.LCcpu_architecture:
	.word	__cpu_architecture
#endif

#ifdef CONFIG_NEON
	.align	6

.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif

do_fpe:
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.pushsection .data
ENTRY(fp_enter)
	.word	no_fp
	.popsection

ENTRY(no_fp)
	ret	lr
ENDPROC(no_fp)

__und_usr_fault_32:
	mov	r1, #4
	b	1f
__und_usr_fault_16:
	mov	r1, #2
1:	mov	r0, sp
	badr	lr, ret_from_exception
	b	__und_fault
ENDPROC(__und_usr_fault_32)
ENDPROC(__und_usr_fault_16)

	.align	5
__pabt_usr:
	usr_entry
	mov	r2, sp				@ regs
	pabt_helper
 UNWIND(.fnend		)
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

	.align	5
__fiq_usr:
	usr_entry trace=0
	kuser_cmpxchg_check
	mov	r0, sp				@ struct pt_regs *regs
	bl	handle_fiq_as_nmi
	get_thread_info tsk
	restore_user_regs fast = 0, offset = 0
 UNWIND(.fnend		)
ENDPROC(__fiq_usr)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
	ldr	r4, [r2, #TI_TP_VALUE]
	ldr	r5, [r2, #TI_TP_VALUE + 4]
#ifdef CONFIG_CPU_USE_DOMAINS
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	switch_tls r1, r4, r5, r3, r7
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]
#endif
 THUMB(	mov	ip, r4			   )
	mov	r0, r5
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
ENDPROC(__switch_to)

	__INIT

/*
 * User helpers.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
 */
 THUMB(	.arm	)

	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	ret	\reg
#endif
	.endm
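@ usr_ret must return to user code that may be in either ARM or Thumb
@ state, so when Thumb userspace is possible (CONFIG_ARM_THUMB) it
@ uses bx, which interworks on the low bit of \reg.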

	.macro	kuser_pad, sym, size
	.if	(. - \sym) & 3
	.rept	4 - (. - \sym) & 3
	.byte	0
	.endr
	.endif
	.rept	(\size - (. - \sym)) / 4
	.word	0xe7fddef1
	.endr
	.endm
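@ kuser_pad first word-aligns, then fills the rest of the slot with
@ 0xe7fddef1, an encoding from the architecturally undefined space,
@ so a stray jump into the padding faults instead of running garbage.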

#ifdef CONFIG_KUSER_HELPERS
	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 */

__kuser_cmpxchg64:				@ 0xffff0f60

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f				@ it's 20 bits
	swi	__ARM_NR_cmpxchg64
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg64

#elif defined(CONFIG_CPU_32v6K)

	stmfd	sp!, {r4, r5, r6, r7}
	ldrd	r4, r5, [r0]			@ load old val
	ldrd	r6, r7, [r1]			@ load new val
	smp_dmb	arm
1:	ldrexd	r0, r1, [r2]			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
	strexdeq r3, r6, r7, [r2]		@ store newval if eq
	teqeq	r3, #1				@ success?
	beq	1b				@ if no then retry
	smp_dmb	arm
	rsbs	r0, r3, #0			@ set returned val and C flag
	ldmfd	sp!, {r4, r5, r6, r7}
	usr_ret	lr

#elif !defined(CONFIG_SMP)

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg64
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle of
	 * the critical sequence.  The same strategy as for cmpxchg is used.
	 */
	stmfd	sp!, {r4, r5, r6, lr}
	ldmia	r0, {r4, r5}			@ load old val
	ldmia	r1, {r6, lr}			@ load new val
1:	ldmia	r2, {r0, r1}			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
2:	stmeqia	r2, {r6, lr}			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	ldmfd	sp!, {r4, r5, r6, pc}

	.text
kuser_cmpxchg64_fixup:
	@ Called from the kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
	bcc	kuser_cmpxchg32_fixup
#endif
	ret	lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else
#error "incoherent kernel configuration"
#endif

	kuser_pad __kuser_cmpxchg64, 64
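@ Contract of the helper above, per Documentation/arm/kernel_user_helpers.txt:
@ r0 = pointer to the expected 64-bit value, r1 = pointer to the new
@ value, r2 = pointer to the target; returns 0 (with C set) if the
@ target was updated, non-zero otherwise. Entry point is 0xffff0f60.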

__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb	arm
	usr_ret	lr

	kuser_pad __kuser_memory_barrier, 32

__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f				@ it's 20 bits
	swi	__ARM_NR_cmpxchg
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg

#elif __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg32_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	ret	lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else

	smp_dmb	arm
1:	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	teqeq	r3, #1
	beq	1b
	rsbs	r0, r3, #0
	/* beware -- each __kuser slot must be 8 instructions max */
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)

#endif

	kuser_pad __kuser_cmpxchg, 32
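/*
 * Usage sketch for the helper above, in the style of the examples in
 * Documentation/arm/kernel_user_helpers.txt (the wrapper and function
 * names below are illustrative, not part of the kernel):
 *
 *	typedef int (__kuser_cmpxchg_t)(int oldval, int newval,
 *					volatile int *ptr);
 *	#define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)
 *
 *	int atomic_add(volatile int *ptr, int val)
 *	{
 *		int old, new;
 *		do {
 *			old = *ptr;
 *			new = old + val;
 *		} while (__kuser_cmpxchg(old, new, ptr));
 *		// helper returns 0 once *ptr was atomically updated
 *		return new;
 *	}
 */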

__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
	usr_ret	lr
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	kuser_pad __kuser_get_tls, 16
	.rep	3
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:

#endif

 THUMB(	.thumb	)

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff1000 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not exceed
 * a page size.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
	mov	r0, sp
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)

	.align	2
	@ handler addresses follow this label
1:
	.endm
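@ Each stub is followed by a 16-entry branch table indexed by the low
@ four bits of the interrupted mode (spsr & 0x0f): USR is entry 0,
@ FIQ entry 1, IRQ entry 2, SVC entry 3, and so on.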

	.section .stubs, "ax", %progbits
__stubs_start:
	@ This must be the first word
	.word	vector_swi

vector_rst:
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	b	vector_und

/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0  (USR_26 / USR_32)
	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0  (USR_26 / USR_32)
	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3  (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*=============================================================================
 * FIQ "NMI" handler
 *-----------------------------------------------------------------------------
 * Handle a FIQ using the SVC stack, allowing FIQ to act like NMI does
 * on x86 systems.
 */
	vector_stub	fiq, FIQ_MODE, 4

	.long	__fiq_usr			@  0  (USR_26 / USR_32)
	.long	__fiq_svc			@  1  (FIQ_26 / FIQ_32)
	.long	__fiq_svc			@  2  (IRQ_26 / IRQ_32)
	.long	__fiq_svc			@  3  (SVC_26 / SVC_32)
	.long	__fiq_svc			@  4
	.long	__fiq_svc			@  5
	.long	__fiq_svc			@  6
	.long	__fiq_abt			@  7
	.long	__fiq_svc			@  8
	.long	__fiq_svc			@  9
	.long	__fiq_svc			@  a
	.long	__fiq_svc			@  b
	.long	__fiq_svc			@  c
	.long	__fiq_svc			@  d
	.long	__fiq_svc			@  e
	.long	__fiq_svc			@  f

	.globl	vector_fiq_offset
	.equ	vector_fiq_offset, vector_fiq

	.section .vectors, "ax", %progbits
__vectors_start:
	W(b)	vector_rst
	W(b)	vector_und
	W(ldr)	pc, __vectors_start + 0x1000
	W(b)	vector_pabt
	W(b)	vector_dabt
	W(b)	vector_addrexcptn
	W(b)	vector_irq
	W(b)	vector_fiq
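@ The vectors above are copied to the high vector page (0xffff0000)
@ and the stubs to 0xffff1000, close enough for plain branches. The
@ SWI slot instead loads its handler address from the first word of
@ the stubs page ("W(ldr) pc, __vectors_start + 0x1000"): vector_swi
@ lives in entry-common.S and is not copied with the stubs, so a
@ relative branch from the relocated vector page could not reach it.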

	.data

	.globl	cr_alignment
cr_alignment:
	.space	4

#ifdef CONFIG_MULTI_IRQ_HANDLER
	.globl	handle_arch_irq
handle_arch_irq:
	.space	4
#endif