/*
 * Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 * Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>
#include <linux/elf-randomize.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/debug.h>
#include <asm/firmware.h>
#include <asm/code-patching.h>
#include <asm/livepatch.h>
#include <asm/cpu_has_feature.h>
#include <asm/asm-prototypes.h>

#include <linux/kprobes.h>
#include <linux/kdebug.h>
/* Transactional Memory debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while(0)
#endif

extern unsigned long _get_SP(void);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Are we running in "Suspend disabled" mode? If so we have to block any
 * sigreturn that would get us into suspended state, and we also warn in some
 * other paths that we should never reach with suspend disabled.
 */
bool tm_suspend_disabled __ro_after_init = false;

static void check_if_tm_restore_required(struct task_struct *tsk)
{
	/*
	 * If we are saving the current thread's registers, and the
	 * thread is in a transactional state, set the TIF_RESTORE_TM
	 * bit so that we know to restore the registers before
	 * returning to userspace.
	 */
	if (tsk == current && tsk->thread.regs &&
	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
	    !test_thread_flag(TIF_RESTORE_TM)) {
		tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
		set_thread_flag(TIF_RESTORE_TM);
	}
}

static inline bool msr_tm_active(unsigned long msr)
{
	return MSR_TM_ACTIVE(msr);
}
#else
static inline bool msr_tm_active(unsigned long msr) { return false; }
static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
bool strict_msr_control;
EXPORT_SYMBOL(strict_msr_control);

static int __init enable_strict_msr_control(char *str)
{
	strict_msr_control = true;
	pr_info("Enabling strict facility control\n");

	return 0;
}
early_param("ppc_strict_facility_enable", enable_strict_msr_control);
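/*
 * Example (assumed typical usage, not shown elsewhere in this file): strict
 * facility control is enabled by booting with "ppc_strict_facility_enable"
 * on the kernel command line; there is no runtime toggle here.
 */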
unsigned long msr_check_and_set(unsigned long bits)
{
	unsigned long oldmsr = mfmsr();
	unsigned long newmsr;

	newmsr = oldmsr | bits;

#ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
		newmsr |= MSR_VSX;
#endif

	if (oldmsr != newmsr)
		mtmsr_isync(newmsr);

	return newmsr;
}
EXPORT_SYMBOL_GPL(msr_check_and_set);

void __msr_check_and_clear(unsigned long bits)
{
	unsigned long oldmsr = mfmsr();
	unsigned long newmsr;

	newmsr = oldmsr & ~bits;

#ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
		newmsr &= ~MSR_VSX;
#endif

	if (oldmsr != newmsr)
		mtmsr_isync(newmsr);
}
EXPORT_SYMBOL(__msr_check_and_clear);
#ifdef CONFIG_PPC_FPU
void __giveup_fpu(struct task_struct *tsk)
{
	unsigned long msr;

	save_fpu(tsk);
	msr = tsk->thread.regs->msr;
	msr &= ~MSR_FP;
#ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX))
		msr &= ~MSR_VSX;
#endif
	tsk->thread.regs->msr = msr;
}

void giveup_fpu(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_FP);
	__giveup_fpu(tsk);
	msr_check_and_clear(MSR_FP);
}
EXPORT_SYMBOL(giveup_fpu);
/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);

			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);
void enable_kernel_fp(void)
{
	unsigned long cpumsr;

	WARN_ON(preemptible());

	cpumsr = msr_check_and_set(MSR_FP);

	if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
		check_if_tm_restore_required(current);
		/*
		 * If a thread has already been reclaimed then the
		 * checkpointed registers are on the CPU but have definitely
		 * been saved by the reclaim code. Don't need to and *cannot*
		 * giveup as this would save to the 'live' structure not the
		 * checkpointed structure.
		 */
		if (!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
			return;
		__giveup_fpu(current);
	}
}
EXPORT_SYMBOL(enable_kernel_fp);
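/*
 * Sketch of the expected caller pattern (an assumption, not code from this
 * file): kernel code that wants to use FP temporarily does roughly
 *
 *	preempt_disable();
 *	enable_kernel_fp();
 *	... use floating-point instructions ...
 *	preempt_enable();
 *
 * The WARN_ON(preemptible()) above relies on the caller holding off
 * preemption for the whole FP-using region.
 */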
static int restore_fp(struct task_struct *tsk)
{
	if (tsk->thread.load_fp || msr_tm_active(tsk->thread.regs->msr)) {
		load_fp_state(&current->thread.fp_state);
		current->thread.load_fp++;
		return 1;
	}
	return 0;
}
#else
static int restore_fp(struct task_struct *tsk) { return 0; }
#endif /* CONFIG_PPC_FPU */
#ifdef CONFIG_ALTIVEC
#define loadvec(thr) ((thr).load_vec)

static void __giveup_altivec(struct task_struct *tsk)
{
	unsigned long msr;

	save_altivec(tsk);
	msr = tsk->thread.regs->msr;
	msr &= ~MSR_VEC;
#ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX))
		msr &= ~MSR_VSX;
#endif
	tsk->thread.regs->msr = msr;
}

void giveup_altivec(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_VEC);
	__giveup_altivec(tsk);
	msr_check_and_clear(MSR_VEC);
}
EXPORT_SYMBOL(giveup_altivec);
void enable_kernel_altivec(void)
{
	unsigned long cpumsr;

	WARN_ON(preemptible());

	cpumsr = msr_check_and_set(MSR_VEC);

	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
		check_if_tm_restore_required(current);
		/*
		 * If a thread has already been reclaimed then the
		 * checkpointed registers are on the CPU but have definitely
		 * been saved by the reclaim code. Don't need to and *cannot*
		 * giveup as this would save to the 'live' structure not the
		 * checkpointed structure.
		 */
		if (!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
			return;
		__giveup_altivec(current);
	}
}
EXPORT_SYMBOL(enable_kernel_altivec);
/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
			BUG_ON(tsk != current);
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
static int restore_altivec(struct task_struct *tsk)
{
	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
	    (tsk->thread.load_vec || msr_tm_active(tsk->thread.regs->msr))) {
		load_vr_state(&tsk->thread.vr_state);
		tsk->thread.used_vr = 1;
		tsk->thread.load_vec++;

		return 1;
	}
	return 0;
}
#else
#define loadvec(thr) 0
static inline int restore_altivec(struct task_struct *tsk) { return 0; }
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
static void __giveup_vsx(struct task_struct *tsk)
{
	unsigned long msr = tsk->thread.regs->msr;

	/*
	 * We should never be setting MSR_VSX without also setting
	 * MSR_FP and MSR_VEC
	 */
	WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC)));

	/* __giveup_fpu will clear MSR_VSX */
	if (msr & MSR_FP)
		__giveup_fpu(tsk);
	if (msr & MSR_VEC)
		__giveup_altivec(tsk);
}

static void giveup_vsx(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
	__giveup_vsx(tsk);
	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
}
void enable_kernel_vsx(void)
{
	unsigned long cpumsr;

	WARN_ON(preemptible());

	cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);

	if (current->thread.regs &&
	    (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
		check_if_tm_restore_required(current);
		/*
		 * If a thread has already been reclaimed then the
		 * checkpointed registers are on the CPU but have definitely
		 * been saved by the reclaim code. Don't need to and *cannot*
		 * giveup as this would save to the 'live' structure not the
		 * checkpointed structure.
		 */
		if (!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
			return;
		__giveup_vsx(current);
	}
}
EXPORT_SYMBOL(enable_kernel_vsx);
void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
			BUG_ON(tsk != current);
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);

static int restore_vsx(struct task_struct *tsk)
{
	if (cpu_has_feature(CPU_FTR_VSX)) {
		tsk->thread.used_vsr = 1;
		return 1;
	}

	return 0;
}
#else
static inline int restore_vsx(struct task_struct *tsk) { return 0; }
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
void giveup_spe(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_SPE);
	__giveup_spe(tsk);
	msr_check_and_clear(MSR_SPE);
}
EXPORT_SYMBOL(giveup_spe);

void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

	msr_check_and_set(MSR_SPE);

	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
		check_if_tm_restore_required(current);
		__giveup_spe(current);
	}
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
			BUG_ON(tsk != current);
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */
static unsigned long msr_all_available;

static int __init init_msr_all_available(void)
{
#ifdef CONFIG_PPC_FPU
	msr_all_available |= MSR_FP;
#endif
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		msr_all_available |= MSR_VEC;
#endif
#ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX))
		msr_all_available |= MSR_VSX;
#endif
#ifdef CONFIG_SPE
	if (cpu_has_feature(CPU_FTR_SPE))
		msr_all_available |= MSR_SPE;
#endif

	return 0;
}
early_initcall(init_msr_all_available);
void giveup_all(struct task_struct *tsk)
{
	unsigned long usermsr;

	if (!tsk->thread.regs)
		return;

	usermsr = tsk->thread.regs->msr;

	if ((usermsr & msr_all_available) == 0)
		return;

	msr_check_and_set(msr_all_available);
	check_if_tm_restore_required(tsk);

	WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));

#ifdef CONFIG_PPC_FPU
	if (usermsr & MSR_FP)
		__giveup_fpu(tsk);
#endif
#ifdef CONFIG_ALTIVEC
	if (usermsr & MSR_VEC)
		__giveup_altivec(tsk);
#endif
#ifdef CONFIG_SPE
	if (usermsr & MSR_SPE)
		__giveup_spe(tsk);
#endif

	msr_check_and_clear(msr_all_available);
}
EXPORT_SYMBOL(giveup_all);
void restore_math(struct pt_regs *regs)
{
	unsigned long msr;

	if (!msr_tm_active(regs->msr) &&
	    !current->thread.load_fp && !loadvec(current->thread))
		return;

	msr = regs->msr;
	msr_check_and_set(msr_all_available);

	/*
	 * Only reload if the bit is not set in the user MSR, the bit BEING set
	 * indicates that the registers are hot
	 */
	if ((!(msr & MSR_FP)) && restore_fp(current))
		msr |= MSR_FP | current->thread.fpexc_mode;

	if ((!(msr & MSR_VEC)) && restore_altivec(current))
		msr |= MSR_VEC;

	if ((msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC) &&
	    restore_vsx(current)) {
		msr |= MSR_VSX;
	}

	msr_check_and_clear(msr_all_available);

	regs->msr = msr;
}
void save_all(struct task_struct *tsk)
{
	unsigned long usermsr;

	if (!tsk->thread.regs)
		return;

	usermsr = tsk->thread.regs->msr;

	if ((usermsr & msr_all_available) == 0)
		return;

	msr_check_and_set(msr_all_available);

	WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));

	if (usermsr & MSR_FP)
		save_fpu(tsk);

	if (usermsr & MSR_VEC)
		save_altivec(tsk);

	if (usermsr & MSR_SPE)
		__giveup_spe(tsk);

	msr_check_and_clear(msr_all_available);
}
void flush_all_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		BUG_ON(tsk != current);
		save_all(tsk);

#ifdef CONFIG_SPE
		if (tsk->thread.regs->msr & MSR_SPE)
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
#endif

		preempt_enable();
	}
}
EXPORT_SYMBOL(flush_all_to_thread);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int signal_code, int breakpt)
{
	siginfo_t info;

	current->thread.trap_nr = signal_code;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = breakpt;	/* breakpoint or watchpoint id */
	info.si_code = signal_code;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_break(struct pt_regs *regs, unsigned long address,
	      unsigned long error_code)
{
	siginfo_t info;

	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_break_match(regs))
		return;

	/* Clear the breakpoint */
	hw_breakpoint_disable();

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->debug.iac1 = thread->debug.iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->debug.iac3 = thread->debug.iac4 = 0;
#endif
	thread->debug.dac1 = thread->debug.dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->debug.dvc1 = thread->debug.dvc2 = 0;
#endif
	thread->debug.dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
	 */
	thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
			      DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be User-only
	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
	 */
	thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->debug.dbcr1 = 0;
#endif
}
static void prime_debug_regs(struct debug_reg *debug)
{
	/*
	 * We could have inherited MSR_DE from userspace, since
	 * it doesn't get cleared on exception entry.  Make sure
	 * MSR_DE is clear before we enable any debug events.
	 */
	mtmsr(mfmsr() & ~MSR_DE);

	mtspr(SPRN_IAC1, debug->iac1);
	mtspr(SPRN_IAC2, debug->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, debug->iac3);
	mtspr(SPRN_IAC4, debug->iac4);
#endif
	mtspr(SPRN_DAC1, debug->dac1);
	mtspr(SPRN_DAC2, debug->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, debug->dvc1);
	mtspr(SPRN_DVC2, debug->dvc2);
#endif
	mtspr(SPRN_DBCR0, debug->dbcr0);
	mtspr(SPRN_DBCR1, debug->dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, debug->dbcr2);
#endif
}
/*
 * Unless neither the old or new thread are making use of the
 * debug registers, set the debug registers from the values
 * stored in the new thread.
 */
void switch_booke_debug_regs(struct debug_reg *new_debug)
{
	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
	    || (new_debug->dbcr0 & DBCR0_IDM))
		prime_debug_regs(new_debug);
}
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->hw_brk.address = 0;
	thread->hw_brk.type = 0;
	set_breakpoint(&thread->hw_brk);
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
	isync();
#endif
	return 0;
}
#elif defined(CONFIG_PPC_BOOK3S)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DABR, dabr);
	if (cpu_has_feature(CPU_FTR_DABRX))
		mtspr(SPRN_DABRX, dabrx);
	return 0;
}
#elif defined(CONFIG_PPC_8xx)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	unsigned long addr = dabr & ~HW_BRK_TYPE_DABR;
	unsigned long lctrl1 = 0x90000000; /* compare type: equal on E & F */
	unsigned long lctrl2 = 0x8e000002; /* watchpoint 1 on cmp E | F */

	if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ)
		lctrl1 |= 0x00000001;
	else if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE)
		lctrl1 |= 0x00000002;
	else if ((dabr & HW_BRK_TYPE_RDWR) == 0)
		lctrl2 = 0;

	mtspr(SPRN_LCTRL2, 0);
	mtspr(SPRN_CMPE, addr);
	mtspr(SPRN_CMPF, addr + 4);
	mtspr(SPRN_LCTRL1, lctrl1);
	mtspr(SPRN_LCTRL2, lctrl2);

	return 0;
}
#else
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	return -EINVAL;
}
#endif
static inline int set_dabr(struct arch_hw_breakpoint *brk)
{
	unsigned long dabr, dabrx;

	dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
	dabrx = ((brk->type >> 3) & 0x7);

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr, dabrx);

	return __set_dabr(dabr, dabrx);
}
static inline int set_dawr(struct arch_hw_breakpoint *brk)
{
	unsigned long dawr, dawrx, mrd;

	dawr = brk->address;

	dawrx  = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE))
		<< (63 - 58); /* read/write bits */
	dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2)
		<< (63 - 59); /* translate */
	dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL))
		>> 3; /* PRIM bits */
	/*
	 * dawr length is stored in field MDR bits 48:53.  Matches range in
	 * doublewords (64 bits) biased by -1, e.g. 0b000000 = 1DW.
	 * brk->len is in bytes.
	 * This aligns up to double word size, shifts and does the bias.
	 */
	mrd = ((brk->len + 7) >> 3) - 1;
	dawrx |= (mrd & 0x3f) << (63 - 53);

	if (ppc_md.set_dawr)
		return ppc_md.set_dawr(dawr, dawrx);
	mtspr(SPRN_DAWR, dawr);
	mtspr(SPRN_DAWRX, dawrx);
	return 0;
}
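/*
 * Worked example of the MDR length encoding above (derived from the code,
 * not from the ISA text): an 8-byte breakpoint gives
 *	mrd = ((8 + 7) >> 3) - 1 = 0	-> 1 doubleword
 * and a 16-byte breakpoint gives
 *	mrd = ((16 + 7) >> 3) - 1 = 1	-> 2 doublewords.
 */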
void __set_breakpoint(struct arch_hw_breakpoint *brk)
{
	memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));

	if (cpu_has_feature(CPU_FTR_DAWR))
		set_dawr(brk);
	else
		set_dabr(brk);
}

void set_breakpoint(struct arch_hw_breakpoint *brk)
{
	preempt_disable();
	__set_breakpoint(brk);
	preempt_enable();
}

#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif
static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
				struct arch_hw_breakpoint *b)
{
	if (a->address != b->address)
		return false;
	if (a->type != b->type)
		return false;
	if (a->len != b->len)
		return false;
	return true;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM

static inline bool tm_enabled(struct task_struct *tsk)
{
	return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
}

static void tm_reclaim_thread(struct thread_struct *thr,
			      struct thread_info *ti, uint8_t cause)
{
	/*
	 * Use the current MSR TM suspended bit to track if we have
	 * checkpointed state outstanding.
	 * On signal delivery, we'd normally reclaim the checkpointed
	 * state to obtain stack pointer (see:get_tm_stackpointer()).
	 * This will then directly return to userspace without going
	 * through __switch_to(). However, if the stack frame is bad,
	 * we need to exit this thread which calls __switch_to() which
	 * will again attempt to reclaim the already saved tm state.
	 * Hence we need to check that we've not already reclaimed
	 * this state.
	 * We do this using the current MSR, rather tracking it in
	 * some specific thread_struct bit, as it has the additional
	 * benefit of checking for a potential TM bad thing exception.
	 */
	if (!MSR_TM_SUSPENDED(mfmsr()))
		return;

	/*
	 * If we are in a transaction and FP is off then we can't have
	 * used FP inside that transaction. Hence the checkpointed
	 * state is the same as the live state. We need to copy the
	 * live state to the checkpointed state so that when the
	 * transaction is restored, the checkpointed state is correct
	 * and the aborted transaction sees the correct state. We use
	 * ckpt_regs.msr here as that's what tm_reclaim will use to
	 * determine if it's going to write the checkpointed state or
	 * not. So either this will write the checkpointed registers,
	 * or reclaim will. Similarly for VMX.
	 */
	if ((thr->ckpt_regs.msr & MSR_FP) == 0)
		memcpy(&thr->ckfp_state, &thr->fp_state,
		       sizeof(struct thread_fp_state));
	if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
		memcpy(&thr->ckvr_state, &thr->vr_state,
		       sizeof(struct thread_vr_state));

	giveup_all(container_of(thr, struct task_struct, thread));

	tm_reclaim(thr, thr->ckpt_regs.msr, cause);
}

void tm_reclaim_current(uint8_t cause)
{
	tm_enable();
	tm_reclaim_thread(&current->thread, current_thread_info(), cause);
}
static inline void tm_reclaim_task(struct task_struct *tsk)
{
	/* We have to work out if we're switching from/to a task that's in the
	 * middle of a transaction.
	 *
	 * In switching we need to maintain a 2nd register state as
	 * oldtask->thread.ckpt_regs.  We tm_reclaim(oldproc); this saves the
	 * checkpointed (tbegin) state in ckpt_regs, ckfp_state and
	 * ckvr_state.
	 *
	 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
	 */
	struct thread_struct *thr = &tsk->thread;

	if (!thr->regs)
		return;

	if (!MSR_TM_ACTIVE(thr->regs->msr))
		goto out_and_saveregs;

	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
		 "ccr=%lx, msr=%lx, trap=%lx)\n",
		 tsk->pid, thr->regs->nip,
		 thr->regs->ccr, thr->regs->msr,
		 thr->regs->trap);

	tm_reclaim_thread(thr, task_thread_info(tsk), TM_CAUSE_RESCHED);

	TM_DEBUG("--- tm_reclaim on pid %d complete\n",
		 tsk->pid);

out_and_saveregs:
	/* Always save the regs here, even if a transaction's not active.
	 * This context-switches a thread's TM info SPRs.  We do it here to
	 * be consistent with the restore path (in recheckpoint) which
	 * cannot happen later in _switch().
	 */
	tm_save_sprs(thr);
}
extern void __tm_recheckpoint(struct thread_struct *thread,
			      unsigned long orig_msr);

void tm_recheckpoint(struct thread_struct *thread,
		     unsigned long orig_msr)
{
	unsigned long flags;

	if (!(thread->regs->msr & MSR_TM))
		return;

	/* We really can't be interrupted here as the TEXASR registers can't
	 * change and later in the trecheckpoint code, we have a userspace R1.
	 * So let's hard disable over this region.
	 */
	local_irq_save(flags);
	hard_irq_disable();

	/* The TM SPRs are restored here, so that TEXASR.FS can be set
	 * before the trecheckpoint and no explosion occurs.
	 */
	tm_restore_sprs(thread);

	__tm_recheckpoint(thread, orig_msr);

	local_irq_restore(flags);
}
static inline void tm_recheckpoint_new_task(struct task_struct *new)
{
	unsigned long msr;

	if (!cpu_has_feature(CPU_FTR_TM))
		return;

	/* Recheckpoint the registers of the thread we're about to switch to.
	 *
	 * If the task was using FP, we non-lazily reload both the original and
	 * the speculative FP register states.  This is because the kernel
	 * doesn't see if/when a TM rollback occurs, so if we take an FP
	 * unavailable later, we are unable to determine which set of FP regs
	 * need to be restored.
	 */
	if (!tm_enabled(new))
		return;

	if (!MSR_TM_ACTIVE(new->thread.regs->msr)) {
		tm_restore_sprs(&new->thread);
		return;
	}
	msr = new->thread.ckpt_regs.msr;
	/* Recheckpoint to restore original checkpointed register state. */
	TM_DEBUG("*** tm_recheckpoint of pid %d "
		 "(new->msr 0x%lx, new->origmsr 0x%lx)\n",
		 new->pid, new->thread.regs->msr, msr);

	tm_recheckpoint(&new->thread, msr);

	/*
	 * The checkpointed state has been restored but the live state has
	 * not, ensure all the math functionality is turned off to trigger
	 * restore_math() to reload.
	 */
	new->thread.regs->msr &= ~(MSR_FP | MSR_VEC | MSR_VSX);

	TM_DEBUG("*** tm_recheckpoint of pid %d complete "
		 "(kernel msr 0x%lx)\n",
		 new->pid, mfmsr());
}
static inline void __switch_to_tm(struct task_struct *prev,
		struct task_struct *new)
{
	if (cpu_has_feature(CPU_FTR_TM)) {
		if (tm_enabled(prev) || tm_enabled(new))
			tm_enable();

		if (tm_enabled(prev)) {
			prev->thread.load_tm++;
			tm_reclaim_task(prev);
			if (!MSR_TM_ACTIVE(prev->thread.regs->msr) && prev->thread.load_tm == 0)
				prev->thread.regs->msr &= ~MSR_TM;
		}

		tm_recheckpoint_new_task(new);
	}
}
/*
 * This is called if we are on the way out to userspace and the
 * TIF_RESTORE_TM flag is set.  It checks if we need to reload
 * FP and/or vector state and does so if necessary.
 * If userspace is inside a transaction (whether active or
 * suspended) and FP/VMX/VSX instructions have ever been enabled
 * inside that transaction, then we have to keep them enabled
 * and keep the FP/VMX/VSX state loaded while ever the transaction
 * continues.  The reason is that if we didn't, and subsequently
 * got a FP/VMX/VSX unavailable interrupt inside a transaction,
 * we don't know whether it's the same transaction, and thus we
 * don't know which of the checkpointed state and the transactional
 * state to use.
 */
void restore_tm_state(struct pt_regs *regs)
{
	unsigned long msr_diff;

	/*
	 * This is the only moment we should clear TIF_RESTORE_TM as
	 * it is here that ckpt_regs.msr and pt_regs.msr become the same
	 * again, anything else could lead to an incorrect ckpt_msr being
	 * saved and therefore incorrect signal contexts.
	 */
	clear_thread_flag(TIF_RESTORE_TM);
	if (!MSR_TM_ACTIVE(regs->msr))
		return;

	msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
	msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;

	/* Ensure that restore_math() will restore */
	if (msr_diff & MSR_FP)
		current->thread.load_fp = 1;
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
		current->thread.load_vec = 1;
#endif
	restore_math(regs);

	regs->msr |= msr_diff;
}

#else
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev, new)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
static inline void save_sprs(struct thread_struct *t)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		t->vrsave = mfspr(SPRN_VRSAVE);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_DSCR))
		t->dscr = mfspr(SPRN_DSCR);

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		t->bescr = mfspr(SPRN_BESCR);
		t->ebbhr = mfspr(SPRN_EBBHR);
		t->ebbrr = mfspr(SPRN_EBBRR);

		t->fscr = mfspr(SPRN_FSCR);

		/*
		 * Note that the TAR is not available for use in the kernel.
		 * (To provide this, the TAR should be backed up/restored on
		 * exception entry/exit instead, and be in pt_regs.  FIXME,
		 * this should be in pt_regs anyway (for debug).)
		 */
		t->tar = mfspr(SPRN_TAR);
	}
#endif
}
static inline void restore_sprs(struct thread_struct *old_thread,
				struct thread_struct *new_thread)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
	    old_thread->vrsave != new_thread->vrsave)
		mtspr(SPRN_VRSAVE, new_thread->vrsave);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		u64 dscr = get_paca()->dscr_default;
		if (new_thread->dscr_inherit)
			dscr = new_thread->dscr;

		if (old_thread->dscr != dscr)
			mtspr(SPRN_DSCR, dscr);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		if (old_thread->bescr != new_thread->bescr)
			mtspr(SPRN_BESCR, new_thread->bescr);
		if (old_thread->ebbhr != new_thread->ebbhr)
			mtspr(SPRN_EBBHR, new_thread->ebbhr);
		if (old_thread->ebbrr != new_thread->ebbrr)
			mtspr(SPRN_EBBRR, new_thread->ebbrr);

		if (old_thread->fscr != new_thread->fscr)
			mtspr(SPRN_FSCR, new_thread->fscr);

		if (old_thread->tar != new_thread->tar)
			mtspr(SPRN_TAR, new_thread->tar);
	}
#endif
}
#ifdef CONFIG_PPC_BOOK3S_64
static const u8 dummy_copy_buffer[CP_SIZE] __attribute__((aligned(CP_SIZE)));
#endif

struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
	struct ppc64_tlb_batch *batch;
#endif

	new_thread = &new->thread;
	old_thread = &current->thread;

	WARN_ON(!irqs_disabled());

#ifdef CONFIG_PPC64
	/*
	 * Collect processor utilization data per process
	 */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
		long unsigned start_tb, current_tb;
		start_tb = old_thread->start_tb;
		cu->current_tb = current_tb = mfspr(SPRN_PURR);
		old_thread->accum_tb += (current_tb - start_tb);
		new_thread->start_tb = current_tb;
	}
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_STD_MMU_64
	batch = this_cpu_ptr(&ppc64_tlb_batch);
	if (batch->active) {
		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
		if (batch->index)
			__flush_tlb_pending(batch);
		batch->active = 0;
	}
#endif /* CONFIG_PPC_STD_MMU_64 */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	switch_booke_debug_regs(&new->thread.debug);
#else
/*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
 * schedule DABR
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
	if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
		__set_breakpoint(&new->thread.hw_brk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

	/*
	 * We need to save SPRs before treclaim/trecheckpoint as these will
	 * change a number of them.
	 */
	save_sprs(&prev->thread);

	/* Save FPU, Altivec, VSX and SPE state */
	giveup_all(prev);

	__switch_to_tm(prev, new);

	if (!radix_enabled()) {
		/*
		 * We can't take a PMU exception inside _switch() since there
		 * is a window where the kernel stack SLB and the kernel stack
		 * are out of sync. Hard disable here.
		 */
		hard_irq_disable();
	}

	/*
	 * Call restore_sprs() before calling _switch(). If we move it after
	 * _switch() then we miss out on calling it for new tasks. The reason
	 * for this is we manually create a stack frame for new tasks that
	 * directly returns through ret_from_fork() or
	 * ret_from_kernel_thread(). See copy_thread() for details.
	 */
	restore_sprs(old_thread, new_thread);

	last = _switch(old_thread, new_thread);

#ifdef CONFIG_PPC_STD_MMU_64
	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
		batch = this_cpu_ptr(&ppc64_tlb_batch);
		batch->active = 1;
	}

	if (current_thread_info()->task->thread.regs) {
		restore_math(current_thread_info()->task->thread.regs);

		/*
		 * The copy-paste buffer can only store into foreign real
		 * addresses, so unprivileged processes can not see the
		 * data or use it in any way unless they have foreign real
		 * mappings. We don't have a VAS driver that allocates those
		 * yet, so no cpabort is required.
		 */
		if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
			/*
			 * DD1 allows paste into normal system memory, so we
			 * do an unpaired copy here to clear the buffer and
			 * prevent a covert channel being set up.
			 *
			 * cpabort is not used because it is quite expensive.
			 */
			asm volatile(PPC_COPY(%0, %1)
					: : "r"(dummy_copy_buffer), "r"(0));
		}
	}
#endif /* CONFIG_PPC_STD_MMU_64 */

	return last;
}
static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
			sizeof(int));

	printk("Instruction dump:");

	for (i = 0; i < instructions_to_print; i++) {
		int instr;

		if (!(i % 8))
			pr_cont("\n");

#if !defined(CONFIG_BOOKE)
		/* If executing with the IMMU off, adjust pc rather
		 * than print XXXXXXXX.
		 */
		if (!(regs->msr & MSR_IR))
			pc = (unsigned long)phys_to_virt(pc);
#endif

		if (!__kernel_text_address(pc) ||
		    probe_kernel_address((unsigned int __user *)pc, instr)) {
			pr_cont("XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				pr_cont("<%08x> ", instr);
			else
				pr_cont("%08x ", instr);
		}

		pc += sizeof(int);
	}

	pr_cont("\n");
}
static struct regbit {
	unsigned long bit;
	const char *name;
} msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
	{MSR_SF,	"SF"},
	{MSR_HV,	"HV"},
#endif
	{MSR_VEC,	"VEC"},
	{MSR_VSX,	"VSX"},
#ifdef CONFIG_BOOKE
	{MSR_CE,	"CE"},
#endif
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_ME,	"ME"},
#ifdef CONFIG_BOOKE
	{MSR_DE,	"DE"},
#else
	{MSR_SE,	"SE"},
	{MSR_BE,	"BE"},
#endif
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{MSR_PMM,	"PMM"},
#ifndef CONFIG_BOOKE
	{MSR_RI,	"RI"},
	{MSR_LE,	"LE"},
#endif
	{0,		NULL}
};

static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
{
	const char *s = "";

	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			pr_cont("%s%s", s, bits->name);
			s = sep;
		}
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static struct regbit msr_tm_bits[] = {
	{MSR_TS_T,	"T"},
	{MSR_TS_S,	"S"},
	{MSR_TM,	"E"},
	{0,		NULL}
};

static void print_tm_bits(unsigned long val)
{
/*
 * This only prints something if at least one of the TM bit is set.
 * Inside the TM[], the output means:
 *   E: Enabled		(bit 32)
 *   S: Suspended	(bit 33)
 *   T: Transactional	(bit 34)
 */
	if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
		pr_cont(",TM[");
		print_bits(val, msr_tm_bits, "");
		pr_cont("]");
	}
}
#else
static void print_tm_bits(unsigned long val) {}
#endif

static void print_msr_bits(unsigned long val)
{
	pr_cont("<");
	print_bits(val, msr_bits, ",");
	print_tm_bits(val);
	pr_cont(">");
}
#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#define LAST_VOLATILE	13
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#define LAST_VOLATILE	12
#endif

void show_regs(struct pt_regs * regs)
{
	int i, trap;

	show_regs_print_info(KERN_DEFAULT);

	printk("NIP:  "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR:  "REG" ", regs->msr);
	print_msr_bits(regs->msr);
	pr_cont("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
	trap = TRAP(regs);
	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
		pr_cont("CFAR: "REG" ", regs->orig_gpr3);
	if (trap == 0x200 || trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
		pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
#else
		pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
#endif
#ifdef CONFIG_PPC64
	pr_cont("SOFTE: %ld ", regs->softe);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(regs->msr))
		pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
#endif

	for (i = 0;  i < 32;  i++) {
		if ((i % REGS_PER_LINE) == 0)
			pr_cont("\nGPR%02d: ", i);
		pr_cont(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
	pr_cont("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Lookup NIP late so we have the best change of getting the
	 * above info out without failing
	 */
	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
}
void flush_thread(void)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}
void
release_thread(struct task_struct *t)
{
}

/*
 * this gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	flush_all_to_thread(src);
	/*
	 * Flush TM state out so we can copy it.  __switch_to_tm() does this
	 * flush but it removes the checkpointed state from the current CPU and
	 * transitions the CPU out of TM mode.  Hence we need to call
	 * tm_recheckpoint_new_task() (on the same task) to restore the
	 * checkpointed state back and the TM mode.
	 *
	 * Can't pass dst because it isn't ready. Doesn't matter, passing
	 * dst is only important for __switch_to()
	 */
	__switch_to_tm(src, src);

	*dst = *src;

	clear_task_ebb(dst);

	return 0;
}
static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
{
#ifdef CONFIG_PPC_STD_MMU_64
	unsigned long sp_vsid;
	unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

	if (radix_enabled())
		return;

	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
			<< SLB_VSID_SHIFT_1T;
	else
		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
			<< SLB_VSID_SHIFT;
	sp_vsid |= SLB_VSID_KERNEL | llp;
	p->thread.ksp_vsid = sp_vsid;
#endif
}
/*
 * Copy architecture-specific thread state
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long kthread_arg, struct task_struct *p)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	extern void ret_from_kernel_thread(void);
	void (*f)(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
	struct thread_info *ti = task_thread_info(p);

	klp_init_thread_info(ti);

	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
		/* function */
		if (usp)
			childregs->gpr[14] = ppc_function_entry((void *)usp);
#ifdef CONFIG_PPC64
		clear_tsk_thread_flag(p, TIF_32BIT);
		childregs->softe = 1;
#endif
		childregs->gpr[15] = kthread_arg;
		p->thread.regs = NULL;	/* no user register state */
		ti->flags |= _TIF_RESTOREALL;
		f = ret_from_kernel_thread;
	} else {
		/* user thread */
		struct pt_regs *regs = current_pt_regs();
		CHECK_FULL_REGS(regs);
		*childregs = *regs;
		if (usp)
			childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		childregs->gpr[3] = 0;  /* Result from fork() */
		if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
			if (!is_32bit_task())
				childregs->gpr[13] = childregs->gpr[6];
			else
#endif
				childregs->gpr[2] = childregs->gpr[6];
		}

		f = ret_from_fork;
	}
	childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
	sp -= STACK_FRAME_OVERHEAD;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some house keeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	((unsigned long *)sp)[0] = 0;
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
#ifdef CONFIG_PPC32
	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
				_ALIGN_UP(sizeof(struct thread_info), 16);
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	p->thread.ptrace_bps[0] = NULL;
#endif

	p->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
	p->thread.vr_save_area = NULL;
#endif

	setup_ksp_vsid(p, sp);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		p->thread.dscr_inherit = current->thread.dscr_inherit;
		p->thread.dscr = mfspr(SPRN_DSCR);
	}
	if (cpu_has_feature(CPU_FTR_HAS_PPR))
		p->thread.ppr = INIT_PPR;
#endif
	kregs->nip = ppc_function_entry(f);
	return 0;
}
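/*
 * Sketch of the kernel stack built above for a new task (derived from the
 * sp adjustments in copy_thread(), highest address first):
 *
 *	childregs (struct pt_regs)	- user register image, p->thread.regs
 *	STACK_FRAME_OVERHEAD		- frame whose back chain is set to 0
 *	kregs (struct pt_regs)		- kregs->nip = ret_from_fork /
 *					  ret_from_kernel_thread entry
 *	STACK_FRAME_OVERHEAD		- frame popped by _switch()
 *	p->thread.ksp points here
 */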
/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
#endif

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set.  Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Clear any transactional state, we're exec()ing. The cause is
	 * not important as there will never be a recheckpoint so it's not
	 * user visible.
	 */
	if (MSR_TM_SUSPENDED(mfmsr()))
		tm_reclaim_current(0);
#endif

	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->gpr[1] = sp;

	/*
	 * We have just cleared all the nonvolatile GPRs, so make
	 * FULL_REGS(regs) return true.  This is necessary to allow
	 * ptrace to examine the thread immediately after exec.
	 */
	regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
	regs->mq = 0;
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!is_32bit_task()) {
		unsigned long entry;

		if (is_elf2_task()) {
			/* Look ma, no function descriptors! */
			entry = start;

			/*
			 * The latest iteration of the ABI requires that when
			 * calling a function (at its global entry point),
			 * the caller must ensure r12 holds the entry point
			 * address (so that the function can quickly
			 * establish addressability).
			 */
			regs->gpr[12] = start;
			/* Make sure that's restored on entry to userspace. */
			set_thread_flag(TIF_RESTOREALL);
		} else {
			unsigned long toc;

			/* start is a relocated pointer to the function
			 * descriptor for the elf _start routine.  The first
			 * entry in the function descriptor is the entry
			 * address of _start and the second entry is the TOC
			 * value we need to use.
			 */
			__get_user(entry, (unsigned long __user *)start);
			__get_user(toc, (unsigned long __user *)start+1);

			/* Check whether the e_entry function descriptor entries
			 * need to be relocated before we can use them.
			 */
			if (load_addr != 0) {
				entry += load_addr;
				toc   += load_addr;
			}
			regs->gpr[2] = toc;
		}
		regs->nip = entry;
		regs->msr = MSR_USER64;
	} else {
		regs->nip = start;
		regs->gpr[2] = 0;
		regs->msr = MSR_USER32;
	}
#endif
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	current->thread.load_fp = 0;
	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
	current->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
	memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
	current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
	current->thread.vr_save_area = NULL;
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
	current->thread.load_vec = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	current->thread.tm_tfhar = 0;
	current->thread.tm_texasr = 0;
	current->thread.tm_tfiar = 0;
	current->thread.load_tm = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
}
EXPORT_SYMBOL(start_thread);
#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			/*
			 * When the sticky exception bits are set
			 * directly by userspace, it must call prctl
			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
			 * in the existing prctl settings) or
			 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
			 * the bits being set).  <fenv.h> functions
			 * saving and restoring the whole
			 * floating-point environment need to do so
			 * anyway to restore the prctl settings from
			 * the saved environment.
			 */
			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
			return 0;
		} else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	}

	/* on a CONFIG_SPE this does not hurt us.  The bits that
	 * __pack_fe01 use do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
	return 0;
}
int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			/*
			 * When the sticky exception bits are set
			 * directly by userspace, it must call prctl
			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
			 * in the existing prctl settings) or
			 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
			 * the bits being set).  <fenv.h> functions
			 * saving and restoring the whole
			 * floating-point environment need to do so
			 * anyway to restore the prctl settings from
			 * the saved environment.
			 */
			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
			val = tsk->thread.fpexc_mode;
		} else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}
int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs->msr &= ~MSR_LE;
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs->msr |= MSR_LE;
	else
		return -EINVAL;

	return 0;
}
int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}
int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	/*
	 * Avoid crashing if the stack has overflowed and corrupted
	 * task_cpu(p), which is in the thread_info struct.
	 */
	if (cpu < NR_CPUS && cpu_possible(cpu)) {
		stack_page = (unsigned long) hardirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;

		stack_page = (unsigned long) softirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;
	}
	return 0;
}
int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return valid_irq_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
		    p->state == TASK_RUNNING)
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}
static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int curr_frame = current->curr_ret_stack;
	extern void return_to_handler(void);
	unsigned long rth = (unsigned long)return_to_handler;
#endif

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			sp = current_stack_pointer();
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			if ((ip == rth) && curr_frame >= 0) {
				pr_cont(" (%pS)",
					(void *)current->ret_stack[curr_frame].ret);
				curr_frame--;
			}
#endif
			if (firstframe)
				pr_cont(" (unreliable)");
			pr_cont("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			lr = regs->link;
			printk("--- interrupt: %lx at %pS\n    LR = %pS\n",
			       regs->trap, (void *)regs->nip, (void *)lr);
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);
}
#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void notrace __ppc64_runlatch_on(void)
{
	struct thread_info *ti = current_thread_info();

	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		/*
		 * Least significant bit (RUN) is the only writable bit of
		 * the CTRL register, so we can avoid mfspr. 2.06 is not the
		 * earliest ISA where this is the case, but it's convenient.
		 */
		mtspr(SPRN_CTRLT, CTRL_RUNLATCH);
	} else {
		unsigned long ctrl;

		/*
		 * Some architectures (e.g., Cell) have writable fields other
		 * than RUN, so do the read-modify-write.
		 */
		ctrl = mfspr(SPRN_CTRLF);
		ctrl |= CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);
	}

	ti->local_flags |= _TLF_RUNLATCH;
}

/* Called with hard IRQs off */
void notrace __ppc64_runlatch_off(void)
{
	struct thread_info *ti = current_thread_info();

	ti->local_flags &= ~_TLF_RUNLATCH;

	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		mtspr(SPRN_CTRLT, 0);
	} else {
		unsigned long ctrl;

		ctrl = mfspr(SPRN_CTRLF);
		ctrl &= ~CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);
	}
}
#endif /* CONFIG_PPC64 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = 0;

	/* 8MB for 32bit, 1GB for 64bit */
	if (is_32bit_task())
		rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
	else
		rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));

	return rnd << PAGE_SHIFT;
}
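/*
 * Worked example (follows directly from brk_rnd() above, assuming 4K pages,
 * i.e. PAGE_SHIFT = 12): a 32-bit task gets rnd in [0, 2^11) pages, so up to
 * 8MB of heap randomisation once shifted by PAGE_SHIFT; a 64-bit task gets
 * rnd in [0, 2^18) pages, i.e. up to 1GB.
 */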
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

#ifdef CONFIG_PPC_STD_MMU_64
	/*
	 * If we are using 1TB segments and we are allowed to randomise
	 * the heap, we can put it above 1TB so it is backed by a 1TB
	 * segment. Otherwise the heap will be in the bottom 1TB
	 * which always uses 256MB segments and this may result in a
	 * performance penalty. We don't need to worry about radix. For
	 * radix, mmu_highuser_ssize remains unchanged from 256MB.
	 */
	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

	ret = PAGE_ALIGN(base + brk_rnd());