/*
 * Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 *  Paul Mackerras (paulus@cs.anu.edu.au)
 *
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>
#include <linux/elf-randomize.h>
#include <linux/pkeys.h>
#include <linux/seq_buf.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/debug.h>
#include <asm/firmware.h>
#include <asm/hw_irq.h>
#include <asm/code-patching.h>
#include <asm/livepatch.h>
#include <asm/cpu_has_feature.h>
#include <asm/asm-prototypes.h>
#include <asm/stacktrace.h>
#include <asm/hw_breakpoint.h>

#include <linux/kprobes.h>
#include <linux/kdebug.h>
/* Transactional Memory debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while(0)
#endif

extern unsigned long _get_SP(void);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Are we running in "Suspend disabled" mode? If so we have to block any
 * sigreturn that would get us into suspended state, and we also warn in some
 * other paths that we should never reach with suspend disabled.
 */
bool tm_suspend_disabled __ro_after_init = false;
static void check_if_tm_restore_required(struct task_struct *tsk)
{
	/*
	 * If we are saving the current thread's registers, and the
	 * thread is in a transactional state, set the TIF_RESTORE_TM
	 * bit so that we know to restore the registers before
	 * returning to userspace.
	 */
	if (tsk == current && tsk->thread.regs &&
	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
	    !test_thread_flag(TIF_RESTORE_TM)) {
		tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
		set_thread_flag(TIF_RESTORE_TM);
	}
}
static bool tm_active_with_fp(struct task_struct *tsk)
{
	return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
		(tsk->thread.ckpt_regs.msr & MSR_FP);
}
static bool tm_active_with_altivec(struct task_struct *tsk)
{
	return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
		(tsk->thread.ckpt_regs.msr & MSR_VEC);
}
#else
static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
static inline bool tm_active_with_fp(struct task_struct *tsk) { return false; }
static inline bool tm_active_with_altivec(struct task_struct *tsk) { return false; }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
bool strict_msr_control;
EXPORT_SYMBOL(strict_msr_control);

static int __init enable_strict_msr_control(char *str)
{
	strict_msr_control = true;
	pr_info("Enabling strict facility control\n");

	return 0;
}
early_param("ppc_strict_facility_enable", enable_strict_msr_control);
/* notrace because it's called by restore_math */
unsigned long notrace msr_check_and_set(unsigned long bits)
{
	unsigned long oldmsr = mfmsr();
	unsigned long newmsr;

	newmsr = oldmsr | bits;

	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
		newmsr |= MSR_VSX;

	if (oldmsr != newmsr)
		mtmsr_isync(newmsr);

	return newmsr;
}
EXPORT_SYMBOL_GPL(msr_check_and_set);
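
/*
 * Typical usage (illustrative only, mirroring giveup_fpu() and
 * enable_kernel_fp() below): bracket any in-kernel use of a facility with
 * the set/clear pair so the corresponding MSR bits are live only for the
 * critical section, e.g.
 *
 *	msr_check_and_set(MSR_FP);
 *	... touch the FP register file ...
 *	msr_check_and_clear(MSR_FP);
 */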
/* notrace because it's called by restore_math */
void notrace __msr_check_and_clear(unsigned long bits)
{
	unsigned long oldmsr = mfmsr();
	unsigned long newmsr;

	newmsr = oldmsr & ~bits;

	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
		newmsr &= ~MSR_VSX;

	if (oldmsr != newmsr)
		mtmsr_isync(newmsr);
}
EXPORT_SYMBOL(__msr_check_and_clear);
#ifdef CONFIG_PPC_FPU
static void __giveup_fpu(struct task_struct *tsk)
{
	unsigned long msr;

	save_fpu(tsk);
	msr = tsk->thread.regs->msr;
	msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
	if (cpu_has_feature(CPU_FTR_VSX))
		msr &= ~MSR_VSX;
	tsk->thread.regs->msr = msr;
}

void giveup_fpu(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_FP);
	__giveup_fpu(tsk);
	msr_check_and_clear(MSR_FP);
}
EXPORT_SYMBOL(giveup_fpu);
/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct. That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
			/*
			 * This should only ever be called for current or
			 * for a stopped child process. Since we save away
			 * the FP register state on context switch,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);
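
/*
 * For example, ptrace-style readers of a stopped child's FP registers rely
 * on this flush: they want the values saved in tsk->thread.fp_state, not
 * whatever happens to be live in the CPU at that moment.
 */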
void enable_kernel_fp(void)
{
	unsigned long cpumsr;

	WARN_ON(preemptible());

	cpumsr = msr_check_and_set(MSR_FP);

	if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
		check_if_tm_restore_required(current);
		/*
		 * If a thread has already been reclaimed then the
		 * checkpointed registers are on the CPU but have definitely
		 * been saved by the reclaim code. Don't need to and *cannot*
		 * giveup as this would save to the 'live' structure not the
		 * checkpointed structure.
		 */
		if (!MSR_TM_ACTIVE(cpumsr) &&
		    MSR_TM_ACTIVE(current->thread.regs->msr))
			return;
		__giveup_fpu(current);
	}
}
EXPORT_SYMBOL(enable_kernel_fp);
static int restore_fp(struct task_struct *tsk)
{
	if (tsk->thread.load_fp || tm_active_with_fp(tsk)) {
		load_fp_state(&current->thread.fp_state);
		current->thread.load_fp++;
		return 1;
	}
	return 0;
}
#else
static int restore_fp(struct task_struct *tsk) { return 0; }
#endif /* CONFIG_PPC_FPU */
#ifdef CONFIG_ALTIVEC
#define loadvec(thr) ((thr).load_vec)

static void __giveup_altivec(struct task_struct *tsk)
{
	unsigned long msr;

	save_altivec(tsk);
	msr = tsk->thread.regs->msr;
	msr &= ~MSR_VEC;
	if (cpu_has_feature(CPU_FTR_VSX))
		msr &= ~MSR_VSX;
	tsk->thread.regs->msr = msr;
}

void giveup_altivec(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_VEC);
	__giveup_altivec(tsk);
	msr_check_and_clear(MSR_VEC);
}
EXPORT_SYMBOL(giveup_altivec);
void enable_kernel_altivec(void)
{
	unsigned long cpumsr;

	WARN_ON(preemptible());

	cpumsr = msr_check_and_set(MSR_VEC);

	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
		check_if_tm_restore_required(current);
		/*
		 * If a thread has already been reclaimed then the
		 * checkpointed registers are on the CPU but have definitely
		 * been saved by the reclaim code. Don't need to and *cannot*
		 * giveup as this would save to the 'live' structure not the
		 * checkpointed structure.
		 */
		if (!MSR_TM_ACTIVE(cpumsr) &&
		    MSR_TM_ACTIVE(current->thread.regs->msr))
			return;
		__giveup_altivec(current);
	}
}
EXPORT_SYMBOL(enable_kernel_altivec);
/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
			BUG_ON(tsk != current);
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
static int restore_altivec(struct task_struct *tsk)
{
	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
	    (tsk->thread.load_vec || tm_active_with_altivec(tsk))) {
		load_vr_state(&tsk->thread.vr_state);
		tsk->thread.used_vr = 1;
		tsk->thread.load_vec++;

		return 1;
	}
	return 0;
}
#else
#define loadvec(thr) 0
static inline int restore_altivec(struct task_struct *tsk) { return 0; }
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
static void __giveup_vsx(struct task_struct *tsk)
{
	unsigned long msr = tsk->thread.regs->msr;

	/*
	 * We should never be setting MSR_VSX without also setting
	 * MSR_FP and MSR_VEC.
	 */
	WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC)));

	/* __giveup_fpu will clear MSR_VSX */
	if (msr & MSR_FP)
		__giveup_fpu(tsk);
	if (msr & MSR_VEC)
		__giveup_altivec(tsk);
}

static void giveup_vsx(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
	__giveup_vsx(tsk);
	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
}
void enable_kernel_vsx(void)
{
	unsigned long cpumsr;

	WARN_ON(preemptible());

	cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);

	if (current->thread.regs &&
	    (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
		check_if_tm_restore_required(current);
		/*
		 * If a thread has already been reclaimed then the
		 * checkpointed registers are on the CPU but have definitely
		 * been saved by the reclaim code. Don't need to and *cannot*
		 * giveup as this would save to the 'live' structure not the
		 * checkpointed structure.
		 */
		if (!MSR_TM_ACTIVE(cpumsr) &&
		    MSR_TM_ACTIVE(current->thread.regs->msr))
			return;
		__giveup_vsx(current);
	}
}
EXPORT_SYMBOL(enable_kernel_vsx);
void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
			BUG_ON(tsk != current);
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
static int restore_vsx(struct task_struct *tsk)
{
	if (cpu_has_feature(CPU_FTR_VSX)) {
		tsk->thread.used_vsr = 1;
		return 1;
	}

	return 0;
}
#else
static inline int restore_vsx(struct task_struct *tsk) { return 0; }
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
void giveup_spe(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_SPE);
	__giveup_spe(tsk);
	msr_check_and_clear(MSR_SPE);
}
EXPORT_SYMBOL(giveup_spe);

void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

	msr_check_and_set(MSR_SPE);

	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
		check_if_tm_restore_required(current);
		__giveup_spe(current);
	}
}
EXPORT_SYMBOL(enable_kernel_spe);
void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
			BUG_ON(tsk != current);
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */
static unsigned long msr_all_available;

static int __init init_msr_all_available(void)
{
#ifdef CONFIG_PPC_FPU
	msr_all_available |= MSR_FP;
#endif
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		msr_all_available |= MSR_VEC;
#endif
#ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX))
		msr_all_available |= MSR_VSX;
#endif
#ifdef CONFIG_SPE
	if (cpu_has_feature(CPU_FTR_SPE))
		msr_all_available |= MSR_SPE;
#endif

	return 0;
}
early_initcall(init_msr_all_available);
void giveup_all(struct task_struct *tsk)
{
	unsigned long usermsr;

	if (!tsk->thread.regs)
		return;

	usermsr = tsk->thread.regs->msr;

	if ((usermsr & msr_all_available) == 0)
		return;

	msr_check_and_set(msr_all_available);
	check_if_tm_restore_required(tsk);

	WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));

#ifdef CONFIG_PPC_FPU
	if (usermsr & MSR_FP)
		__giveup_fpu(tsk);
#endif
#ifdef CONFIG_ALTIVEC
	if (usermsr & MSR_VEC)
		__giveup_altivec(tsk);
#endif
#ifdef CONFIG_SPE
	if (usermsr & MSR_SPE)
		__giveup_spe(tsk);
#endif

	msr_check_and_clear(msr_all_available);
}
EXPORT_SYMBOL(giveup_all);
/*
 * The exception exit path calls restore_math() with interrupts hard disabled
 * but the soft irq state not "reconciled". ftrace code that calls
 * local_irq_save/restore causes warnings.
 *
 * Rather than complicate the exit path, just don't trace restore_math. This
 * could be done by having ftrace entry code check for this un-reconciled
 * condition where MSR[EE]=0 and PACA_IRQ_HARD_DIS is not set, and
 * temporarily fix it up for the duration of the ftrace call.
 */
void notrace restore_math(struct pt_regs *regs)
{
	unsigned long msr;

	if (!MSR_TM_ACTIVE(regs->msr) &&
		!current->thread.load_fp && !loadvec(current->thread))
		return;

	msr = regs->msr;
	msr_check_and_set(msr_all_available);

	/*
	 * Only reload if the bit is not set in the user MSR, the bit BEING set
	 * indicates that the registers are hot.
	 */
	if ((!(msr & MSR_FP)) && restore_fp(current))
		msr |= MSR_FP | current->thread.fpexc_mode;

	if ((!(msr & MSR_VEC)) && restore_altivec(current))
		msr |= MSR_VEC;

	if ((msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC) &&
			restore_vsx(current)) {
		msr |= MSR_VSX;
	}

	msr_check_and_clear(msr_all_available);

	regs->msr = msr;
}
static void save_all(struct task_struct *tsk)
{
	unsigned long usermsr;

	if (!tsk->thread.regs)
		return;

	usermsr = tsk->thread.regs->msr;

	if ((usermsr & msr_all_available) == 0)
		return;

	msr_check_and_set(msr_all_available);

	WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));

	if (usermsr & MSR_FP)
		save_fpu(tsk);

	if (usermsr & MSR_VEC)
		save_altivec(tsk);

	if (usermsr & MSR_SPE)
		__giveup_spe(tsk);

	msr_check_and_clear(msr_all_available);
	thread_pkey_regs_save(&tsk->thread);
}
void flush_all_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		BUG_ON(tsk != current);
#ifdef CONFIG_SPE
		if (tsk->thread.regs->msr & MSR_SPE)
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
#endif
		save_all(tsk);

		preempt_enable();
	}
}
EXPORT_SYMBOL(flush_all_to_thread);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int breakpt)
{
	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	force_sig_ptrace_errno_trap(breakpt, /* breakpoint or watchpoint id */
				    (void __user *)address);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_break(struct pt_regs *regs, unsigned long address,
	      unsigned long error_code)
{
	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_break_match(regs))
		return;

	/* Clear the breakpoint */
	hw_breakpoint_disable();

	/* Deliver the signal to userspace */
	force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)address, current);
}
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->debug.iac1 = thread->debug.iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->debug.iac3 = thread->debug.iac4 = 0;
#endif
	thread->debug.dac1 = thread->debug.dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->debug.dvc1 = thread->debug.dvc2 = 0;
#endif
	thread->debug.dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
	 */
	thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
			DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be User-only
	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
	 */
	thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->debug.dbcr1 = 0;
#endif
}
static void prime_debug_regs(struct debug_reg *debug)
{
	/*
	 * We could have inherited MSR_DE from userspace, since
	 * it doesn't get cleared on exception entry. Make sure
	 * MSR_DE is clear before we enable any debug events.
	 */
	mtmsr(mfmsr() & ~MSR_DE);

	mtspr(SPRN_IAC1, debug->iac1);
	mtspr(SPRN_IAC2, debug->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, debug->iac3);
	mtspr(SPRN_IAC4, debug->iac4);
#endif
	mtspr(SPRN_DAC1, debug->dac1);
	mtspr(SPRN_DAC2, debug->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, debug->dvc1);
	mtspr(SPRN_DVC2, debug->dvc2);
#endif
	mtspr(SPRN_DBCR0, debug->dbcr0);
	mtspr(SPRN_DBCR1, debug->dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, debug->dbcr2);
#endif
}
/*
 * Unless neither the old nor the new thread is making use of the
 * debug registers, set the debug registers from the values
 * stored in the new thread.
 */
void switch_booke_debug_regs(struct debug_reg *new_debug)
{
	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
		|| (new_debug->dbcr0 & DBCR0_IDM))
			prime_debug_regs(new_debug);
}
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_breakpoint(struct arch_hw_breakpoint *brk)
{
	preempt_disable();
	__set_breakpoint(brk);
	preempt_enable();
}

static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->hw_brk.address = 0;
	thread->hw_brk.type = 0;
	if (ppc_breakpoint_available())
		set_breakpoint(&thread->hw_brk);
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
	isync();
#endif
	return 0;
}
#elif defined(CONFIG_PPC_BOOK3S)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DABR, dabr);
	if (cpu_has_feature(CPU_FTR_DABRX))
		mtspr(SPRN_DABRX, dabrx);
	return 0;
}
#elif defined(CONFIG_PPC_8xx)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	unsigned long addr = dabr & ~HW_BRK_TYPE_DABR;
	unsigned long lctrl1 = 0x90000000; /* compare type: equal on E & F */
	unsigned long lctrl2 = 0x8e000002; /* watchpoint 1 on cmp E | F */

	if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ)
		lctrl1 |= 0x04000000;
	else if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE)
		lctrl1 |= 0x08000000;
	else if ((dabr & HW_BRK_TYPE_RDWR) == 0)
		lctrl2 = 0;

	mtspr(SPRN_LCTRL2, 0);
	mtspr(SPRN_CMPE, addr);
	mtspr(SPRN_CMPF, addr + 4);
	mtspr(SPRN_LCTRL1, lctrl1);
	mtspr(SPRN_LCTRL2, lctrl2);

	return 0;
}
#else
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	return -EINVAL;
}
#endif
static inline int set_dabr(struct arch_hw_breakpoint *brk)
{
	unsigned long dabr, dabrx;

	dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
	dabrx = ((brk->type >> 3) & 0x7);

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr, dabrx);

	return __set_dabr(dabr, dabrx);
}
int set_dawr(struct arch_hw_breakpoint *brk)
{
	unsigned long dawr, dawrx, mrd;

	dawr = brk->address;

	dawrx  = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE))
		<< (63 - 58);			/* read/write bits */
	dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2)
		<< (63 - 59);			/* translate */
	dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) >> 3; /* PRIM bits */
	/*
	 * The DAWR length is stored in field MDR bits 48:53. It matches the
	 * range in doublewords (64 bits), biased by -1, e.g. 0b000000 = 1DW
	 * and 0b111111 = 64DW.
	 * brk->len is in bytes.
	 * This aligns up to double word size, shifts and does the bias.
	 */
	mrd = ((brk->len + 7) >> 3) - 1;
	dawrx |= (mrd & 0x3f) << (63 - 53);
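	/*
	 * For example, with the bias above an 8-byte breakpoint gives
	 * mrd = ((8 + 7) >> 3) - 1 = 0, i.e. the 0b000000 encoding for one
	 * doubleword, while a 512-byte one gives mrd = 63 = 0b111111 (64DW).
	 */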
	if (ppc_md.set_dawr)
		return ppc_md.set_dawr(dawr, dawrx);
	mtspr(SPRN_DAWR, dawr);
	mtspr(SPRN_DAWRX, dawrx);

	return 0;
}
void __set_breakpoint(struct arch_hw_breakpoint *brk)
{
	memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));

	if (dawr_enabled())
		// Power8 or later
		set_dawr(brk);
	else if (!cpu_has_feature(CPU_FTR_ARCH_207S))
		// Power7 or earlier
		set_dabr(brk);
	else
		// Shouldn't happen due to higher level checks
		WARN_ON_ONCE(1);
}
/* Check if we have DAWR or DABR hardware */
bool ppc_breakpoint_available(void)
{
	if (dawr_enabled())
		return true; /* POWER8 DAWR or POWER9 forced DAWR */
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		return false; /* POWER9 with DAWR disabled */
	/* DABR: Everything but POWER8 and POWER9 */
	return true;
}
EXPORT_SYMBOL_GPL(ppc_breakpoint_available);
static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
				struct arch_hw_breakpoint *b)
{
	if (a->address != b->address)
		return false;
	if (a->type != b->type)
		return false;
	if (a->len != b->len)
		return false;
	return true;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM

static inline bool tm_enabled(struct task_struct *tsk)
{
	return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
}
static void tm_reclaim_thread(struct thread_struct *thr, uint8_t cause)
{
	/*
	 * Use the current MSR TM suspended bit to track if we have
	 * checkpointed state outstanding.
	 * On signal delivery, we'd normally reclaim the checkpointed
	 * state to obtain stack pointer (see:get_tm_stackpointer()).
	 * This will then directly return to userspace without going
	 * through __switch_to(). However, if the stack frame is bad,
	 * we need to exit this thread which calls __switch_to() which
	 * will again attempt to reclaim the already saved tm state.
	 * Hence we need to check that we've not already reclaimed
	 * this state.
	 * We do this using the current MSR, rather than tracking it in
	 * some specific thread_struct bit, as it has the additional
	 * benefit of checking for a potential TM bad thing exception.
	 */
	if (!MSR_TM_SUSPENDED(mfmsr()))
		return;

	giveup_all(container_of(thr, struct task_struct, thread));

	tm_reclaim(thr, cause);

	/*
	 * If we are in a transaction and FP is off then we can't have
	 * used FP inside that transaction. Hence the checkpointed
	 * state is the same as the live state. We need to copy the
	 * live state to the checkpointed state so that when the
	 * transaction is restored, the checkpointed state is correct
	 * and the aborted transaction sees the correct state. We use
	 * ckpt_regs.msr here as that's what tm_reclaim will use to
	 * determine if it's going to write the checkpointed state or
	 * not. So either this will write the checkpointed registers,
	 * or reclaim will. Similarly for VMX.
	 */
	if ((thr->ckpt_regs.msr & MSR_FP) == 0)
		memcpy(&thr->ckfp_state, &thr->fp_state,
		       sizeof(struct thread_fp_state));
	if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
		memcpy(&thr->ckvr_state, &thr->vr_state,
		       sizeof(struct thread_vr_state));
}
void tm_reclaim_current(uint8_t cause)
{
	tm_save_sprs(&current->thread);
	tm_reclaim_thread(&current->thread, cause);
}
static inline void tm_reclaim_task(struct task_struct *tsk)
{
	/* We have to work out if we're switching from/to a task that's in the
	 * middle of a transaction.
	 *
	 * In switching we need to maintain a 2nd register state as
	 * oldtask->thread.ckpt_regs. We tm_reclaim(oldproc); this saves the
	 * checkpointed (tbegin) state in ckpt_regs, ckfp_state and
	 * ckvr_state.
	 *
	 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
	 */
	struct thread_struct *thr = &tsk->thread;

	if (!thr->regs)
		return;

	if (!MSR_TM_ACTIVE(thr->regs->msr))
		goto out_and_saveregs;

	WARN_ON(tm_suspend_disabled);

	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
		 "ccr=%lx, msr=%lx, trap=%lx)\n",
		 tsk->pid, thr->regs->nip,
		 thr->regs->ccr, thr->regs->msr,
		 thr->regs->trap);

	tm_reclaim_thread(thr, TM_CAUSE_RESCHED);

	TM_DEBUG("--- tm_reclaim on pid %d complete\n",
		 tsk->pid);

out_and_saveregs:
	/* Always save the regs here, even if a transaction's not active.
	 * This context-switches a thread's TM info SPRs. We do it here to
	 * be consistent with the restore path (in recheckpoint) which
	 * cannot happen later in _switch().
	 */
	tm_save_sprs(thr);
}
extern void __tm_recheckpoint(struct thread_struct *thread);

void tm_recheckpoint(struct thread_struct *thread)
{
	unsigned long flags;

	if (!(thread->regs->msr & MSR_TM))
		return;

	/* We really can't be interrupted here as the TEXASR registers can't
	 * change and later in the trecheckpoint code, we have a userspace R1.
	 * So let's hard disable over this region.
	 */
	local_irq_save(flags);
	hard_irq_disable();

	/* The TM SPRs are restored here, so that TEXASR.FS can be set
	 * before the trecheckpoint and no explosion occurs.
	 */
	tm_restore_sprs(thread);

	__tm_recheckpoint(thread);

	local_irq_restore(flags);
}
static inline void tm_recheckpoint_new_task(struct task_struct *new)
{
	if (!cpu_has_feature(CPU_FTR_TM))
		return;

	/* Recheckpoint the registers of the thread we're about to switch to.
	 *
	 * If the task was using FP, we non-lazily reload both the original and
	 * the speculative FP register states. This is because the kernel
	 * doesn't see if/when a TM rollback occurs, so if we take an FP
	 * unavailable later, we are unable to determine which set of FP regs
	 * need to be restored.
	 */
	if (!tm_enabled(new))
		return;

	if (!MSR_TM_ACTIVE(new->thread.regs->msr)) {
		tm_restore_sprs(&new->thread);
		return;
	}

	/* Recheckpoint to restore original checkpointed register state. */
	TM_DEBUG("*** tm_recheckpoint of pid %d (new->msr 0x%lx)\n",
		 new->pid, new->thread.regs->msr);

	tm_recheckpoint(&new->thread);

	/*
	 * The checkpointed state has been restored but the live state has
	 * not, ensure all the math functionality is turned off to trigger
	 * restore_math() to reload.
	 */
	new->thread.regs->msr &= ~(MSR_FP | MSR_VEC | MSR_VSX);

	TM_DEBUG("*** tm_recheckpoint of pid %d complete "
		"(kernel msr 0x%lx)\n",
		new->pid, mfmsr());
}
static inline void __switch_to_tm(struct task_struct *prev,
		struct task_struct *new)
{
	if (cpu_has_feature(CPU_FTR_TM)) {
		if (tm_enabled(prev) || tm_enabled(new))
			tm_enable();

		if (tm_enabled(prev)) {
			prev->thread.load_tm++;
			tm_reclaim_task(prev);
			if (!MSR_TM_ACTIVE(prev->thread.regs->msr) && prev->thread.load_tm == 0)
				prev->thread.regs->msr &= ~MSR_TM;
		}

		tm_recheckpoint_new_task(new);
	}
}
/*
 * This is called if we are on the way out to userspace and the
 * TIF_RESTORE_TM flag is set. It checks if we need to reload
 * FP and/or vector state and does so if necessary.
 * If userspace is inside a transaction (whether active or
 * suspended) and FP/VMX/VSX instructions have ever been enabled
 * inside that transaction, then we have to keep them enabled
 * and keep the FP/VMX/VSX state loaded while ever the transaction
 * continues. The reason is that if we didn't, and subsequently
 * got a FP/VMX/VSX unavailable interrupt inside a transaction,
 * we don't know whether it's the same transaction, and thus we
 * don't know which of the checkpointed state and the transactional
 * state to use.
 */
void restore_tm_state(struct pt_regs *regs)
{
	unsigned long msr_diff;

	/*
	 * This is the only moment we should clear TIF_RESTORE_TM as
	 * it is here that ckpt_regs.msr and pt_regs.msr become the same
	 * again, anything else could lead to an incorrect ckpt_msr being
	 * saved and therefore incorrect signal contexts.
	 */
	clear_thread_flag(TIF_RESTORE_TM);
	if (!MSR_TM_ACTIVE(regs->msr))
		return;

	msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
	msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;

	/* Ensure that restore_math() will restore */
	if (msr_diff & MSR_FP)
		current->thread.load_fp = 1;
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
		current->thread.load_vec = 1;
#endif
	restore_math(regs);

	regs->msr |= msr_diff;
}

#else
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev, new)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
static inline void save_sprs(struct thread_struct *t)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		t->vrsave = mfspr(SPRN_VRSAVE);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_DSCR))
		t->dscr = mfspr(SPRN_DSCR);

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		t->bescr = mfspr(SPRN_BESCR);
		t->ebbhr = mfspr(SPRN_EBBHR);
		t->ebbrr = mfspr(SPRN_EBBRR);

		t->fscr = mfspr(SPRN_FSCR);

		/*
		 * Note that the TAR is not available for use in the kernel.
		 * (To provide this, the TAR should be backed up/restored on
		 * exception entry/exit instead, and be in pt_regs. FIXME,
		 * this should be in pt_regs anyway (for debug).)
		 */
		t->tar = mfspr(SPRN_TAR);
	}
#endif

	thread_pkey_regs_save(t);
}
static inline void restore_sprs(struct thread_struct *old_thread,
				struct thread_struct *new_thread)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
	    old_thread->vrsave != new_thread->vrsave)
		mtspr(SPRN_VRSAVE, new_thread->vrsave);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		u64 dscr = get_paca()->dscr_default;
		if (new_thread->dscr_inherit)
			dscr = new_thread->dscr;

		if (old_thread->dscr != dscr)
			mtspr(SPRN_DSCR, dscr);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		if (old_thread->bescr != new_thread->bescr)
			mtspr(SPRN_BESCR, new_thread->bescr);
		if (old_thread->ebbhr != new_thread->ebbhr)
			mtspr(SPRN_EBBHR, new_thread->ebbhr);
		if (old_thread->ebbrr != new_thread->ebbrr)
			mtspr(SPRN_EBBRR, new_thread->ebbrr);

		if (old_thread->fscr != new_thread->fscr)
			mtspr(SPRN_FSCR, new_thread->fscr);

		if (old_thread->tar != new_thread->tar)
			mtspr(SPRN_TAR, new_thread->tar);
	}

	if (cpu_has_feature(CPU_FTR_P9_TIDR) &&
	    old_thread->tidr != new_thread->tidr)
		mtspr(SPRN_TIDR, new_thread->tidr);
#endif

	thread_pkey_regs_restore(new_thread, old_thread);
}
struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
	struct ppc64_tlb_batch *batch;
#endif

	new_thread = &new->thread;
	old_thread = &current->thread;

	WARN_ON(!irqs_disabled());

#ifdef CONFIG_PPC_BOOK3S_64
	batch = this_cpu_ptr(&ppc64_tlb_batch);
	if (batch->active) {
		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
		if (batch->index)
			__flush_tlb_pending(batch);
		batch->active = 0;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	switch_booke_debug_regs(&new->thread.debug);
#else
/*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
 * schedule DABR
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
	if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
		__set_breakpoint(&new->thread.hw_brk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

	/*
	 * We need to save SPRs before treclaim/trecheckpoint as these will
	 * change a number of them.
	 */
	save_sprs(&prev->thread);

	/* Save FPU, Altivec, VSX and SPE state */
	giveup_all(prev);

	__switch_to_tm(prev, new);

	if (!radix_enabled()) {
		/*
		 * We can't take a PMU exception inside _switch() since there
		 * is a window where the kernel stack SLB and the kernel stack
		 * are out of sync. Hard disable here.
		 */
		hard_irq_disable();
	}

	/*
	 * Call restore_sprs() before calling _switch(). If we move it after
	 * _switch() then we miss out on calling it for new tasks. The reason
	 * for this is we manually create a stack frame for new tasks that
	 * directly returns through ret_from_fork() or
	 * ret_from_kernel_thread(). See copy_thread() for details.
	 */
	restore_sprs(old_thread, new_thread);

	last = _switch(old_thread, new_thread);

#ifdef CONFIG_PPC_BOOK3S_64
	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
		batch = this_cpu_ptr(&ppc64_tlb_batch);
		batch->active = 1;
	}

	if (current->thread.regs) {
		restore_math(current->thread.regs);

		/*
		 * The copy-paste buffer can only store into foreign real
		 * addresses, so unprivileged processes can not see the
		 * data or use it in any way unless they have foreign real
		 * mappings. If the new process has the foreign real address
		 * mappings, we must issue a cp_abort to clear any state and
		 * prevent snooping, corruption or a covert channel.
		 */
		if (current->thread.used_vas)
			asm volatile(PPC_CP_ABORT);
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	return last;
}
#define NR_INSN_TO_PRINT	16
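
/*
 * With NR_INSN_TO_PRINT = 16, the dumps below start 16 * 3 / 4 = 12
 * instructions (48 bytes) before regs->nip, so they show 12 instructions
 * leading up to the faulting one and 4 from it onwards.
 */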
static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));

	printk("Instruction dump:");

	for (i = 0; i < NR_INSN_TO_PRINT; i++) {
		int instr;

		if (!(i % 8))
			pr_cont("\n");

#if !defined(CONFIG_BOOKE)
		/* If executing with the IMMU off, adjust pc rather
		 * than print XXXXXXXX.
		 */
		if (!(regs->msr & MSR_IR))
			pc = (unsigned long)phys_to_virt(pc);
#endif

		if (!__kernel_text_address(pc) ||
		    probe_kernel_address((const void *)pc, instr)) {
			pr_cont("XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				pr_cont("<%08x> ", instr);
			else
				pr_cont("%08x ", instr);
		}

		pc += sizeof(int);
	}

	pr_cont("\n");
}
void show_user_instructions(struct pt_regs *regs)
{
	unsigned long pc;
	int n = NR_INSN_TO_PRINT;
	struct seq_buf s;
	char buf[96];	/* enough for 8 times 9 + 2 chars */

	pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));

	/*
	 * Make sure the NIP points at userspace, not kernel text/data or
	 * elsewhere.
	 */
	if (!__access_ok(pc, NR_INSN_TO_PRINT * sizeof(int), USER_DS)) {
		pr_info("%s[%d]: Bad NIP, not dumping instructions.\n",
			current->comm, current->pid);
		return;
	}

	seq_buf_init(&s, buf, sizeof(buf));

	while (n) {
		int i;

		seq_buf_clear(&s);

		for (i = 0; i < 8 && n; i++, n--, pc += sizeof(int)) {
			int instr;

			if (probe_kernel_address((const void *)pc, instr)) {
				seq_buf_printf(&s, "XXXXXXXX ");
				continue;
			}
			seq_buf_printf(&s, regs->nip == pc ? "<%08x> " : "%08x ", instr);
		}

		if (!seq_buf_has_overflowed(&s))
			pr_info("%s[%d]: code: %s\n", current->comm,
				current->pid, s.buffer);
	}
}
struct regbit {
	unsigned long bit;
	const char *name;
};

static struct regbit msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
#ifndef CONFIG_BOOKE
static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
{
	const char *s = "";

	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			pr_cont("%s%s", s, bits->name);
			s = sep;
		}
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static struct regbit msr_tm_bits[] = {
	{MSR_TS_T,	"T"},
	{MSR_TS_S,	"S"},
	{MSR_TM,	"E"},
	{0,		NULL}
};

static void print_tm_bits(unsigned long val)
{
/*
 * This only prints something if at least one of the TM bits is set.
 * Inside the TM[], the output means:
 *   E: Enabled		(bit 32)
 *   S: Suspended	(bit 33)
 *   T: Transactional	(bit 34)
 */
	if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
		pr_cont(" TM[");
		print_bits(val, msr_tm_bits, "");
		pr_cont("]");
	}
}
#else
static void print_tm_bits(unsigned long val) {}
#endif
static void print_msr_bits(unsigned long val)
{
	pr_cont("<");
	print_bits(val, msr_bits, ",");
	print_tm_bits(val);
	pr_cont(">");
}

#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#define LAST_VOLATILE	13
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#define LAST_VOLATILE	12
#endif
void show_regs(struct pt_regs * regs)
{
	int i, trap;

	show_regs_print_info(KERN_DEFAULT);

	printk("NIP:  "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %px TRAP: %04lx   %s  (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR:  "REG" ", regs->msr);
	print_msr_bits(regs->msr);
	pr_cont("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
	trap = TRAP(regs);
	if ((TRAP(regs) != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
		pr_cont("CFAR: "REG" ", regs->orig_gpr3);
	if (trap == 0x200 || trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
		pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
#else
		pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
#endif
#ifdef CONFIG_PPC64
	pr_cont("IRQMASK: %lx ", regs->softe);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(regs->msr))
		pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
#endif

	for (i = 0;  i < 32;  i++) {
		if ((i % REGS_PER_LINE) == 0)
			pr_cont("\nGPR%02d: ", i);
		pr_cont(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
	pr_cont("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Look up the NIP late so we have the best chance of getting the
	 * above info out without failing.
	 */
	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
}
void flush_thread(void)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}
#ifdef CONFIG_PPC_BOOK3S_64
void arch_setup_new_exec(void)
{
	if (radix_enabled())
		return;
	hash__setup_new_exec();
}
#endif
int set_thread_uses_vas(void)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return -EINVAL;

	current->thread.used_vas = 1;

	/*
	 * Even a process that has no foreign real address mapping can use
	 * an unpaired COPY instruction (to no real effect). Issue CP_ABORT
	 * to clear any pending COPY and prevent a covert channel.
	 *
	 * __switch_to() will issue CP_ABORT on future context switches.
	 */
	asm volatile(PPC_CP_ABORT);

#endif /* CONFIG_PPC_BOOK3S_64 */
	return 0;
}
#ifdef CONFIG_PPC64
/**
 * Assign a TIDR (thread ID) for task @t and set it in the thread
 * structure. For now, we only support setting TIDR for 'current' task.
 *
 * Since the TID value is a truncated form of its PID, it is possible
 * (but unlikely) for 2 threads to have the same TID. In the unlikely event
 * that 2 threads share the same TID and are waiting, one of the following
 * cases will happen:
 *
 * 1. The correct thread is running, the wrong thread is not
 * In this situation, the correct thread is woken and proceeds to pass its
 * condition check.
 *
 * 2. Neither threads are running
 * In this situation, neither thread will be woken. When scheduled, the waiting
 * threads will execute either a wait, which will return immediately, followed
 * by a condition check, which will pass for the correct thread and fail
 * for the wrong thread, or they will execute the condition check immediately.
 *
 * 3. The wrong thread is running, the correct thread is not
 * The wrong thread will be woken, but will fail its condition check and
 * re-execute wait. The correct thread, when scheduled, will execute either
 * its condition check (which will pass), or wait, which returns immediately
 * when called the first time after the thread is scheduled, followed by its
 * condition check (which will pass).
 *
 * 4. Both threads are running
 * Both threads will be woken. The wrong thread will fail its condition check
 * and execute another wait, while the correct thread will pass its condition
 * check.
 *
 * @t: the task to set the thread ID for
 */
int set_thread_tidr(struct task_struct *t)
{
	if (!cpu_has_feature(CPU_FTR_P9_TIDR))
		return -EINVAL;

	if (t != current)
		return -EINVAL;

	if (t->thread.tidr)
		return 0;

	t->thread.tidr = (u16)task_pid_nr(t);
	mtspr(SPRN_TIDR, t->thread.tidr);

	return 0;
}
EXPORT_SYMBOL_GPL(set_thread_tidr);

#endif /* CONFIG_PPC64 */
void
release_thread(struct task_struct *t)
{
}
/*
 * this gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	flush_all_to_thread(src);
	/*
	 * Flush TM state out so we can copy it. __switch_to_tm() does this
	 * flush but it removes the checkpointed state from the current CPU and
	 * transitions the CPU out of TM mode. Hence we need to call
	 * tm_recheckpoint_new_task() (on the same task) to restore the
	 * checkpointed state back and the TM mode.
	 *
	 * Can't pass dst because it isn't ready. Doesn't matter, passing
	 * dst is only important for __switch_to()
	 */
	__switch_to_tm(src, src);

	*dst = *src;

	clear_task_ebb(dst);

	return 0;
}
static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
{
#ifdef CONFIG_PPC_BOOK3S_64
	unsigned long sp_vsid;
	unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

	if (radix_enabled())
		return;

	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
			<< SLB_VSID_SHIFT_1T;
	else
		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
			<< SLB_VSID_SHIFT;
	sp_vsid |= SLB_VSID_KERNEL | llp;
	p->thread.ksp_vsid = sp_vsid;
#endif
}
/*
 * Copy architecture-specific thread state
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long kthread_arg, struct task_struct *p)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	extern void ret_from_kernel_thread(void);
	void (*f)(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
	struct thread_info *ti = task_thread_info(p);

	klp_init_thread_info(p);

	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
		/* function */
		if (usp)
			childregs->gpr[14] = ppc_function_entry((void *)usp);
#ifdef CONFIG_PPC64
		clear_tsk_thread_flag(p, TIF_32BIT);
		childregs->softe = IRQS_ENABLED;
#endif
		childregs->gpr[15] = kthread_arg;
		p->thread.regs = NULL;	/* no user register state */
		ti->flags |= _TIF_RESTOREALL;
		f = ret_from_kernel_thread;
	} else {
		/* user thread */
		struct pt_regs *regs = current_pt_regs();
		CHECK_FULL_REGS(regs);
		*childregs = *regs;
		if (usp)
			childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		childregs->gpr[3] = 0;  /* Result from fork() */
		if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
			if (!is_32bit_task())
				childregs->gpr[13] = childregs->gpr[6];
			else
#endif
				childregs->gpr[2] = childregs->gpr[6];
		}

		f = ret_from_fork;
	}
	childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
	sp -= STACK_FRAME_OVERHEAD;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork. The new task will
	 * do some housekeeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	((unsigned long *)sp)[0] = 0;
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
#ifdef CONFIG_PPC32
	p->thread.ksp_limit = (unsigned long)end_of_stack(p);
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	p->thread.ptrace_bps[0] = NULL;
#endif

	p->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
	p->thread.vr_save_area = NULL;
#endif

	setup_ksp_vsid(p, sp);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		p->thread.dscr_inherit = current->thread.dscr_inherit;
		p->thread.dscr = mfspr(SPRN_DSCR);
	}
	if (cpu_has_feature(CPU_FTR_HAS_PPR))
		childregs->ppr = DEFAULT_PPR;
#endif
	kregs->nip = ppc_function_entry(f);
	return 0;
}
void preload_new_slb_context(unsigned long start, unsigned long sp);
/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */

#ifdef CONFIG_PPC_BOOK3S_64
	if (!radix_enabled())
		preload_new_slb_context(start, sp);
#endif
#endif

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set. Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Clear any transactional state, we're exec()ing. The cause is
	 * not important as there will never be a recheckpoint so it's not
	 * user visible.
	 */
	if (MSR_TM_SUSPENDED(mfmsr()))
		tm_reclaim_current(0);
#endif

	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->gpr[1] = sp;

	/*
	 * We have just cleared all the nonvolatile GPRs, so make
	 * FULL_REGS(regs) return true. This is necessary to allow
	 * ptrace to examine the thread immediately after exec.
	 */
	regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!is_32bit_task()) {
		unsigned long entry;

		if (is_elf2_task()) {
			/* Look ma, no function descriptors! */
			entry = start;

			/*
			 * The latest iteration of the ABI requires that when
			 * calling a function (at its global entry point),
			 * the caller must ensure r12 holds the entry point
			 * address (so that the function can quickly
			 * establish addressability).
			 */
			regs->gpr[12] = start;
			/* Make sure that's restored on entry to userspace. */
			set_thread_flag(TIF_RESTOREALL);
		} else {
			unsigned long toc;

			/* start is a relocated pointer to the function
			 * descriptor for the elf _start routine. The first
			 * entry in the function descriptor is the entry
			 * address of _start and the second entry is the TOC
			 * value we need to use.
			 */
			__get_user(entry, (unsigned long __user *)start);
			__get_user(toc, (unsigned long __user *)start+1);

			/* Check whether the e_entry function descriptor entries
			 * need to be relocated before we can use them.
			 */
			if (load_addr != 0) {
				entry += load_addr;
				toc += load_addr;
			}
			regs->gpr[2] = toc;
		}
		regs->nip = entry;
		regs->msr = MSR_USER64;
	} else {
		regs->nip = start;
		regs->gpr[2] = 0;
		regs->msr = MSR_USER32;
	}
#endif
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	current->thread.load_slb = 0;
	current->thread.load_fp = 0;
	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
	current->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
	memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
	current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
	current->thread.vr_save_area = NULL;
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
	current->thread.load_vec = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	current->thread.tm_tfhar = 0;
	current->thread.tm_texasr = 0;
	current->thread.tm_tfiar = 0;
	current->thread.load_tm = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

	thread_pkey_regs_init(&current->thread);
}
EXPORT_SYMBOL(start_thread);
#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)
int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy. If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode. fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			/*
			 * When the sticky exception bits are set
			 * directly by userspace, it must call prctl
			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
			 * in the existing prctl settings) or
			 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
			 * the bits being set). <fenv.h> functions
			 * saving and restoring the whole
			 * floating-point environment need to do so
			 * anyway to restore the prctl settings from
			 * the saved environment.
			 */
			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
			return 0;
		} else {
			return -EINVAL;
		}
#else
		return -EINVAL;
#endif
	}

	/* on a CONFIG_SPE this does not hurt us. The bits that
	 * __pack_fe01 use do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
	return 0;
}
int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			/*
			 * When the sticky exception bits are set
			 * directly by userspace, it must call prctl
			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
			 * in the existing prctl settings) or
			 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
			 * the bits being set). <fenv.h> functions
			 * saving and restoring the whole
			 * floating-point environment need to do so
			 * anyway to restore the prctl settings from
			 * the saved environment.
			 */
			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
			val = tsk->thread.fpexc_mode;
		} else {
			return -EINVAL;
		}
#else
		return -EINVAL;
#endif
	} else {
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	}
	return put_user(val, (unsigned int __user *) adr);
}
int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs->msr &= ~MSR_LE;
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs->msr |= MSR_LE;
	else
		return -EINVAL;

	return 0;
}
int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}
int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	stack_page = (unsigned long)hardirq_ctx[cpu];
	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	stack_page = (unsigned long)softirq_ctx[cpu];
	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return 0;
}
int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp < THREAD_SIZE)
		return 0;

	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return valid_irq_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);
static unsigned long __get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
		    p->state == TASK_RUNNING)
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ret;

	if (!try_get_task_stack(p))
		return 0;

	ret = __get_wchan(p);

	put_task_stack(p);

	return ret;
}
static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	struct ftrace_ret_stack *ret_stack;
	extern void return_to_handler(void);
	unsigned long rth = (unsigned long)return_to_handler;
	int curr_frame = 0;
#endif

	if (tsk == NULL)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	sp = (unsigned long) stack;
	if (sp == 0) {
		if (tsk == current)
			sp = current_stack_pointer();
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			break;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			if ((ip == rth) && curr_frame >= 0) {
				ret_stack = ftrace_graph_get_ret_stack(current,
								  curr_frame++);
				if (ret_stack)
					pr_cont(" (%pS)",
						(void *)ret_stack->ret);
				else
					curr_frame = -1;
			}
#endif
			if (firstframe)
				pr_cont(" (unreliable)");
			pr_cont("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			lr = regs->link;
			printk("--- interrupt: %lx at %pS\n    LR = %pS\n",
			       regs->trap, (void *)regs->nip, (void *)lr);
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);

	put_task_stack(tsk);
}
#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void notrace __ppc64_runlatch_on(void)
{
	struct thread_info *ti = current_thread_info();

	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		/*
		 * Least significant bit (RUN) is the only writable bit of
		 * the CTRL register, so we can avoid mfspr. 2.06 is not the
		 * earliest ISA where this is the case, but it's convenient.
		 */
		mtspr(SPRN_CTRLT, CTRL_RUNLATCH);
	} else {
		unsigned long ctrl;

		/*
		 * Some architectures (e.g., Cell) have writable fields other
		 * than RUN, so do the read-modify-write.
		 */
		ctrl = mfspr(SPRN_CTRLF);
		ctrl |= CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);
	}

	ti->local_flags |= _TLF_RUNLATCH;
}

/* Called with hard IRQs off */
void notrace __ppc64_runlatch_off(void)
{
	struct thread_info *ti = current_thread_info();

	ti->local_flags &= ~_TLF_RUNLATCH;

	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		mtspr(SPRN_CTRLT, 0);
	} else {
		unsigned long ctrl;

		ctrl = mfspr(SPRN_CTRLF);
		ctrl &= ~CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);
	}
}
#endif /* CONFIG_PPC64 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}
static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = 0;

	/* 8MB for 32bit, 1GB for 64bit */
	if (is_32bit_task())
		rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
	else
		rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));

	return rnd << PAGE_SHIFT;
}
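
/*
 * Working through the constants above: rnd is drawn in pages, so after the
 * final shift a 32-bit task gets an offset in [0, 2^23) bytes (up to 8MB of
 * heap randomization) and a 64-bit task gets one in [0, 2^30) bytes (up to
 * 1GB), always in PAGE_SIZE steps.
 */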
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * If we are using 1TB segments and we are allowed to randomise
	 * the heap, we can put it above 1TB so it is backed by a 1TB
	 * segment. Otherwise the heap will be in the bottom 1TB
	 * which always uses 256MB segments and this may result in a
	 * performance penalty. We don't need to worry about radix. For
	 * radix, mmu_highuser_ssize remains unchanged from 256MB.
	 */
	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

	ret = PAGE_ALIGN(base + brk_rnd());