/*
 * Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 * Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/debug.h>
#include <asm/firmware.h>
#include <asm/code-patching.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
/* Transactional Memory debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while(0)
#endif
extern unsigned long _get_SP(void);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static void check_if_tm_restore_required(struct task_struct *tsk)
{
	/*
	 * If we are saving the current thread's registers, and the
	 * thread is in a transactional state, set the TIF_RESTORE_TM
	 * bit so that we know to restore the registers before
	 * returning to userspace.
	 */
	if (tsk == current && tsk->thread.regs &&
	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
	    !test_thread_flag(TIF_RESTORE_TM)) {
		tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
		set_thread_flag(TIF_RESTORE_TM);
	}
}
#else
static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
bool strict_msr_control;
EXPORT_SYMBOL(strict_msr_control);

static int __init enable_strict_msr_control(char *str)
{
	strict_msr_control = true;
	pr_info("Enabling strict facility control\n");

	return 0;
}
early_param("ppc_strict_facility_enable", enable_strict_msr_control);
void msr_check_and_set(unsigned long bits)
{
	unsigned long oldmsr = mfmsr();
	unsigned long newmsr;

	newmsr = oldmsr | bits;

#ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
		newmsr |= MSR_VSX;
#endif

	if (oldmsr != newmsr)
		mtmsr_isync(newmsr);
}

void __msr_check_and_clear(unsigned long bits)
{
	unsigned long oldmsr = mfmsr();
	unsigned long newmsr;

	newmsr = oldmsr & ~bits;

#ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
		newmsr &= ~MSR_VSX;
#endif

	if (oldmsr != newmsr)
		mtmsr_isync(newmsr);
}
EXPORT_SYMBOL(__msr_check_and_clear);
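/*
 * Illustrative usage sketch (not part of the original source): a caller
 * brackets facility use with these helpers, e.g. giveup_fpu() below does
 *
 *	msr_check_and_set(MSR_FP);
 *	__giveup_fpu(tsk);
 *	msr_check_and_clear(MSR_FP);
 *
 * so the MSR is only rewritten (via mtmsr_isync()) when a bit actually
 * changes, and MSR_VSX is raised/dropped alongside MSR_FP on VSX-capable
 * CPUs.
 */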
#ifdef CONFIG_PPC_FPU
void giveup_fpu(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_FP);
	__giveup_fpu(tsk);
	msr_check_and_clear(MSR_FP);
}
EXPORT_SYMBOL(giveup_fpu);
/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);
#endif /* CONFIG_PPC_FPU */
void enable_kernel_fp(void)
{
	WARN_ON(preemptible());

	msr_check_and_set(MSR_FP);

	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		__giveup_fpu(current);
}
EXPORT_SYMBOL(enable_kernel_fp);
#ifdef CONFIG_ALTIVEC
void giveup_altivec(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_VEC);
	__giveup_altivec(tsk);
	msr_check_and_clear(MSR_VEC);
}
EXPORT_SYMBOL(giveup_altivec);
void enable_kernel_altivec(void)
{
	WARN_ON(preemptible());

	msr_check_and_set(MSR_VEC);

	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		__giveup_altivec(current);
}
EXPORT_SYMBOL(enable_kernel_altivec);
/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
			BUG_ON(tsk != current);
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
void giveup_vsx(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
	if (tsk->thread.regs->msr & MSR_FP)
		__giveup_fpu(tsk);
	if (tsk->thread.regs->msr & MSR_VEC)
		__giveup_altivec(tsk);
	__giveup_vsx(tsk);
	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
}
EXPORT_SYMBOL(giveup_vsx);
void enable_kernel_vsx(void)
{
	WARN_ON(preemptible());

	msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);

	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
		if (current->thread.regs->msr & MSR_FP)
			__giveup_fpu(current);
		if (current->thread.regs->msr & MSR_VEC)
			__giveup_altivec(current);
		__giveup_vsx(current);
	}
}
EXPORT_SYMBOL(enable_kernel_vsx);
void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VSX) {
			BUG_ON(tsk != current);
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
void giveup_spe(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_SPE);
	__giveup_spe(tsk);
	msr_check_and_clear(MSR_SPE);
}
EXPORT_SYMBOL(giveup_spe);
void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

	msr_check_and_set(MSR_SPE);

	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		__giveup_spe(current);
}
EXPORT_SYMBOL(enable_kernel_spe);
void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
			BUG_ON(tsk != current);
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int signal_code, int breakpt)
{
	siginfo_t info;

	current->thread.trap_nr = signal_code;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = breakpt;	/* breakpoint or watchpoint id */
	info.si_code = signal_code;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_break(struct pt_regs *regs, unsigned long address,
	      unsigned long error_code)
{
	siginfo_t info;

	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_break_match(regs))
		return;

	/* Clear the breakpoint */
	hw_breakpoint_disable();

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->debug.iac1 = thread->debug.iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->debug.iac3 = thread->debug.iac4 = 0;
#endif
	thread->debug.dac1 = thread->debug.dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->debug.dvc1 = thread->debug.dvc2 = 0;
#endif
	thread->debug.dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
	 */
	thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
			DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be User-only
	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
	 */
	thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->debug.dbcr1 = 0;
#endif
}
static void prime_debug_regs(struct debug_reg *debug)
{
	/*
	 * We could have inherited MSR_DE from userspace, since
	 * it doesn't get cleared on exception entry.  Make sure
	 * MSR_DE is clear before we enable any debug events.
	 */
	mtmsr(mfmsr() & ~MSR_DE);

	mtspr(SPRN_IAC1, debug->iac1);
	mtspr(SPRN_IAC2, debug->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, debug->iac3);
	mtspr(SPRN_IAC4, debug->iac4);
#endif
	mtspr(SPRN_DAC1, debug->dac1);
	mtspr(SPRN_DAC2, debug->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, debug->dvc1);
	mtspr(SPRN_DVC2, debug->dvc2);
#endif
	mtspr(SPRN_DBCR0, debug->dbcr0);
	mtspr(SPRN_DBCR1, debug->dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, debug->dbcr2);
#endif
}
/*
 * Unless neither the old nor the new thread is using the debug
 * registers, load the debug registers with the values stored in the
 * new thread.
 */
void switch_booke_debug_regs(struct debug_reg *new_debug)
{
	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
		|| (new_debug->dbcr0 & DBCR0_IDM))
			prime_debug_regs(new_debug);
}
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->hw_brk.address = 0;
	thread->hw_brk.type = 0;
	set_breakpoint(&thread->hw_brk);
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
	isync();
#endif
	return 0;
}
#elif defined(CONFIG_PPC_BOOK3S)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DABR, dabr);
	if (cpu_has_feature(CPU_FTR_DABRX))
		mtspr(SPRN_DABRX, dabrx);
	return 0;
}
#else
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	return -EINVAL;
}
#endif
static inline int set_dabr(struct arch_hw_breakpoint *brk)
{
	unsigned long dabr, dabrx;

	dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
	dabrx = ((brk->type >> 3) & 0x7);

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr, dabrx);

	return __set_dabr(dabr, dabrx);
}
static inline int set_dawr(struct arch_hw_breakpoint *brk)
{
	unsigned long dawr, dawrx, mrd;

	dawr = brk->address;

	dawrx  = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE))
				   << (63 - 58); /* read/write bits */
	dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2)
				   << (63 - 59); /* translate */
	dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL))
				   >> 3; /* PRIM bits */
	/* dawr length is stored in field MDR bits 48:53.  Matches range in
	   doublewords (64 bits) biased by -1, e.g. 0b000000=1DW and
	   0b111111=64DW.
	   brk->len is in bytes.
	   This aligns up to double word size, shifts and does the bias.
	*/
	mrd = ((brk->len + 7) >> 3) - 1;
	dawrx |= (mrd & 0x3f) << (63 - 53);

	if (ppc_md.set_dawr)
		return ppc_md.set_dawr(dawr, dawrx);
	mtspr(SPRN_DAWR, dawr);
	mtspr(SPRN_DAWRX, dawrx);
	return 0;
}
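/*
 * Worked example (illustrative): for an 8-byte watchpoint, brk->len = 8,
 * so mrd = ((8 + 7) >> 3) - 1 = 0, i.e. the biased length field encodes
 * one doubleword.  A 16-byte range gives mrd = ((16 + 7) >> 3) - 1 = 1,
 * two doublewords, matching the 0b000000 == 1DW bias described above.
 */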
void __set_breakpoint(struct arch_hw_breakpoint *brk)
{
	memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));

	if (cpu_has_feature(CPU_FTR_DAWR))
		set_dawr(brk);
	else
		set_dabr(brk);
}

void set_breakpoint(struct arch_hw_breakpoint *brk)
{
	preempt_disable();
	__set_breakpoint(brk);
	preempt_enable();
}
#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif
static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
			      struct arch_hw_breakpoint *b)
{
	if (a->address != b->address)
		return false;
	if (a->type != b->type)
		return false;
	if (a->len != b->len)
		return false;
	return true;
}
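/*
 * hw_brk_match() is what lets __switch_to() below skip reprogramming the
 * breakpoint hardware when the incoming thread's breakpoint is already
 * the one installed on this CPU (tracked in the per-cpu current_brk
 * above).
 */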
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static void tm_reclaim_thread(struct thread_struct *thr,
			      struct thread_info *ti, uint8_t cause)
{
	unsigned long msr_diff = 0;

	/*
	 * If FP/VSX registers have been already saved to the
	 * thread_struct, move them to the transact_fp array.
	 * We clear the TIF_RESTORE_TM bit since after the reclaim
	 * the thread will no longer be transactional.
	 */
	if (test_ti_thread_flag(ti, TIF_RESTORE_TM)) {
		msr_diff = thr->ckpt_regs.msr & ~thr->regs->msr;
		if (msr_diff & MSR_FP)
			memcpy(&thr->transact_fp, &thr->fp_state,
			       sizeof(struct thread_fp_state));
		if (msr_diff & MSR_VEC)
			memcpy(&thr->transact_vr, &thr->vr_state,
			       sizeof(struct thread_vr_state));
		clear_ti_thread_flag(ti, TIF_RESTORE_TM);
		msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
	}

	tm_reclaim(thr, thr->regs->msr, cause);

	/* Having done the reclaim, we now have the checkpointed
	 * FP/VSX values in the registers.  These might be valid
	 * even if we have previously called enable_kernel_fp() or
	 * flush_fp_to_thread(), so update thr->regs->msr to
	 * indicate their current validity.
	 */
	thr->regs->msr |= msr_diff;
}
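/*
 * Illustrative example of the msr_diff logic above: if the checkpointed
 * MSR (thr->ckpt_regs.msr) has MSR_FP set but the live MSR has it clear
 * (the FP state was flushed to the thread_struct earlier), msr_diff ends
 * up with MSR_FP set, the saved FP state is copied into transact_fp, and
 * after the reclaim thr->regs->msr gets MSR_FP back to mark the now
 * checkpointed register contents as valid.
 */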
void tm_reclaim_current(uint8_t cause)
{
	tm_reclaim_thread(&current->thread, current_thread_info(), cause);
}
static inline void tm_reclaim_task(struct task_struct *tsk)
{
	/* We have to work out if we're switching from/to a task that's in the
	 * middle of a transaction.
	 *
	 * In switching we need to maintain a 2nd register state as
	 * oldtask->thread.ckpt_regs.  We tm_reclaim(oldproc); this saves the
	 * checkpointed (tbegin) state in ckpt_regs and saves the transactional
	 * (current) FPRs into oldtask->thread.transact_fpr[].
	 *
	 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
	 */
	struct thread_struct *thr = &tsk->thread;

	if (!thr->regs)
		return;

	if (!MSR_TM_ACTIVE(thr->regs->msr))
		goto out_and_saveregs;

	/* Stash the original thread MSR, as giveup_fpu et al will
	 * modify it.  We hold onto it to see whether the task used
	 * FP & vector regs.  If the TIF_RESTORE_TM flag is set,
	 * ckpt_regs.msr is already set.
	 */
	if (!test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_TM))
		thr->ckpt_regs.msr = thr->regs->msr;

	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
		 "ccr=%lx, msr=%lx, trap=%lx)\n",
		 tsk->pid, thr->regs->nip,
		 thr->regs->ccr, thr->regs->msr,
		 thr->regs->trap);

	tm_reclaim_thread(thr, task_thread_info(tsk), TM_CAUSE_RESCHED);

	TM_DEBUG("--- tm_reclaim on pid %d complete\n",
		 tsk->pid);

out_and_saveregs:
	/* Always save the regs here, even if a transaction's not active.
	 * This context-switches a thread's TM info SPRs.  We do it here to
	 * be consistent with the restore path (in recheckpoint) which
	 * cannot happen later in _switch().
	 */
	tm_save_sprs(thr);
}
extern void __tm_recheckpoint(struct thread_struct *thread,
			      unsigned long orig_msr);

void tm_recheckpoint(struct thread_struct *thread,
		     unsigned long orig_msr)
{
	unsigned long flags;

	/* We really can't be interrupted here as the TEXASR registers can't
	 * change and later in the trecheckpoint code, we have a userspace R1.
	 * So let's hard disable over this region.
	 */
	local_irq_save(flags);
	hard_irq_disable();

	/* The TM SPRs are restored here, so that TEXASR.FS can be set
	 * before the trecheckpoint and no explosion occurs.
	 */
	tm_restore_sprs(thread);

	__tm_recheckpoint(thread, orig_msr);

	local_irq_restore(flags);
}
static inline void tm_recheckpoint_new_task(struct task_struct *new)
{
	unsigned long msr;

	if (!cpu_has_feature(CPU_FTR_TM))
		return;

	/* Recheckpoint the registers of the thread we're about to switch to.
	 *
	 * If the task was using FP, we non-lazily reload both the original and
	 * the speculative FP register states.  This is because the kernel
	 * doesn't see if/when a TM rollback occurs, so if we take an FP
	 * unavailable exception later, we are unable to determine which set
	 * of FP regs needs to be restored.
	 */
	if (!new->thread.regs)
		return;

	if (!MSR_TM_ACTIVE(new->thread.regs->msr)) {
		tm_restore_sprs(&new->thread);
		return;
	}
	msr = new->thread.ckpt_regs.msr;
	/* Recheckpoint to restore original checkpointed register state. */
	TM_DEBUG("*** tm_recheckpoint of pid %d "
		 "(new->msr 0x%lx, new->origmsr 0x%lx)\n",
		 new->pid, new->thread.regs->msr, msr);

	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&new->thread, msr);

	/* This loads the speculative FP/VEC state, if used */
	if (msr & MSR_FP) {
		do_load_up_transact_fpu(&new->thread);
		new->thread.regs->msr |=
			(MSR_FP | new->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		do_load_up_transact_altivec(&new->thread);
		new->thread.regs->msr |= MSR_VEC;
	}
#endif
	/* We may as well turn on VSX too since all the state is restored now */
	if (msr & MSR_VSX)
		new->thread.regs->msr |= MSR_VSX;

	TM_DEBUG("*** tm_recheckpoint of pid %d complete "
		 "(kernel msr 0x%lx)\n",
		 new->pid, mfmsr());
}
static inline void __switch_to_tm(struct task_struct *prev)
{
	if (cpu_has_feature(CPU_FTR_TM)) {
		tm_enable();
		tm_reclaim_task(prev);
	}
}
/*
 * This is called if we are on the way out to userspace and the
 * TIF_RESTORE_TM flag is set.  It checks if we need to reload
 * FP and/or vector state and does so if necessary.
 * If userspace is inside a transaction (whether active or
 * suspended) and FP/VMX/VSX instructions have ever been enabled
 * inside that transaction, then we have to keep them enabled
 * and keep the FP/VMX/VSX state loaded while ever the transaction
 * continues.  The reason is that if we didn't, and subsequently
 * got a FP/VMX/VSX unavailable interrupt inside a transaction,
 * we don't know whether it's the same transaction, and thus we
 * don't know which of the checkpointed state and the transactional
 * state to use.
 */
void restore_tm_state(struct pt_regs *regs)
{
	unsigned long msr_diff;

	clear_thread_flag(TIF_RESTORE_TM);
	if (!MSR_TM_ACTIVE(regs->msr))
		return;

	msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
	msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
	if (msr_diff & MSR_FP) {
		msr_check_and_set(MSR_FP);
		load_fp_state(&current->thread.fp_state);
		msr_check_and_clear(MSR_FP);
		regs->msr |= current->thread.fpexc_mode;
	}
	if (msr_diff & MSR_VEC) {
		msr_check_and_set(MSR_VEC);
		load_vr_state(&current->thread.vr_state);
		msr_check_and_clear(MSR_VEC);
	}
	regs->msr |= msr_diff;
}

#else
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
static inline void save_sprs(struct thread_struct *t)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		t->vrsave = mfspr(SPRN_VRSAVE);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_DSCR))
		t->dscr = mfspr(SPRN_DSCR);

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		t->bescr = mfspr(SPRN_BESCR);
		t->ebbhr = mfspr(SPRN_EBBHR);
		t->ebbrr = mfspr(SPRN_EBBRR);

		t->fscr = mfspr(SPRN_FSCR);

		/*
		 * Note that the TAR is not available for use in the kernel.
		 * (To provide this, the TAR should be backed up/restored on
		 * exception entry/exit instead, and be in pt_regs.  FIXME,
		 * this should be in pt_regs anyway (for debug).)
		 */
		t->tar = mfspr(SPRN_TAR);
	}
#endif
}
static inline void restore_sprs(struct thread_struct *old_thread,
				struct thread_struct *new_thread)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
	    old_thread->vrsave != new_thread->vrsave)
		mtspr(SPRN_VRSAVE, new_thread->vrsave);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		u64 dscr = get_paca()->dscr_default;
		u64 fscr = old_thread->fscr & ~FSCR_DSCR;

		if (new_thread->dscr_inherit) {
			dscr = new_thread->dscr;
			fscr |= FSCR_DSCR;
		}

		if (old_thread->dscr != dscr)
			mtspr(SPRN_DSCR, dscr);

		if (old_thread->fscr != fscr)
			mtspr(SPRN_FSCR, fscr);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		if (old_thread->bescr != new_thread->bescr)
			mtspr(SPRN_BESCR, new_thread->bescr);
		if (old_thread->ebbhr != new_thread->ebbhr)
			mtspr(SPRN_EBBHR, new_thread->ebbhr);
		if (old_thread->ebbrr != new_thread->ebbrr)
			mtspr(SPRN_EBBRR, new_thread->ebbrr);

		if (old_thread->tar != new_thread->tar)
			mtspr(SPRN_TAR, new_thread->tar);
	}
#endif
}
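/*
 * Design note: every mtspr in restore_sprs() is guarded by an old != new
 * comparison, so a switch between threads with identical
 * VRSAVE/DSCR/FSCR/EBB/TAR values touches no SPR at all; SPR writes are
 * comparatively expensive, so skipping them keeps the context-switch
 * fast path short.
 */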
struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
	struct ppc64_tlb_batch *batch;
#endif

	new_thread = &new->thread;
	old_thread = &current->thread;

	WARN_ON(!irqs_disabled());

	/*
	 * We need to save SPRs before treclaim/trecheckpoint as these will
	 * change a number of them.
	 */
	save_sprs(&prev->thread);

	__switch_to_tm(prev);

	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
		/* VMX and FPU registers are already saved here */
		__giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
		giveup_spe(prev);
#endif /* CONFIG_SPE */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	switch_booke_debug_regs(&new->thread.debug);
#else
/*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
 * schedule DABR
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
	if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
		__set_breakpoint(&new->thread.hw_brk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

#ifdef CONFIG_PPC64
	/*
	 * Collect processor utilization data per process
	 */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
		long unsigned start_tb, current_tb;
		start_tb = old_thread->start_tb;
		cu->current_tb = current_tb = mfspr(SPRN_PURR);
		old_thread->accum_tb += (current_tb - start_tb);
		new_thread->start_tb = current_tb;
	}
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_BOOK3S_64
	batch = this_cpu_ptr(&ppc64_tlb_batch);
	if (batch->active) {
		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
		if (batch->index)
			__flush_tlb_pending(batch);
		batch->active = 0;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	/*
	 * We can't take a PMU exception inside _switch() since there is a
	 * window where the kernel stack SLB and the kernel stack are out
	 * of sync. Hard disable here.
	 */
	hard_irq_disable();

	tm_recheckpoint_new_task(new);

	last = _switch(old_thread, new_thread);

	/* Need to recalculate these after calling _switch() */
	old_thread = &last->thread;
	new_thread = &current->thread;

#ifdef CONFIG_PPC_BOOK3S_64
	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
		batch = this_cpu_ptr(&ppc64_tlb_batch);
		batch->active = 1;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	restore_sprs(old_thread, new_thread);

	return last;
}
static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
			sizeof(int));

	printk("Instruction dump:");

	for (i = 0; i < instructions_to_print; i++) {
		int instr;

		if (!(i % 8))
			printk("\n");

#if !defined(CONFIG_BOOKE)
		/* If executing with the IMMU off, adjust pc rather
		 * than print XXXXXXXX.
		 */
		if (!(regs->msr & MSR_IR))
			pc = (unsigned long)phys_to_virt(pc);
#endif

		if (!__kernel_text_address(pc) ||
		     probe_kernel_address((unsigned int __user *)pc, instr)) {
			printk(KERN_CONT "XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				printk(KERN_CONT "<%08x> ", instr);
			else
				printk(KERN_CONT "%08x ", instr);
		}

		pc += sizeof(int);
	}

	printk("\n");
}
static struct regbit {
	unsigned long bit;
	const char *name;
} msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
	{MSR_SF,	"SF"},
	{MSR_HV,	"HV"},
#endif
	{MSR_VEC,	"VEC"},
	{MSR_VSX,	"VSX"},
#ifdef CONFIG_BOOKE
	{MSR_CE,	"CE"},
#endif
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_ME,	"ME"},
#ifdef CONFIG_BOOKE
	{MSR_DE,	"DE"},
#else
	{MSR_SE,	"SE"},
	{MSR_BE,	"BE"},
#endif
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{MSR_PMM,	"PMM"},
#ifndef CONFIG_BOOKE
	{MSR_RI,	"RI"},
	{MSR_LE,	"LE"},
#endif
	{0,		NULL}
};
static void printbits(unsigned long val, struct regbit *bits)
{
	const char *sep = "";

	printk("<");
	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			printk("%s%s", sep, bits->name);
			sep = ",";
		}
	printk(">");
}
#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#define LAST_VOLATILE	13
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#define LAST_VOLATILE	12
#endif
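/*
 * Worked example (illustrative): with the 64-bit definitions above, one
 * GPR dump line in show_regs() is "GPR00: " plus four "%016lx " fields,
 * i.e. 7 + 4 * 17 = 75 characters, which fits an 80-column console.
 */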
void show_regs(struct pt_regs * regs)
{
	int i, trap;

	show_regs_print_info(KERN_DEFAULT);

	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR: "REG" ", regs->msr);
	printbits(regs->msr, msr_bits);
	printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
	trap = TRAP(regs);
	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
		printk("CFAR: "REG" ", regs->orig_gpr3);
	if (trap == 0x200 || trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
		printk("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
#else
		printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
#endif
#ifdef CONFIG_PPC64
	printk("SOFTE: %ld ", regs->softe);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(regs->msr))
		printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
#endif

	for (i = 0;  i < 32;  i++) {
		if ((i % REGS_PER_LINE) == 0)
			printk("\nGPR%02d: ", i);
		printk(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
}
void exit_thread(void)
{
}
void flush_thread(void)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}
void
release_thread(struct task_struct *t)
{
}
/*
 * This gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	flush_fp_to_thread(src);
	flush_altivec_to_thread(src);
	flush_vsx_to_thread(src);
	flush_spe_to_thread(src);
	/*
	 * Flush TM state out so we can copy it.  __switch_to_tm() does this
	 * flush but it removes the checkpointed state from the current CPU and
	 * transitions the CPU out of TM mode.  Hence we need to call
	 * tm_recheckpoint_new_task() (on the same task) to restore the
	 * checkpointed state back and the TM mode.
	 */
	__switch_to_tm(src);
	tm_recheckpoint_new_task(src);

	*dst = *src;

	clear_task_ebb(dst);

	return 0;
}
static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
{
#ifdef CONFIG_PPC_STD_MMU_64
	unsigned long sp_vsid;
	unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
			<< SLB_VSID_SHIFT_1T;
	else
		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
			<< SLB_VSID_SHIFT;
	sp_vsid |= SLB_VSID_KERNEL | llp;
	p->thread.ksp_vsid = sp_vsid;
#endif
}
/*
 * Copy architecture-specific thread state
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long kthread_arg, struct task_struct *p)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	extern void ret_from_kernel_thread(void);
	void (*f)(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		struct thread_info *ti = (void *)task_stack_page(p);
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
		/* function */
		if (usp)
			childregs->gpr[14] = ppc_function_entry((void *)usp);
#ifdef CONFIG_PPC64
		clear_tsk_thread_flag(p, TIF_32BIT);
		childregs->softe = 1;
#endif
		childregs->gpr[15] = kthread_arg;
		p->thread.regs = NULL;	/* no user register state */
		ti->flags |= _TIF_RESTOREALL;
		f = ret_from_kernel_thread;
	} else {
		/* user thread */
		struct pt_regs *regs = current_pt_regs();
		CHECK_FULL_REGS(regs);
		*childregs = *regs;
		if (usp)
			childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		childregs->gpr[3] = 0;  /* Result from fork() */
		if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
			if (!is_32bit_task())
				childregs->gpr[13] = childregs->gpr[6];
			else
#endif
				childregs->gpr[2] = childregs->gpr[6];
		}

		f = ret_from_fork;
	}
	sp -= STACK_FRAME_OVERHEAD;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some house keeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	((unsigned long *)sp)[0] = 0;
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
#ifdef CONFIG_PPC32
	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
				_ALIGN_UP(sizeof(struct thread_info), 16);
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	p->thread.ptrace_bps[0] = NULL;
#endif

	p->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
	p->thread.vr_save_area = NULL;
#endif

	setup_ksp_vsid(p, sp);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		p->thread.dscr_inherit = current->thread.dscr_inherit;
		p->thread.dscr = current->thread.dscr;
	}
	if (cpu_has_feature(CPU_FTR_HAS_PPR))
		p->thread.ppr = INIT_PPR;
#endif
	kregs->nip = ppc_function_entry(f);
	return 0;
}
/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
#endif

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set.  Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->gpr[1] = sp;

	/*
	 * We have just cleared all the nonvolatile GPRs, so make
	 * FULL_REGS(regs) return true.  This is necessary to allow
	 * ptrace to examine the thread immediately after exec.
	 */
	regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
	regs->mq = 0;
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!is_32bit_task()) {
		unsigned long entry;

		if (is_elf2_task()) {
			/* Look ma, no function descriptors! */
			entry = start;

			/*
			 * The latest iteration of the ABI requires that when
			 * calling a function (at its global entry point),
			 * the caller must ensure r12 holds the entry point
			 * address (so that the function can quickly
			 * establish addressability).
			 */
			regs->gpr[12] = start;
			/* Make sure that's restored on entry to userspace. */
			set_thread_flag(TIF_RESTOREALL);
		} else {
			unsigned long toc;

			/* start is a relocated pointer to the function
			 * descriptor for the ELF _start routine.  The first
			 * entry in the function descriptor is the entry
			 * address of _start and the second entry is the TOC
			 * value we need to use.
			 */
			__get_user(entry, (unsigned long __user *)start);
			__get_user(toc, (unsigned long __user *)start+1);

			/* Check whether the e_entry function descriptor entries
			 * need to be relocated before we can use them.
			 */
			if (load_addr != 0) {
				entry += load_addr;
				toc   += load_addr;
			}
			regs->gpr[2] = toc;
		}
		regs->nip = entry;
		regs->msr = MSR_USER64;
	} else {
		regs->nip = start;
		regs->gpr[2] = 0;
		regs->msr = MSR_USER32;
	}
#endif
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
	current->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
	memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
	current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
	current->thread.vr_save_area = NULL;
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (cpu_has_feature(CPU_FTR_TM))
		regs->msr |= MSR_TM;
	current->thread.tm_tfhar = 0;
	current->thread.tm_texasr = 0;
	current->thread.tm_tfiar = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
}
EXPORT_SYMBOL(start_thread);
);
1339 #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
1340 | PR_FP_EXC_RES | PR_FP_EXC_INV)
int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			/*
			 * When the sticky exception bits are set
			 * directly by userspace, it must call prctl
			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
			 * in the existing prctl settings) or
			 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
			 * the bits being set).  <fenv.h> functions
			 * saving and restoring the whole
			 * floating-point environment need to do so
			 * anyway to restore the prctl settings from
			 * the saved environment.
			 */
			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
			return 0;
		} else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	}

	/* on a CONFIG_SPE this does not hurt us.  The bits that
	 * __pack_fe01 use do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
	return 0;
}
int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			/*
			 * When the sticky exception bits are set
			 * directly by userspace, it must call prctl
			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
			 * in the existing prctl settings) or
			 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
			 * the bits being set).  <fenv.h> functions
			 * saving and restoring the whole
			 * floating-point environment need to do so
			 * anyway to restore the prctl settings from
			 * the saved environment.
			 */
			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
			val = tsk->thread.fpexc_mode;
		} else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}
int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs->msr &= ~MSR_LE;
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs->msr |= MSR_LE;
	else
		return -EINVAL;

	return 0;
}
int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}
int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	/*
	 * Avoid crashing if the stack has overflowed and corrupted
	 * task_cpu(p), which is in the thread_info struct.
	 */
	if (cpu < NR_CPUS && cpu_possible(cpu)) {
		stack_page = (unsigned long) hardirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;

		stack_page = (unsigned long) softirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;
	}
	return 0;
}
int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return valid_irq_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);
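/*
 * get_wchan() and show_stack() below call validate_sp() before every
 * dereference while walking a stack back-chain, so a corrupted chain
 * terminates the walk instead of faulting.
 */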
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}
static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int curr_frame = current->curr_ret_stack;
	extern void return_to_handler(void);
	unsigned long rth = (unsigned long)return_to_handler;
#endif

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			sp = current_stack_pointer();
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			if ((ip == rth) && curr_frame >= 0) {
				printk(" (%pS)",
				       (void *)current->ret_stack[curr_frame].ret);
				curr_frame--;
			}
#endif
			if (firstframe)
				printk(" (unreliable)");
			printk("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			lr = regs->link;
			printk("--- interrupt: %lx at %pS\n    LR = %pS\n",
			       regs->trap, (void *)regs->nip, (void *)lr);
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);
}
#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void notrace __ppc64_runlatch_on(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl |= CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);

	ti->local_flags |= _TLF_RUNLATCH;
}

/* Called with hard IRQs off */
void notrace __ppc64_runlatch_off(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ti->local_flags &= ~_TLF_RUNLATCH;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl &= ~CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);
}
#endif /* CONFIG_PPC64 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}
static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = 0;

	/* 8MB for 32bit, 1GB for 64bit */
	if (is_32bit_task())
		rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
	else
		rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));

	return rnd << PAGE_SHIFT;
}
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

#ifdef CONFIG_PPC_STD_MMU_64
	/*
	 * If we are using 1TB segments and we are allowed to randomise
	 * the heap, we can put it above 1TB so it is backed by a 1TB
	 * segment. Otherwise the heap will be in the bottom 1TB
	 * which always uses 256MB segments and this may result in a
	 * performance penalty.
	 */
	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}