/*
 * Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 * Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/kprobes.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
extern unsigned long _get_SP(void);
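/*
 * With lazy FP/vector state switching (see discard_lazy_cpu_state
 * below), the pointers below track which task's state is currently
 * live in the CPU's FP/AltiVec/SPE registers on a uniprocessor
 * kernel; NULL means no task's state is resident in the unit.
 */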
#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif
/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch on SMP,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
#endif
			giveup_fpu(current);
		}
		preempt_enable();
	}
}
void enable_kernel_fp(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		giveup_fpu(current);
	else
		giveup_fpu(NULL);	/* just enables FP for kernel */
#else
	giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);
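/*
 * Illustrative use (a sketch, not from this file): kernel code that
 * wants to execute floating-point instructions does roughly
 *
 *	preempt_disable();
 *	enable_kernel_fp();
 *	... FP instructions ...
 *	preempt_enable();
 *
 * which is why enable_kernel_fp() warns if it is called preemptible.
 */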
int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
{
	if (!tsk->thread.regs)
		return 0;
	flush_fp_to_thread(current);

	memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));

	return 1;
}
#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		giveup_altivec(current);
	else
		giveup_altivec(NULL);	/* just enable AltiVec for kernel - force */
#else
	giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);
/*
 * Make sure the VMX/AltiVec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_altivec(current);
		}
		preempt_enable();
	}
}
int dump_task_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
{
	flush_altivec_to_thread(current);
	memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
	return 1;
}
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
#else
	giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);
void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_spe(current);
		}
		preempt_enable();
	}
}
int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
{
	flush_spe_to_thread(current);
	/* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
	memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
	return 1;
}
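/*
 * Layout note (illustrative): the 35 above is 32 words of evr[],
 * plus the 64-bit accumulator counted as two 32-bit words, plus one
 * word for spefscr: 32 + 2 + 1 = 35.  This assumes acc immediately
 * follows evr[] and spefscr follows acc in struct thread_struct.
 */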
#endif /* CONFIG_SPE */
#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
	preempt_disable();
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif /* CONFIG_SPE */
	preempt_enable();
}
#endif /* CONFIG_SMP */
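/*
 * DABR is the Data Address Breakpoint Register.  A platform can
 * supply its own setter through ppc_md.set_dabr (for example when the
 * register is only reachable through firmware); otherwise we write
 * the SPR directly.
 */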
#ifdef CONFIG_PPC_MERGE		/* XXX for now */
int set_dabr(unsigned long dabr)
{
	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr);

	mtspr(SPRN_DABR, dabr);
	return 0;
}
#endif
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
static DEFINE_PER_CPU(unsigned long, current_dabr);
struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	unsigned long flags;
	struct task_struct *last;
#ifdef CONFIG_SMP
	/* avoid complexity of lazy save/restore of fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs.  So we don't have to do a restore
	 * every switch, just a save.
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
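	/*
	 * The "trap and reload" above: giveup_fpu() clears MSR_FP in the
	 * task's saved MSR, so its next FP instruction raises an
	 * FP-unavailable exception whose handler restores the registers
	 * from the thread_struct (the handler itself lives elsewhere in
	 * the arch code).
	 */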
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	/*
	 * If the previous thread used spe in the last quantum
	 * (thus changing spe regs) then save them.
	 *
	 * On SMP we always save/restore spe regs just to avoid the
	 * complexity of changing processors.
	 */
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
		giveup_spe(prev);
#endif /* CONFIG_SPE */
#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_spe
	 */
	if (new->thread.regs && last_task_used_spe == new)
		new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

#endif /* CONFIG_SMP */
#ifdef CONFIG_PPC64	/* for now */
	if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) {
		set_dabr(new->thread.dabr);
		__get_cpu_var(current_dabr) = new->thread.dabr;
	}
#endif /* CONFIG_PPC64 */

	new_thread = &new->thread;
	old_thread = &current->thread;
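	/*
	 * PURR is the Processor Utilization of Resources Register; on
	 * SMT hardware it apportions timebase ticks between the hardware
	 * threads, so the delta taken below charges each task for the
	 * processing resource it actually consumed (a descriptive note,
	 * assuming shared-processor-LPAR firmware support).
	 */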
#ifdef CONFIG_PPC64
	/*
	 * Collect processor utilization data per process
	 */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		unsigned long start_tb, current_tb;

		start_tb = old_thread->start_tb;
		cu->current_tb = current_tb = mfspr(SPRN_PURR);
		old_thread->accum_tb += (current_tb - start_tb);
		new_thread->start_tb = current_tb;
	}
#endif
	local_irq_save(flags);

	account_system_vtime(current);
	account_process_vtime(current);
	calculate_steal_time();

	/* _switch() returns the task we switched away from */
	last = _switch(old_thread, new_thread);

	local_irq_restore(flags);

	return last;
}
static int instructions_to_print = 16;

#ifdef CONFIG_PPC64
#define BAD_PC(pc)	((REGION_ID(pc) != KERNEL_REGION_ID) && \
			 (REGION_ID(pc) != VMALLOC_REGION_ID))
#else
#define BAD_PC(pc)	((pc) < KERNELBASE)
#endif

static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
			sizeof(int));

	printk("Instruction dump:");

	for (i = 0; i < instructions_to_print; i++) {
		int instr;

		if (!(i % 8))
			printk("\n");

		if (BAD_PC(pc) || __get_user(instr, (unsigned int *)pc)) {
			printk("XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				printk("<%08x> ", instr);
			else
				printk("%08x ", instr);
		}

		pc += sizeof(int);
	}

	printk("\n");
}
static struct regbit {
	unsigned long bit;
	const char *name;
} msr_bits[] = {
	/* table of MSR bit names elided in this excerpt */
	{0,		NULL}
};

static void printbits(unsigned long val, struct regbit *bits)
{
	const char *sep = "";

	printk("<");
	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			printk("%s%s", sep, bits->name);
			sep = ",";
		}
	printk(">");
}
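/*
 * Example output (illustrative): for a typical kernel MSR with the
 * EE, ME, IR and DR bits set, printbits(regs->msr, msr_bits) prints
 * "<EE,ME,IR,DR>" after the raw MSR value in show_regs() below.
 */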
#ifdef CONFIG_PPC64
#define REGS_PER_LINE	4
#define LAST_VOLATILE	13
#else
#define REGS_PER_LINE	8
#define LAST_VOLATILE	12
#endif
void show_regs(struct pt_regs * regs)
{
	int i, trap;

	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
	       regs, regs->trap, print_tainted(), system_utsname.release);
	printk("MSR: "REG" ", regs->msr);
	printbits(regs->msr, msr_bits);
	printk("  CR: %08lX  XER: %08lX\n", regs->ccr, regs->xer);
	trap = TRAP(regs);
	if (trap == 0x300 || trap == 0x600)
		printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr);
	printk("TASK = %p[%d] '%s' THREAD: %p",
	       current, current->pid, current->comm, task_thread_info(current));

#ifdef CONFIG_SMP
	printk(" CPU: %d", smp_processor_id());
#endif /* CONFIG_SMP */

	for (i = 0;  i < 32;  i++) {
		if ((i % REGS_PER_LINE) == 0)
			printk("\n" KERN_INFO "GPR%02d: ", i);
		printk(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	printk("NIP ["REG"] ", regs->nip);
	print_symbol("%s\n", regs->nip);
	printk("LR ["REG"] ", regs->link);
	print_symbol("%s\n", regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
}
void exit_thread(void)
{
	kprobe_flush_task(current);
	discard_lazy_cpu_state();
}
void flush_thread(void)
{
#ifdef CONFIG_PPC64
	struct thread_info *t = current_thread_info();

	if (t->flags & _TIF_ABI_PENDING)
		t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);
#endif

	discard_lazy_cpu_state();

#ifdef CONFIG_PPC64	/* for now */
	if (current->thread.dabr) {
		current->thread.dabr = 0;
		set_dabr(0);
	}
#endif
}
void release_thread(struct task_struct *t)
{
}
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_spe_to_thread(current);
}
int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
		unsigned long unused, struct task_struct *p,
		struct pt_regs *regs)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

	CHECK_FULL_REGS(regs);
	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	*childregs = *regs;
	if ((childregs->msr & MSR_PR) == 0) {
		/* for kernel thread, set `current' and stackptr in new task */
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
#ifdef CONFIG_PPC32
		childregs->gpr[2] = (unsigned long) p;
#else
		clear_tsk_thread_flag(p, TIF_32BIT);
#endif
		p->thread.regs = NULL;	/* no user register state */
	} else {
		childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
			if (!test_thread_flag(TIF_32BIT))
				childregs->gpr[13] = childregs->gpr[6];
			else
#endif
				childregs->gpr[2] = childregs->gpr[6];
		}
	}
	childregs->gpr[3] = 0;  /* Result from fork() */
	sp -= STACK_FRAME_OVERHEAD;
	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some house keeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
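	/*
	 * Resulting child kernel stack, from the code above (top of the
	 * stack page at the top; an illustrative picture, not from the
	 * original source):
	 *
	 *	task_stack_page(p) + THREAD_SIZE
	 *	  childregs          (struct pt_regs: copied user regs)
	 *	  STACK_FRAME_OVERHEAD
	 *	  kregs              (struct pt_regs: frame popped by _switch)
	 *	  STACK_FRAME_OVERHEAD
	 *	p->thread.ksp ->
	 */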
#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_SLB)) {
		unsigned long sp_vsid = get_kernel_vsid(sp);
		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

		sp_vsid <<= SLB_VSID_SHIFT;
		sp_vsid |= SLB_VSID_KERNEL | llp;
		p->thread.ksp_vsid = sp_vsid;
	}
	/*
	 * The PPC64 ABI makes use of a TOC to contain function
	 * pointers.  The function (here ret_from_fork) is actually a
	 * pointer to the TOC entry.  The first entry is a pointer to
	 * the actual function.
	 */
	kregs->nip = *((unsigned long *)ret_from_fork);
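	/*
	 * The 64-bit function-descriptor layout referred to above (an
	 * illustrative sketch of the ELFv1 convention, not from this
	 * file):
	 *
	 *	struct func_desc {
	 *		unsigned long entry;	- address of the actual code
	 *		unsigned long toc;	- TOC base for the function
	 *		unsigned long env;	- environment pointer (unused by C)
	 *	};
	 *
	 * so *(unsigned long *)ret_from_fork yields the real entry address.
	 */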
#else
	kregs->nip = (unsigned long)ret_from_fork;
	p->thread.last_syscall = -1;
#endif

	return 0;
}
/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
#endif

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set.  Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->gpr[1] = sp;

#ifdef CONFIG_PPC32
	regs->mq = 0;
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!test_thread_flag(TIF_32BIT)) {
		unsigned long entry, toc;

		/* start is a relocated pointer to the function descriptor for
		 * the elf _start routine.  The first entry in the function
		 * descriptor is the entry address of _start and the second
		 * entry is the TOC value we need to use.
		 */
		__get_user(entry, (unsigned long __user *)start);
		__get_user(toc, (unsigned long __user *)start+1);

		/* Check whether the e_entry function descriptor entries
		 * need to be relocated before we can use them.
		 */
		if (load_addr != 0) {
			entry += load_addr;
			toc   += load_addr;
		}
		regs->nip = entry;
		regs->gpr[2] = toc;
		regs->msr = MSR_USER64;
	} else {
		regs->nip = start;
		regs->gpr[2] = 0;
		regs->msr = MSR_USER32;
	}
#endif

	discard_lazy_cpu_state();
	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
	current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
	memset(current->thread.vr, 0, sizeof(current->thread.vr));
	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
	current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
}
#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		tsk->thread.fpexc_mode = val &
			(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
		return 0;
#else
		return -EINVAL;
#endif
	}

	/* on a CONFIG_SPE this does not hurt us.  The bits that
	 * __pack_fe01 use do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
	return 0;
}
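/*
 * set_fpexc_mode() is the back end of prctl(PR_SET_FPEXC).  An
 * illustrative userspace call (a sketch, not from this file):
 *
 *	#include <sys/prctl.h>
 *
 *	prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
 *
 * selects precise FP exception mode for the calling task.
 */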
int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		val = tsk->thread.fpexc_mode;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}
#define TRUNC_PTR(x)	((typeof(x))(((unsigned long)(x)) & 0xffffffff))

int sys_clone(unsigned long clone_flags, unsigned long usp,
	      int __user *parent_tidp, void __user *child_threadptr,
	      int __user *child_tidp, int p6,
	      struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	if (usp == 0)
		usp = regs->gpr[1];	/* stack pointer for child */
#ifdef CONFIG_PPC64
	/* A 32-bit task may pass garbage in the upper half of its
	 * pointer arguments; keep only the low 32 bits. */
	if (test_thread_flag(TIF_32BIT)) {
		parent_tidp = TRUNC_PTR(parent_tidp);
		child_tidp = TRUNC_PTR(child_tidp);
	}
#endif
	return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
}
int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
	     unsigned long p4, unsigned long p5, unsigned long p6,
	     struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
}
int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
	      unsigned long p4, unsigned long p5, unsigned long p6,
	      struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
			regs, 0, NULL, NULL);
}
int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
	       unsigned long a3, unsigned long a4, unsigned long a5,
	       struct pt_regs *regs)
{
	int error;
	char *filename;

	filename = getname((char __user *) a0);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_spe_to_thread(current);
	error = do_execve(filename, (char __user * __user *) a1,
			  (char __user * __user *) a2, regs);
	if (error == 0) {
		task_lock(current);
		current->ptrace &= ~PT_DTRACE;
		task_unlock(current);
	}
	putname(filename);
out:
	return error;
}
static int validate_sp(unsigned long sp, struct task_struct *p,
		       unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

#ifdef CONFIG_IRQSTACKS
	stack_page = (unsigned long) hardirq_ctx[task_cpu(p)];
	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	stack_page = (unsigned long) softirq_ctx[task_cpu(p)];
	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;
#endif

	return 0;
}
#ifdef CONFIG_PPC64
#define MIN_STACK_FRAME	112	/* same as STACK_FRAME_OVERHEAD, in fact */
#define FRAME_LR_SAVE	2
#define INT_FRAME_SIZE	(sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD + 288)
#define REGS_MARKER	0x7265677368657265ul
#define FRAME_MARKER	12
#else
#define MIN_STACK_FRAME	16
#define FRAME_LR_SAVE	1
#define INT_FRAME_SIZE	(sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD)
#define REGS_MARKER	0x72656773ul
#define FRAME_MARKER	2
#endif
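/*
 * Worked example: REGS_MARKER is just ASCII.  0x7265677368657265 is
 * the bytes 'r' 'e' 'g' 's' 'h' 'e' 'r' 'e' ("regshere"), and
 * 0x72656773 is "regs"; the marker is stored in exception frames so
 * that show_stack() below can recognize them while walking the stack.
 */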
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, MIN_STACK_FRAME))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, MIN_STACK_FRAME))
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}
EXPORT_SYMBOL(get_wchan);
static int kstack_depth_to_print = 64;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			asm("mr %0,1" : "=r" (sp));
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, MIN_STACK_FRAME))
			return;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] ", sp, ip);
			print_symbol("%s", ip);
			if (firstframe)
				printk(" (unreliable)");
			printk("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, INT_FRAME_SIZE)
		    && stack[FRAME_MARKER] == REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			printk("--- Exception: %lx", regs->trap);
			print_symbol(" at %s\n", regs->nip);
			lr = regs->link;
			print_symbol("    LR = %s\n", lr);
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);
}
void dump_stack(void)
{
	show_stack(current, NULL);
}
EXPORT_SYMBOL(dump_stack);
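/*
 * The CTRL SPR's run latch is a hint to the hardware (and, on shared
 * processor LPARs, to the hypervisor) that this hardware thread is
 * doing useful work: busy paths set it and the idle loop clears it.
 * The two helpers below manage it, caching its state in the task's
 * TIF_RUNLATCH flag to avoid redundant SPR accesses.
 */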
#ifdef CONFIG_PPC64
void ppc64_runlatch_on(void)
{
	unsigned long ctrl;

	if (cpu_has_feature(CPU_FTR_CTRL) && !test_thread_flag(TIF_RUNLATCH)) {
		HMT_medium();

		ctrl = mfspr(SPRN_CTRLF);
		ctrl |= CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);

		set_thread_flag(TIF_RUNLATCH);
	}
}
void ppc64_runlatch_off(void)
{
	unsigned long ctrl;

	if (cpu_has_feature(CPU_FTR_CTRL) && test_thread_flag(TIF_RUNLATCH)) {
		HMT_medium();

		clear_thread_flag(TIF_RUNLATCH);

		ctrl = mfspr(SPRN_CTRLF);
		ctrl &= ~CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);
	}
}
#endif /* CONFIG_PPC64 */