/*
 * Meta architecture stack tracing: kernel stack frame unwinding and
 * CONFIG_STACKTRACE support (save_stack_trace and friends).
 */
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/stacktrace.h>

#include <asm/stacktrace.h>

#if defined(CONFIG_FRAME_POINTER)

#ifdef CONFIG_KALLSYMS
#include <linux/kallsyms.h>
#include <linux/module.h>

/*
 * Address and size of the ___TBIBoingVec symbol (interrupt/syscall
 * entry point — see unwind_frame()).  tbi_boing_addr doubles as a
 * "lookup already attempted" flag: it is set to 1 when the symbol
 * cannot be resolved, so the lookup is not retried on every unwind.
 */
static unsigned long tbi_boing_addr;
static unsigned long tbi_boing_size;
/*
 * Resolve the address and size of ___TBIBoingVec via kallsyms so that
 * unwind_frame() can recognise interrupt/syscall entry frames.
 */
static void tbi_boing_init(void)
{
	/* We need to know where TBIBoingVec is and its size */
	unsigned long size;
	unsigned long offset;
	char modname[MODULE_NAME_LEN];
	char name[KSYM_NAME_LEN];
	tbi_boing_addr = kallsyms_lookup_name("___TBIBoingVec");
	if (!tbi_boing_addr)
		tbi_boing_addr = 1;	/* mark lookup as attempted (failed) */
	else if (!lookup_symbol_attrs(tbi_boing_addr, &size,
				      &offset, modname, name))
		tbi_boing_size = size;
}
#endif /* CONFIG_KALLSYMS */

/* Round addr down to a multiple of size; size must be a power of two. */
#define ALIGN_DOWN(addr, size) ((addr)&(~((size)-1)))
/*
 * Unwind the current stack frame and store the new register values in the
 * structure passed as argument. Unwinding is equivalent to a function return,
 * hence the new PC value rather than LR should be used for backtrace.
 *
 * Returns 0 on success, -EINVAL when the frame is misaligned or the new
 * frame pointer falls outside the plausible range for this stack.
 */
int notrace unwind_frame(struct stackframe *frame)
{
	struct metag_frame *fp = (struct metag_frame *)frame->fp;
	unsigned long lr;
	unsigned long fpnew;

	/* Frame pointers must be 8-byte aligned. */
	if (frame->fp & 0x7)
		return -EINVAL;

	fpnew = fp->fp;
	lr = fp->lr - 4;	/* step back one instruction into the caller */

#ifdef CONFIG_KALLSYMS
	/* If we've reached TBIBoingVec then we're at an interrupt
	 * entry point or a syscall entry point. The frame pointer
	 * points to a pt_regs which can be used to continue tracing on
	 * the other side of the boing.
	 */
	if (!tbi_boing_addr)
		tbi_boing_init();
	if (tbi_boing_size && lr >= tbi_boing_addr &&
	    lr < tbi_boing_addr + tbi_boing_size) {
		struct pt_regs *regs = (struct pt_regs *)fpnew;
		/* Don't unwind across into a user-mode context. */
		if (user_mode(regs))
			return -EINVAL;
		/* Continue with the fp/pc saved in the pt_regs context. */
		fpnew = regs->ctx.AX[1].U0;
		lr = regs->ctx.DX[4].U1;
	}
#endif

	/* stack grows up, so frame pointers must decrease */
	if (fpnew < (ALIGN_DOWN((unsigned long)fp, THREAD_SIZE) +
		     sizeof(struct thread_info)) || fpnew >= (unsigned long)fp)
		return -EINVAL;

	/* restore the registers from the stack frame */
	frame->fp = fpnew;
	frame->pc = lr;

	return 0;
}
#else
/*
 * No frame pointers available: scan stack words for a value that looks
 * like a kernel text return address and resume the trace from there.
 * Returns 0 when a candidate frame is found, -EINVAL otherwise.
 */
int notrace unwind_frame(struct stackframe *frame)
{
	struct metag_frame *sp = (struct metag_frame *)frame->sp;

	/* Stack pointers must be 8-byte aligned. */
	if (frame->sp & 0x7)
		return -EINVAL;

	while (!kstack_end(sp)) {
		/* -4 steps back one instruction into the caller */
		unsigned long addr = sp->lr - 4;
		sp--;

		if (__kernel_text_address(addr)) {
			frame->sp = (unsigned long)sp;
			frame->pc = addr;
			return 0;
		}
	}
	return -EINVAL;
}
#endif
102 | ||
103 | void notrace walk_stackframe(struct stackframe *frame, | |
104 | int (*fn)(struct stackframe *, void *), void *data) | |
105 | { | |
106 | while (1) { | |
107 | int ret; | |
108 | ||
109 | if (fn(frame, data)) | |
110 | break; | |
111 | ret = unwind_frame(frame); | |
112 | if (ret < 0) | |
113 | break; | |
114 | } | |
115 | } | |
116 | EXPORT_SYMBOL(walk_stackframe); | |
117 | ||
#ifdef CONFIG_STACKTRACE
/* Context handed to the save_trace() callback by save_stack_trace_tsk(). */
struct stack_trace_data {
	struct stack_trace *trace;		/* output entry buffer */
	unsigned int no_sched_functions;	/* filter scheduler functions? */
	unsigned int skip;			/* leading entries to discard */
};
124 | ||
125 | static int save_trace(struct stackframe *frame, void *d) | |
126 | { | |
127 | struct stack_trace_data *data = d; | |
128 | struct stack_trace *trace = data->trace; | |
129 | unsigned long addr = frame->pc; | |
130 | ||
131 | if (data->no_sched_functions && in_sched_functions(addr)) | |
132 | return 0; | |
133 | if (data->skip) { | |
134 | data->skip--; | |
135 | return 0; | |
136 | } | |
137 | ||
138 | trace->entries[trace->nr_entries++] = addr; | |
139 | ||
140 | return trace->nr_entries >= trace->max_entries; | |
141 | } | |
142 | ||
/*
 * Capture a stack trace for the given task into trace->entries.
 * A trailing ULONG_MAX entry terminates the trace when space allows.
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	struct stack_trace_data data;
	struct stackframe frame;

	data.trace = trace;
	data.skip = trace->skip;

	if (tsk != current) {
#ifdef CONFIG_SMP
		/*
		 * What guarantees do we have here that 'tsk' is not
		 * running on another CPU? For now, ignore it as we
		 * can't guarantee we won't explode.
		 */
		if (trace->nr_entries < trace->max_entries)
			trace->entries[trace->nr_entries++] = ULONG_MAX;
		return;
#else
		/* Start from the task's saved (switched-out) context. */
		data.no_sched_functions = 1;
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		frame.lr = 0;		/* recovered from the stack */
		frame.pc = thread_saved_pc(tsk);
#endif
	} else {
		/* Start from our own live registers. */
		register unsigned long current_sp asm ("A0StP");

		data.no_sched_functions = 0;
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_sp;
		frame.lr = (unsigned long)__builtin_return_address(0);
		frame.pc = (unsigned long)save_stack_trace_tsk;
	}

	walk_stackframe(&frame, save_trace, &data);
	/* Terminate the trace if there is room for the sentinel. */
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
182 | ||
/* Capture a stack trace of the current task. */
void save_stack_trace(struct stack_trace *trace)
{
	save_stack_trace_tsk(current, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif /* CONFIG_STACKTRACE */