/*
 * Stack trace / unwind support for the Meta (metag) architecture.
 */
1 | #include <linux/export.h> |
2 | #include <linux/sched.h> | |
b17b0153 | 3 | #include <linux/sched/debug.h> |
3f8c2452 | 4 | #include <linux/sched/task_stack.h> |
e8de3486 JH |
5 | #include <linux/stacktrace.h> |
6 | ||
7 | #include <asm/stacktrace.h> | |
8 | ||
9 | #if defined(CONFIG_FRAME_POINTER) | |
10 | ||
11 | #ifdef CONFIG_KALLSYMS | |
12 | #include <linux/kallsyms.h> | |
13 | #include <linux/module.h> | |
14 | ||
/*
 * Location and size of ___TBIBoingVec, resolved lazily via kallsyms.
 * tbi_boing_addr == 0 means "not looked up yet"; it is set to 1 when the
 * lookup fails, so the (failed) lookup is attempted only once.
 */
static unsigned long tbi_boing_addr;
static unsigned long tbi_boing_size;

/* Resolve the address and size of ___TBIBoingVec through kallsyms. */
static void tbi_boing_init(void)
{
	/* We need to know where TBIBoingVec is and its size */
	unsigned long size;
	unsigned long offset;
	char modname[MODULE_NAME_LEN];
	char name[KSYM_NAME_LEN];
	tbi_boing_addr = kallsyms_lookup_name("___TBIBoingVec");
	if (!tbi_boing_addr)
		tbi_boing_addr = 1; /* non-zero sentinel: don't retry the lookup */
	else if (!lookup_symbol_attrs(tbi_boing_addr, &size,
				      &offset, modname, name))
		tbi_boing_size = size;
}
32 | #endif | |
33 | ||
34 | #define ALIGN_DOWN(addr, size) ((addr)&(~((size)-1))) | |
35 | ||
/*
 * Unwind the current stack frame and store the new register values in the
 * structure passed as argument. Unwinding is equivalent to a function return,
 * hence the new PC value rather than LR should be used for backtrace.
 *
 * Returns 0 on success, -EINVAL when the frame is misaligned or the next
 * frame pointer falls outside the plausible stack range.
 */
int notrace unwind_frame(struct stackframe *frame)
{
	/* Reinterpret the current frame pointer as an on-stack metag_frame. */
	struct metag_frame *fp = (struct metag_frame *)frame->fp;
	unsigned long lr;
	unsigned long fpnew;

	/* Frame pointers must be 8-byte aligned; anything else is corrupt. */
	if (frame->fp & 0x7)
		return -EINVAL;

	fpnew = fp->fp;
	lr = fp->lr - 4;	/* back up from return address to the call site */

#ifdef CONFIG_KALLSYMS
	/* If we've reached TBIBoingVec then we're at an interrupt
	 * entry point or a syscall entry point. The frame pointer
	 * points to a pt_regs which can be used to continue tracing on
	 * the other side of the boing.
	 */
	if (!tbi_boing_addr)
		tbi_boing_init();
	if (tbi_boing_size && lr >= tbi_boing_addr &&
	    lr < tbi_boing_addr + tbi_boing_size) {
		struct pt_regs *regs = (struct pt_regs *)fpnew;
		/* Never unwind into a userspace context. */
		if (user_mode(regs))
			return -EINVAL;
		/* NOTE(review): AX[1].U0/DX[4].U1 appear to be the saved
		 * frame pointer and return address — confirm vs. metag ABI. */
		fpnew = regs->ctx.AX[1].U0;
		lr = regs->ctx.DX[4].U1;
	}
#endif

	/* stack grows up, so frame pointers must decrease */
	if (fpnew < (ALIGN_DOWN((unsigned long)fp, THREAD_SIZE) +
		     sizeof(struct thread_info)) || fpnew >= (unsigned long)fp)
		return -EINVAL;

	/* restore the registers from the stack frame */
	frame->fp = fpnew;
	frame->pc = lr;

	return 0;
}
82 | #else | |
83 | int notrace unwind_frame(struct stackframe *frame) | |
84 | { | |
85 | struct metag_frame *sp = (struct metag_frame *)frame->sp; | |
86 | ||
87 | if (frame->sp & 0x7) | |
88 | return -EINVAL; | |
89 | ||
90 | while (!kstack_end(sp)) { | |
91 | unsigned long addr = sp->lr - 4; | |
92 | sp--; | |
93 | ||
94 | if (__kernel_text_address(addr)) { | |
95 | frame->sp = (unsigned long)sp; | |
96 | frame->pc = addr; | |
97 | return 0; | |
98 | } | |
99 | } | |
100 | return -EINVAL; | |
101 | } | |
102 | #endif | |
103 | ||
104 | void notrace walk_stackframe(struct stackframe *frame, | |
105 | int (*fn)(struct stackframe *, void *), void *data) | |
106 | { | |
107 | while (1) { | |
108 | int ret; | |
109 | ||
110 | if (fn(frame, data)) | |
111 | break; | |
112 | ret = unwind_frame(frame); | |
113 | if (ret < 0) | |
114 | break; | |
115 | } | |
116 | } | |
117 | EXPORT_SYMBOL(walk_stackframe); | |
118 | ||
119 | #ifdef CONFIG_STACKTRACE | |
/* Cookie passed from save_stack_trace_tsk() to the save_trace() callback. */
struct stack_trace_data {
	struct stack_trace *trace;		/* output trace being filled */
	unsigned int no_sched_functions;	/* filter out scheduler internals */
	unsigned int skip;			/* entries still to be discarded */
};
125 | ||
126 | static int save_trace(struct stackframe *frame, void *d) | |
127 | { | |
128 | struct stack_trace_data *data = d; | |
129 | struct stack_trace *trace = data->trace; | |
130 | unsigned long addr = frame->pc; | |
131 | ||
132 | if (data->no_sched_functions && in_sched_functions(addr)) | |
133 | return 0; | |
134 | if (data->skip) { | |
135 | data->skip--; | |
136 | return 0; | |
137 | } | |
138 | ||
139 | trace->entries[trace->nr_entries++] = addr; | |
140 | ||
141 | return trace->nr_entries >= trace->max_entries; | |
142 | } | |
143 | ||
/*
 * Save a stack trace for @tsk into @trace.  For the current task the
 * walk starts from this function's own frame; for another task it starts
 * from that task's saved thread context (UP only — on SMP tracing another
 * task is refused because it may be running concurrently).
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	struct stack_trace_data data;
	struct stackframe frame;

	data.trace = trace;
	data.skip = trace->skip;

	if (tsk != current) {
#ifdef CONFIG_SMP
		/*
		 * What guarantees do we have here that 'tsk' is not
		 * running on another CPU? For now, ignore it as we
		 * can't guarantee we won't explode.
		 */
		if (trace->nr_entries < trace->max_entries)
			trace->entries[trace->nr_entries++] = ULONG_MAX;
		return;
#else
		/* UP: tsk is definitely not running; use its saved context. */
		data.no_sched_functions = 1;
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		frame.lr = 0;		/* recovered from the stack */
		frame.pc = thread_saved_pc(tsk);
#endif
	} else {
		/* NOTE(review): A0StP looks like the metag stack pointer
		 * register — confirm against the architecture manual. */
		register unsigned long current_sp asm ("A0StP");

		data.no_sched_functions = 0;
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_sp;
		frame.lr = (unsigned long)__builtin_return_address(0);
		frame.pc = (unsigned long)save_stack_trace_tsk;
	}

	walk_stackframe(&frame, save_trace, &data);
	/* Conventionally terminate the trace with ULONG_MAX if room remains. */
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
183 | ||
/* Save a stack trace for the current task. */
void save_stack_trace(struct stack_trace *trace)
{
	save_stack_trace_tsk(current, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
189 | #endif |