/*
 * Kernel unwinding support
 *
 * (c) 2002-2004 Randolph Chung <tausq@debian.org>
 *
 * Derived partially from the IA64 implementation. The PA-RISC
 * Runtime Architecture Document is also a useful reference to
 * understand what is happening here
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kallsyms.h>

#include <asm/uaccess.h>
#include <asm/assembly.h>

#include <asm/unwind.h>

/* #define DEBUG 1 */
#ifdef DEBUG
#define dbg(x...) printk(x)
#else
#define dbg(x...)
#endif

extern struct unwind_table_entry __start___unwind[];
extern struct unwind_table_entry __stop___unwind[];

static spinlock_t unwind_lock;
/*
 * the kernel unwind block is not dynamically allocated so that
 * we can call unwind_init as early in the bootup process as
 * possible (before the slab allocator is initialized)
 */
static struct unwind_table kernel_unwind_table __read_mostly;
static LIST_HEAD(unwind_tables);

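/*
 * Binary search the (sorted, non-overlapping) region table for the entry
 * covering addr; returns NULL if addr falls outside every region.
 */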
static inline const struct unwind_table_entry *
find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
{
	const struct unwind_table_entry *e = NULL;
	unsigned long lo, hi, mid;

	lo = 0;
	hi = table->length - 1;

	while (lo <= hi) {
		mid = (hi - lo) / 2 + lo;
		e = &table->table[mid];
		if (addr < e->region_start)
			hi = mid - 1;
		else if (addr > e->region_end)
			lo = mid + 1;
		else
			return e;
	}

	return NULL;
}

static const struct unwind_table_entry *
find_unwind_entry(unsigned long addr)
{
	struct unwind_table *table;
	const struct unwind_table_entry *e = NULL;

	if (addr >= kernel_unwind_table.start &&
	    addr <= kernel_unwind_table.end)
		e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
	else
		list_for_each_entry(table, &unwind_tables, list) {
			if (addr >= table->start &&
			    addr <= table->end)
				e = find_unwind_entry_in_table(table, addr);
			if (e)
				break;
		}

	return e;
}

static void
unwind_table_init(struct unwind_table *table, const char *name,
		  unsigned long base_addr, unsigned long gp,
		  void *table_start, void *table_end)
{
	struct unwind_table_entry *start = table_start;
	struct unwind_table_entry *end =
		(struct unwind_table_entry *)table_end - 1;

	table->name = name;
	table->base_addr = base_addr;
	table->gp = gp;
	table->start = base_addr + start->region_start;
	table->end = base_addr + end->region_end;
	table->table = (struct unwind_table_entry *)table_start;
	table->length = end - start + 1;
	INIT_LIST_HEAD(&table->list);

	for (; start <= end; start++) {
		if (start < end &&
		    start->region_end > (start+1)->region_start) {
			printk("WARNING: Out of order unwind entry! %p and %p\n", start, start+1);
		}

		start->region_start += base_addr;
		start->region_end += base_addr;
	}
}

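/*
 * Simple in-place insertion sort on region_start.  Presumably adequate here
 * because unwind tables are small and usually already close to sorted.
 */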
static void
unwind_table_sort(struct unwind_table_entry *start,
		  struct unwind_table_entry *finish)
{
	struct unwind_table_entry el, *p, *q;

	for (p = start + 1; p < finish; ++p) {
		if (p[0].region_start < p[-1].region_start) {
			el = *p;
			q = p;
			do {
				q[0] = q[-1];
				--q;
			} while (q > start &&
				 el.region_start < q[-1].region_start);
			*q = el;
		}
	}
}

struct unwind_table *
unwind_table_add(const char *name, unsigned long base_addr,
		 unsigned long gp,
		 void *start, void *end)
{
	struct unwind_table *table;
	unsigned long flags;
	struct unwind_table_entry *s = (struct unwind_table_entry *)start;
	struct unwind_table_entry *e = (struct unwind_table_entry *)end;

	unwind_table_sort(s, e);

	table = kmalloc(sizeof(struct unwind_table), GFP_USER);
	if (table == NULL)
		return NULL;
	unwind_table_init(table, name, base_addr, gp, start, end);
	spin_lock_irqsave(&unwind_lock, flags);
	list_add_tail(&table->list, &unwind_tables);
	spin_unlock_irqrestore(&unwind_lock, flags);

	return table;
}

void unwind_table_remove(struct unwind_table *table)
{
	unsigned long flags;

	spin_lock_irqsave(&unwind_lock, flags);
	list_del(&table->list);
	spin_unlock_irqrestore(&unwind_lock, flags);

	kfree(table);
}
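
/*
 * A minimal usage sketch for the two calls above (hypothetical caller such
 * as the module loader once a module's unwind section has been located;
 * "mod", "gp_value", "sect_start" and "sect_end" are illustrative names,
 * not part of this file):
 *
 *	struct unwind_table *t;
 *
 *	t = unwind_table_add(mod->name, (unsigned long)mod->module_core,
 *			     gp_value, sect_start, sect_end);
 *	...
 *	if (t)
 *		unwind_table_remove(t);
 */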

/* Called from setup_arch to import the kernel unwind info */
static int unwind_init(void)
{
	long start, stop;
	register unsigned long gp __asm__ ("r27");

	start = (long)&__start___unwind[0];
	stop = (long)&__stop___unwind[0];

	spin_lock_init(&unwind_lock);

	printk("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
	       start, stop,
	       (stop - start) / sizeof(struct unwind_table_entry));

	unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START,
			  gp,
			  &__start___unwind[0], &__stop___unwind[0]);
#if 0
	{
		int i;
		for (i = 0; i < 10; i++)
		{
			printk("region 0x%x-0x%x\n",
			       __start___unwind[i].region_start,
			       __start___unwind[i].region_end);
		}
	}
#endif
	return 0;
}

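/*
 * Compute the previous frame (prev_sp/prev_ip) for info->ip.  When unwind
 * info exists, the function prologue is scanned for the instructions that
 * grow the stack and that save rp.  Stack adjustments carry a low-sign-
 * extended 14-bit displacement; as an illustrative worked example (the
 * encoding shown is for explanation only), "ldo 128(sp),sp" assembles to
 * 0x37de0100, and the decode below yields
 * frame_size += ((0x0100 & 0x3fff) >> 1) = 128.
 */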
static void unwind_frame_regs(struct unwind_frame_info *info)
{
	const struct unwind_table_entry *e;
	unsigned long npc;
	unsigned int insn;
	long frame_size = 0;
	int looking_for_rp, rpoffset = 0;

	e = find_unwind_entry(info->ip);
	if (e == NULL) {
		unsigned long sp;
		extern char _stext[], _etext[];

		dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip);

#ifdef CONFIG_KALLSYMS
		/* Handle some frequent special cases.... */
		{
			char symname[KSYM_NAME_LEN+1];
			char *modname;
			unsigned long symsize, offset;

			kallsyms_lookup(info->ip, &symsize, &offset,
					&modname, symname);

			dbg("info->ip = 0x%lx, name = %s\n", info->ip, symname);

			if (strcmp(symname, "_switch_to_ret") == 0) {
				info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
				info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
				dbg("_switch_to_ret @ %lx - setting "
				    "prev_sp=%lx prev_ip=%lx\n",
				    info->ip, info->prev_sp,
				    info->prev_ip);
				return;
			} else if (strcmp(symname, "ret_from_kernel_thread") == 0 ||
				   strcmp(symname, "syscall_exit") == 0) {
				info->prev_ip = info->prev_sp = 0;
				return;
			}
		}
#endif

		/* Since we are doing the unwinding blind, we don't know if
		   we are adjusting the stack correctly or extracting the rp
		   correctly. The rp is checked to see if it belongs to the
		   kernel text section; if not, we assume we don't have a
		   correct stack frame and we continue to unwind the stack.
		   This is not quite correct, and will fail for loadable
		   modules. */
		sp = info->sp & ~63;
		do {
			unsigned long tmp;

			info->prev_sp = sp - 64;
			info->prev_ip = 0;
			if (get_user(tmp, (unsigned long *)(info->prev_sp - RP_OFFSET)))
				break;
			info->prev_ip = tmp;
			sp = info->prev_sp;
		} while (info->prev_ip < (unsigned long)_stext ||
			 info->prev_ip > (unsigned long)_etext);

		info->rp = 0;

		dbg("analyzing func @ %lx with no unwind info, setting "
		    "prev_sp=%lx prev_ip=%lx\n", info->ip,
		    info->prev_sp, info->prev_ip);
	} else {
		dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, "
		    "Save_RP = %d, Millicode = %d size = %u\n",
		    e->region_start, e->region_end, e->Save_SP, e->Save_RP,
		    e->Millicode, e->Total_frame_size);

		looking_for_rp = e->Save_RP;

		for (npc = e->region_start;
		     (frame_size < (e->Total_frame_size << 3) ||
		      looking_for_rp) &&
		     npc < info->ip;
		     npc += 4) {

			insn = *(unsigned int *)npc;

			if ((insn & 0xffffc000) == 0x37de0000 ||
			    (insn & 0xffe00000) == 0x6fc00000) {
				/* ldo X(sp), sp, or stwm X,D(sp) */
				frame_size += (insn & 0x1 ? -1 << 13 : 0) |
					((insn & 0x3fff) >> 1);
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if ((insn & 0xffe00008) == 0x73c00008) {
				/* std,ma X,D(sp) */
				frame_size += (insn & 0x1 ? -1 << 13 : 0) |
					(((insn >> 4) & 0x3ff) << 3);
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if (insn == 0x6bc23fd9) {
				/* stw rp,-20(sp) */
				rpoffset = 20;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=stw rp,"
				    "-20(sp) @ %lx\n", info->ip, npc);
			} else if (insn == 0x0fc212c1) {
				/* std rp,-16(sr0,sp) */
				rpoffset = 16;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=std rp,"
				    "-16(sp) @ %lx\n", info->ip, npc);
			}
		}

		info->prev_sp = info->sp - frame_size;
		if (e->Millicode)
			info->rp = info->r31;
		else if (rpoffset)
			info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
		info->prev_ip = info->rp;
		info->rp = 0;

		dbg("analyzing func @ %lx, setting prev_sp=%lx "
		    "prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp,
		    info->prev_ip, npc);
	}
}

void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
		       struct pt_regs *regs)
{
	memset(info, 0, sizeof(struct unwind_frame_info));
	info->t = t;
	info->sp = regs->gr[30];
	info->ip = regs->iaoq[0];
	info->rp = regs->gr[2];
	info->r31 = regs->gr[31];

	dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n",
	    t ? (int)t->pid : -1, info->sp, info->ip);
}

void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
{
	struct pt_regs *r = &t->thread.regs;
	struct pt_regs *r2;

	r2 = (struct pt_regs *)kmalloc(sizeof(struct pt_regs), GFP_KERNEL);
	if (!r2)
		return;
	*r2 = *r;
	r2->gr[30] = r->ksp;
	r2->iaoq[0] = r->kpc;
	unwind_frame_init(info, t, r2);
	kfree(r2);
}

void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs)
{
	unwind_frame_init(info, current, regs);
}

int unwind_once(struct unwind_frame_info *next_frame)
{
	unwind_frame_regs(next_frame);

	if (next_frame->prev_sp == 0 ||
	    next_frame->prev_ip == 0)
		return -1;

	next_frame->sp = next_frame->prev_sp;
	next_frame->ip = next_frame->prev_ip;
	next_frame->prev_sp = 0;
	next_frame->prev_ip = 0;

	dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n",
	    next_frame->t ? (int)next_frame->t->pid : -1,
	    next_frame->sp, next_frame->ip);

	return 0;
}

int unwind_to_user(struct unwind_frame_info *info)
{
	int ret;

	do {
		ret = unwind_once(info);
	} while (!ret && !(info->ip & 3));

	return ret;
}
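
/*
 * A minimal sketch of how a caller might walk a blocked task's kernel stack
 * with this interface (illustrative only; "task" and the printk format are
 * assumptions, not part of this file):
 *
 *	struct unwind_frame_info info;
 *
 *	unwind_frame_init_from_blocked_task(&info, task);
 *	while (unwind_once(&info) == 0)
 *		printk(" [<%08lx>]\n", info.ip);
 */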

module_init(unwind_init);