/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/kernel.h>
#include <linux/string.h>

#include <asm/backtrace.h>

#include <arch/chip.h>

#if TILE_CHIP < 10


#include <asm/opcode-tile.h>


#define TREG_SP 54
#define TREG_LR 55


/** A decoded bundle used for backtracer analysis. */
struct BacktraceBundle {
	tile_bundle_bits bits;
	int num_insns;
	struct tile_decoded_instruction
		insns[TILE_MAX_INSTRUCTIONS_PER_BUNDLE];
};


/* This implementation only makes sense for native tools. */
/** Default function to read memory. */
static bool bt_read_memory(void *result, VirtualAddress addr,
			   size_t size, void *extra)
{
	/* FIXME: this should do some horrible signal stuff to catch
	 * SEGV cleanly and fail.
	 *
	 * Or else the caller should do the setjmp for efficiency.
	 */

	memcpy(result, (const void *)addr, size);
	return true;
}

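/* Illustrative sketch (not part of this file): a caller that needs
 * fault-tolerant reads can pass its own reader to backtrace_init()
 * instead of relying on the default above.  The helper name
 * 'bt_read_memory_nofault' is hypothetical; it simply wraps
 * probe_kernel_read(), which returns 0 on success and fails gracefully
 * on a bad address instead of taking a fault:
 *
 *	static bool bt_read_memory_nofault(void *result, VirtualAddress addr,
 *					   size_t size, void *extra)
 *	{
 *		return probe_kernel_read(result,
 *					 (const void *)addr, size) == 0;
 *	}
 */
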
/** Locates an instruction inside the given bundle that
 * has the specified mnemonic, and whose first 'num_operands_to_match'
 * operands exactly match those in 'operand_values'.
 */
static const struct tile_decoded_instruction *find_matching_insn(
	const struct BacktraceBundle *bundle,
	tile_mnemonic mnemonic,
	const int *operand_values,
	int num_operands_to_match)
{
	int i, j;
	bool match;

	for (i = 0; i < bundle->num_insns; i++) {
		const struct tile_decoded_instruction *insn =
			&bundle->insns[i];

		if (insn->opcode->mnemonic != mnemonic)
			continue;

		match = true;
		for (j = 0; j < num_operands_to_match; j++) {
			if (operand_values[j] != insn->operand_values[j]) {
				match = false;
				break;
			}
		}

		if (match)
			return insn;
	}

	return NULL;
}

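/* For example, bt_has_addi_sp() below matches 'addi sp, sp, OFFSET' by
 * passing { TREG_SP, TREG_SP } as the first two operand values to
 * find_matching_insn(), while bt_has_iret() matches any 'iret' by
 * passing no operands at all.
 */
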
/** Does this bundle contain an 'iret' instruction? */
static inline bool bt_has_iret(const struct BacktraceBundle *bundle)
{
	return find_matching_insn(bundle, TILE_OPC_IRET, NULL, 0) != NULL;
}

/** Does this bundle contain an 'addi sp, sp, OFFSET' or
 * 'addli sp, sp, OFFSET' instruction, and if so, what is OFFSET?
 */
static bool bt_has_addi_sp(const struct BacktraceBundle *bundle, int *adjust)
{
	static const int vals[2] = { TREG_SP, TREG_SP };

	const struct tile_decoded_instruction *insn =
		find_matching_insn(bundle, TILE_OPC_ADDI, vals, 2);
	if (insn == NULL)
		insn = find_matching_insn(bundle, TILE_OPC_ADDLI, vals, 2);
	if (insn == NULL)
		return false;

	*adjust = insn->operand_values[2];
	return true;
}

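/* Note that both the prolog form ('addi sp, sp, -FRAME_SIZE') and the
 * epilog form ('addi sp, sp, FRAME_SIZE') match here; the caller,
 * find_caller_pc_and_caller_sp(), uses the sign of *adjust to tell the
 * two apart.
 */
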
/** Does this bundle contain any 'info OP' or 'infol OP'
 * instruction, and if so, what are their OPs?  Note that OP is interpreted
 * as an unsigned value by this code since that's what the caller wants.
 * Returns the number of info ops found.
 */
static int bt_get_info_ops(const struct BacktraceBundle *bundle,
			   int operands[MAX_INFO_OPS_PER_BUNDLE])
{
	int num_ops = 0;
	int i;

	for (i = 0; i < bundle->num_insns; i++) {
		const struct tile_decoded_instruction *insn =
			&bundle->insns[i];

		if (insn->opcode->mnemonic == TILE_OPC_INFO ||
		    insn->opcode->mnemonic == TILE_OPC_INFOL) {
			operands[num_ops++] = insn->operand_values[0];
		}
	}

	return num_ops;
}

/** Does this bundle contain a jrp instruction, and if so, to which
 * register is it jumping?
 */
static bool bt_has_jrp(const struct BacktraceBundle *bundle, int *target_reg)
{
	const struct tile_decoded_instruction *insn =
		find_matching_insn(bundle, TILE_OPC_JRP, NULL, 0);
	if (insn == NULL)
		return false;

	*target_reg = insn->operand_values[0];
	return true;
}

/** Does this bundle modify the specified register in any way? */
static bool bt_modifies_reg(const struct BacktraceBundle *bundle, int reg)
{
	int i, j;
	for (i = 0; i < bundle->num_insns; i++) {
		const struct tile_decoded_instruction *insn =
			&bundle->insns[i];

		if (insn->opcode->implicitly_written_register == reg)
			return true;

		for (j = 0; j < insn->opcode->num_operands; j++)
			if (insn->operands[j]->is_dest_reg &&
			    insn->operand_values[j] == reg)
				return true;
	}

	return false;
}

/** Does this bundle modify sp? */
static inline bool bt_modifies_sp(const struct BacktraceBundle *bundle)
{
	return bt_modifies_reg(bundle, TREG_SP);
}

/** Does this bundle modify lr? */
static inline bool bt_modifies_lr(const struct BacktraceBundle *bundle)
{
	return bt_modifies_reg(bundle, TREG_LR);
}

/** Does this bundle contain the instruction 'move fp, sp'? */
static inline bool bt_has_move_r52_sp(const struct BacktraceBundle *bundle)
{
	static const int vals[2] = { 52, TREG_SP };
	return find_matching_insn(bundle, TILE_OPC_MOVE, vals, 2) != NULL;
}

/** Does this bundle contain the instruction 'sw sp, lr'? */
static inline bool bt_has_sw_sp_lr(const struct BacktraceBundle *bundle)
{
	static const int vals[2] = { TREG_SP, TREG_LR };
	return find_matching_insn(bundle, TILE_OPC_SW, vals, 2) != NULL;
}

/** Locates the caller's PC and SP for a program starting at the
 * given address.
 */
static void find_caller_pc_and_caller_sp(CallerLocation *location,
					 const VirtualAddress start_pc,
					 BacktraceMemoryReader read_memory_func,
					 void *read_memory_func_extra)
{
	/* Have we explicitly decided what the sp is,
	 * rather than just the default?
	 */
	bool sp_determined = false;

	/* Has any bundle seen so far modified lr? */
	bool lr_modified = false;

	/* Have we seen a move from sp to fp? */
	bool sp_moved_to_r52 = false;

	/* Have we seen a terminating bundle? */
	bool seen_terminating_bundle = false;

	/* Cut down on round-trip reading overhead by reading several
	 * bundles at a time.
	 */
	tile_bundle_bits prefetched_bundles[32];
	int num_bundles_prefetched = 0;
	int next_bundle = 0;
	VirtualAddress pc;

	/* Default to assuming that the caller's sp is the current sp.
	 * This is necessary to handle the case where we start backtracing
	 * right at the end of the epilog.
	 */
	location->sp_location = SP_LOC_OFFSET;
	location->sp_offset = 0;

	/* Default to having no idea where the caller PC is. */
	location->pc_location = PC_LOC_UNKNOWN;

	/* Don't even try if the PC is not aligned. */
	if (start_pc % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0)
		return;

	for (pc = start_pc;; pc += sizeof(tile_bundle_bits)) {

		struct BacktraceBundle bundle;
		int num_info_ops, info_operands[MAX_INFO_OPS_PER_BUNDLE];
		int one_ago, jrp_reg;
		bool has_jrp;

		if (next_bundle >= num_bundles_prefetched) {
			/* Prefetch some bytes, but don't cross a page
			 * boundary since that might cause a read failure we
			 * don't care about if we only need the first few
			 * bytes.  Note: we don't care what the actual page
			 * size is; using the minimum possible page size will
			 * prevent any problems.
			 */
			unsigned int bytes_to_prefetch = 4096 - (pc & 4095);
			if (bytes_to_prefetch > sizeof prefetched_bundles)
				bytes_to_prefetch = sizeof prefetched_bundles;

			if (!read_memory_func(prefetched_bundles, pc,
					      bytes_to_prefetch,
					      read_memory_func_extra)) {
				if (pc == start_pc) {
					/* The program probably called a bad
					 * address, such as a NULL pointer.
					 * So treat this as if we are at the
					 * start of the function prolog so the
					 * backtrace will show how we got here.
					 */
					location->pc_location = PC_LOC_IN_LR;
					return;
				}

				/* Unreadable address.  Give up. */
				break;
			}

			next_bundle = 0;
			num_bundles_prefetched =
				bytes_to_prefetch / sizeof(tile_bundle_bits);
		}

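		/* Worked example of the arithmetic above: if pc ends in
		 * 0xff8, bytes_to_prefetch is 4096 - 0xff8 = 8, i.e. a
		 * single tile_bundle_bits, so the read never spills into
		 * the next (possibly unmapped) 4KB page; otherwise it is
		 * capped at sizeof prefetched_bundles (32 bundles).
		 */
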
		/* Decode the next bundle. */
		bundle.bits = prefetched_bundles[next_bundle++];
		bundle.num_insns =
			parse_insn_tile(bundle.bits, pc, bundle.insns);
		num_info_ops = bt_get_info_ops(&bundle, info_operands);

		/* First look at any one_ago info ops if they are interesting,
		 * since they should shadow any non-one-ago info ops.
		 */
		for (one_ago = (pc != start_pc) ? 1 : 0;
		     one_ago >= 0; one_ago--) {
			int i;
			for (i = 0; i < num_info_ops; i++) {
				int info_operand = info_operands[i];
				if (info_operand < CALLER_UNKNOWN_BASE) {
					/* Weird; reserved value, ignore it. */
					continue;
				}

				/* Skip info ops which are not in the
				 * "one_ago" mode we want right now.
				 */
				if (((info_operand & ONE_BUNDLE_AGO_FLAG) != 0)
				    != (one_ago != 0))
					continue;

				/* Clear the flag to make later checking
				 * easier. */
				info_operand &= ~ONE_BUNDLE_AGO_FLAG;

				/* Default to looking at PC_IN_LR_FLAG. */
				if (info_operand & PC_IN_LR_FLAG)
					location->pc_location =
						PC_LOC_IN_LR;
				else
					location->pc_location =
						PC_LOC_ON_STACK;

				switch (info_operand) {
				case CALLER_UNKNOWN_BASE:
					location->pc_location = PC_LOC_UNKNOWN;
					location->sp_location = SP_LOC_UNKNOWN;
					return;

				case CALLER_SP_IN_R52_BASE:
				case CALLER_SP_IN_R52_BASE | PC_IN_LR_FLAG:
					location->sp_location = SP_LOC_IN_R52;
					return;

				default:
				{
					const unsigned int val = info_operand
						- CALLER_SP_OFFSET_BASE;
					const unsigned int sp_offset =
						(val >> NUM_INFO_OP_FLAGS) * 8;
					if (sp_offset < 32768) {
						/* This is a properly encoded
						 * SP offset. */
						location->sp_location =
							SP_LOC_OFFSET;
						location->sp_offset =
							sp_offset;
						return;
					} else {
						/* This looked like an SP
						 * offset, but it's outside
						 * the legal range, so this
						 * must be an unrecognized
						 * info operand.  Ignore it.
						 */
					}
				}
					break;
				}
			}
		}

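		/* Worked example of the encoding handled above (the CALLER_*
		 * and *_FLAG constants come in via <asm/backtrace.h>): an
		 * info operand of
		 * CALLER_SP_OFFSET_BASE + (N << NUM_INFO_OP_FLAGS) + flags
		 * says the caller's sp sits N*8 bytes above the current sp;
		 * ONE_BUNDLE_AGO_FLAG and PC_IN_LR_FLAG ride in the low flag
		 * bits and are shifted out before the offset is used.
		 */
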
		if (seen_terminating_bundle) {
			/* We saw a terminating bundle during the previous
			 * iteration, so we were only looking for an info op.
			 */
			break;
		}

		if (bundle.bits == 0) {
			/* Wacky terminating bundle.  Stop looping, and hope
			 * we've already seen enough to find the caller.
			 */
			break;
		}

		/*
		 * Try to determine caller's SP.
		 */

		if (!sp_determined) {
			int adjust;
			if (bt_has_addi_sp(&bundle, &adjust)) {
				location->sp_location = SP_LOC_OFFSET;

				if (adjust <= 0) {
					/* We are in prolog about to adjust
					 * SP. */
					location->sp_offset = 0;
				} else {
					/* We are in epilog restoring SP. */
					location->sp_offset = adjust;
				}

				sp_determined = true;
			} else {
				if (bt_has_move_r52_sp(&bundle)) {
					/* Maybe in prolog, creating an
					 * alloca-style frame.  But maybe in
					 * the middle of a fixed-size frame
					 * clobbering r52 with SP.
					 */
					sp_moved_to_r52 = true;
				}

				if (bt_modifies_sp(&bundle)) {
					if (sp_moved_to_r52) {
						/* We saw SP get saved into
						 * r52 earlier (or now), which
						 * must have been in the
						 * prolog, so we now know that
						 * SP is still holding the
						 * caller's sp value.
						 */
						location->sp_location =
							SP_LOC_OFFSET;
						location->sp_offset = 0;
					} else {
						/* Someone must have saved
						 * aside the caller's SP value
						 * into r52, so r52 holds the
						 * current value.
						 */
						location->sp_location =
							SP_LOC_IN_R52;
					}
					sp_determined = true;
				}
			}
		}

		if (bt_has_iret(&bundle)) {
			/* This is a terminating bundle. */
			seen_terminating_bundle = true;
			continue;
		}

		/*
		 * Try to determine caller's PC.
		 */

		jrp_reg = -1;
		has_jrp = bt_has_jrp(&bundle, &jrp_reg);
		if (has_jrp)
			seen_terminating_bundle = true;

		if (location->pc_location == PC_LOC_UNKNOWN) {
			if (has_jrp) {
				if (jrp_reg == TREG_LR && !lr_modified) {
					/* Looks like a leaf function, or else
					 * lr is already restored. */
					location->pc_location =
						PC_LOC_IN_LR;
				} else {
					location->pc_location =
						PC_LOC_ON_STACK;
				}
			} else if (bt_has_sw_sp_lr(&bundle)) {
				/* In prolog, spilling initial lr to stack. */
				location->pc_location = PC_LOC_IN_LR;
			} else if (bt_modifies_lr(&bundle)) {
				lr_modified = true;
			}
		}
	}
}

void backtrace_init(BacktraceIterator *state,
		    BacktraceMemoryReader read_memory_func,
		    void *read_memory_func_extra,
		    VirtualAddress pc, VirtualAddress lr,
		    VirtualAddress sp, VirtualAddress r52)
{
	CallerLocation location;
	VirtualAddress fp, initial_frame_caller_pc;

	if (read_memory_func == NULL) {
		read_memory_func = bt_read_memory;
	}

	/* Find out where we are in the initial frame. */
	find_caller_pc_and_caller_sp(&location, pc,
				     read_memory_func, read_memory_func_extra);

	switch (location.sp_location) {
	case SP_LOC_UNKNOWN:
		/* Give up. */
		fp = -1;
		break;

	case SP_LOC_IN_R52:
		fp = r52;
		break;

	case SP_LOC_OFFSET:
		fp = sp + location.sp_offset;
		break;

	default:
		/* Give up. */
		fp = -1;
		break;
	}

	/* The frame pointer should theoretically be aligned mod 8.  If
	 * it's not even aligned mod 4 then something terrible happened
	 * and we should mark it as invalid.
	 */
	if (fp % 4 != 0)
		fp = -1;

	/* -1 means "don't know initial_frame_caller_pc". */
	initial_frame_caller_pc = -1;

	switch (location.pc_location) {
	case PC_LOC_UNKNOWN:
		/* Give up. */
		fp = -1;
		break;

	case PC_LOC_IN_LR:
		if (lr == 0 || lr % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0) {
			/* Give up. */
			fp = -1;
		} else {
			initial_frame_caller_pc = lr;
		}
		break;

	case PC_LOC_ON_STACK:
		/* Leave initial_frame_caller_pc as -1,
		 * meaning check the stack.
		 */
		break;

	default:
		/* Give up. */
		fp = -1;
		break;
	}

	state->pc = pc;
	state->sp = sp;
	state->fp = fp;
	state->initial_frame_caller_pc = initial_frame_caller_pc;
	state->read_memory_func = read_memory_func;
	state->read_memory_func_extra = read_memory_func_extra;
}

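/* Note derived from backtrace_next() below: when valid, state->fp points
 * at the current frame's two-word linkage area, where word 0 holds the
 * caller's pc and word 1 the caller's fp.
 */
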
bool backtrace_next(BacktraceIterator *state)
{
	VirtualAddress next_fp, next_pc, next_frame[2];

	if (state->fp == -1) {
		/* No parent frame. */
		return false;
	}

	/* Try to read the frame linkage data chaining to the next function. */
	if (!state->read_memory_func(&next_frame, state->fp, sizeof next_frame,
				     state->read_memory_func_extra)) {
		return false;
	}

	next_fp = next_frame[1];
	if (next_fp % 4 != 0) {
		/* Caller's frame pointer is suspect, so give up.
		 * Technically it should be aligned mod 8, but we will
		 * be forgiving here.
		 */
		return false;
	}

	if (state->initial_frame_caller_pc != -1) {
		/* We must be in the initial stack frame and already know the
		 * caller PC.
		 */
		next_pc = state->initial_frame_caller_pc;

		/* Force reading stack next time, in case we were in the
		 * initial frame.  We don't do this above just to paranoidly
		 * avoid changing the struct at all when we return false.
		 */
		state->initial_frame_caller_pc = -1;
	} else {
		/* Get the caller PC from the frame linkage area. */
		next_pc = next_frame[0];
		if (next_pc == 0 ||
		    next_pc % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0) {
			/* The PC is suspect, so give up. */
			return false;
		}
	}

	/* Update state to become the caller's stack frame. */
	state->pc = next_pc;
	state->sp = state->fp;
	state->fp = next_fp;

	return true;
}

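/* Illustrative usage sketch (a hypothetical caller, not code from this
 * file): prime the iterator with backtrace_init() and then walk up one
 * caller frame at a time with backtrace_next(), e.g.
 *
 *	BacktraceIterator it;
 *	backtrace_init(&it, NULL, NULL, pc, lr, sp, r52);
 *	do {
 *		printk(KERN_ERR " frame: %#lx\n", (unsigned long)it.pc);
 *	} while (backtrace_next(&it));
 *
 * Passing NULL as read_memory_func selects the default bt_read_memory()
 * above.
 */
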
#else /* TILE_CHIP < 10 */

void backtrace_init(BacktraceIterator *state,
		    BacktraceMemoryReader read_memory_func,
		    void *read_memory_func_extra,
		    VirtualAddress pc, VirtualAddress lr,
		    VirtualAddress sp, VirtualAddress r52)
{
	state->pc = pc;
	state->sp = sp;
	state->fp = -1;
	state->initial_frame_caller_pc = -1;
	state->read_memory_func = read_memory_func;
	state->read_memory_func_extra = read_memory_func_extra;
}

bool backtrace_next(BacktraceIterator *state) { return false; }

#endif /* TILE_CHIP < 10 */