/* arch/metag/kernel/stacktrace.c */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/stacktrace.h>

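/*
 * Two unwinders are provided: a frame-pointer-based walker when
 * CONFIG_FRAME_POINTER is enabled, and a conservative fallback that
 * scans the stack for kernel text addresses otherwise.
 */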
#if defined(CONFIG_FRAME_POINTER)

#ifdef CONFIG_KALLSYMS
#include <linux/kallsyms.h>
#include <linux/module.h>

static unsigned long tbi_boing_addr;
static unsigned long tbi_boing_size;

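/*
 * Look up ___TBIBoingVec lazily on the first unwind. tbi_boing_addr
 * doubles as a "lookup attempted" flag: it is set to 1 if the symbol
 * cannot be found, so the lookup is not retried on every frame.
 */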
static void tbi_boing_init(void)
{
	/* We need to know where TBIBoingVec is and its size */
	unsigned long size;
	unsigned long offset;
	char modname[MODULE_NAME_LEN];
	char name[KSYM_NAME_LEN];
	tbi_boing_addr = kallsyms_lookup_name("___TBIBoingVec");
	if (!tbi_boing_addr)
		tbi_boing_addr = 1;
	else if (!lookup_symbol_attrs(tbi_boing_addr, &size,
				      &offset, modname, name))
		tbi_boing_size = size;
}
#endif

/*
 * Unwind the current stack frame and store the new register values in the
 * structure passed as argument. Unwinding is equivalent to a function return,
 * hence the new PC value rather than LR should be used for backtrace.
 */
int notrace unwind_frame(struct stackframe *frame)
{
	struct metag_frame *fp = (struct metag_frame *)frame->fp;
	unsigned long lr;
	unsigned long fpnew;

	if (frame->fp & 0x7)
		return -EINVAL;

	fpnew = fp->fp;
	lr = fp->lr - 4;

#ifdef CONFIG_KALLSYMS
	/* If we've reached TBIBoingVec then we're at an interrupt
	 * entry point or a syscall entry point. The frame pointer
	 * points to a pt_regs which can be used to continue tracing on
	 * the other side of the boing.
	 */
	if (!tbi_boing_addr)
		tbi_boing_init();
	if (tbi_boing_size && lr >= tbi_boing_addr &&
	    lr < tbi_boing_addr + tbi_boing_size) {
		struct pt_regs *regs = (struct pt_regs *)fpnew;
		if (user_mode(regs))
			return -EINVAL;
		fpnew = regs->ctx.AX[1].U0;
		lr = regs->ctx.DX[4].U1;
	}
#endif

	/* stack grows up, so frame pointers must decrease */
	if (fpnew < (ALIGN_DOWN((unsigned long)fp, THREAD_SIZE) +
		     sizeof(struct thread_info)) || fpnew >= (unsigned long)fp)
		return -EINVAL;

	/* restore the registers from the stack frame */
	frame->fp = fpnew;
	frame->pc = lr;

	return 0;
}
#else
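/*
 * Fallback unwinder for kernels built without frame pointers: scan the
 * stack for saved values that look like kernel text addresses and
 * report each one as a frame.
 */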
int notrace unwind_frame(struct stackframe *frame)
{
	struct metag_frame *sp = (struct metag_frame *)frame->sp;

	if (frame->sp & 0x7)
		return -EINVAL;

	while (!kstack_end(sp)) {
		unsigned long addr = sp->lr - 4;
		sp--;

		if (__kernel_text_address(addr)) {
			frame->sp = (unsigned long)sp;
			frame->pc = addr;
			return 0;
		}
	}
	return -EINVAL;
}
#endif

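/*
 * Walk the stack, calling fn() on each frame. Stop when fn() returns
 * non-zero or when no further frame can be unwound.
 */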
void notrace walk_stackframe(struct stackframe *frame,
		     int (*fn)(struct stackframe *, void *), void *data)
{
	while (1) {
		int ret;

		if (fn(frame, data))
			break;
		ret = unwind_frame(frame);
		if (ret < 0)
			break;
	}
}
EXPORT_SYMBOL(walk_stackframe);

#ifdef CONFIG_STACKTRACE
struct stack_trace_data {
	struct stack_trace *trace;
	unsigned int no_sched_functions;
	unsigned int skip;
};

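/*
 * Callback for walk_stackframe(): record frame->pc in the trace,
 * skipping scheduler functions when requested and the first
 * data->skip entries. Returns non-zero once the trace is full.
 */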
static int save_trace(struct stackframe *frame, void *d)
{
	struct stack_trace_data *data = d;
	struct stack_trace *trace = data->trace;
	unsigned long addr = frame->pc;

	if (data->no_sched_functions && in_sched_functions(addr))
		return 0;
	if (data->skip) {
		data->skip--;
		return 0;
	}

	trace->entries[trace->nr_entries++] = addr;

	return trace->nr_entries >= trace->max_entries;
}

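/*
 * Capture a stack trace for @tsk. For the current task the unwind starts
 * from this function's own frame; for another task it starts from that
 * thread's saved context. Tracing another task is not supported on SMP,
 * where the trace is terminated immediately.
 */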
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	struct stack_trace_data data;
	struct stackframe frame;

	data.trace = trace;
	data.skip = trace->skip;

	if (tsk != current) {
#ifdef CONFIG_SMP
		/*
		 * What guarantees do we have here that 'tsk' is not
		 * running on another CPU? For now, ignore it as we
		 * can't guarantee we won't explode.
		 */
		if (trace->nr_entries < trace->max_entries)
			trace->entries[trace->nr_entries++] = ULONG_MAX;
		return;
#else
		data.no_sched_functions = 1;
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		frame.lr = 0;		/* recovered from the stack */
		frame.pc = thread_saved_pc(tsk);
#endif
	} else {
		register unsigned long current_sp asm ("A0StP");

		data.no_sched_functions = 0;
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_sp;
		frame.lr = (unsigned long)__builtin_return_address(0);
		frame.pc = (unsigned long)save_stack_trace_tsk;
	}

	walk_stackframe(&frame, save_trace, &data);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}

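/*
 * Save a stack trace of the current task.
 */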
void save_stack_trace(struct stack_trace *trace)
{
	save_stack_trace_tsk(current, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif