/*
 * Stack trace management functions
 *
 * Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

3599fe12 TG |
15 | void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, |
16 | struct task_struct *task, struct pt_regs *regs) | |
9745512c | 17 | { |
49a612c6 JP |
18 | struct unwind_state state; |
19 | unsigned long addr; | |
9745512c | 20 | |
3599fe12 TG |
21 | if (regs && !consume_entry(cookie, regs->ip, false)) |
22 | return; | |
21b32bbf | 23 | |
49a612c6 JP |
24 | for (unwind_start(&state, task, regs, NULL); !unwind_done(&state); |
25 | unwind_next_frame(&state)) { | |
26 | addr = unwind_get_return_address(&state); | |
3599fe12 | 27 | if (!addr || !consume_entry(cookie, addr, false)) |
49a612c6 JP |
28 | break; |
29 | } | |
49a612c6 | 30 | } |
9745512c | 31 | |
21b32bbf | 32 | /* |
3599fe12 TG |
33 | * This function returns an error if it detects any unreliable features of the |
34 | * stack. Otherwise it guarantees that the stack trace is reliable. | |
35 | * | |
36 | * If the task is not 'current', the caller *must* ensure the task is inactive. | |
21b32bbf | 37 | */ |
3599fe12 TG |
38 | int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, |
39 | void *cookie, struct task_struct *task) | |
af085d90 JP |
40 | { |
41 | struct unwind_state state; | |
42 | struct pt_regs *regs; | |
43 | unsigned long addr; | |
44 | ||
441ccc35 JS |
45 | for (unwind_start(&state, task, NULL, NULL); |
46 | !unwind_done(&state) && !unwind_error(&state); | |
af085d90 JP |
47 | unwind_next_frame(&state)) { |
48 | ||
a9cdbe72 | 49 | regs = unwind_get_entry_regs(&state, NULL); |
af085d90 | 50 | if (regs) { |
441ccc35 JS |
51 | /* Success path for user tasks */ |
52 | if (user_mode(regs)) | |
c5c27a0a | 53 | return 0; |
441ccc35 | 54 | |
af085d90 JP |
55 | /* |
56 | * Kernel mode registers on the stack indicate an | |
57 | * in-kernel interrupt or exception (e.g., preemption | |
58 | * or a page fault), which can make frame pointers | |
59 | * unreliable. | |
60 | */ | |
0c414367 JS |
61 | if (IS_ENABLED(CONFIG_FRAME_POINTER)) |
62 | return -EINVAL; | |
af085d90 JP |
63 | } |
64 | ||
65 | addr = unwind_get_return_address(&state); | |
66 | ||
67 | /* | |
68 | * A NULL or invalid return address probably means there's some | |
69 | * generated code which __kernel_text_address() doesn't know | |
70 | * about. | |
71 | */ | |
17426923 | 72 | if (!addr) |
af085d90 | 73 | return -EINVAL; |
af085d90 | 74 | |
3599fe12 | 75 | if (!consume_entry(cookie, addr, false)) |
af085d90 JP |
76 | return -EINVAL; |
77 | } | |
78 | ||
79 | /* Check for stack corruption */ | |
17426923 | 80 | if (unwind_error(&state)) |
af085d90 | 81 | return -EINVAL; |
af085d90 | 82 | |
af085d90 JP |
83 | return 0; |
84 | } | |
85 | ||
02b67518 TE |
86 | /* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */ |
87 | ||
c9cf4dbb | 88 | struct stack_frame_user { |
02b67518 | 89 | const void __user *next_fp; |
8d7c6a96 | 90 | unsigned long ret_addr; |
02b67518 TE |
91 | }; |
92 | ||
c9cf4dbb | 93 | static int |
c8e3dd86 AV |
94 | copy_stack_frame(const struct stack_frame_user __user *fp, |
95 | struct stack_frame_user *frame) | |
02b67518 TE |
96 | { |
97 | int ret; | |
98 | ||
2af7c857 | 99 | if (__range_not_ok(fp, sizeof(*frame), TASK_SIZE)) |
02b67518 TE |
100 | return 0; |
101 | ||
102 | ret = 1; | |
103 | pagefault_disable(); | |
c8e3dd86 AV |
104 | if (__get_user(frame->next_fp, &fp->next_fp) || |
105 | __get_user(frame->ret_addr, &fp->ret_addr)) | |
02b67518 TE |
106 | ret = 0; |
107 | pagefault_enable(); | |
108 | ||
109 | return ret; | |
110 | } | |
111 | ||
3599fe12 TG |
112 | void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie, |
113 | const struct pt_regs *regs) | |
8d7c6a96 | 114 | { |
8d7c6a96 TE |
115 | const void __user *fp = (const void __user *)regs->bp; |
116 | ||
3599fe12 TG |
117 | if (!consume_entry(cookie, regs->ip, false)) |
118 | return; | |
8d7c6a96 | 119 | |
3599fe12 | 120 | while (1) { |
c9cf4dbb | 121 | struct stack_frame_user frame; |
8d7c6a96 TE |
122 | |
123 | frame.next_fp = NULL; | |
124 | frame.ret_addr = 0; | |
125 | if (!copy_stack_frame(fp, &frame)) | |
126 | break; | |
127 | if ((unsigned long)fp < regs->sp) | |
128 | break; | |
cbf5b73d ET |
129 | if (!frame.ret_addr) |
130 | break; | |
131 | if (!consume_entry(cookie, frame.ret_addr, false)) | |
8d7c6a96 TE |
132 | break; |
133 | fp = frame.next_fp; | |
134 | } | |
135 | } | |
136 |