/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_STACKTRACE_H
#define __ASM_STACKTRACE_H

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>

#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/sdei.h>

struct stackframe {
	unsigned long fp;
	unsigned long pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int graph;
#endif
};

enum stack_type {
	STACK_TYPE_UNKNOWN,
	STACK_TYPE_TASK,
	STACK_TYPE_IRQ,
	STACK_TYPE_OVERFLOW,
	STACK_TYPE_SDEI_NORMAL,
	STACK_TYPE_SDEI_CRITICAL,
};

struct stack_info {
	unsigned long low;
	unsigned long high;
	enum stack_type type;
};

extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
			    int (*fn)(struct stackframe *, void *), void *data);
extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk);

DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);

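/*
 * Illustrative sketch (editorial addition, not part of the upstream header):
 * one plausible way a caller could seed a struct stackframe from its own
 * frame record and walk it with walk_stackframe(). The names
 * stacktrace_example_count() and stacktrace_example_depth() are hypothetical;
 * real callers live in arch/arm64/kernel/stacktrace.c.
 */
static inline int stacktrace_example_count(struct stackframe *frame, void *data)
{
	(*(unsigned int *)data)++;
	return 0;	/* a non-zero return would stop walk_stackframe() */
}

static inline unsigned int stacktrace_example_depth(void)
{
	unsigned int depth = 0;
	struct stackframe frame;

	frame.fp = (unsigned long)__builtin_frame_address(0);
	frame.pc = (unsigned long)stacktrace_example_depth;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	frame.graph = 0;	/* graph-tracer cursor; exact seed varies by kernel version */
#endif

	walk_stackframe(current, &frame, stacktrace_example_count, &depth);

	return depth;
}
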
static inline bool on_irq_stack(unsigned long sp,
				struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
	unsigned long high = low + IRQ_STACK_SIZE;

	if (!low)
		return false;

	if (sp < low || sp >= high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = STACK_TYPE_IRQ;
	}

	return true;
}

static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp,
				 struct stack_info *info)
{
	unsigned long low = (unsigned long)task_stack_page(tsk);
	unsigned long high = low + THREAD_SIZE;

	if (sp < low || sp >= high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = STACK_TYPE_TASK;
	}

	return true;
}

#ifdef CONFIG_VMAP_STACK
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);

static inline bool on_overflow_stack(unsigned long sp,
				     struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
	unsigned long high = low + OVERFLOW_STACK_SIZE;

	if (sp < low || sp >= high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = STACK_TYPE_OVERFLOW;
	}

	return true;
}
#else
static inline bool on_overflow_stack(unsigned long sp,
				     struct stack_info *info) { return false; }
#endif

/*
 * We can only safely access per-cpu stacks from current in a non-preemptible
 * context.
 */
static inline bool on_accessible_stack(struct task_struct *tsk,
				       unsigned long sp,
				       struct stack_info *info)
{
	if (on_task_stack(tsk, sp, info))
		return true;
	if (tsk != current || preemptible())
		return false;
	if (on_irq_stack(sp, info))
		return true;
	if (on_overflow_stack(sp, info))
		return true;
	if (on_sdei_stack(sp, info))
		return true;

	return false;
}

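/*
 * Illustrative sketch (editorial addition, not part of the upstream header):
 * roughly how an unwinder validates a frame pointer with
 * on_accessible_stack() before dereferencing the AAPCS64 frame record it
 * points at. The name stacktrace_example_step() is hypothetical; the real
 * logic is unwind_frame() in arch/arm64/kernel/stacktrace.c, which also
 * wraps the loads in READ_ONCE_NOCHECK().
 */
static inline int stacktrace_example_step(struct task_struct *tsk,
					  struct stackframe *frame)
{
	unsigned long fp = frame->fp;

	/* Frame records are 16-byte aligned: saved fp at +0, saved lr at +8. */
	if (fp & 0xf)
		return -EINVAL;

	if (!on_accessible_stack(tsk, fp, NULL))
		return -EINVAL;

	frame->fp = *(unsigned long *)fp;
	frame->pc = *(unsigned long *)(fp + 8);

	return 0;
}
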
#endif	/* __ASM_STACKTRACE_H */