// SPDX-License-Identifier: GPL-2.0-or-later
/* Rewritten by Rusty Russell, on the backs of many others...
   Copyright (C) 2001 Rusty Russell, 2002 Rusty Russell IBM.

*/
#include <linux/ftrace.h>
#include <linux/memory.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/filter.h>

#include <asm/sections.h>
#include <linux/uaccess.h>

/*
 * Mutex protecting text section modification (dynamic code patching).
 * Some users need to sleep (e.g. while allocating memory...) while they
 * hold this lock.
 *
 * Note: Also protects SMP-alternatives modification on x86.
 *
 * NOT exported to modules - patching kernel text is a really delicate matter.
 */
DEFINE_MUTEX(text_mutex);
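
/*
 * A minimal sketch (not part of the original file) of how a text_mutex
 * user typically looks: the mutex is held across the whole
 * read-modify-write of a text range. patch_kernel_insn() is a
 * hypothetical helper; real users go through arch primitives such as
 * text_poke() on x86.
 */
#if 0
static int example_patch_call_site(void *site, const void *insn, size_t len)
{
        int ret;

        /* Sleeping while the lock is held is fine: this is a mutex. */
        mutex_lock(&text_mutex);
        ret = patch_kernel_insn(site, insn, len);       /* hypothetical */
        mutex_unlock(&text_mutex);

        return ret;
}
#endif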

extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];

/* Cleared by build time tools if the table is already sorted. */
u32 __initdata __visible main_extable_sort_needed = 1;

/* Sort the kernel's built-in exception table */
void __init sort_main_extable(void)
{
        if (main_extable_sort_needed &&
            &__stop___ex_table > &__start___ex_table) {
                pr_notice("Sorting __ex_table...\n");
                sort_extable(__start___ex_table, __stop___ex_table);
        }
}
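
/*
 * For reference, a minimal sketch of the sort performed above, assuming
 * absolute (non-relative) entries with an 'insn' field; the real
 * sort_extable() lives in lib/extable.c and also handles
 * CONFIG_ARCH_HAS_RELATIVE_EXTABLE.
 */
#if 0
#include <linux/sort.h>

static int cmp_ex(const void *a, const void *b)
{
        const struct exception_table_entry *x = a, *y = b;

        /* Compare rather than subtract, to avoid overflow. */
        if (x->insn > y->insn)
                return 1;
        if (x->insn < y->insn)
                return -1;
        return 0;
}

static void example_sort_extable(struct exception_table_entry *start,
                                 struct exception_table_entry *finish)
{
        sort(start, finish - start, sizeof(struct exception_table_entry),
             cmp_ex, NULL);
}
#endif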

/* Given an address, look for it in the kernel exception table */
const
struct exception_table_entry *search_kernel_exception_table(unsigned long addr)
{
        return search_extable(__start___ex_table,
                              __stop___ex_table - __start___ex_table, addr);
}
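
/*
 * For reference, a sketch of the binary search behind search_extable()
 * (see lib/extable.c), again assuming absolute entries whose 'insn'
 * field holds the faulting instruction address.
 */
#if 0
#include <linux/bsearch.h>

static int cmp_ex_search(const void *key, const void *elt)
{
        const struct exception_table_entry *e = elt;
        unsigned long pc = (unsigned long)key;

        if (pc < e->insn)
                return -1;
        if (pc > e->insn)
                return 1;
        return 0;
}

static const struct exception_table_entry *
example_search_extable(const struct exception_table_entry *base, size_t num,
                       unsigned long value)
{
        return bsearch((void *)value, base, num,
                       sizeof(struct exception_table_entry), cmp_ex_search);
}
#endif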

/* Given an address, look for it in the exception tables. */
const struct exception_table_entry *search_exception_tables(unsigned long addr)
{
        const struct exception_table_entry *e;

        e = search_kernel_exception_table(addr);
        if (!e)
                e = search_module_extables(addr);
        if (!e)
                e = search_bpf_extables(addr);
        return e;
}
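
/*
 * A sketch of the consumer side: an arch fault handler asks whether the
 * faulting instruction has a fixup. How the landing address is derived
 * from 'fixup' is arch-specific (relative vs. absolute entries); the
 * absolute form is assumed here, and example_fixup_exception() is
 * hypothetical.
 */
#if 0
static bool example_fixup_exception(struct pt_regs *regs)
{
        const struct exception_table_entry *e;

        e = search_exception_tables(instruction_pointer(regs));
        if (!e)
                return false;   /* no fixup: a genuine oops */

        instruction_pointer_set(regs, e->fixup);        /* absolute form */
        return true;
}
#endif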

int init_kernel_text(unsigned long addr)
{
        if (addr >= (unsigned long)_sinittext &&
            addr < (unsigned long)_einittext)
                return 1;
        return 0;
}

int notrace core_kernel_text(unsigned long addr)
{
        if (addr >= (unsigned long)_stext &&
            addr < (unsigned long)_etext)
                return 1;

        if (system_state < SYSTEM_RUNNING &&
            init_kernel_text(addr))
                return 1;
        return 0;
}
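
/*
 * For reference, the linker-provided section symbols used by the range
 * checks in this file, as declared in include/asm-generic/sections.h:
 */
#if 0
extern char _stext[], _etext[];                 /* core kernel .text */
extern char _sinittext[], _einittext[];         /* .init.text, freed after boot */
extern char _sdata[], _edata[];                 /* core kernel .data */
#endif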

/**
 * core_kernel_data - tell if addr points to kernel data
 * @addr: address to test
 *
 * Returns true if @addr passed in is from the core kernel data
 * section.
 *
 * Note: On some archs it may return true for core RODATA, and false
 * for others. But will always be true for core RW data.
 */
int core_kernel_data(unsigned long addr)
{
        if (addr >= (unsigned long)_sdata &&
            addr < (unsigned long)_edata)
                return 1;
        return 0;
}

int __kernel_text_address(unsigned long addr)
{
        if (kernel_text_address(addr))
                return 1;
        /*
         * There might be init symbols in saved stacktraces.
         * Give those symbols a chance to be printed in
         * backtraces (such as lockdep traces).
         *
         * Since we are after the module-symbols check, there's
         * no danger of address overlap:
         */
        if (init_kernel_text(addr))
                return 1;
        return 0;
}

int kernel_text_address(unsigned long addr)
{
        bool no_rcu;
        int ret = 1;

        if (core_kernel_text(addr))
                return 1;

        /*
         * If a stack dump happens while RCU is not watching, then
         * RCU needs to be notified so that it starts watching again.
         * This can happen either via tracing that triggers a stack
         * trace, or via a WARN() issued while coming back from idle
         * or while a CPU is coming online or going offline.
         *
         * is_module_text_address() as well as the kprobe slot checks,
         * is_bpf_text_address() and is_bpf_image_address() require
         * RCU to be watching.
         */
        no_rcu = !rcu_is_watching();

        /* Treat this like an NMI as it can happen anywhere */
        if (no_rcu)
                rcu_nmi_enter();

        if (is_module_text_address(addr))
                goto out;
        if (is_ftrace_trampoline(addr))
                goto out;
        if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr))
                goto out;
        if (is_bpf_text_address(addr))
                goto out;
        ret = 0;
out:
        if (no_rcu)
                rcu_nmi_exit();

        return ret;
}
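
/*
 * A sketch of a typical caller: a backtrace printer only tries to
 * symbolize values that plausibly point at code. example_show_trace()
 * is hypothetical; real users include the stack dumping and kallsyms
 * paths.
 */
#if 0
static void example_show_trace(const unsigned long *entries, int nr)
{
        int i;

        for (i = 0; i < nr; i++)
                if (__kernel_text_address(entries[i]))
                        printk("  [<%px>] %pS\n",
                               (void *)entries[i], (void *)entries[i]);
}
#endif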

/*
 * On some architectures (PPC64, IA64) function pointers
 * are actually only tokens to some data that then holds the
 * real function address. As a result, to find if a function
 * pointer is part of the kernel text, we need to do some
 * special dereferencing first.
 */
int func_ptr_is_kernel_text(void *ptr)
{
        unsigned long addr;

        addr = (unsigned long) dereference_function_descriptor(ptr);
        if (core_kernel_text(addr))
                return 1;
        return is_module_text_address(addr);
}
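
/*
 * Illustrative usage, with a hypothetical callback type: on PPC64/IA64
 * 'cb' names a function descriptor rather than code, which is why the
 * dereference above must happen before the text-range check.
 */
#if 0
static bool example_callback_is_text(void (*cb)(void))
{
        return func_ptr_is_kernel_text((void *)cb);
}
#endif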