/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			long ______r;					\
			static struct ftrace_likely_data		\
				__aligned(4)				\
				__section("_ftrace_annotated_branch")	\
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same. This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif
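
/*
 * Example (illustrative; the call site below is assumed, not part of
 * this header): callers wrap conditions whose outcome is heavily
 * biased one way, e.g.
 *
 *	ptr = kmalloc(size, GFP_KERNEL);
 *	if (unlikely(!ptr))
 *		return -ENOMEM;
 *
 * The !!(x) normalizes any scalar truth value to 0 or 1 before it is
 * handed to __builtin_expect(), so the hint also works for pointers.
 */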

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )

#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))

#define __trace_if_value(cond) ({			\
	static struct ftrace_branch_data		\
		__aligned(4)				\
		__section("_ftrace_branch")		\
		__if_trace = {				\
			.func = __func__,		\
			.file = __FILE__,		\
			.line = __LINE__,		\
		};					\
	(cond) ?					\
		(__if_trace.miss_hit[1]++,1) :		\
		(__if_trace.miss_hit[0]++,0);		\
})
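
/*
 * Illustrative expansion (assumed call site, not from this header):
 * with the if() redefinition above in effect,
 *
 *	if (x & FLAG)
 *		do_thing();
 *
 * becomes if ( __trace_if_var( !!(x & FLAG) ) ): each run-time
 * evaluation increments __if_trace.miss_hit[1] (condition true) or
 * miss_hit[0] (condition false), while compile-time constant
 * conditions skip the counters via __builtin_constant_p().
 */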

#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
# define likely_notrace(x)	likely(x)
# define unlikely_notrace(x)	unlikely(x)
#endif

/* Optimization barrier */
#ifndef barrier
/* The "volatile" is due to gcc bugs */
# define barrier() __asm__ __volatile__("": : :"memory")
#endif
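
/*
 * Example (illustrative; 'done' is an assumed shared variable): the
 * "memory" clobber makes the compiler assume all memory may have
 * changed across the asm, so the flag is reloaded on every iteration
 * instead of being cached in a register:
 *
 *	while (!done)
 *		barrier();
 */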

#ifndef barrier_data
/*
 * This version is needed, e.g., to prevent dead store elimination on
 * @ptr, where gcc and llvm may behave differently when otherwise using
 * normal barrier(): while gcc behavior gets along with a normal
 * barrier(), llvm needs an explicit input variable to be assumed
 * clobbered. The issue is as follows: while the inline asm might
 * access any memory it wants, the compiler could have fit all of
 * @ptr into registers instead, and since @ptr never escaped from
 * them, it could prove that the inline asm wasn't touching any of
 * it. This version works well with both compilers, i.e. we're telling
 * the compiler that the inline asm absolutely may see the contents
 * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
 */
# define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
#endif
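
/*
 * Example (illustrative; 'key' is an assumed local buffer): scrubbing
 * sensitive data. Without the barrier, the compiler may delete the
 * memset() as a dead store because 'key' is never read again;
 * barrier_data() tells it the asm may read @key, keeping the stores:
 *
 *	memset(key, 0, sizeof(key));
 *	barrier_data(key);
 */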

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__ based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define __stringify_label(n) #n

#define __annotate_reachable(c) ({					\
	asm volatile(__stringify_label(c) ":\n\t"			\
		     ".pushsection .discard.reachable\n\t"		\
		     ".long " __stringify_label(c) "b - .\n\t"		\
		     ".popsection\n\t");				\
})
#define annotate_reachable() __annotate_reachable(__COUNTER__)

#define __annotate_unreachable(c) ({					\
	asm volatile(__stringify_label(c) ":\n\t"			\
		     ".pushsection .discard.unreachable\n\t"		\
		     ".long " __stringify_label(c) "b - .\n\t"		\
		     ".popsection\n\t");				\
})
#define annotate_unreachable() __annotate_unreachable(__COUNTER__)

#define ASM_UNREACHABLE							\
	"999:\n\t"							\
	".pushsection .discard.unreachable\n\t"				\
	".long 999b - .\n\t"						\
	".popsection\n\t"

/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(".rodata..c_jump_table")

#else
#define annotate_reachable()
#define annotate_unreachable()
#define __annotate_jump_table
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do {		\
	annotate_unreachable();		\
	__builtin_unreachable();	\
} while (0)
#endif
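
/*
 * Example (illustrative; names are assumed): telling the compiler, and
 * objtool, that control cannot fall out of an exhaustive dispatch:
 *
 *	switch (mode) {
 *	case MODE_A: return do_a();
 *	case MODE_B: return do_b();
 *	}
 *	unreachable();
 */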

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example, an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__attribute__((__section__("___kentry+" #sym)))		\
	= (unsigned long)&sym;
#endif
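
/*
 * Example (illustrative; the symbol is hypothetical): keeping a handler
 * alive even though no C code references it by name, because it is
 * reached through a hardware vector table:
 *
 *	void my_vector_handler(void);
 *	KENTRY(my_vector_handler);
 */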

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
	({ unsigned long __ptr;					\
	   __ptr = (unsigned long) (ptr);			\
	   (typeof(ptr)) (__ptr + (off)); })
#endif

#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var)					\
	__asm__ ("" : "=r" (var) : "0" (var))
#endif
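
/*
 * Example (illustrative; names are assumed): stopping the optimizer
 * from specializing on a value it could otherwise prove constant,
 * e.g. in timing-sensitive code:
 *
 *	int idx = 0;
 *	OPTIMIZER_HIDE_VAR(idx);
 *	val = table[idx];
 *
 * After the asm, the compiler can no longer assume idx == 0.
 */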

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif

/**
 * data_race - mark an expression as containing intentional data races
 *
 * This data_race() macro is useful for situations in which data races
 * should be forgiven. One example is diagnostic code that accesses
 * shared variables but is not a part of the core synchronization design.
 *
 * This macro *does not* affect normal code generation, but is a hint
 * to tooling that data races here are to be ignored.
 */
#define data_race(expr)						\
({								\
	__unqual_scalar_typeof(({ expr; })) __v = ({		\
		__kcsan_disable_current();			\
		expr;						\
	});							\
	__kcsan_enable_current();				\
	__v;							\
})
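
/*
 * Example (illustrative; the struct field is assumed): reading a shared
 * counter purely for diagnostics, where a racy, possibly stale value is
 * acceptable and KCSAN should not complain:
 *
 *	pr_debug("pending=%lu\n", data_race(q->pending));
 */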

/*
 * With CONFIG_CFI_CLANG, the compiler replaces function addresses in
 * instrumented C code with jump table addresses. Architectures that
 * support CFI can define this macro to return the actual function address
 * when needed.
 */
#ifndef function_nocfi
#define function_nocfi(x) (x)
#endif

#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define __ADDRESSABLE(sym)					\
	static void * __section(".discard.addressable") __used	\
		__UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)&sym;

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off: the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}
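
/*
 * Example (illustrative; the table is assumed): each entry of a
 * relative-offset table stores (target - &entry), so adding the entry's
 * own address back in recovers the absolute pointer:
 *
 *	const int *entry = &offsets[i];
 *	void *target = offset_to_ptr(entry);
 */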

#endif /* __ASSEMBLY__ */

/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
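
/*
 * Example: ARRAY_SIZE() in <linux/kernel.h> uses this to reject plain
 * pointers at compile time:
 *
 *	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
 */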

/*
 * This is needed in functions which generate the stack canary, see
 * arch/x86/kernel/smpboot.c::start_secondary() for an example.
 */
#define prevent_tail_call_optimization()	mb()

#include <asm/rwonce.h>

#endif /* __LINUX_COMPILER_H */