/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
 * to disable branch tracing on a per file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			long ______r;					\
			static struct ftrace_likely_data		\
				__aligned(4)				\
				__section("_ftrace_annotated_branch")	\
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same. This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
	if (__builtin_constant_p(!!(cond)) ? !!(cond) :			\
	({								\
		int ______r;						\
		static struct ftrace_branch_data			\
			__aligned(4)					\
			__section("_ftrace_branch")			\
			______f = {					\
				.func = __func__,			\
				.file = __FILE__,			\
				.line = __LINE__,			\
			};						\
		______r = !!(cond);					\
		______r ? ______f.miss_hit[1]++ : ______f.miss_hit[0]++;\
		______r;						\
	}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif
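
/*
 * Illustrative sketch of likely()/unlikely() usage (function and variable
 * names are invented for this example): the macros only pass a branch
 * prediction hint to the compiler, so the code must remain correct even if
 * the hint is wrong.
 *
 *	static int example_copy(void *dst, const void *src, size_t len)
 *	{
 *		if (unlikely(!dst || !src))
 *			return -EINVAL;		// rare error path
 *		memcpy(dst, src, len);		// expected hot path
 *		return 0;
 *	}
 */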

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif
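
/*
 * Illustrative sketch (helper name invented for this example): barrier()
 * stops the compiler from caching values or moving memory accesses across
 * the call, and barrier_data(ptr) additionally makes the compiler assume
 * the data behind 'ptr' is still needed, so a preceding clear cannot be
 * discarded as a dead store:
 *
 *	static void example_wipe(void *buf, size_t len)
 *	{
 *		memset(buf, 0, len);
 *		barrier_data(buf);	// keep the memset() of 'buf'
 *	}
 */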

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__ based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define annotate_reachable() ({						\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.reachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define annotate_unreachable() ({					\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.unreachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define ASM_UNREACHABLE							\
	"999:\n\t"							\
	".pushsection .discard.unreachable\n\t"				\
	".long 999b - .\n\t"						\
	".popsection\n\t"
#else
#define annotate_reachable()
#define annotate_unreachable()
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do {		\
	annotate_unreachable();		\
	__builtin_unreachable();	\
} while (0)
#endif
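
/*
 * Illustrative sketch of unreachable() usage (the enum and function are
 * invented for this example): it tells the compiler that control flow
 * cannot reach this point, e.g. after a switch that covers every value.
 *
 *	enum example_mode { MODE_A, MODE_B };
 *
 *	static int example_cost(enum example_mode m)
 *	{
 *		switch (m) {
 *		case MODE_A:
 *			return 1;
 *		case MODE_B:
 *			return 2;
 *		}
 *		unreachable();	// every enum value is handled above
 *	}
 */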

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__section("___kentry" "+" #sym )			\
	= (unsigned long)&sym;
#endif
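
/*
 * Illustrative sketch of KENTRY() usage (the symbol name is invented for
 * this example): it keeps a symbol alive even though nothing references it
 * by name from C, e.g. an entry point that is only located from asm or a
 * generated table.
 *
 *	void example_vector_entry(void);	// referenced only from asm
 *	KENTRY(example_vector_entry)
 */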

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif

#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var)						\
	__asm__ ("" : "=r" (var) : "0" (var))
#endif
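
/*
 * Illustrative sketch of OPTIMIZER_HIDE_VAR() (variable names invented for
 * this example): once hidden, the compiler can no longer use its knowledge
 * of the value to fold code away.
 *
 *	void *p = q;
 *	OPTIMIZER_HIDE_VAR(p);	// compiler "forgets" that p == q
 *	if (p == q)		// now a genuine run-time comparison
 *		...
 */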

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
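
/*
 * Illustrative expansion of this fallback: on line 42 of a file,
 * __UNIQUE_ID(foo) pastes into the identifier __UNIQUE_ID_foo42. It is only
 * "not quite" unique because two expansions on the same source line (for
 * instance from within one macro) still collide.
 */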

#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE						\
({									\
	switch (size) {							\
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
	default:							\
		barrier();						\
		__builtin_memcpy((void *)res, (const void *)p, size);	\
		barrier();						\
	}								\
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * We can't declare the function 'inline' because __no_sanitize_address
 * conflicts with inlining. Attempting to inline it may cause a build failure.
 *	https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif

static __no_kasan_or_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 * particular ordering. One way to make the compiler aware of ordering is to
 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 * statements.
 *
 * These two macros will also work on aggregate data types like structs or
 * unions. If the size of the accessed data type exceeds the word size of
 * the machine (e.g., 32 bits or 64 bits), READ_ONCE() and WRITE_ONCE() will
 * fall back to memcpy(). There are at least two memcpy()s: one for the
 * __builtin_memcpy() and one for the macro's copy of the variable '__u'
 * allocated on the stack.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */
#include <asm/barrier.h>
#include <linux/kasan-checks.h>

#define __READ_ONCE(x, check)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	if (check)							\
		__read_once_size(&(x), __u.__c, sizeof(x));		\
	else								\
		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
	smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
	__u.__val;							\
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

static __no_kasan_or_inline
unsigned long read_word_at_a_time(const void *addr)
{
	kasan_check_read(addr, 1);
	return *(unsigned long *)addr;
}

#define WRITE_ONCE(x, val) \
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__val = (__force typeof(x)) (val) }; \
	__write_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})
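
/*
 * Illustrative sketch of the first use case described above (variable and
 * function names are invented for this example): a flag shared between
 * process context and an interrupt handler, where each access must happen
 * exactly once per statement but no further ordering is needed.
 *
 *	static int example_flag;
 *
 *	void example_irq_handler(void)		// irq context
 *	{
 *		WRITE_ONCE(example_flag, 1);
 *	}
 *
 *	void example_wait(void)			// process context
 *	{
 *		while (!READ_ONCE(example_flag))
 *			cpu_relax();
 *	}
 */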

#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define __ADDRESSABLE(sym) \
	static void * __section(".discard.addressable") __used \
		__PASTE(__addressable_##sym, __LINE__) = (void *)&sym;
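
/*
 * Illustrative sketch of __ADDRESSABLE() (the function name is invented for
 * this example): a static function referenced only from inline assembly has
 * no C-visible reference, so the compiler may drop it; taking its address
 * in a __used pointer forces the symbol to be emitted.
 *
 *	static void example_asm_target(void) { }
 *	__ADDRESSABLE(example_asm_target)
 */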

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off:	the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}
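
/*
 * Illustrative note on the math above: if a table entry 'rel' stores the
 * 32-bit value (target_address - &rel), then offset_to_ptr(&rel) returns
 * target_address again. This is how position-independent, self-relative
 * reference tables are walked.
 */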

#endif /* __ASSEMBLY__ */

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
#endif

#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (!(condition))					\
			prefix ## suffix();				\
	} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
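
/*
 * Illustrative sketch of compiletime_assert(): the condition must be a
 * compile-time constant; with optimization enabled, a false condition
 * leaves a call to an undefined function in the object file and the build
 * fails with the supplied message.
 *
 *	compiletime_assert(sizeof(long) >= 4,
 *			   "long must be at least 32 bits");
 */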

#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")

/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
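
/*
 * Illustrative sketch of __must_be_array(): this is the trick used by
 * ARRAY_SIZE() in <linux/kernel.h>; passing a pointer instead of an array
 * makes __same_type() true and BUILD_BUG_ON_ZERO() breaks the build.
 *
 *	int a[8];
 *	int *p = a;
 *	(void)__must_be_array(a);	// OK, evaluates to 0
 *	(void)__must_be_array(p);	// build error
 */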

#endif /* __LINUX_COMPILER_H */