Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
1da177e4 LT | 2 | #ifndef __LINUX_COMPILER_H |
| 3 | #define __LINUX_COMPILER_H |
| 4 | |
d1515582 | 5 | #include <linux/compiler_types.h> |
1da177e4 | 6 | |
d1515582 | 7 | #ifndef __ASSEMBLY__ |
6f33d587 | 8 | |
1da177e4 LT | 9 | #ifdef __KERNEL__ |
| 10 | |
2ed84eeb SR | 11 | /* |
| 12 | * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code |
| 13 | * to disable branch tracing on a per-file basis. |
| 14 | */ |
d9ad8bc0 BVA | 15 | #if defined(CONFIG_TRACE_BRANCH_PROFILING) \ |
| 16 | && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__) |
134e6a03 | 17 | void ftrace_likely_update(struct ftrace_likely_data *f, int val, |
d45ae1f7 | 18 | int expect, int is_constant); |
1f0d69a9 SR | 19 | |
| 20 | #define likely_notrace(x) __builtin_expect(!!(x), 1) |
| 21 | #define unlikely_notrace(x) __builtin_expect(!!(x), 0) |
| 22 | |
d45ae1f7 | 23 | #define __branch_check__(x, expect, is_constant) ({ \ |
cf5dab54 | 24 | long ______r; \ |
134e6a03 | 25 | static struct ftrace_likely_data \ |
1f0d69a9 | 26 | __attribute__((__aligned__(4))) \ |
45b79749 | 27 | __attribute__((section("_ftrace_annotated_branch"))) \ |
1f0d69a9 | 28 | ______f = { \ |
134e6a03 SRV | 29 | .data.func = __func__, \ |
| 30 | .data.file = __FILE__, \ |
| 31 | .data.line = __LINE__, \ |
1f0d69a9 | 32 | }; \ |
d45ae1f7 SRV | 33 | ______r = __builtin_expect(!!(x), expect); \ |
| 34 | ftrace_likely_update(&______f, ______r, \ |
| 35 | expect, is_constant); \ |
1f0d69a9 SR | 36 | ______r; \ |
| 37 | }) |
| 38 | |
| 39 | /* |
| 40 | * Using __builtin_constant_p(x) to ignore cases where the return |
| 41 | * value is always the same. This idea is taken from a similar patch |
| 42 | * written by Daniel Walker. |
| 43 | */ |
| 44 | # ifndef likely |
d45ae1f7 | 45 | # define likely(x) (__branch_check__(x, 1, __builtin_constant_p(x))) |
1f0d69a9 SR | 46 | # endif |
| 47 | # ifndef unlikely |
d45ae1f7 | 48 | # define unlikely(x) (__branch_check__(x, 0, __builtin_constant_p(x))) |
1f0d69a9 | 49 | # endif |
2bcd521a SR | 50 | |
| 51 | #ifdef CONFIG_PROFILE_ALL_BRANCHES |
| 52 | /* |
| 53 | * "Define 'is'", Bill Clinton |
| 54 | * "Define 'if'", Steven Rostedt |
| 55 | */ |
ab3c9c68 LT | 56 | #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) ) |
| 57 | #define __trace_if(cond) \ |
b33c8ff4 | 58 | if (__builtin_constant_p(!!(cond)) ? !!(cond) : \ |
2bcd521a SR | 59 | ({ \ |
| 60 | int ______r; \ |
| 61 | static struct ftrace_branch_data \ |
| 62 | __attribute__((__aligned__(4))) \ |
| 63 | __attribute__((section("_ftrace_branch"))) \ |
| 64 | ______f = { \ |
| 65 | .func = __func__, \ |
| 66 | .file = __FILE__, \ |
| 67 | .line = __LINE__, \ |
| 68 | }; \ |
| 69 | ______r = !!(cond); \ |
97e7e4f3 | 70 | ______f.miss_hit[______r]++; \ |
2bcd521a SR | 71 | ______r; \ |
| 72 | })) |
| 73 | #endif /* CONFIG_PROFILE_ALL_BRANCHES */ |
| 74 | |
1f0d69a9 SR | 75 | #else |
| 76 | # define likely(x) __builtin_expect(!!(x), 1) |
| 77 | # define unlikely(x) __builtin_expect(!!(x), 0) |
| 78 | #endif |
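As a usage sketch (hypothetical driver-style code, not part of this header): likely()/unlikely() wrap __builtin_expect() so GCC lays the expected path out as the fall-through, and, when CONFIG_TRACE_BRANCH_PROFILING is enabled, the __branch_check__() machinery above additionally records how often each annotation was right.

```c
#include <linux/compiler.h>
#include <linux/errno.h>

struct packet;

struct pkt_queue {
	unsigned int space;		/* free slots remaining */
	struct packet *slots[64];
};

/* Hypothetical enqueue helper: error paths are annotated unlikely(),
 * the common "room available" path likely(). */
static int pkt_queue_add(struct pkt_queue *q, struct packet *p)
{
	if (unlikely(!p))		/* caller bug, should almost never happen */
		return -EINVAL;

	if (likely(q->space > 0)) {	/* hot path */
		q->slots[--q->space] = p;
		return 0;
	}

	return -ENOSPC;			/* rare slow path */
}
```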
1da177e4 LT | 79 | |
| 80 | /* Optimization barrier */ |
| 81 | #ifndef barrier |
| 82 | # define barrier() __memory_barrier() |
| 83 | #endif |
| 84 | |
7829fb09 DB | 85 | #ifndef barrier_data |
| 86 | # define barrier_data(ptr) barrier() |
| 87 | #endif |
| 88 | |
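barrier() forces the compiler to assume memory may have changed across the call; barrier_data(ptr) additionally marks the pointed-to bytes as live. A minimal sketch of why that matters, modelled on the kernel's memzero_explicit() in lib/string.c:

```c
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/types.h>

/* Without the barrier, GCC may delete the memset() of a buffer that is
 * about to go out of scope (dead-store elimination). barrier_data(s)
 * tells the compiler the memory behind s is still used, so the wipe of
 * sensitive data survives optimization. */
static inline void wipe_secret(void *s, size_t count)
{
	memset(s, 0, count);
	barrier_data(s);
}
```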
a1fa7ffe AB | 89 | /* workaround for GCC PR82365 if needed */ |
| 90 | #ifndef barrier_before_unreachable |
| 91 | # define barrier_before_unreachable() do { } while (0) |
| 92 | #endif |
| 93 | |
38938c87 | 94 | /* Unreachable code */ |
649ea4d5 | 95 | #ifdef CONFIG_STACK_VALIDATION |
d0c2e691 JP | 96 | /* |
| 97 | * These macros help objtool understand GCC code flow for unreachable code. |
| 98 | * The __COUNTER__ based labels are a hack to make each instance of the macros |
| 99 | * unique, to convince GCC not to merge duplicate inline asm statements. |
| 100 | */ |
649ea4d5 | 101 | #define annotate_reachable() ({ \ |
10259821 JP | 102 | asm volatile("%c0:\n\t" \ |
| 103 | ".pushsection .discard.reachable\n\t" \ |
| 104 | ".long %c0b - .\n\t" \ |
| 105 | ".popsection\n\t" : : "i" (__COUNTER__)); \ |
649ea4d5 JP | 106 | }) |
| 107 | #define annotate_unreachable() ({ \ |
10259821 JP | 108 | asm volatile("%c0:\n\t" \ |
| 109 | ".pushsection .discard.unreachable\n\t" \ |
| 110 | ".long %c0b - .\n\t" \ |
| 111 | ".popsection\n\t" : : "i" (__COUNTER__)); \ |
649ea4d5 JP | 112 | }) |
| 113 | #define ASM_UNREACHABLE \ |
| 114 | "999:\n\t" \ |
| 115 | ".pushsection .discard.unreachable\n\t" \ |
| 116 | ".long 999b - .\n\t" \ |
| 117 | ".popsection\n\t" |
| 118 | #else |
| 119 | #define annotate_reachable() |
| 120 | #define annotate_unreachable() |
| 121 | #endif |
| 122 | |
aa5d1b81 KC | 123 | #ifndef ASM_UNREACHABLE |
| 124 | # define ASM_UNREACHABLE |
| 125 | #endif |
38938c87 | 126 | #ifndef unreachable |
649ea4d5 | 127 | # define unreachable() do { annotate_reachable(); do { } while (1); } while (0) |
38938c87 DD | 128 | #endif |
| 129 | |
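unreachable() (typically mapped to __builtin_unreachable() in compiler-gcc.h, with the fallback loop above otherwise) marks paths the programmer knows can never execute, and annotate_unreachable() lets objtool agree with GCC about it. A hypothetical sketch, not taken from the kernel:

```c
#include <linux/compiler.h>

enum xfer_dir { XFER_IN, XFER_OUT };

/* Every enum value returns above, so control never reaches the end of
 * the function; unreachable() suppresses the "control reaches end of
 * non-void function" warning and drops the dead return path. */
static const char *xfer_dir_name(enum xfer_dir dir)
{
	switch (dir) {
	case XFER_IN:
		return "in";
	case XFER_OUT:
		return "out";
	}
	unreachable();
}
```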
b67067f1 NP | 130 | /* |
| 131 | * KENTRY - kernel entry point |
| 132 | * This can be used to annotate symbols (functions or data) that are used |
| 133 | * without their linker symbol being referenced explicitly. For example, |
| 134 | * interrupt vector handlers, or functions in the kernel image that are found |
| 135 | * programmatically. |
| 136 | * |
| 137 | * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those |
| 138 | * are handled in their own way (with KEEP() in linker scripts). |
| 139 | * |
| 140 | * KENTRY can be avoided if the symbols in question are marked as KEEP() in the |
| 141 | * linker script. For example, an architecture could KEEP() its entire |
| 142 | * boot/exception vector code rather than annotate each function and data. |
| 143 | */ |
| 144 | #ifndef KENTRY |
| 145 | # define KENTRY(sym) \ |
| 146 | extern typeof(sym) sym; \ |
| 147 | static const unsigned long __kentry_##sym \ |
| 148 | __used \ |
| 149 | __attribute__((section("___kentry" "+" #sym ), used)) \ |
| 150 | = (unsigned long)&sym; |
| 151 | #endif |
| 152 | |
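A minimal usage sketch (hypothetical symbol name): KENTRY() plants a __used pointer to the symbol in a ___kentry+<sym> section, so a link with section garbage collection keeps a handler that is only ever reached through a hardware vector table.

```c
#include <linux/compiler.h>

/* Hypothetical low-level entry point, defined in assembly and reached
 * only via the exception vector table, never called from C. */
void early_trap_entry(void);

/* Keep it (and everything it pulls in) alive across --gc-sections. */
KENTRY(early_trap_entry);
```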
1da177e4 LT | 153 | #ifndef RELOC_HIDE |
| 154 | # define RELOC_HIDE(ptr, off) \ |
| 155 | ({ unsigned long __ptr; \ |
| 156 | __ptr = (unsigned long) (ptr); \ |
| 157 | (typeof(ptr)) (__ptr + (off)); }) |
| 158 | #endif |
| 159 | |
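RELOC_HIDE() launders the pointer through an unsigned long so the compiler cannot assume the result still addresses the original object. The per-cpu accessors are the classic user; a simplified, hypothetical sketch of that pattern:

```c
#include <linux/compiler.h>

struct cpu_stats {
	unsigned long rx_packets;
	unsigned long tx_packets;
};

/* Hypothetical per-CPU style lookup: base is a link-time address and
 * cpu_offset shifts it into another region of memory. RELOC_HIDE()
 * stops GCC from optimizing on the assumption that base + offset still
 * points into the original cpu_stats object. */
static inline struct cpu_stats *cpu_stats_ptr(struct cpu_stats *base,
					      unsigned long cpu_offset)
{
	return RELOC_HIDE(base, cpu_offset);
}
```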
fe8c8a12 CEB | 160 | #ifndef OPTIMIZER_HIDE_VAR |
| 161 | #define OPTIMIZER_HIDE_VAR(var) barrier() |
| 162 | #endif |
| 163 | |
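OPTIMIZER_HIDE_VAR(var) (a full barrier() in this fallback; an empty asm tied to the variable in compiler-gcc.h) keeps the optimizer from reasoning about a value it could otherwise track. A sketch modelled on the constant-time comparison in crypto/memneq.c:

```c
#include <linux/compiler.h>
#include <linux/types.h>

/* Returns non-zero if the two buffers differ. Hiding 'diff' after each
 * step prevents GCC from short-circuiting the loop once it can prove
 * the result, which would reintroduce a data-dependent timing leak. */
static int memneq_ct(const u8 *a, const u8 *b, size_t n)
{
	u8 diff = 0;
	size_t i;

	for (i = 0; i < n; i++) {
		diff |= a[i] ^ b[i];
		OPTIMIZER_HIDE_VAR(diff);
	}

	return diff != 0;
}
```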
6f33d587 RR | 164 | /* Not-quite-unique ID. */ |
| 165 | #ifndef __UNIQUE_ID |
| 166 | # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__) |
| 167 | #endif |
| 168 | |
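__UNIQUE_ID(prefix) pastes the prefix with __LINE__ in this fallback (compiler-specific headers use __COUNTER__, which is why the comment calls it not quite unique). It lets a macro declare a helper symbol without colliding with another expansion of the same macro. A hypothetical sketch:

```c
#include <linux/compiler.h>

/* Each expansion gets its own static symbol, e.g. __UNIQUE_ID_tag_42,
 * so the macro can be used many times in one translation unit. The
 * macro itself is illustrative, not kernel API. */
#define EMIT_BUILD_TAG(str) \
	static const char __UNIQUE_ID(tag_)[] __used = (str)

EMIT_BUILD_TAG("subsystem A build tag");
EMIT_BUILD_TAG("subsystem B build tag");
```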
230fa253 CB | 169 | #include <uapi/linux/types.h> |
| 170 | |
d976441f AR | 171 | #define __READ_ONCE_SIZE \ |
| 172 | ({ \ |
| 173 | switch (size) { \ |
| 174 | case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \ |
| 175 | case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \ |
| 176 | case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \ |
| 177 | case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \ |
| 178 | default: \ |
| 179 | barrier(); \ |
| 180 | __builtin_memcpy((void *)res, (const void *)p, size); \ |
| 181 | barrier(); \ |
| 182 | } \ |
| 183 | }) |
| 184 | |
| 185 | static __always_inline |
| 186 | void __read_once_size(const volatile void *p, void *res, int size) |
230fa253 | 187 | { |
d976441f AR | 188 | __READ_ONCE_SIZE; |
| 189 | } |
| 190 | |
| 191 | #ifdef CONFIG_KASAN |
| 192 | /* |
f0a2c65d | 193 | * We can't declare the function 'inline' because __no_sanitize_address conflicts |
d976441f AR | 194 | * with inlining. Attempting to inline it may cause a build failure. |
| 195 | * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 |
| 196 | * '__maybe_unused' allows us to avoid defined-but-not-used warnings. |
| 197 | */ |
f0a2c65d | 198 | # define __no_kasan_or_inline __no_sanitize_address __maybe_unused |
d976441f | 199 | #else |
f0a2c65d AR | 200 | # define __no_kasan_or_inline __always_inline |
| 201 | #endif |
| 202 | |
| 203 | static __no_kasan_or_inline |
d976441f AR | 204 | void __read_once_size_nocheck(const volatile void *p, void *res, int size) |
| 205 | { |
| 206 | __READ_ONCE_SIZE; |
230fa253 CB | 207 | } |
| 208 | |
43239cbe | 209 | static __always_inline void __write_once_size(volatile void *p, void *res, int size) |
230fa253 CB | 210 | { |
| 211 | switch (size) { |
| 212 | case 1: *(volatile __u8 *)p = *(__u8 *)res; break; |
| 213 | case 2: *(volatile __u16 *)p = *(__u16 *)res; break; |
| 214 | case 4: *(volatile __u32 *)p = *(__u32 *)res; break; |
230fa253 | 215 | case 8: *(volatile __u64 *)p = *(__u64 *)res; break; |
230fa253 CB | 216 | default: |
| 217 | barrier(); |
| 218 | __builtin_memcpy((void *)p, (const void *)res, size); |
230fa253 CB | 219 | barrier(); |
| 220 | } |
| 221 | } |
| 222 | |
| 223 | /* |
| 224 | * Prevent the compiler from merging or refetching reads or writes. The |
| 225 | * compiler is also forbidden from reordering successive instances of |
b899a850 MR | 226 | * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some |
| 227 | * particular ordering. One way to make the compiler aware of ordering is to |
| 228 | * put the two invocations of READ_ONCE or WRITE_ONCE in different C |
| 229 | * statements. |
230fa253 | 230 | * |
b899a850 MR | 231 | * These two macros will also work on aggregate data types like structs or |
| 232 | * unions. If the size of the accessed data type exceeds the word size of |
| 233 | * the machine (e.g., 32 bits or 64 bits), READ_ONCE() and WRITE_ONCE() will |
| 234 | * fall back to memcpy(). There are at least two memcpy()s: one for the |
| 235 | * __builtin_memcpy() and one for the macro's copy into the stack-allocated |
| 236 | * variable '__u'. |
230fa253 CB | 237 | * |
| 238 | * Their two major use cases are: (1) Mediating communication between |
| 239 | * process-level code and irq/NMI handlers, all running on the same CPU, |
b899a850 | 240 | * and (2) Ensuring that the compiler does not fold, spindle, or otherwise |
230fa253 CB | 241 | * mutilate accesses that either do not require ordering or that interact |
| 242 | * with an explicit memory barrier or atomic instruction that provides the |
| 243 | * required ordering. |
| 244 | */ |
d1515582 | 245 | #include <asm/barrier.h> |
25ea5fd2 | 246 | #include <linux/kasan-checks.h> |
230fa253 | 247 | |
d976441f AR | 248 | #define __READ_ONCE(x, check) \ |
| 249 | ({ \ |
| 250 | union { typeof(x) __val; char __c[1]; } __u; \ |
| 251 | if (check) \ |
| 252 | __read_once_size(&(x), __u.__c, sizeof(x)); \ |
| 253 | else \ |
| 254 | __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \ |
76ebbe78 | 255 | smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \ |
d976441f AR | 256 | __u.__val; \ |
| 257 | }) |
| 258 | #define READ_ONCE(x) __READ_ONCE(x, 1) |
| 259 | |
| 260 | /* |
| 261 | * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need |
| 262 | * to hide memory access from KASAN. |
| 263 | */ |
| 264 | #define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0) |
230fa253 | 265 | |
25ea5fd2 AR | 266 | static __no_kasan_or_inline |
| 267 | unsigned long read_word_at_a_time(const void *addr) |
| 268 | { |
| 269 | kasan_check_read(addr, 1); |
| 270 | return *(unsigned long *)addr; |
| 271 | } |
| 272 | |
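read_word_at_a_time() deliberately loads a full unsigned long while telling KASAN only about the first byte; callers such as the word-at-a-time string helpers may read a few bytes past a NUL terminator but stay inside the source allocation. A hypothetical sketch of that pattern, writing the classic "has a zero byte" bit trick out directly rather than using <asm/word-at-a-time.h>:

```c
#include <linux/compiler.h>
#include <linux/types.h>

/* Length of a NUL-terminated string whose allocation is known to be
 * padded to a sizeof(long) boundary, scanning one word per iteration.
 * The intentional overread past the NUL is why read_word_at_a_time()
 * is used instead of a plain dereference. */
static size_t padded_strlen(const char *s)
{
	const unsigned long ones = (unsigned long)0x0101010101010101ULL;
	const unsigned long highs = (unsigned long)0x8080808080808080ULL;
	size_t len = 0;

	for (;;) {
		unsigned long v = read_word_at_a_time(s + len);

		/* Non-zero iff some byte of v is zero. */
		if ((v - ones) & ~v & highs) {
			while (s[len])
				len++;
			return len;
		}
		len += sizeof(unsigned long);
	}
}
```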
43239cbe | 273 | #define WRITE_ONCE(x, val) \ |
ba33034f CB | 274 | ({ \ |
| 275 | union { typeof(x) __val; char __c[1]; } __u = \ |
| 276 | { .__val = (__force typeof(x)) (val) }; \ |
| 277 | __write_once_size(&(x), __u.__c, sizeof(x)); \ |
| 278 | __u.__val; \ |
| 279 | }) |
230fa253 | 280 | |
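A minimal sketch of use case (1) from the comment above (hypothetical names; task and interrupt context sharing a flag on the same CPU): READ_ONCE()/WRITE_ONCE() stop the compiler from caching the flag in a register, tearing the access, or fusing loads; they do not add CPU memory barriers.

```c
#include <linux/compiler.h>
#include <asm/processor.h>	/* cpu_relax() */

static int done_flag;		/* shared between task and IRQ context */

/* Hypothetical hook run from the interrupt handler. */
static void signal_done(void)
{
	WRITE_ONCE(done_flag, 1);
}

/* Hypothetical poll loop run in task context on the same CPU. Without
 * READ_ONCE() the compiler could hoist the load out of the loop and
 * spin forever on a stale register value. */
static void wait_until_done(void)
{
	while (!READ_ONCE(done_flag))
		cpu_relax();
}
```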
1da177e4 LT | 281 | #endif /* __KERNEL__ */ |
| 282 | |
| 283 | #endif /* __ASSEMBLY__ */ |
| 284 | |
ce595859 GU | 285 | #ifndef __optimize |
| 286 | # define __optimize(level) |
| 287 | #endif |
| 288 | |
9f0cf4ad AV | 289 | /* Compile time object size, -1 for unknown */ |
| 290 | #ifndef __compiletime_object_size |
| 291 | # define __compiletime_object_size(obj) -1 |
| 292 | #endif |
4a312769 AV | 293 | #ifndef __compiletime_warning |
| 294 | # define __compiletime_warning(message) |
| 295 | #endif |
63312b6a AV | 296 | #ifndef __compiletime_error |
| 297 | # define __compiletime_error(message) |
| 298 | #endif |
c361d3e5 | 299 | |
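A hypothetical sketch combining __compiletime_object_size() (compiler hook, -1 when the size is unknown) with __compiletime_error(): if both the destination size and the copy length are known at compile time and the copy provably overflows, the call to the never-defined stub survives dead-code elimination and fails the build. All names below are illustrative, not kernel API.

```c
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/types.h>

/* Never defined; the error attribute fires only if the call survives. */
extern void __my_copy_overflow(void)
	__compiletime_error("copy_checked: destination buffer too small");

static __always_inline void copy_checked(void *dst, const void *src, size_t n)
{
	int sz = __compiletime_object_size(dst);

	/* Relies on optimization folding the condition, much like the
	 * __OPTIMIZE__ guard on __compiletime_assert() below. */
	if (__builtin_constant_p(n) && sz >= 0 && (size_t)sz < n)
		__my_copy_overflow();
	else
		memcpy(dst, src, n);
}
```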
c03567a8 JS | 300 | #ifdef __OPTIMIZE__ |
| 301 | # define __compiletime_assert(condition, msg, prefix, suffix) \ |
9a8ab1c3 | 302 | do { \ |
9a8ab1c3 | 303 | extern void prefix ## suffix(void) __compiletime_error(msg); \ |
4e005327 | 304 | if (!(condition)) \ |
9a8ab1c3 | 305 | prefix ## suffix(); \ |
9a8ab1c3 | 306 | } while (0) |
c03567a8 JS | 307 | #else |
| 308 | # define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0) |
| 309 | #endif |
9a8ab1c3 DS | 310 | |
| 311 | #define _compiletime_assert(condition, msg, prefix, suffix) \ |
| 312 | __compiletime_assert(condition, msg, prefix, suffix) |
| 313 | |
| 314 | /** |
| 315 | * compiletime_assert - break build and emit msg if condition is false |
| 316 | * @condition: a compile-time constant condition to check |
| 317 | * @msg: a message to emit if condition is false |
| 318 | * |
| 319 | * In the tradition of POSIX assert, this macro will break the build if the |
| 320 | * supplied condition is *false*, emitting the supplied error message if the |
| 321 | * compiler has support to do so. |
| 322 | */ |
| 323 | #define compiletime_assert(condition, msg) \ |
| 324 | _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) |
| 325 | |
47933ad4 PZ | 326 | #define compiletime_assert_atomic_type(t) \ |
| 327 | compiletime_assert(__native_word(t), \ |
| 328 | "Need native word sized stores/loads for atomicity.") |
| 329 | |
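Usage sketch (hypothetical struct and variable names): compiletime_assert() only fires when the condition is a compile-time constant and, per the #ifdef __OPTIMIZE__ guard above, only in optimized builds; compiletime_assert_atomic_type() is what the barrier and atomic macros use before relying on a plain load or store being single-copy atomic.

```c
#include <linux/compiler.h>
#include <linux/types.h>

struct wire_header {
	__u32 magic;
	__u32 len;
};

static unsigned long shared_counter;

static void check_assumptions(void)
{
	/* Fail the build if the on-wire layout ever changes size. */
	compiletime_assert(sizeof(struct wire_header) == 8,
			   "wire_header must stay 8 bytes");

	/* Require a native word so a plain store cannot tear. */
	compiletime_assert_atomic_type(shared_counter);
}
```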
1da177e4 | 330 | #endif /* __LINUX_COMPILER_H */ |