/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#ifndef __ASSEMBLY__

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __kernel	__attribute__((address_space(0)))
# define __safe		__attribute__((safe))
# define __force	__attribute__((force))
# define __nocast	__attribute__((nocast))
# define __iomem	__attribute__((noderef, address_space(2)))
# define __must_hold(x)	__attribute__((context(x,1,1)))
# define __acquires(x)	__attribute__((context(x,0,1)))
# define __releases(x)	__attribute__((context(x,1,0)))
# define __acquire(x)	__context__(x,1)
# define __release(x)	__context__(x,-1)
# define __cond_lock(x,c)	((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu	__attribute__((noderef, address_space(3)))
# define __rcu		__attribute__((noderef, address_space(4)))
# define __private	__attribute__((noderef))
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
#else /* __CHECKER__ */
# ifdef STRUCTLEAK_PLUGIN
#  define __user __attribute__((user))
# else
#  define __user
# endif
# define __kernel
# define __safe
# define __force
# define __nocast
# define __iomem
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
# define __builtin_warning(x, y...) (1)
# define __must_hold(x)
# define __acquires(x)
# define __releases(x)
# define __acquire(x) (void)0
# define __release(x) (void)0
# define __cond_lock(x,c) (c)
# define __percpu
# define __rcu
# define __private
# define ACCESS_PRIVATE(p, member) ((p)->member)
#endif /* __CHECKER__ */
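
/*
 * Example usage of the sparse lock-context annotations above
 * (illustrative sketch only, not part of this header; 'struct foo' and
 * its lock are hypothetical):
 *
 *	static void foo_lock(struct foo *f) __acquires(&f->lock)
 *	{
 *		spin_lock(&f->lock);
 *	}
 *
 * When run under sparse (__CHECKER__), the context attributes let the
 * checker warn if a function exits with the lock state out of balance.
 */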

/* Indirect macros required for expanded argument pasting, e.g. __LINE__. */
#define ___PASTE(a,b) a##b
#define __PASTE(a,b) ___PASTE(a,b)

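/*
 * Illustrative expansion (not part of this header): the extra level of
 * indirection lets arguments such as __LINE__ expand before pasting,
 * because operands of ## are not macro-expanded.
 *
 *	___PASTE(id_, __LINE__)	->  id___LINE__
 *	__PASTE(id_, __LINE__)	->  id_42	(when used on line 42)
 */
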
#ifdef __KERNEL__

#ifdef __GNUC__
#include <linux/compiler-gcc.h>
#endif

#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
#define notrace __attribute__((hotpatch(0,0)))
#else
#define notrace __attribute__((no_instrument_function))
#endif

/* The Intel compiler defines __GNUC__, so implementations coming from
 * the header files above are overridden here.
 */
#ifdef __INTEL_COMPILER
# include <linux/compiler-intel.h>
#endif

/* The Clang compiler defines __GNUC__, so implementations coming from
 * the header files above are overridden here.
 */
#ifdef __clang__
#include <linux/compiler-clang.h>
#endif

/*
 * Generic compiler-dependent macros required for kernel
 * build go below this comment. Actual compiler/compiler version
 * specific implementations come from the above header files.
 */

struct ftrace_branch_data {
	const char *func;
	const char *file;
	unsigned line;
	union {
		struct {
			unsigned long correct;
			unsigned long incorrect;
		};
		struct {
			unsigned long miss;
			unsigned long hit;
		};
		unsigned long miss_hit[2];
	};
};

struct ftrace_likely_data {
	struct ftrace_branch_data data;
	unsigned long constant;
};

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
		int ______r;						\
		static struct ftrace_likely_data			\
			__attribute__((__aligned__(4)))			\
			__attribute__((section("_ftrace_annotated_branch"))) \
			______f = {					\
			.data.func = __func__,				\
			.data.file = __FILE__,				\
			.data.line = __LINE__,				\
		};							\
		______r = __builtin_expect(!!(x), expect);		\
		ftrace_likely_update(&______f, ______r,			\
				     expect, is_constant);		\
		______r;						\
	})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same. This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
	if (__builtin_constant_p(!!(cond)) ? !!(cond) :			\
	({								\
		int ______r;						\
		static struct ftrace_branch_data			\
			__attribute__((__aligned__(4)))			\
			__attribute__((section("_ftrace_branch")))	\
			______f = {					\
				.func = __func__,			\
				.file = __FILE__,			\
				.line = __LINE__,			\
			};						\
		______r = !!(cond);					\
		______f.miss_hit[______r]++;				\
		______r;						\
	}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif
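
/*
 * Example usage (illustrative, not part of this header): annotate the
 * expected direction of a branch so the compiler lays out the hot path
 * first.
 *
 *	ptr = kmalloc(size, GFP_KERNEL);
 *	if (unlikely(!ptr))
 *		return -ENOMEM;
 */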

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
#define annotate_reachable() ({						\
	asm("%c0:\n\t"							\
	    ".pushsection .discard.reachable\n\t"			\
	    ".long %c0b - .\n\t"					\
	    ".popsection\n\t" : : "i" (__COUNTER__));			\
})
#define annotate_unreachable() ({					\
	asm("%c0:\n\t"							\
	    ".pushsection .discard.unreachable\n\t"			\
	    ".long %c0b - .\n\t"					\
	    ".popsection\n\t" : : "i" (__COUNTER__));			\
})
#define ASM_UNREACHABLE							\
	"999:\n\t"							\
	".pushsection .discard.unreachable\n\t"				\
	".long 999b - .\n\t"						\
	".popsection\n\t"
#else
#define annotate_reachable()
#define annotate_unreachable()
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do { annotate_reachable(); do { } while (1); } while (0)
#endif

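/*
 * Example usage (illustrative; the state names are hypothetical): tell
 * the optimizer that control never reaches past an exhaustive switch.
 *
 *	switch (state) {
 *	case UP:   return do_up();
 *	case DOWN: return do_down();
 *	}
 *	unreachable();
 */
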
/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__attribute__((section("___kentry" "+" #sym ), used))	\
	= (unsigned long)&sym;
#endif

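/*
 * Example usage (illustrative; the symbol name is hypothetical): keep a
 * handler that is referenced only from assembly alive across linker
 * section garbage collection.
 *
 *	void my_vector_entry(void);
 *	KENTRY(my_vector_entry);
 */
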
#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
	({ unsigned long __ptr;					\
	   __ptr = (unsigned long) (ptr);			\
	   (typeof(ptr)) (__ptr + (off)); })
#endif

#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif

#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE						\
({									\
	switch (size) {							\
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
	default:							\
		barrier();						\
		__builtin_memcpy((void *)res, (const void *)p, size);	\
		barrier();						\
	}								\
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * This function is not 'inline' because __no_sanitize_address conflicts
 * with inlining. Attempting to inline it may cause a build failure.
 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
static __no_sanitize_address __maybe_unused
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}
#else
static __always_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}
#endif

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
 * compiler is aware of some particular ordering. One way to make the
 * compiler aware of ordering is to put the two invocations of READ_ONCE,
 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
 *
 * In contrast to ACCESS_ONCE these two macros will also work on aggregate
 * data types like structs or unions. If the size of the accessed data
 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There are at
 * least two memcpy()s: one for the __builtin_memcpy() and one for the
 * macro's copy of the variable '__u' allocated on the stack.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */

#define __READ_ONCE(x, check)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	if (check)							\
		__read_once_size(&(x), __u.__c, sizeof(x));		\
	else								\
		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
	__u.__val;							\
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

#define WRITE_ONCE(x, val)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u =			\
		{ .__val = (__force typeof(x)) (val) };			\
	__write_once_size(&(x), __u.__c, sizeof(x));			\
	__u.__val;							\
})

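/*
 * Example usage (illustrative; 'shared' is a hypothetical structure
 * visible to both sides): a flag set in process context and polled from
 * an interrupt handler on the same CPU.
 *
 *	WRITE_ONCE(shared->ready, 1);		(writer)
 *
 *	while (!READ_ONCE(shared->ready))	(reader)
 *		cpu_relax();
 */
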
#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#ifdef __KERNEL__
/*
 * Allow us to mark functions as 'deprecated' and have gcc emit a nice
 * warning for each use, in hopes of speeding the function's removal.
 * Usage is:
 *		int __deprecated foo(void)
 */
#ifndef __deprecated
# define __deprecated		/* unimplemented */
#endif

#ifdef MODULE
#define __deprecated_for_modules __deprecated
#else
#define __deprecated_for_modules
#endif

#ifndef __must_check
#define __must_check
#endif

#ifndef CONFIG_ENABLE_MUST_CHECK
#undef __must_check
#define __must_check
#endif
#ifndef CONFIG_ENABLE_WARN_DEPRECATED
#undef __deprecated
#undef __deprecated_for_modules
#define __deprecated
#define __deprecated_for_modules
#endif

#ifndef __malloc
#define __malloc
#endif

/*
 * Allow us to avoid 'defined but not used' warnings on functions and data,
 * as well as force them to be emitted to the assembly file.
 *
 * As of gcc 3.4, static functions that are not marked with attribute((used))
 * may be elided from the assembly file. As of gcc 3.4, static data not so
 * marked will not be elided, but this may change in a future gcc version.
 *
 * NOTE: Because distributions shipped with a backported unit-at-a-time
 * compiler in gcc 3.3, we must define __used to be __attribute__((used))
 * for gcc >=3.3 instead of 3.4.
 *
 * In prior versions of gcc, such functions and data would be emitted, but
 * would be warned about except with attribute((unused)).
 *
 * Mark functions that are referenced only in inline assembly as __used so
 * the code is emitted even though it appears to be unreferenced.
 */
#ifndef __used
# define __used			/* unimplemented */
#endif

#ifndef __maybe_unused
# define __maybe_unused		/* unimplemented */
#endif

#ifndef __always_unused
# define __always_unused	/* unimplemented */
#endif

#ifndef noinline
#define noinline
#endif

/*
 * Rather than using noinline to prevent stack consumption, use
 * noinline_for_stack instead. For documentation reasons.
 */
#define noinline_for_stack noinline

#ifndef __always_inline
#define __always_inline inline
#endif

#endif /* __KERNEL__ */

/*
 * From the GCC manual:
 *
 * Many functions do not examine any values except their arguments,
 * and have no effects except the return value.  Basically this is
 * just slightly more strict class than the `pure' attribute above,
 * since function is not allowed to read global memory.
 *
 * Note that a function that has pointer arguments and examines the
 * data pointed to must _not_ be declared `const'.  Likewise, a
 * function that calls a non-`const' function usually must not be
 * `const'.  It does not make sense for a `const' function to return
 * `void'.
 */
#ifndef __attribute_const__
# define __attribute_const__	/* unimplemented */
#endif

#ifndef __designated_init
# define __designated_init
#endif

#ifndef __latent_entropy
# define __latent_entropy
#endif

#ifndef __randomize_layout
# define __randomize_layout __designated_init
#endif

#ifndef __no_randomize_layout
# define __no_randomize_layout
#endif

#ifndef randomized_struct_fields_start
# define randomized_struct_fields_start
# define randomized_struct_fields_end
#endif

/*
 * Tell gcc if a function is cold. The compiler will assume any path
 * directly leading to the call is unlikely.
 */

#ifndef __cold
#define __cold
#endif

/* Simple shorthand for a section definition */
#ifndef __section
# define __section(S) __attribute__ ((__section__(#S)))
#endif

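/*
 * Example usage (illustrative; the section name is hypothetical):
 *
 *	static int boot_flag __section(.init.example);
 *
 * places boot_flag in the ".init.example" linker section. Note that the
 * macro stringifies its argument, so no quotes are needed.
 */
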
#ifndef __visible
#define __visible
#endif

#ifndef __nostackprotector
# define __nostackprotector
#endif

/*
 * Assume alignment of return value.
 */
#ifndef __assume_aligned
#define __assume_aligned(a, ...)
#endif


/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#endif

/* Is this type a native word size -- useful for atomic operations */
#ifndef __native_word
# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#endif

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
/*
 * Sparse complains of variable sized arrays due to the temporary variable in
 * __compiletime_assert. Unfortunately we can't just expand it out to make
 * sparse see a constant array size without breaking compiletime_assert on old
 * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
 */
# ifndef __CHECKER__
#  define __compiletime_error_fallback(condition) \
	do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
# endif
#endif
#ifndef __compiletime_error_fallback
# define __compiletime_error_fallback(condition) do { } while (0)
#endif

#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		bool __cond = !(condition);				\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (__cond)						\
			prefix ## suffix();				\
		__compiletime_error_fallback(__cond);			\
	} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)

#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")

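/*
 * Example usage (illustrative; struct foo_hdr is hypothetical):
 *
 *	compiletime_assert(sizeof(struct foo_hdr) == 64,
 *			   "foo_hdr must remain exactly 64 bytes");
 *
 * breaks the build, with the message where the compiler supports it, if
 * the structure ever changes size.
 */
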
/*
 * Prevent the compiler from merging or refetching accesses. The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering. One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
 * on a union member will work as long as the size of the member matches the
 * size of the union and the size is smaller than word size.
 *
 * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
 * between process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 *
 * If possible use READ_ONCE()/WRITE_ONCE() instead.
 */
#define __ACCESS_ONCE(x) ({ \
	 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
	(volatile typeof(x) *)&(x); })
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))

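/*
 * Example usage (illustrative; 'stop' is a hypothetical scalar flag):
 *
 *	while (!ACCESS_ONCE(stop))
 *		cpu_relax();
 *
 * Prefer READ_ONCE()/WRITE_ONCE() in new code, as noted above.
 */
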
/**
 * lockless_dereference() - safely load a pointer for later dereference
 * @p: The pointer to load
 *
 * Similar to rcu_dereference(), but for situations where the pointed-to
 * object's lifetime is managed by something other than RCU.  That
 * "something other" might be reference counting or simple immortality.
 *
 * The seemingly unused variable ___typecheck_p validates that @p is
 * indeed a pointer type by using a pointer to typeof(*p) as the type.
 * Taking a pointer to typeof(*p) again is needed in case p is void *.
 */
#define lockless_dereference(p) \
({ \
	typeof(p) _________p1 = READ_ONCE(p); \
	typeof(*(p)) *___typecheck_p __maybe_unused; \
	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
	(_________p1); \
})

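/*
 * Example usage (illustrative; 'global_conf' is a hypothetical
 * refcounted pointer published by another thread):
 *
 *	struct conf *c = lockless_dereference(global_conf);
 *	if (c)
 *		pr_info("%d\n", c->value);
 */
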
#endif /* __LINUX_COMPILER_H */