/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/context_tracking_state.h>
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <linux/vtime.h>
#include <asm/hardirq.h>

extern void synchronize_irq(unsigned int irq);
extern bool synchronize_hardirq(unsigned int irq);
/*
 * On CONFIG_NO_HZ_FULL kernels the tick may be stopped on nohz_full CPUs;
 * the real implementation (kernel/rcu) decides whether the tick must be
 * restarted on irq entry.  Otherwise this is a no-op stub.
 */
#ifdef CONFIG_NO_HZ_FULL
void __rcu_irq_enter_check_tick(void);
#else
static inline void __rcu_irq_enter_check_tick(void) { }
#endif
9b1d82fa | 20 | |
aaf2bc50 | 21 | static __always_inline void rcu_irq_enter_check_tick(void) |
9b1d82fa | 22 | { |
aaf2bc50 PM |
23 | if (context_tracking_enabled()) |
24 | __rcu_irq_enter_check_tick(); | |
9b1d82fa PM |
25 | } |
26 | ||
/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
#define __irq_enter()					\
	do {						\
		account_irq_enter_time(current);	\
		preempt_count_add(HARDIRQ_OFFSET);	\
		lockdep_hardirq_enter();		\
	} while (0)
39 | ||
/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
extern void irq_enter(void);
de30a2b3 IM |
44 | |
/*
 * Exit irq context without processing softirqs
 * (mirror image of __irq_enter(), teardown in reverse order):
 */
#define __irq_exit()					\
	do {						\
		lockdep_hardirq_exit();			\
		account_irq_exit_time(current);		\
		preempt_count_sub(HARDIRQ_OFFSET);	\
	} while (0)
54 | ||
/*
 * Exit irq context and process softirqs if needed:
 */
extern void irq_exit(void);
59 | ||
/*
 * Architectures may provide their own NMI entry/exit hooks;
 * default to no-ops when they don't.
 */
#ifndef arch_nmi_enter
#define arch_nmi_enter()	do { } while (0)
#define arch_nmi_exit()		do { } while (0)
#endif
64 | ||
/*
 * Tiny RCU has no NMI bookkeeping to do, so provide empty stubs;
 * Tree RCU implements these in kernel/rcu.
 */
#ifdef CONFIG_TINY_RCU
static inline void rcu_nmi_enter(void) { }
static inline void rcu_nmi_exit(void) { }
#else
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif
72 | ||
/*
 * NMI vs Tracing
 * --------------
 *
 * We must not land in a tracer until (or after) we've changed preempt_count
 * such that in_nmi() becomes true. To that effect all NMI C entry points must
 * be marked 'notrace' and call nmi_enter() as soon as possible.
 */

/*
 * nmi_enter() can nest up to 15 times; see NMI_BITS.
 */
#define nmi_enter()						\
	do {							\
		arch_nmi_enter();				\
		printk_nmi_enter();				\
		lockdep_off();					\
		ftrace_nmi_enter();				\
		BUG_ON(in_nmi() == NMI_MASK);			\
		__preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
		rcu_nmi_enter();				\
		lockdep_hardirq_enter();			\
	} while (0)
5f34fe1c | 96 | |
/*
 * Leave NMI context: strict mirror image of nmi_enter(), unwinding
 * each step in the reverse order it was taken.
 */
#define nmi_exit()						\
	do {							\
		lockdep_hardirq_exit();				\
		rcu_nmi_exit();					\
		BUG_ON(!in_nmi());				\
		__preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
		ftrace_nmi_exit();				\
		lockdep_on();					\
		printk_nmi_exit();				\
		arch_nmi_exit();				\
	} while (0)

#endif /* LINUX_HARDIRQ_H */