/*
 * include/asm-x86_64/irqflags.h
 *
 * IRQ flags handling
 *
 * This file gets included from lowlevel asm headers too, to provide
 * wrapped versions of the local_irq_*() APIs, based on the
 * raw_local_irq_*() functions from the lowlevel headers.
 */
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H
#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__
/*
 * Interrupt control:
 */

/*
 * Read the current RFLAGS value by pushing it onto the stack and
 * popping it into a general-purpose operand.  Non-destructive: the
 * flags register itself is left unchanged.
 */
static inline unsigned long __raw_local_save_flags(void)
{
	unsigned long flags;

	__asm__ __volatile__(
		"# __raw_save_flags\n\t"
		"pushfq ; popq %q0"
		: "=g" (flags)
		: /* no input */
		: "memory"	/* the sequence touches the stack */
	);

	return flags;
}

/* Capture the current flags word into the lvalue @flags. */
#define raw_local_save_flags(flags) \
	do { (flags) = __raw_local_save_flags(); } while (0)

/*
 * Write @flags back into RFLAGS (push the value, pop it into the
 * flags register), restoring a previously saved interrupt state.
 */
static inline void raw_local_irq_restore(unsigned long flags)
{
	__asm__ __volatile__(
		"pushq %0 ; popfq"
		: /* no output */
		:"g" (flags)
		:"memory", "cc"	/* popfq rewrites the condition codes */
	);
}

#ifdef CONFIG_X86_VSMP

/*
 * Interrupt control for the VSMP architecture:
 */

/*
 * Disable interrupts, VSMP-style: clear IF and set AC in RFLAGS.
 * (On VSMP both bits together encode the "interrupts disabled"
 * state — see raw_irqs_disabled_flags() below.)
 */
static inline void raw_local_irq_disable(void)
{
	unsigned long flags = __raw_local_save_flags();

	raw_local_irq_restore((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
}

/*
 * Enable interrupts, VSMP-style: set IF and clear AC in RFLAGS —
 * the exact inverse of raw_local_irq_disable() above.
 */
static inline void raw_local_irq_enable(void)
{
	unsigned long flags = __raw_local_save_flags();

	raw_local_irq_restore((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
}

67 | static inline int raw_irqs_disabled_flags(unsigned long flags) | |
68 | { | |
5d02d7ae | 69 | return !(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC); |
6375e2b7 IM |
70 | } |

#else /* CONFIG_X86_VSMP */

/* Disable interrupts on the local CPU (clears IF via "cli"). */
static inline void raw_local_irq_disable(void)
{
	__asm__ __volatile__("cli" : : : "memory");
}

/* Enable interrupts on the local CPU (sets IF via "sti"). */
static inline void raw_local_irq_enable(void)
{
	__asm__ __volatile__("sti" : : : "memory");
}

84 | static inline int raw_irqs_disabled_flags(unsigned long flags) | |
85 | { | |
5d02d7ae | 86 | return !(flags & X86_EFLAGS_IF); |
6375e2b7 IM |
87 | } |

#endif /* CONFIG_X86_VSMP */

/*
 * For spinlocks, etc.:
 */

/*
 * Save the current flags word, then disable interrupts.  Returns the
 * pre-disable flags so the caller can restore them later with
 * raw_local_irq_restore().
 */
static inline unsigned long __raw_local_irq_save(void)
{
	unsigned long saved_flags = __raw_local_save_flags();

	raw_local_irq_disable();

	return saved_flags;
}

104 | #define raw_local_irq_save(flags) \ |
105 | do { (flags) = __raw_local_irq_save(); } while (0) | |
106 | ||
/* Are interrupts currently disabled on this CPU? */
static inline int raw_irqs_disabled(void)
{
	return raw_irqs_disabled_flags(__raw_local_save_flags());
}

/*
 * Bring the lockdep-traced hardirq state back in sync with the real
 * machine state described by @flags.
 *
 * Should be rarely used — only in places where it is otherwise
 * impossible to know the irq state, like in traps.
 */
static inline void trace_hardirqs_fixup_flags(unsigned long flags)
{
	if (!raw_irqs_disabled_flags(flags))
		trace_hardirqs_on();
	else
		trace_hardirqs_off();
}

/* As trace_hardirqs_fixup_flags(), but against the live flags word. */
static inline void trace_hardirqs_fixup(void)
{
	trace_hardirqs_fixup_flags(__raw_local_save_flags());
}

/*
 * Used in the idle loop; sti takes one instruction cycle
 * to complete:
 */
static inline void raw_safe_halt(void)
{
	/*
	 * "sti; hlt" must be one sequence: sti's effect is delayed by
	 * one instruction, so interrupts become deliverable exactly at
	 * the hlt — a pending interrupt can then wake the CPU without
	 * a missed-wakeup window between the two instructions.
	 */
	__asm__ __volatile__("sti; hlt" : : : "memory");
}

/*
 * Used when interrupts are already enabled or to
 * shutdown the processor:
 */
static inline void halt(void)
{
	/* hlt: stop the CPU until the next interrupt arrives. */
	__asm__ __volatile__("hlt": : :"memory");
}


#else /* __ASSEMBLY__: */
153 | # ifdef CONFIG_TRACE_IRQFLAGS |
154 | # define TRACE_IRQS_ON call trace_hardirqs_on_thunk | |
155 | # define TRACE_IRQS_OFF call trace_hardirqs_off_thunk | |
156 | # else | |
157 | # define TRACE_IRQS_ON | |
158 | # define TRACE_IRQS_OFF | |
159 | # endif | |
10cd706d PZ |
160 | # ifdef CONFIG_DEBUG_LOCK_ALLOC |
161 | # define LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk | |
162 | # define LOCKDEP_SYS_EXIT_IRQ \ | |
163 | TRACE_IRQS_ON; \ | |
164 | sti; \ | |
165 | SAVE_REST; \ | |
166 | LOCKDEP_SYS_EXIT; \ | |
167 | RESTORE_REST; \ | |
168 | cli; \ | |
169 | TRACE_IRQS_OFF; | |
170 | # else | |
171 | # define LOCKDEP_SYS_EXIT | |
172 | # define LOCKDEP_SYS_EXIT_IRQ | |
173 | # endif | |
#endif /* __ASSEMBLY__ */

#endif /* _ASM_IRQFLAGS_H */