/* arch/x86/include/asm/irqflags.h — low-level IRQ flag manipulation */
1#ifndef _X86_IRQFLAGS_H_
2#define _X86_IRQFLAGS_H_
3
4#include <asm/processor-flags.h>
5
6#ifndef __ASSEMBLY__

/*
 * Provide __cpuidle; we can't safely include <linux/cpu.h> here.
 * Marks a function so it is emitted into the ".cpuidle.text" linker
 * section (matches the definition in <linux/cpu.h>).
 */
#define __cpuidle __attribute__((__section__(".cpuidle.text")))

6abcd98f
GOC
/*
 * Interrupt control:
 */

/*
 * Read the CPU's EFLAGS register into a C variable.
 *
 * Returns the raw EFLAGS value; callers typically test X86_EFLAGS_IF
 * in the result to learn the current interrupt-enable state.
 */
static inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	/*
	 * "=rm" is safe here, because "pop" adjusts the stack before
	 * it evaluates its effective address -- this is part of the
	 * documented behavior of the "pop" instruction.
	 */
	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");

	return flags;
}
32
/*
 * Write @flags back into the EFLAGS register via "popf".
 *
 * @flags should normally come from a prior native_save_fl(); popf
 * rewrites the whole flags register, hence the "cc" clobber.
 */
static inline void native_restore_fl(unsigned long flags)
{
	asm volatile("push %0 ; popf"
		     : /* no output */
		     :"g" (flags)
		     :"memory", "cc");
}
40
/* Disable hardware interrupts on the local CPU ("cli"). */
static inline void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}
45
/* Enable hardware interrupts on the local CPU ("sti"). */
static inline void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}
50
/*
 * Enable interrupts and halt as one unit.  Keeping "sti" immediately
 * before "hlt" (sti takes one instruction cycle to complete — see the
 * arch_safe_halt() comment below) means no interrupt window opens
 * before the halt, so a wakeup cannot be lost.
 */
static inline __cpuidle void native_safe_halt(void)
{
	asm volatile("sti; hlt": : :"memory");
}
55
/* Halt the CPU without changing the interrupt flag ("hlt" only). */
static inline __cpuidle void native_halt(void)
{
	asm volatile("hlt": : :"memory");
}
60
61#endif
62
63#ifdef CONFIG_PARAVIRT
64#include <asm/paravirt.h>
65#else
66#ifndef __ASSEMBLY__
e08fbb78 67#include <linux/types.h>
6abcd98f 68
/* Non-paravirt build: arch_* IRQ ops map straight to the native ones. */
static inline notrace unsigned long arch_local_save_flags(void)
{
	return native_save_fl();
}
73
/* Restore a flags word previously returned by arch_local_save_flags()/
 * arch_local_irq_save(). */
static inline notrace void arch_local_irq_restore(unsigned long flags)
{
	native_restore_fl(flags);
}
78
/* Disable interrupts on the local CPU. */
static inline notrace void arch_local_irq_disable(void)
{
	native_irq_disable();
}
83
/* Enable interrupts on the local CPU. */
static inline notrace void arch_local_irq_enable(void)
{
	native_irq_enable();
}
88
/*
 * Used in the idle loop; sti takes one instruction cycle
 * to complete:
 */
static inline __cpuidle void arch_safe_halt(void)
{
	native_safe_halt();
}
97
/*
 * Used when interrupts are already enabled or to
 * shutdown the processor:
 */
static inline __cpuidle void halt(void)
{
	native_halt();
}
106
107/*
108 * For spinlocks, etc:
109 */
e08fbb78 110static inline notrace unsigned long arch_local_irq_save(void)
6abcd98f 111{
df9ee292
DH
112 unsigned long flags = arch_local_save_flags();
113 arch_local_irq_disable();
6abcd98f
GOC
114 return flags;
115}
#else
/*
 * Assembly-code side of the non-paravirt build: the interrupt-control
 * macros expand directly to the native instructions.
 */

#define ENABLE_INTERRUPTS(x)	sti
#define DISABLE_INTERRUPTS(x)	cli

#ifdef CONFIG_X86_64
#define SWAPGS	swapgs
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack).  So we either find a way around these or just fault
 * and emulate if a guest tries to call swapgs directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack. x86_64 only.
 */
#define SWAPGS_UNSAFE_STACK	swapgs

#define PARAVIRT_ADJUST_EXCEPTION_FRAME	/*  */

#define INTERRUPT_RETURN	jmp native_iret
#define USERGS_SYSRET64				\
	swapgs;					\
	sysretq;
#define USERGS_SYSRET32				\
	swapgs;					\
	sysretl

#else
#define INTERRUPT_RETURN		iret
#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
#define GET_CR0_INTO_EAX		movl %cr0, %eax
#endif


#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
153
154#ifndef __ASSEMBLY__
df9ee292 155static inline int arch_irqs_disabled_flags(unsigned long flags)
6abcd98f
GOC
156{
157 return !(flags & X86_EFLAGS_IF);
158}
159
/* Returns nonzero iff interrupts are currently disabled on this CPU. */
static inline int arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}
40e2ec65 166#endif /* !__ASSEMBLY__ */
6abcd98f 167
40e2ec65
DV
#ifdef __ASSEMBLY__
/*
 * Assembly-only helpers: irq-flags tracing hooks and lockdep
 * system-call-exit annotations.  All of these compile away to nothing
 * when the corresponding config option is off.
 */
#ifdef CONFIG_TRACE_IRQFLAGS
#  define TRACE_IRQS_ON		call trace_hardirqs_on_thunk;
#  define TRACE_IRQS_OFF	call trace_hardirqs_off_thunk;
#else
#  define TRACE_IRQS_ON
#  define TRACE_IRQS_OFF
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#  ifdef CONFIG_X86_64
#    define LOCKDEP_SYS_EXIT	call lockdep_sys_exit_thunk
#    define LOCKDEP_SYS_EXIT_IRQ	\
	TRACE_IRQS_ON;			\
	sti;				\
	call lockdep_sys_exit_thunk;	\
	cli;				\
	TRACE_IRQS_OFF;
#  else
	/* 32-bit: lockdep_sys_exit is a C call, so save/restore the
	 * caller-clobbered registers around it. */
#    define LOCKDEP_SYS_EXIT		\
	pushl %eax;			\
	pushl %ecx;			\
	pushl %edx;			\
	call lockdep_sys_exit;		\
	popl %edx;			\
	popl %ecx;			\
	popl %eax;
#    define LOCKDEP_SYS_EXIT_IRQ
#  endif
#else
#  define LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ
#endif
#endif /* __ASSEMBLY__ */
40e2ec65 201
96a388de 202#endif