/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#ifndef _ASM_TILE_IRQFLAGS_H
#define _ASM_TILE_IRQFLAGS_H

#include <arch/interrupts.h>
#include <arch/chip.h>

/*
 * The set of interrupts we want to allow when interrupts are nominally
 * disabled.  The remainder are effectively "NMI" interrupts from
 * the point of view of the generic Linux code.  Note that synchronous
 * interrupts (aka "non-queued") are not blocked by the mask in any case.
 */
#if CHIP_HAS_AUX_PERF_COUNTERS()
#define LINUX_MASKABLE_INTERRUPTS \
        (~(INT_MASK(INT_PERF_COUNT) | INT_MASK(INT_AUX_PERF_COUNT)))
#else
#define LINUX_MASKABLE_INTERRUPTS \
        (~(INT_MASK(INT_PERF_COUNT)))
#endif
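
/*
 * For illustration: per <arch/interrupts.h>, INT_MASK(i) is a one-hot
 * 64-bit mask (essentially 1ULL << (i)), so on a chip without aux perf
 * counters the definition above reduces to
 *
 *      LINUX_MASKABLE_INTERRUPTS == ~(1ULL << INT_PERF_COUNT)
 *
 * i.e. every mask bit set except PERF_COUNT; writing this value to the
 * MASK_SET SPRs blocks exactly the interrupts Linux considers maskable.
 */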

#ifndef __ASSEMBLY__

/* NOTE: we can't include <linux/percpu.h> due to #include dependencies. */
#include <asm/percpu.h>
#include <arch/spr_def.h>

/* Set and clear kernel interrupt masks. */
#if CHIP_HAS_SPLIT_INTR_MASK()
#if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32
# error Fix assumptions about which word various interrupts are in
#endif
#define interrupt_mask_set(n) do { \
        int __n = (n); \
        int __mask = 1 << (__n & 0x1f); \
        if (__n < 32) \
                __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, __mask); \
        else \
                __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, __mask); \
} while (0)
#define interrupt_mask_reset(n) do { \
        int __n = (n); \
        int __mask = 1 << (__n & 0x1f); \
        if (__n < 32) \
                __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, __mask); \
        else \
                __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, __mask); \
} while (0)
#define interrupt_mask_check(n) ({ \
        int __n = (n); \
        (((__n < 32) ? \
          __insn_mfspr(SPR_INTERRUPT_MASK_1_0) : \
          __insn_mfspr(SPR_INTERRUPT_MASK_1_1)) \
         >> (__n & 0x1f)) & 1; \
})
#define interrupt_mask_set_mask(mask) do { \
        unsigned long long __m = (mask); \
        __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, (unsigned long)(__m)); \
        __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, (unsigned long)(__m>>32)); \
} while (0)
#define interrupt_mask_reset_mask(mask) do { \
        unsigned long long __m = (mask); \
        __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, (unsigned long)(__m)); \
        __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, (unsigned long)(__m>>32)); \
} while (0)
#else
#define interrupt_mask_set(n) \
        __insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (1UL << (n)))
#define interrupt_mask_reset(n) \
        __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (1UL << (n)))
#define interrupt_mask_check(n) \
        ((__insn_mfspr(SPR_INTERRUPT_MASK_1) >> (n)) & 1)
#define interrupt_mask_set_mask(mask) \
        __insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (mask))
#define interrupt_mask_reset_mask(mask) \
        __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (mask))
#endif
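
/*
 * Usage sketch (hypothetical caller, for illustration only): the macros
 * above take either an interrupt number or a full 64-bit mask, and on
 * split-mask chips route it to the proper _1_0/_1_1 SPR word:
 *
 *      interrupt_mask_set(INT_MEM_ERROR);       // block one interrupt
 *      BUG_ON(!interrupt_mask_check(INT_MEM_ERROR)); // bit reads back as 1
 *      interrupt_mask_reset(INT_MEM_ERROR);     // unblock it again
 */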

/*
 * The set of interrupts we want active if irqs are enabled.
 * Note that in particular, the tile timer interrupt comes and goes
 * from this set, since we have no other way to turn off the timer.
 * Likewise, INTCTRL_1 is removed and re-added during device
 * interrupts, as is the hardwall UDN_FIREWALL interrupt.
 * We use a low bit (MEM_ERROR) as our sentinel value and make sure it
 * is always claimed as an "active interrupt" so we can query that bit
 * to know our current state.
 */
DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
#define INITIAL_INTERRUPTS_ENABLED INT_MASK(INT_MEM_ERROR)
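
/*
 * The sentinel trick in action (a sketch): MEM_ERROR is always present
 * in interrupts_enabled_mask, so its hardware mask bit is set exactly
 * when we have "disabled interrupts", which is what raw_irqs_disabled()
 * below relies on:
 *
 *      raw_local_irq_disable();        // sets the MEM_ERROR mask bit too
 *      BUG_ON(!raw_irqs_disabled());   // ...so this always holds
 */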

/* Disable interrupts. */
#define raw_local_irq_disable() \
        interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS)

/* Disable all interrupts, including NMIs. */
#define raw_local_irq_disable_all() \
        interrupt_mask_set_mask(-1ULL)

/* Re-enable all maskable interrupts. */
#define raw_local_irq_enable() \
        interrupt_mask_reset_mask(__get_cpu_var(interrupts_enabled_mask))

/* Disable or enable interrupts based on flag argument. */
#define raw_local_irq_restore(disabled) do { \
        if (disabled) \
                raw_local_irq_disable(); \
        else \
                raw_local_irq_enable(); \
} while (0)

/* Return true if "flags" argument means interrupts are disabled. */
#define raw_irqs_disabled_flags(flags) ((flags) != 0)

/* Return true if interrupts are currently disabled. */
#define raw_irqs_disabled() interrupt_mask_check(INT_MEM_ERROR)

/* Save whether interrupts are currently disabled. */
#define raw_local_save_flags(flags) ((flags) = raw_irqs_disabled())

/* Save whether interrupts are currently disabled, then disable them. */
#define raw_local_irq_save(flags) \
        do { raw_local_save_flags(flags); raw_local_irq_disable(); } while (0)
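
/*
 * Typical pattern (hypothetical caller): note that "flags" here is just
 * 0 or 1, not a saved copy of the whole hardware mask.
 *
 *      unsigned long flags;
 *      raw_local_irq_save(flags);      // flags = raw_irqs_disabled(), then disable
 *      ...critical section...
 *      raw_local_irq_restore(flags);   // re-enable only if previously enabled
 */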

/* Prevent the given interrupt from being enabled next time we enable irqs. */
#define raw_local_irq_mask(interrupt) \
        (__get_cpu_var(interrupts_enabled_mask) &= ~INT_MASK(interrupt))

/* Prevent the given interrupt from being enabled immediately. */
#define raw_local_irq_mask_now(interrupt) do { \
        raw_local_irq_mask(interrupt); \
        interrupt_mask_set(interrupt); \
} while (0)

/* Allow the given interrupt to be enabled next time we enable irqs. */
#define raw_local_irq_unmask(interrupt) \
        (__get_cpu_var(interrupts_enabled_mask) |= INT_MASK(interrupt))

/* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */
#define raw_local_irq_unmask_now(interrupt) do { \
        raw_local_irq_unmask(interrupt); \
        if (!irqs_disabled()) \
                interrupt_mask_reset(interrupt); \
} while (0)
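
/*
 * Pairing sketch (hypothetical use): the tile timer is managed this way,
 * since masking is our only way to turn it off (see the comment above
 * interrupts_enabled_mask):
 *
 *      raw_local_irq_mask_now(INT_TILE_TIMER);   // stop timer ticks now
 *      ...
 *      raw_local_irq_unmask_now(INT_TILE_TIMER); // resume, if !irqs_disabled
 */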

#else /* __ASSEMBLY__ */

/* We provide a somewhat more restricted set for assembly. */

#ifdef __tilegx__

#if INT_MEM_ERROR != 0
# error Fix IRQS_DISABLED() macro
#endif

/* Return 0 or 1 to indicate whether interrupts are currently disabled. */
#define IRQS_DISABLED(tmp) \
        mfspr   tmp, INTERRUPT_MASK_1; \
        andi    tmp, tmp, 1

/* Load up a pointer to &interrupts_enabled_mask. */
#define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \
        moveli     reg, hw2_last(interrupts_enabled_mask); \
        shl16insli reg, reg, hw1(interrupts_enabled_mask); \
        shl16insli reg, reg, hw0(interrupts_enabled_mask); \
        add        reg, reg, tp
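
/*
 * In C terms (a sketch): reg = (unsigned long)&interrupts_enabled_mask + tp.
 * The hw2_last/hw1/hw0 relocations build the symbol address 16 bits at a
 * time, and adding "tp" selects this CPU's copy of the variable, mirroring
 * __get_cpu_var() in the C code above.
 */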

/* Disable interrupts. */
#define IRQ_DISABLE(tmp0, tmp1) \
        moveli     tmp0, hw2_last(LINUX_MASKABLE_INTERRUPTS); \
        shl16insli tmp0, tmp0, hw1(LINUX_MASKABLE_INTERRUPTS); \
        shl16insli tmp0, tmp0, hw0(LINUX_MASKABLE_INTERRUPTS); \
        mtspr      INTERRUPT_MASK_SET_1, tmp0
/* Disable ALL maskable interrupts (used by NMI entry). */
#define IRQ_DISABLE_ALL(tmp) \
        movei   tmp, -1; \
        mtspr   INTERRUPT_MASK_SET_1, tmp

/* Enable interrupts. */
#define IRQ_ENABLE(tmp0, tmp1) \
        GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
        ld      tmp0, tmp0; \
        mtspr   INTERRUPT_MASK_RESET_1, tmp0

#else /* !__tilegx__ */

/*
 * Return 0 or 1 to indicate whether interrupts are currently disabled.
 * Note that it's important that we use a bit from the "low" mask word,
 * since when we are enabling, that is the word we write first, so if we
 * are interrupted after only writing half of the mask, the interrupt
 * handler will correctly observe that we have interrupts enabled, and
 * will enable interrupts itself on return from the interrupt handler
 * (making the original code's write of the "high" mask word idempotent).
 */
#define IRQS_DISABLED(tmp) \
        mfspr   tmp, INTERRUPT_MASK_1_0; \
        shri    tmp, tmp, INT_MEM_ERROR; \
        andi    tmp, tmp, 1

/* Load up a pointer to &interrupts_enabled_mask. */
#define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \
        moveli  reg, lo16(interrupts_enabled_mask); \
        auli    reg, reg, ha16(interrupts_enabled_mask); \
        add     reg, reg, tp

/* Disable interrupts. */
#define IRQ_DISABLE(tmp0, tmp1) \
        { \
         movei  tmp0, -1; \
         moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS) \
        }; \
        { \
         mtspr  INTERRUPT_MASK_SET_1_0, tmp0; \
         auli   tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS) \
        }; \
        mtspr   INTERRUPT_MASK_SET_1_1, tmp1
/* Disable ALL maskable interrupts (used by NMI entry). */
#define IRQ_DISABLE_ALL(tmp) \
        movei   tmp, -1; \
        mtspr   INTERRUPT_MASK_SET_1_0, tmp; \
        mtspr   INTERRUPT_MASK_SET_1_1, tmp

/* Enable interrupts. */
#define IRQ_ENABLE(tmp0, tmp1) \
        GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
        { \
         lw     tmp0, tmp0; \
         addi   tmp1, tmp0, 4 \
        }; \
        lw      tmp1, tmp1; \
        mtspr   INTERRUPT_MASK_RESET_1_0, tmp0; \
        mtspr   INTERRUPT_MASK_RESET_1_1, tmp1
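
/*
 * For reference, the sequence above is roughly the following C (a sketch;
 * the real code must be usable from pure assembly):
 *
 *      unsigned long *p =
 *              (unsigned long *)&__get_cpu_var(interrupts_enabled_mask);
 *      unsigned long lo = p[0], hi = p[1];  // word order as the asm assumes
 *      __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, lo);  // low word first;
 *      __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, hi);  // see IRQS_DISABLED
 */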
#endif

/*
 * Do the CPU's IRQ-state tracing from assembly code.  We call a
 * C function, but almost everywhere we do, we don't mind clobbering
 * all the caller-saved registers.
 */
#ifdef CONFIG_TRACE_IRQFLAGS
# define TRACE_IRQS_ON  jal trace_hardirqs_on
# define TRACE_IRQS_OFF jal trace_hardirqs_off
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
#endif

#endif /* __ASSEMBLY__ */

#endif /* _ASM_TILE_IRQFLAGS_H */