/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */
12 | ||
13 | #include <linux/irq.h> | |
7fe3730d | 14 | #include <linux/msi.h> |
dd87eb3a TG |
15 | #include <linux/module.h> |
16 | #include <linux/interrupt.h> | |
17 | #include <linux/kernel_stat.h> | |
18 | ||
f069686e SR |
19 | #include <trace/events/irq.h> |
20 | ||
dd87eb3a TG |
21 | #include "internals.h" |
22 | ||
/**
 *	irq_set_chip - set the irq chip for an irq
 *	@irq:	irq number
 *	@chip:	pointer to irq chip description structure
 *
 *	Returns 0 on success or -EINVAL when no descriptor exists for @irq.
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	/* A NULL chip means "no controller": install the dummy chip. */
	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is
	 * already marked, and this call is harmless.
	 */
	irq_reserve_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);
dd87eb3a TG |
50 | |
/**
 *	irq_set_irq_type - set the irq trigger type for an irq
 *	@irq:	irq number
 *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 *
 *	Returns 0 on success or a negative error code from the trigger setup.
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* Strip everything but the sense bits before programming the chip. */
	type &= IRQ_TYPE_SENSE_MASK;
	ret = __irq_set_trigger(desc, irq, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);
dd87eb3a TG |
71 | |
/**
 *	irq_set_handler_data - set irq handler data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to interrupt specific data
 *
 *	Set the hardware irq controller data for an irq. Returns 0 on
 *	success or -EINVAL when no descriptor exists for @irq.
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);
dd87eb3a | 91 | |
/**
 *	irq_set_msi_desc - set MSI descriptor data for an irq
 *	@irq:	Interrupt number
 *	@entry:	Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq. Returns 0 on success or
 *	-EINVAL when no descriptor exists for @irq.
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_data.msi_desc = entry;
	/* Keep the MSI entry's back pointer to the irq number in sync. */
	if (entry)
		entry->irq = irq;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
112 | ||
/**
 *	irq_set_chip_data - set irq chip data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to chip specific data
 *
 *	Set the hardware irq chip data for an irq. Returns 0 on success or
 *	-EINVAL when no descriptor exists for @irq.
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);
dd87eb3a | 132 | |
f303a6dd TG |
133 | struct irq_data *irq_get_irq_data(unsigned int irq) |
134 | { | |
135 | struct irq_desc *desc = irq_to_desc(irq); | |
136 | ||
137 | return desc ? &desc->irq_data : NULL; | |
138 | } | |
139 | EXPORT_SYMBOL_GPL(irq_get_irq_data); | |
140 | ||
/* Mark the interrupt as no longer disabled in its irqd state bits. */
static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}
145 | ||
/* Mark the interrupt as disabled in its irqd state bits. */
static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}
150 | ||
/* Mark the interrupt line as unmasked in its irqd state bits. */
static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}
155 | ||
/* Mark the interrupt line as masked in its irqd state bits. */
static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}
160 | ||
/*
 * irq_startup - start up an interrupt: clear the disabled state, reset
 * the disable depth and let the chip's irq_startup() (or, if absent,
 * irq_enable()) bring the line up. When @resend is true, a pending
 * interrupt that arrived while the line was down is replayed.
 * Returns the chip's irq_startup() result, or 0.
 */
int irq_startup(struct irq_desc *desc, bool resend)
{
	int ret = 0;

	irq_state_clr_disabled(desc);
	desc->depth = 0;

	if (desc->irq_data.chip->irq_startup) {
		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		/* A successful chip startup leaves the line unmasked. */
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	if (resend)
		check_irq_resend(desc, desc->irq_data.irq);
	return ret;
}
178 | ||
/*
 * irq_shutdown - shut down an interrupt. Marks it disabled/masked and
 * invokes the best available chip callback: irq_shutdown(), else
 * irq_disable(), else irq_mask() (which every chip must provide).
 */
void irq_shutdown(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	desc->depth = 1;
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	else if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	irq_state_set_masked(desc);
}
191 | ||
/*
 * irq_enable - enable the interrupt via the chip's irq_enable()
 * callback, falling back to irq_unmask(), and update the irqd state
 * bits accordingly.
 */
void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}
201 | ||
/*
 * irq_disable - mark the interrupt disabled. The chip's irq_disable()
 * callback is only invoked when it exists; otherwise the line is left
 * unmasked (lazy disable) and IRQD_IRQ_MASKED is deliberately NOT set.
 */
void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}
210 | ||
31d9d9b6 MZ |
211 | void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu) |
212 | { | |
213 | if (desc->irq_data.chip->irq_enable) | |
214 | desc->irq_data.chip->irq_enable(&desc->irq_data); | |
215 | else | |
216 | desc->irq_data.chip->irq_unmask(&desc->irq_data); | |
217 | cpumask_set_cpu(cpu, desc->percpu_enabled); | |
218 | } | |
219 | ||
220 | void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu) | |
221 | { | |
222 | if (desc->irq_data.chip->irq_disable) | |
223 | desc->irq_data.chip->irq_disable(&desc->irq_data); | |
224 | else | |
225 | desc->irq_data.chip->irq_mask(&desc->irq_data); | |
226 | cpumask_clear_cpu(cpu, desc->percpu_enabled); | |
227 | } | |
228 | ||
/*
 * mask_ack_irq - mask and acknowledge an interrupt. Uses the combined
 * irq_mask_ack() callback when the chip provides one, otherwise masks
 * first and then acks (ack is optional).
 */
static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	irq_state_set_masked(desc);
}
240 | ||
d4d5e089 | 241 | void mask_irq(struct irq_desc *desc) |
0b1adaa0 | 242 | { |
e2c0f8ff TG |
243 | if (desc->irq_data.chip->irq_mask) { |
244 | desc->irq_data.chip->irq_mask(&desc->irq_data); | |
6e40262e | 245 | irq_state_set_masked(desc); |
0b1adaa0 TG |
246 | } |
247 | } | |
248 | ||
d4d5e089 | 249 | void unmask_irq(struct irq_desc *desc) |
0b1adaa0 | 250 | { |
0eda58b7 TG |
251 | if (desc->irq_data.chip->irq_unmask) { |
252 | desc->irq_data.chip->irq_unmask(&desc->irq_data); | |
6e40262e | 253 | irq_state_clr_masked(desc); |
0b1adaa0 | 254 | } |
dd87eb3a TG |
255 | } |
256 | ||
/*
 *	handle_nested_irq - Handle a nested irq from a irq thread
 *	@irq:	the interrupt number
 *
 *	Handle interrupts which are nested into a threaded interrupt
 *	handler. The handler function is called inside the calling
 *	threads context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	/* Runs in thread context; the thread_fn may sleep. */
	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	/* Mark in-progress and drop the lock around the handler call. */
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
295 | ||
/*
 * irq_check_poll - returns false when the irq is not being polled by the
 * spurious-irq poller; otherwise waits for the poll to finish and
 * returns whether the flow handler may proceed.
 */
static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}
302 | ||
/**
 *	handle_simple_irq - Simple and software-decoded IRQs.
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Simple interrupts are either sent from a demultiplexing interrupt
 *	handler or come from hardware, where no interrupt hardware control
 *	is necessary.
 *
 *	Note: The caller is expected to handle the ack, clear, mask and
 *	unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	/* Bail out when the spurious poller owns the irq and says so. */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
dd87eb3a | 336 | |
/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}
354 | ||
/**
 *	handle_level_irq - Level type irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require to mask the interrupt and unmask
 *	it after the associated handler has acknowledged the device, so the
 *	interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	/* Mask and ack first: the line stays active until the device is serviced. */
	mask_ack_irq(desc);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If its disabled or no action available
	 * keep it masked and get out of here
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
dd87eb3a | 393 | |
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
/* Invoke the architecture's optional preflow hook before the fasteoi flow. */
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
/* No preflow support configured: compile to nothing. */
static inline void preflow_handler(struct irq_desc *desc) { }
#endif
403 | ||
/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If its disabled or no action available
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	/* Oneshot interrupts stay masked until the thread handler finishes. */
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	if (desc->istate & IRQS_ONESHOT)
		cond_unmask_irq(desc);

out_eoi:
	desc->irq_data.chip->irq_eoi(&desc->irq_data);
out_unlock:
	raw_spin_unlock(&desc->lock);
	return;
out:
	/* Unhandled exit: eoi unless the chip wants eoi only when handled. */
	if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED))
		goto out_eoi;
	goto out_unlock;
}
455 | ||
/**
 *	handle_edge_irq - edge type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Interrupt occures on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be reenabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires to reenable the interrupt inside
 *	of the loop which handles the interrupts which have arrived while
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or its disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			mask_ack_irq(desc);
			goto out_unlock;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Renable it, if it was not disabled in meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
521 | ||
#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 *	handle_edge_eoi_irq - edge eoi type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 * Similar as the above handle_edge_irq, but using eoi and w/o the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or its disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			goto out_eoi;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	/* Always signal eoi, even on the unhandled paths above. */
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif
566 | ||
/**
 *	handle_percpu_irq - Per CPU local irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(irq, desc);

	/* ack and eoi are optional for per-cpu chips. */
	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc, desc->action);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
589 | ||
/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	/* Resolve this cpu's private dev_id from the percpu pointer. */
	void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
621 | ||
/*
 * __irq_set_handler - install a flow handler for an irq
 * @irq:	interrupt number
 * @handle:	flow handler; NULL uninstalls (handle_bad_irq is used)
 * @is_chained:	nonzero for chained (demux) interrupts, which are marked
 *		noprobe/norequest/nothread and started immediately
 * @name:	name shown in /proc for this handler
 */
void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	if (!handle) {
		handle = handle_bad_irq;
	} else {
		/* Installing a real handler requires a real chip. */
		if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
			goto out;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		irq_startup(desc, true);
	}
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);
dd87eb3a TG |
659 | |
/*
 * irq_set_chip_and_handler_name - set the chip, the flow handler and the
 * handler name for an irq in one call.
 */
void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
46f4f8f6 | 667 | |
/*
 * irq_modify_status - clear/set irq settings bits and mirror the result
 * into the descriptor's irqd state flags.
 * @irq:	interrupt number
 * @clr:	settings bits to clear
 * @set:	settings bits to set
 */
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;
	irq_settings_clr_and_set(desc, clr, set);

	/* Rebuild the derived irqd flags from scratch. */
	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
/**
 *	irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_online()
 *	for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		/*
		 * IRQCHIP_ONOFFLINE_ENABLED chips only get the callback
		 * while the irq is not disabled.
		 */
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}
723 | ||
/**
 *	irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_offline()
 *	for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		/*
		 * IRQCHIP_ONOFFLINE_ENABLED chips only get the callback
		 * while the irq is not disabled.
		 */
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}
752 | } |