]>
Commit | Line | Data |
---|---|---|
dd87eb3a TG |
1 | /* |
2 | * linux/kernel/irq/chip.c | |
3 | * | |
4 | * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar | |
5 | * Copyright (C) 2005-2006, Thomas Gleixner, Russell King | |
6 | * | |
7 | * This file contains the core interrupt handling code, for irq-chip | |
8 | * based architectures. | |
9 | * | |
10 | * Detailed information is available in Documentation/DocBook/genericirq | |
11 | */ | |
12 | ||
13 | #include <linux/irq.h> | |
7fe3730d | 14 | #include <linux/msi.h> |
dd87eb3a TG |
15 | #include <linux/module.h> |
16 | #include <linux/interrupt.h> | |
17 | #include <linux/kernel_stat.h> | |
f8264e34 | 18 | #include <linux/irqdomain.h> |
dd87eb3a | 19 | |
f069686e SR |
20 | #include <trace/events/irq.h> |
21 | ||
dd87eb3a TG |
22 | #include "internals.h" |
23 | ||
e509bd7d MW |
24 | static irqreturn_t bad_chained_irq(int irq, void *dev_id) |
25 | { | |
26 | WARN_ONCE(1, "Chained irq %d should not call an action\n", irq); | |
27 | return IRQ_NONE; | |
28 | } | |
29 | ||
/*
 * Chained handlers should never call action on their IRQ. This default
 * action will emit a warning if such a thing happens. Installed by the
 * chained-handler setup code so desc->action is never NULL there.
 */
struct irqaction chained_action = {
        .handler = bad_chained_irq,
};
37 | ||
dd87eb3a | 38 | /** |
a0cd9ca2 | 39 | * irq_set_chip - set the irq chip for an irq |
dd87eb3a TG |
40 | * @irq: irq number |
41 | * @chip: pointer to irq chip description structure | |
42 | */ | |
a0cd9ca2 | 43 | int irq_set_chip(unsigned int irq, struct irq_chip *chip) |
dd87eb3a | 44 | { |
dd87eb3a | 45 | unsigned long flags; |
31d9d9b6 | 46 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); |
dd87eb3a | 47 | |
02725e74 | 48 | if (!desc) |
dd87eb3a | 49 | return -EINVAL; |
dd87eb3a TG |
50 | |
51 | if (!chip) | |
52 | chip = &no_irq_chip; | |
53 | ||
6b8ff312 | 54 | desc->irq_data.chip = chip; |
02725e74 | 55 | irq_put_desc_unlock(desc, flags); |
d72274e5 DD |
56 | /* |
57 | * For !CONFIG_SPARSE_IRQ make the irq show up in | |
f63b6a05 | 58 | * allocated_irqs. |
d72274e5 | 59 | */ |
f63b6a05 | 60 | irq_mark_irq(irq); |
dd87eb3a TG |
61 | return 0; |
62 | } | |
a0cd9ca2 | 63 | EXPORT_SYMBOL(irq_set_chip); |
dd87eb3a TG |
64 | |
65 | /** | |
a0cd9ca2 | 66 | * irq_set_type - set the irq trigger type for an irq |
dd87eb3a | 67 | * @irq: irq number |
0c5d1eb7 | 68 | * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h |
dd87eb3a | 69 | */ |
a0cd9ca2 | 70 | int irq_set_irq_type(unsigned int irq, unsigned int type) |
dd87eb3a | 71 | { |
dd87eb3a | 72 | unsigned long flags; |
31d9d9b6 | 73 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); |
02725e74 | 74 | int ret = 0; |
dd87eb3a | 75 | |
02725e74 TG |
76 | if (!desc) |
77 | return -EINVAL; | |
dd87eb3a | 78 | |
a1ff541a | 79 | ret = __irq_set_trigger(desc, type); |
02725e74 | 80 | irq_put_desc_busunlock(desc, flags); |
dd87eb3a TG |
81 | return ret; |
82 | } | |
a0cd9ca2 | 83 | EXPORT_SYMBOL(irq_set_irq_type); |
dd87eb3a TG |
84 | |
85 | /** | |
a0cd9ca2 | 86 | * irq_set_handler_data - set irq handler data for an irq |
dd87eb3a TG |
87 | * @irq: Interrupt number |
88 | * @data: Pointer to interrupt specific data | |
89 | * | |
90 | * Set the hardware irq controller data for an irq | |
91 | */ | |
a0cd9ca2 | 92 | int irq_set_handler_data(unsigned int irq, void *data) |
dd87eb3a | 93 | { |
dd87eb3a | 94 | unsigned long flags; |
31d9d9b6 | 95 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); |
dd87eb3a | 96 | |
02725e74 | 97 | if (!desc) |
dd87eb3a | 98 | return -EINVAL; |
af7080e0 | 99 | desc->irq_common_data.handler_data = data; |
02725e74 | 100 | irq_put_desc_unlock(desc, flags); |
dd87eb3a TG |
101 | return 0; |
102 | } | |
a0cd9ca2 | 103 | EXPORT_SYMBOL(irq_set_handler_data); |
dd87eb3a | 104 | |
5b912c10 | 105 | /** |
51906e77 AG |
106 | * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset |
107 | * @irq_base: Interrupt number base | |
108 | * @irq_offset: Interrupt number offset | |
109 | * @entry: Pointer to MSI descriptor data | |
5b912c10 | 110 | * |
51906e77 | 111 | * Set the MSI descriptor entry for an irq at offset |
5b912c10 | 112 | */ |
51906e77 AG |
113 | int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset, |
114 | struct msi_desc *entry) | |
5b912c10 | 115 | { |
5b912c10 | 116 | unsigned long flags; |
51906e77 | 117 | struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL); |
5b912c10 | 118 | |
02725e74 | 119 | if (!desc) |
5b912c10 | 120 | return -EINVAL; |
b237721c | 121 | desc->irq_common_data.msi_desc = entry; |
51906e77 AG |
122 | if (entry && !irq_offset) |
123 | entry->irq = irq_base; | |
02725e74 | 124 | irq_put_desc_unlock(desc, flags); |
5b912c10 EB |
125 | return 0; |
126 | } | |
127 | ||
/**
 *      irq_set_msi_desc - set MSI descriptor data for an irq
 *      @irq:   Interrupt number
 *      @entry: Pointer to MSI descriptor data
 *
 *      Set the MSI descriptor entry for an irq. Convenience wrapper
 *      around irq_set_msi_desc_off() with a zero offset.
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
        return irq_set_msi_desc_off(irq, 0, entry);
}
139 | ||
dd87eb3a | 140 | /** |
a0cd9ca2 | 141 | * irq_set_chip_data - set irq chip data for an irq |
dd87eb3a TG |
142 | * @irq: Interrupt number |
143 | * @data: Pointer to chip specific data | |
144 | * | |
145 | * Set the hardware irq chip data for an irq | |
146 | */ | |
a0cd9ca2 | 147 | int irq_set_chip_data(unsigned int irq, void *data) |
dd87eb3a | 148 | { |
dd87eb3a | 149 | unsigned long flags; |
31d9d9b6 | 150 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); |
dd87eb3a | 151 | |
02725e74 | 152 | if (!desc) |
dd87eb3a | 153 | return -EINVAL; |
6b8ff312 | 154 | desc->irq_data.chip_data = data; |
02725e74 | 155 | irq_put_desc_unlock(desc, flags); |
dd87eb3a TG |
156 | return 0; |
157 | } | |
a0cd9ca2 | 158 | EXPORT_SYMBOL(irq_set_chip_data); |
dd87eb3a | 159 | |
f303a6dd TG |
160 | struct irq_data *irq_get_irq_data(unsigned int irq) |
161 | { | |
162 | struct irq_desc *desc = irq_to_desc(irq); | |
163 | ||
164 | return desc ? &desc->irq_data : NULL; | |
165 | } | |
166 | EXPORT_SYMBOL_GPL(irq_get_irq_data); | |
167 | ||
/* Clear the IRQD_IRQ_DISABLED state bit; caller holds desc->lock. */
static void irq_state_clr_disabled(struct irq_desc *desc)
{
        irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}
172 | ||
/* Set the IRQD_IRQ_DISABLED state bit; caller holds desc->lock. */
static void irq_state_set_disabled(struct irq_desc *desc)
{
        irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}
177 | ||
/* Clear the IRQD_IRQ_MASKED state bit; caller holds desc->lock. */
static void irq_state_clr_masked(struct irq_desc *desc)
{
        irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}
182 | ||
/* Set the IRQD_IRQ_MASKED state bit; caller holds desc->lock. */
static void irq_state_set_masked(struct irq_desc *desc)
{
        irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}
187 | ||
/* Clear the IRQD_IRQ_STARTED state bit; caller holds desc->lock. */
static void irq_state_clr_started(struct irq_desc *desc)
{
        irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
}
192 | ||
/* Set the IRQD_IRQ_STARTED state bit; caller holds desc->lock. */
static void irq_state_set_started(struct irq_desc *desc)
{
        irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
}
197 | ||
761ea388 TG |
198 | enum { |
199 | IRQ_STARTUP_NORMAL, | |
200 | IRQ_STARTUP_MANAGED, | |
201 | IRQ_STARTUP_ABORT, | |
202 | }; | |
203 | ||
204 | #ifdef CONFIG_SMP | |
205 | static int | |
206 | __irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force) | |
207 | { | |
208 | struct irq_data *d = irq_desc_get_irq_data(desc); | |
209 | ||
210 | if (!irqd_affinity_is_managed(d)) | |
211 | return IRQ_STARTUP_NORMAL; | |
212 | ||
213 | irqd_clr_managed_shutdown(d); | |
214 | ||
215 | if (cpumask_any_and(aff, cpu_online_mask) > nr_cpu_ids) { | |
216 | /* | |
217 | * Catch code which fiddles with enable_irq() on a managed | |
218 | * and potentially shutdown IRQ. Chained interrupt | |
219 | * installment or irq auto probing should not happen on | |
220 | * managed irqs either. Emit a warning, break the affinity | |
221 | * and start it up as a normal interrupt. | |
222 | */ | |
223 | if (WARN_ON_ONCE(force)) | |
224 | return IRQ_STARTUP_NORMAL; | |
225 | /* | |
226 | * The interrupt was requested, but there is no online CPU | |
227 | * in it's affinity mask. Put it into managed shutdown | |
228 | * state and let the cpu hotplug mechanism start it up once | |
229 | * a CPU in the mask becomes available. | |
230 | */ | |
231 | irqd_set_managed_shutdown(d); | |
232 | return IRQ_STARTUP_ABORT; | |
233 | } | |
234 | return IRQ_STARTUP_MANAGED; | |
235 | } | |
236 | #else | |
237 | static int | |
238 | __irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force) | |
239 | { | |
240 | return IRQ_STARTUP_NORMAL; | |
241 | } | |
242 | #endif | |
243 | ||
/*
 * __irq_startup - Activate and start an interrupt
 * @desc:       Interrupt descriptor, locked by the caller
 *
 * Activates the irq domain hierarchy first, then either invokes the
 * chip's irq_startup() callback (which implies unmasking, hence the
 * disabled/masked state bits are cleared) or falls back to irq_enable().
 * Marks the interrupt started in either case. Returns the chip's
 * irq_startup() result, or 0 when the fallback path was taken.
 */
static int __irq_startup(struct irq_desc *desc)
{
        struct irq_data *d = irq_desc_get_irq_data(desc);
        int ret = 0;

        irq_domain_activate_irq(d);
        if (d->chip->irq_startup) {
                ret = d->chip->irq_startup(d);
                irq_state_clr_disabled(desc);
                irq_state_clr_masked(desc);
        } else {
                irq_enable(desc);
        }
        irq_state_set_started(desc);
        return ret;
}
260 | ||
/*
 * irq_startup - Start up an interrupt, honouring managed affinity
 * @desc:       Interrupt descriptor, locked by the caller
 * @resend:     Resend a pending hardware interrupt after startup
 * @force:      Force startup even for a managed-shutdown interrupt
 *
 * Resets the disable depth. An already started interrupt is simply
 * re-enabled; otherwise the managed-affinity decision determines
 * whether to start normally, start with the managed mask applied, or
 * abort (leaving the irq in managed shutdown until CPU hotplug brings
 * a suitable CPU online). Returns the chip startup result or 0.
 */
int irq_startup(struct irq_desc *desc, bool resend, bool force)
{
        struct irq_data *d = irq_desc_get_irq_data(desc);
        struct cpumask *aff = irq_data_get_affinity_mask(d);
        int ret = 0;

        desc->depth = 0;

        if (irqd_is_started(d)) {
                irq_enable(desc);
        } else {
                switch (__irq_startup_managed(desc, aff, force)) {
                case IRQ_STARTUP_NORMAL:
                        ret = __irq_startup(desc);
                        irq_setup_affinity(desc);
                        break;
                case IRQ_STARTUP_MANAGED:
                        ret = __irq_startup(desc);
                        irq_set_affinity_locked(d, aff, false);
                        break;
                case IRQ_STARTUP_ABORT:
                        /* Stays in managed shutdown; no resend either. */
                        return 0;
                }
        }
        if (resend)
                check_irq_resend(desc);

        return ret;
}
290 | ||
201d7f47 TG |
291 | static void __irq_disable(struct irq_desc *desc, bool mask); |
292 | ||
/*
 * irq_shutdown - Shut down an interrupt completely
 * @desc:       Interrupt descriptor, locked by the caller
 *
 * Uses the chip's irq_shutdown() callback when available (which implies
 * masking, hence both state bits are set), otherwise disables and masks
 * via __irq_disable(). Always deactivates the irq domain afterwards.
 */
void irq_shutdown(struct irq_desc *desc)
{
        if (irqd_is_started(&desc->irq_data)) {
                /* depth 1: a subsequent enable_irq() brings it back to 0 */
                desc->depth = 1;
                if (desc->irq_data.chip->irq_shutdown) {
                        desc->irq_data.chip->irq_shutdown(&desc->irq_data);
                        irq_state_set_disabled(desc);
                        irq_state_set_masked(desc);
                } else {
                        __irq_disable(desc, true);
                }
                irq_state_clr_started(desc);
        }
        /*
         * This must be called even if the interrupt was never started up,
         * because the activation can happen before the interrupt is
         * available for request/startup. It has its own state tracking so
         * it's safe to call it unconditionally.
         */
        irq_domain_deactivate_irq(&desc->irq_data);
}
314 | ||
/*
 * irq_enable - Enable (and unmask) an interrupt
 * @desc:       Interrupt descriptor, locked by the caller
 *
 * If the interrupt is not marked disabled, only an unmask is needed.
 * Otherwise clear the disabled state and use the chip's irq_enable()
 * callback (which implies unmasking), falling back to unmask_irq().
 */
void irq_enable(struct irq_desc *desc)
{
        if (!irqd_irq_disabled(&desc->irq_data)) {
                unmask_irq(desc);
        } else {
                irq_state_clr_disabled(desc);
                if (desc->irq_data.chip->irq_enable) {
                        desc->irq_data.chip->irq_enable(&desc->irq_data);
                        irq_state_clr_masked(desc);
                } else {
                        unmask_irq(desc);
                }
        }
}
329 | ||
/*
 * __irq_disable - Disable an interrupt, optionally masking it
 * @desc:       Interrupt descriptor, locked by the caller
 * @mask:       Mask the line at the hardware level as well
 *
 * An already disabled interrupt is at most masked. Otherwise the
 * disabled state is set and the chip's irq_disable() callback is used
 * when present (which implies masking); without that callback the line
 * is masked only when @mask is set (lazy disable otherwise).
 */
static void __irq_disable(struct irq_desc *desc, bool mask)
{
        if (irqd_irq_disabled(&desc->irq_data)) {
                if (mask)
                        mask_irq(desc);
        } else {
                irq_state_set_disabled(desc);
                if (desc->irq_data.chip->irq_disable) {
                        desc->irq_data.chip->irq_disable(&desc->irq_data);
                        irq_state_set_masked(desc);
                } else if (mask) {
                        mask_irq(desc);
                }
        }
}
345 | ||
/**
 * irq_disable - Mark interrupt disabled
 * @desc:       irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 *
 * If the interrupt chip does not implement the irq_disable callback,
 * a driver can disable the lazy approach for a particular irq line by
 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
 * be used for devices which cannot disable the interrupt at the
 * device level under certain circumstances and have to use
 * disable_irq[_nosync] instead.
 */
void irq_disable(struct irq_desc *desc)
{
        /* The unlazy flag forces an immediate hardware mask. */
        __irq_disable(desc, irq_settings_disable_unlazy(desc));
}
370 | ||
31d9d9b6 MZ |
371 | void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu) |
372 | { | |
373 | if (desc->irq_data.chip->irq_enable) | |
374 | desc->irq_data.chip->irq_enable(&desc->irq_data); | |
375 | else | |
376 | desc->irq_data.chip->irq_unmask(&desc->irq_data); | |
377 | cpumask_set_cpu(cpu, desc->percpu_enabled); | |
378 | } | |
379 | ||
380 | void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu) | |
381 | { | |
382 | if (desc->irq_data.chip->irq_disable) | |
383 | desc->irq_data.chip->irq_disable(&desc->irq_data); | |
384 | else | |
385 | desc->irq_data.chip->irq_mask(&desc->irq_data); | |
386 | cpumask_clear_cpu(cpu, desc->percpu_enabled); | |
387 | } | |
388 | ||
/*
 * mask_ack_irq - Mask and acknowledge an interrupt
 * @desc:       Interrupt descriptor, locked by the caller
 *
 * Uses the combined irq_mask_ack() callback when the chip provides one,
 * otherwise masks first and then acks (if an irq_ack callback exists).
 */
static inline void mask_ack_irq(struct irq_desc *desc)
{
        if (desc->irq_data.chip->irq_mask_ack) {
                desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
                irq_state_set_masked(desc);
        } else {
                mask_irq(desc);
                if (desc->irq_data.chip->irq_ack)
                        desc->irq_data.chip->irq_ack(&desc->irq_data);
        }
}
400 | ||
d4d5e089 | 401 | void mask_irq(struct irq_desc *desc) |
0b1adaa0 | 402 | { |
bf22ff45 JC |
403 | if (irqd_irq_masked(&desc->irq_data)) |
404 | return; | |
405 | ||
e2c0f8ff TG |
406 | if (desc->irq_data.chip->irq_mask) { |
407 | desc->irq_data.chip->irq_mask(&desc->irq_data); | |
6e40262e | 408 | irq_state_set_masked(desc); |
0b1adaa0 TG |
409 | } |
410 | } | |
411 | ||
d4d5e089 | 412 | void unmask_irq(struct irq_desc *desc) |
0b1adaa0 | 413 | { |
bf22ff45 JC |
414 | if (!irqd_irq_masked(&desc->irq_data)) |
415 | return; | |
416 | ||
0eda58b7 TG |
417 | if (desc->irq_data.chip->irq_unmask) { |
418 | desc->irq_data.chip->irq_unmask(&desc->irq_data); | |
6e40262e | 419 | irq_state_clr_masked(desc); |
0b1adaa0 | 420 | } |
dd87eb3a TG |
421 | } |
422 | ||
328a4978 TG |
423 | void unmask_threaded_irq(struct irq_desc *desc) |
424 | { | |
425 | struct irq_chip *chip = desc->irq_data.chip; | |
426 | ||
427 | if (chip->flags & IRQCHIP_EOI_THREADED) | |
428 | chip->irq_eoi(&desc->irq_data); | |
429 | ||
bf22ff45 | 430 | unmask_irq(desc); |
328a4978 TG |
431 | } |
432 | ||
/*
 * handle_nested_irq - Handle a nested irq from a irq thread
 * @irq: the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * threads context.
 */
void handle_nested_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action;
        irqreturn_t action_ret;

        might_sleep();

        raw_spin_lock_irq(&desc->lock);

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

        action = desc->action;
        if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
                /* Remember the event so it can be replayed on enable. */
                desc->istate |= IRQS_PENDING;
                goto out_unlock;
        }

        kstat_incr_irqs_this_cpu(desc);
        irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
        /* Drop the lock while the (sleeping) thread functions run. */
        raw_spin_unlock_irq(&desc->lock);

        action_ret = IRQ_NONE;
        for_each_action_of_desc(desc, action)
                action_ret |= action->thread_fn(action->irq, action->dev_id);

        if (!noirqdebug)
                note_interrupt(desc, action_ret);

        raw_spin_lock_irq(&desc->lock);
        irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
        raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
477 | ||
fe200ae4 TG |
478 | static bool irq_check_poll(struct irq_desc *desc) |
479 | { | |
6954b75b | 480 | if (!(desc->istate & IRQS_POLL_INPROGRESS)) |
fe200ae4 TG |
481 | return false; |
482 | return irq_wait_for_poll(desc); | |
483 | } | |
484 | ||
/*
 * irq_may_run - Check whether the flow handler may run right now
 * @desc:       Interrupt descriptor, locked by the caller
 *
 * Returns true when the interrupt should be handled, false when it was
 * deferred (wakeup handling or a concurrent poll on another CPU).
 */
static bool irq_may_run(struct irq_desc *desc)
{
        unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

        /*
         * If the interrupt is not in progress and is not an armed
         * wakeup interrupt, proceed.
         */
        if (!irqd_has_set(&desc->irq_data, mask))
                return true;

        /*
         * If the interrupt is an armed wakeup source, mark it pending
         * and suspended, disable it and notify the pm core about the
         * event.
         */
        if (irq_pm_check_wakeup(desc))
                return false;

        /*
         * Handle a potential concurrent poll on a different core.
         */
        return irq_check_poll(desc);
}
509 | ||
/**
 *      handle_simple_irq - Simple and software-decoded IRQs.
 *      @desc:  the interrupt description structure for this irq
 *
 *      Simple interrupts are either sent from a demultiplexing interrupt
 *      handler or come from hardware, where no interrupt hardware control
 *      is necessary.
 *
 *      Note: The caller is expected to handle the ack, clear, mask and
 *      unmask issues if necessary.
 */
void handle_simple_irq(struct irq_desc *desc)
{
        raw_spin_lock(&desc->lock);

        if (!irq_may_run(desc))
                goto out_unlock;

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
                /* No handler or disabled: record the event for replay. */
                desc->istate |= IRQS_PENDING;
                goto out_unlock;
        }

        kstat_incr_irqs_this_cpu(desc);
        handle_irq_event(desc);

out_unlock:
        raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
dd87eb3a | 542 | |
/**
 *      handle_untracked_irq - Simple and software-decoded IRQs.
 *      @desc:  the interrupt description structure for this irq
 *
 *      Untracked interrupts are sent from a demultiplexing interrupt
 *      handler when the demultiplexer does not know which device in its
 *      multiplexed irq domain generated the interrupt. IRQ's handled
 *      through here are not subjected to stats tracking, randomness, or
 *      spurious interrupt detection.
 *
 *      Note: Like handle_simple_irq, the caller is expected to handle
 *      the ack, clear, mask and unmask issues if necessary.
 */
void handle_untracked_irq(struct irq_desc *desc)
{
        unsigned int flags = 0;

        raw_spin_lock(&desc->lock);

        if (!irq_may_run(desc))
                goto out_unlock;

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
                desc->istate |= IRQS_PENDING;
                goto out_unlock;
        }

        desc->istate &= ~IRQS_PENDING;
        irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
        /* Drop the lock around the actual handler invocation. */
        raw_spin_unlock(&desc->lock);

        __handle_irq_event_percpu(desc, &flags);

        raw_spin_lock(&desc->lock);
        irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
        raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_untracked_irq);
585 | ||
/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
        /*
         * We need to unmask in the following cases:
         * - Standard level irq (IRQF_ONESHOT is not set)
         * - Oneshot irq which did not wake the thread (caused by a
         *   spurious interrupt or a primary handler handling it
         *   completely).
         */
        if (!irqd_irq_disabled(&desc->irq_data) &&
            irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
                unmask_irq(desc);
}
603 | ||
/**
 *      handle_level_irq - Level type irq handler
 *      @desc:  the interrupt description structure for this irq
 *
 *      Level type interrupts are active as long as the hardware line has
 *      the active level. This may require to mask the interrupt and unmask
 *      it after the associated handler has acknowledged the device, so the
 *      interrupt line is back to inactive.
 */
void handle_level_irq(struct irq_desc *desc)
{
        raw_spin_lock(&desc->lock);
        /* Level lines must be masked up front to stop re-triggering. */
        mask_ack_irq(desc);

        if (!irq_may_run(desc))
                goto out_unlock;

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

        /*
         * If it is disabled or no action is available,
         * keep it masked and get out of here.
         */
        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
                desc->istate |= IRQS_PENDING;
                goto out_unlock;
        }

        kstat_incr_irqs_this_cpu(desc);
        handle_irq_event(desc);

        cond_unmask_irq(desc);

out_unlock:
        raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
dd87eb3a | 641 | |
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
/* Invoke the optional per-descriptor hook before the fasteoi flow runs. */
static inline void preflow_handler(struct irq_desc *desc)
{
        if (desc->preflow_handler)
                desc->preflow_handler(&desc->irq_data);
}
#else
/* No preflow support configured: compile to nothing. */
static inline void preflow_handler(struct irq_desc *desc) { }
#endif
651 | ||
/*
 * cond_unmask_eoi_irq - Conditionally EOI and unmask a fasteoi interrupt
 * @desc:       Interrupt descriptor, locked by the caller
 * @chip:       The irq chip (snapshotted by the caller)
 *
 * Non-oneshot interrupts get an unconditional EOI. Oneshot interrupts
 * are only unmasked (with EOI) when no thread is still pending;
 * otherwise the EOI is issued here unless the chip defers it to the
 * thread (IRQCHIP_EOI_THREADED).
 */
static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
        if (!(desc->istate & IRQS_ONESHOT)) {
                chip->irq_eoi(&desc->irq_data);
                return;
        }
        /*
         * We need to unmask in the following cases:
         * - Oneshot irq which did not wake the thread (caused by a
         *   spurious interrupt or a primary handler handling it
         *   completely).
         */
        if (!irqd_irq_disabled(&desc->irq_data) &&
            irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
                chip->irq_eoi(&desc->irq_data);
                unmask_irq(desc);
        } else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
                chip->irq_eoi(&desc->irq_data);
        }
}
672 | ||
/**
 *      handle_fasteoi_irq - irq handler for transparent controllers
 *      @desc:  the interrupt description structure for this irq
 *
 *      Only a single callback will be issued to the chip: an ->eoi()
 *      call when the interrupt has been serviced. This enables support
 *      for modern forms of interrupt handlers, which handle the flow
 *      details in hardware, transparently.
 */
void handle_fasteoi_irq(struct irq_desc *desc)
{
        struct irq_chip *chip = desc->irq_data.chip;

        raw_spin_lock(&desc->lock);

        if (!irq_may_run(desc))
                goto out;

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

        /*
         * If its disabled or no action available
         * then mask it and get out of here:
         */
        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
                desc->istate |= IRQS_PENDING;
                mask_irq(desc);
                goto out;
        }

        kstat_incr_irqs_this_cpu(desc);
        /* Oneshot interrupts stay masked until the thread is done. */
        if (desc->istate & IRQS_ONESHOT)
                mask_irq(desc);

        preflow_handler(desc);
        handle_irq_event(desc);

        cond_unmask_eoi_irq(desc, chip);

        raw_spin_unlock(&desc->lock);
        return;
out:
        /* Deferred: still EOI unless the chip only EOIs handled irqs. */
        if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
                chip->irq_eoi(&desc->irq_data);
        raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
dd87eb3a TG |
720 | |
/**
 *      handle_edge_irq - edge type IRQ handler
 *      @desc:  the interrupt description structure for this irq
 *
 *      Interrupt occurs on the falling and/or rising edge of a hardware
 *      signal. The occurrence is latched into the irq controller hardware
 *      and must be acked in order to be reenabled. After the ack another
 *      interrupt can happen on the same source even before the first one
 *      is handled by the associated event handler. If this happens it
 *      might be necessary to disable (mask) the interrupt depending on the
 *      controller hardware. This requires to reenable the interrupt inside
 *      of the loop which handles the interrupts which have arrived while
 *      the handler was running. If all pending interrupts are handled, the
 *      loop is left.
 */
void handle_edge_irq(struct irq_desc *desc)
{
        raw_spin_lock(&desc->lock);

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

        if (!irq_may_run(desc)) {
                desc->istate |= IRQS_PENDING;
                mask_ack_irq(desc);
                goto out_unlock;
        }

        /*
         * If its disabled or no action available then mask it and get
         * out of here.
         */
        if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
                desc->istate |= IRQS_PENDING;
                mask_ack_irq(desc);
                goto out_unlock;
        }

        kstat_incr_irqs_this_cpu(desc);

        /* Start handling the irq */
        desc->irq_data.chip->irq_ack(&desc->irq_data);

        do {
                if (unlikely(!desc->action)) {
                        mask_irq(desc);
                        goto out_unlock;
                }

                /*
                 * When another irq arrived while we were handling
                 * one, we could have masked the irq.
                 * Re-enable it, if it was not disabled in the meantime.
                 */
                if (unlikely(desc->istate & IRQS_PENDING)) {
                        if (!irqd_irq_disabled(&desc->irq_data) &&
                            irqd_irq_masked(&desc->irq_data))
                                unmask_irq(desc);
                }

                handle_irq_event(desc);

        } while ((desc->istate & IRQS_PENDING) &&
                 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
        raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);
dd87eb3a | 789 | |
#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 *      handle_edge_eoi_irq - edge eoi type IRQ handler
 *      @desc:  the interrupt description structure for this irq
 *
 *      Similar as the above handle_edge_irq, but using eoi and w/o the
 *      mask/unmask logic.
 */
void handle_edge_eoi_irq(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);

        raw_spin_lock(&desc->lock);

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

        if (!irq_may_run(desc)) {
                desc->istate |= IRQS_PENDING;
                goto out_eoi;
        }

        /*
         * If its disabled or no action available then mask it and get
         * out of here.
         */
        if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
                desc->istate |= IRQS_PENDING;
                goto out_eoi;
        }

        kstat_incr_irqs_this_cpu(desc);

        do {
                if (unlikely(!desc->action))
                        goto out_eoi;

                handle_irq_event(desc);

        } while ((desc->istate & IRQS_PENDING) &&
                 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
        /* The EOI is issued on every exit path, handled or not. */
        chip->irq_eoi(&desc->irq_data);
        raw_spin_unlock(&desc->lock);
}
#endif
836 | ||
/**
 *      handle_percpu_irq - Per CPU local irq handler
 *      @desc:  the interrupt description structure for this irq
 *
 *      Per CPU interrupts on SMP machines without locking requirements
 */
void handle_percpu_irq(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);

        kstat_incr_irqs_this_cpu(desc);

        /* ack/eoi are optional for per-CPU chips; call only if present. */
        if (chip->irq_ack)
                chip->irq_ack(&desc->irq_data);

        handle_irq_event_percpu(desc);

        if (chip->irq_eoi)
                chip->irq_eoi(&desc->irq_data);
}
857 | ||
31d9d9b6 MZ |
858 | /** |
859 | * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids | |
31d9d9b6 MZ |
860 | * @desc: the interrupt description structure for this irq |
861 | * | |
862 | * Per CPU interrupts on SMP machines without locking requirements. Same as | |
863 | * handle_percpu_irq() above but with the following extras: | |
864 | * | |
865 | * action->percpu_dev_id is a pointer to percpu variables which | |
866 | * contain the real device id for the cpu on which this handler is | |
867 | * called | |
868 | */ | |
bd0b9ac4 | 869 | void handle_percpu_devid_irq(struct irq_desc *desc) |
31d9d9b6 MZ |
870 | { |
871 | struct irq_chip *chip = irq_desc_get_chip(desc); | |
872 | struct irqaction *action = desc->action; | |
bd0b9ac4 | 873 | unsigned int irq = irq_desc_get_irq(desc); |
31d9d9b6 MZ |
874 | irqreturn_t res; |
875 | ||
b51bf95c | 876 | kstat_incr_irqs_this_cpu(desc); |
31d9d9b6 MZ |
877 | |
878 | if (chip->irq_ack) | |
879 | chip->irq_ack(&desc->irq_data); | |
880 | ||
fc590c22 TG |
881 | if (likely(action)) { |
882 | trace_irq_handler_entry(irq, action); | |
883 | res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id)); | |
884 | trace_irq_handler_exit(irq, action, res); | |
885 | } else { | |
886 | unsigned int cpu = smp_processor_id(); | |
887 | bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled); | |
888 | ||
889 | if (enabled) | |
890 | irq_percpu_disable(desc, cpu); | |
891 | ||
892 | pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n", | |
893 | enabled ? " and unmasked" : "", irq, cpu); | |
894 | } | |
31d9d9b6 MZ |
895 | |
896 | if (chip->irq_eoi) | |
897 | chip->irq_eoi(&desc->irq_data); | |
898 | } | |
899 | ||
/*
 * Install @handle as flow handler for @desc.  @is_chained marks the
 * interrupt as a chained (demultiplexing) handler which is started
 * immediately.  Called with the descriptor locked by the callers
 * (see __irq_set_handler() / irq_set_chained_handler_and_data()).
 */
static void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
		     int is_chained, const char *name)
{
	if (!handle) {
		/* NULL handle means: uninstall, handled below */
		handle = handle_bad_irq;
	} else {
		struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		/*
		 * With hierarchical domains we might run into a
		 * situation where the outermost chip is not yet set
		 * up, but the inner chips are there. Instead of
		 * bailing we install the handler, but obviously we
		 * cannot enable/startup the interrupt at this point.
		 */
		while (irq_data) {
			if (irq_data->chip != &no_irq_chip)
				break;
			/*
			 * Bail out if the outer chip is not set up
			 * and the interrupt is supposed to be started
			 * right away.
			 */
			if (WARN_ON(is_chained))
				return;
			/* Try the parent */
			irq_data = irq_data->parent_data;
		}
#endif
		/* No usable chip anywhere in the (hierarchy of) irq_data */
		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
			return;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		/* Quiesce the line before tearing the handler down */
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		/* Drop the dummy chained_action installed below */
		if (is_chained)
			desc->action = NULL;
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		unsigned int type = irqd_get_trigger_type(&desc->irq_data);

		/*
		 * We're about to start this interrupt immediately,
		 * hence the need to set the trigger configuration.
		 * But the .set_type callback may have overridden the
		 * flow handler, ignoring that we're dealing with a
		 * chained interrupt. Reset it immediately because we
		 * do know better.
		 */
		if (type != IRQ_TYPE_NONE) {
			__irq_set_trigger(desc, type);
			desc->handle_irq = handle;
		}

		/* Chained interrupts are not available to drivers */
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		desc->action = &chained_action;
		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
	}
}
969 | ||
970 | void | |
971 | __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | |
972 | const char *name) | |
973 | { | |
974 | unsigned long flags; | |
975 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0); | |
976 | ||
977 | if (!desc) | |
978 | return; | |
979 | ||
980 | __irq_do_set_handler(desc, handle, is_chained, name); | |
02725e74 | 981 | irq_put_desc_busunlock(desc, flags); |
dd87eb3a | 982 | } |
3836ca08 | 983 | EXPORT_SYMBOL_GPL(__irq_set_handler); |
dd87eb3a | 984 | |
3b0f95be RK |
985 | void |
986 | irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle, | |
987 | void *data) | |
988 | { | |
989 | unsigned long flags; | |
990 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0); | |
991 | ||
992 | if (!desc) | |
993 | return; | |
994 | ||
af7080e0 | 995 | desc->irq_common_data.handler_data = data; |
2c4569ca | 996 | __irq_do_set_handler(desc, handle, 1, NULL); |
3b0f95be RK |
997 | |
998 | irq_put_desc_busunlock(desc, flags); | |
999 | } | |
1000 | EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data); | |
1001 | ||
/**
 * irq_set_chip_and_handler_name - set the chip and the flow handler for an irq
 * @irq:	irq number
 * @chip:	pointer to irq chip description structure
 * @handle:	flow handler to be installed
 * @name:	name stored in the irq descriptor (desc->name)
 *
 * Convenience wrapper around irq_set_chip() and __irq_set_handler()
 * for the common non-chained (is_chained == 0) case.
 */
void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
46f4f8f6 | 1010 | |
/**
 * irq_modify_status - modify the status flags of an interrupt line
 * @irq:	irq number
 * @clr:	bitmask of _IRQ_* settings flags to clear
 * @set:	bitmask of _IRQ_* settings flags to set
 *
 * Updates the irq settings and re-derives the corresponding IRQD_*
 * state bits in irq_data from the new settings.
 */
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;

	/*
	 * Warn when a driver sets the no autoenable flag on an already
	 * active interrupt.
	 */
	WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));

	irq_settings_clr_and_set(desc, clr, set);

	/* Wipe the derived state bits, then rebuild them from the settings */
	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
0fdb4b25 DD |
1043 | |
1044 | /** | |
1045 | * irq_cpu_online - Invoke all irq_cpu_online functions. | |
1046 | * | |
1047 | * Iterate through all irqs and invoke the chip.irq_cpu_online() | |
1048 | * for each. | |
1049 | */ | |
1050 | void irq_cpu_online(void) | |
1051 | { | |
1052 | struct irq_desc *desc; | |
1053 | struct irq_chip *chip; | |
1054 | unsigned long flags; | |
1055 | unsigned int irq; | |
1056 | ||
1057 | for_each_active_irq(irq) { | |
1058 | desc = irq_to_desc(irq); | |
1059 | if (!desc) | |
1060 | continue; | |
1061 | ||
1062 | raw_spin_lock_irqsave(&desc->lock, flags); | |
1063 | ||
1064 | chip = irq_data_get_irq_chip(&desc->irq_data); | |
b3d42232 TG |
1065 | if (chip && chip->irq_cpu_online && |
1066 | (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || | |
32f4125e | 1067 | !irqd_irq_disabled(&desc->irq_data))) |
0fdb4b25 DD |
1068 | chip->irq_cpu_online(&desc->irq_data); |
1069 | ||
1070 | raw_spin_unlock_irqrestore(&desc->lock, flags); | |
1071 | } | |
1072 | } | |
1073 | ||
1074 | /** | |
1075 | * irq_cpu_offline - Invoke all irq_cpu_offline functions. | |
1076 | * | |
1077 | * Iterate through all irqs and invoke the chip.irq_cpu_offline() | |
1078 | * for each. | |
1079 | */ | |
1080 | void irq_cpu_offline(void) | |
1081 | { | |
1082 | struct irq_desc *desc; | |
1083 | struct irq_chip *chip; | |
1084 | unsigned long flags; | |
1085 | unsigned int irq; | |
1086 | ||
1087 | for_each_active_irq(irq) { | |
1088 | desc = irq_to_desc(irq); | |
1089 | if (!desc) | |
1090 | continue; | |
1091 | ||
1092 | raw_spin_lock_irqsave(&desc->lock, flags); | |
1093 | ||
1094 | chip = irq_data_get_irq_chip(&desc->irq_data); | |
b3d42232 TG |
1095 | if (chip && chip->irq_cpu_offline && |
1096 | (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || | |
32f4125e | 1097 | !irqd_irq_disabled(&desc->irq_data))) |
0fdb4b25 DD |
1098 | chip->irq_cpu_offline(&desc->irq_data); |
1099 | ||
1100 | raw_spin_unlock_irqrestore(&desc->lock, flags); | |
1101 | } | |
1102 | } | |
85f08c17 JL |
1103 | |
1104 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | |
3cfeffc2 SA |
1105 | /** |
1106 | * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if | |
1107 | * NULL) | |
1108 | * @data: Pointer to interrupt specific data | |
1109 | */ | |
1110 | void irq_chip_enable_parent(struct irq_data *data) | |
1111 | { | |
1112 | data = data->parent_data; | |
1113 | if (data->chip->irq_enable) | |
1114 | data->chip->irq_enable(data); | |
1115 | else | |
1116 | data->chip->irq_unmask(data); | |
1117 | } | |
1118 | ||
1119 | /** | |
1120 | * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if | |
1121 | * NULL) | |
1122 | * @data: Pointer to interrupt specific data | |
1123 | */ | |
1124 | void irq_chip_disable_parent(struct irq_data *data) | |
1125 | { | |
1126 | data = data->parent_data; | |
1127 | if (data->chip->irq_disable) | |
1128 | data->chip->irq_disable(data); | |
1129 | else | |
1130 | data->chip->irq_mask(data); | |
1131 | } | |
1132 | ||
85f08c17 JL |
1133 | /** |
1134 | * irq_chip_ack_parent - Acknowledge the parent interrupt | |
1135 | * @data: Pointer to interrupt specific data | |
1136 | */ | |
1137 | void irq_chip_ack_parent(struct irq_data *data) | |
1138 | { | |
1139 | data = data->parent_data; | |
1140 | data->chip->irq_ack(data); | |
1141 | } | |
a4289dc2 | 1142 | EXPORT_SYMBOL_GPL(irq_chip_ack_parent); |
85f08c17 | 1143 | |
56e8abab YC |
1144 | /** |
1145 | * irq_chip_mask_parent - Mask the parent interrupt | |
1146 | * @data: Pointer to interrupt specific data | |
1147 | */ | |
1148 | void irq_chip_mask_parent(struct irq_data *data) | |
1149 | { | |
1150 | data = data->parent_data; | |
1151 | data->chip->irq_mask(data); | |
1152 | } | |
52b2a05f | 1153 | EXPORT_SYMBOL_GPL(irq_chip_mask_parent); |
56e8abab YC |
1154 | |
1155 | /** | |
1156 | * irq_chip_unmask_parent - Unmask the parent interrupt | |
1157 | * @data: Pointer to interrupt specific data | |
1158 | */ | |
1159 | void irq_chip_unmask_parent(struct irq_data *data) | |
1160 | { | |
1161 | data = data->parent_data; | |
1162 | data->chip->irq_unmask(data); | |
1163 | } | |
52b2a05f | 1164 | EXPORT_SYMBOL_GPL(irq_chip_unmask_parent); |
56e8abab YC |
1165 | |
1166 | /** | |
1167 | * irq_chip_eoi_parent - Invoke EOI on the parent interrupt | |
1168 | * @data: Pointer to interrupt specific data | |
1169 | */ | |
1170 | void irq_chip_eoi_parent(struct irq_data *data) | |
1171 | { | |
1172 | data = data->parent_data; | |
1173 | data->chip->irq_eoi(data); | |
1174 | } | |
52b2a05f | 1175 | EXPORT_SYMBOL_GPL(irq_chip_eoi_parent); |
56e8abab YC |
1176 | |
1177 | /** | |
1178 | * irq_chip_set_affinity_parent - Set affinity on the parent interrupt | |
1179 | * @data: Pointer to interrupt specific data | |
1180 | * @dest: The affinity mask to set | |
1181 | * @force: Flag to enforce setting (disable online checks) | |
1182 | * | |
1183 | * Conditinal, as the underlying parent chip might not implement it. | |
1184 | */ | |
1185 | int irq_chip_set_affinity_parent(struct irq_data *data, | |
1186 | const struct cpumask *dest, bool force) | |
1187 | { | |
1188 | data = data->parent_data; | |
1189 | if (data->chip->irq_set_affinity) | |
1190 | return data->chip->irq_set_affinity(data, dest, force); | |
b7560de1 GS |
1191 | |
1192 | return -ENOSYS; | |
1193 | } | |
1194 | ||
1195 | /** | |
1196 | * irq_chip_set_type_parent - Set IRQ type on the parent interrupt | |
1197 | * @data: Pointer to interrupt specific data | |
1198 | * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h | |
1199 | * | |
1200 | * Conditional, as the underlying parent chip might not implement it. | |
1201 | */ | |
1202 | int irq_chip_set_type_parent(struct irq_data *data, unsigned int type) | |
1203 | { | |
1204 | data = data->parent_data; | |
1205 | ||
1206 | if (data->chip->irq_set_type) | |
1207 | return data->chip->irq_set_type(data, type); | |
56e8abab YC |
1208 | |
1209 | return -ENOSYS; | |
1210 | } | |
52b2a05f | 1211 | EXPORT_SYMBOL_GPL(irq_chip_set_type_parent); |
56e8abab | 1212 | |
85f08c17 JL |
1213 | /** |
1214 | * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware | |
1215 | * @data: Pointer to interrupt specific data | |
1216 | * | |
1217 | * Iterate through the domain hierarchy of the interrupt and check | |
1218 | * whether a hw retrigger function exists. If yes, invoke it. | |
1219 | */ | |
1220 | int irq_chip_retrigger_hierarchy(struct irq_data *data) | |
1221 | { | |
1222 | for (data = data->parent_data; data; data = data->parent_data) | |
1223 | if (data->chip && data->chip->irq_retrigger) | |
1224 | return data->chip->irq_retrigger(data); | |
1225 | ||
6d4affea | 1226 | return 0; |
85f08c17 | 1227 | } |
08b55e2a | 1228 | |
0a4377de JL |
1229 | /** |
1230 | * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt | |
1231 | * @data: Pointer to interrupt specific data | |
8505a81b | 1232 | * @vcpu_info: The vcpu affinity information |
0a4377de JL |
1233 | */ |
1234 | int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info) | |
1235 | { | |
1236 | data = data->parent_data; | |
1237 | if (data->chip->irq_set_vcpu_affinity) | |
1238 | return data->chip->irq_set_vcpu_affinity(data, vcpu_info); | |
1239 | ||
1240 | return -ENOSYS; | |
1241 | } | |
1242 | ||
08b55e2a MZ |
1243 | /** |
1244 | * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt | |
1245 | * @data: Pointer to interrupt specific data | |
1246 | * @on: Whether to set or reset the wake-up capability of this irq | |
1247 | * | |
1248 | * Conditional, as the underlying parent chip might not implement it. | |
1249 | */ | |
1250 | int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on) | |
1251 | { | |
1252 | data = data->parent_data; | |
1253 | if (data->chip->irq_set_wake) | |
1254 | return data->chip->irq_set_wake(data, on); | |
1255 | ||
1256 | return -ENOSYS; | |
1257 | } | |
85f08c17 | 1258 | #endif |
515085ef JL |
1259 | |
/**
 * irq_chip_compose_msi_msg - Compose msi message for an irq chip
 * @data:	Pointer to interrupt specific data
 * @msg:	Pointer to the MSI message
 *
 * For hierarchical domains we find the first chip in the hierarchy
 * which implements the irq_compose_msi_msg callback. For non
 * hierarchical we use the top level chip.
 *
 * Returns 0 on success, -ENOSYS if no chip in the (hierarchy of)
 * irq_data implements irq_compose_msi_msg.
 */
int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct irq_data *pos = NULL;

	/*
	 * Without hierarchy support the #ifdef removes the for statement
	 * and only the top level irq_data is inspected by the if below.
	 */
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	for (; data; data = data->parent_data)
#endif
		if (data->chip && data->chip->irq_compose_msi_msg)
			pos = data;
	if (!pos)
		return -ENOSYS;

	pos->chip->irq_compose_msi_msg(pos, msg);

	return 0;
}
be45beb2 JH |
1285 | |
1286 | /** | |
1287 | * irq_chip_pm_get - Enable power for an IRQ chip | |
1288 | * @data: Pointer to interrupt specific data | |
1289 | * | |
1290 | * Enable the power to the IRQ chip referenced by the interrupt data | |
1291 | * structure. | |
1292 | */ | |
1293 | int irq_chip_pm_get(struct irq_data *data) | |
1294 | { | |
1295 | int retval; | |
1296 | ||
1297 | if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) { | |
1298 | retval = pm_runtime_get_sync(data->chip->parent_device); | |
1299 | if (retval < 0) { | |
1300 | pm_runtime_put_noidle(data->chip->parent_device); | |
1301 | return retval; | |
1302 | } | |
1303 | } | |
1304 | ||
1305 | return 0; | |
1306 | } | |
1307 | ||
1308 | /** | |
1309 | * irq_chip_pm_put - Disable power for an IRQ chip | |
1310 | * @data: Pointer to interrupt specific data | |
1311 | * | |
1312 | * Disable the power to the IRQ chip referenced by the interrupt data | |
1313 | * structure, belongs. Note that power will only be disabled, once this | |
1314 | * function has been called for all IRQs that have called irq_chip_pm_get(). | |
1315 | */ | |
1316 | int irq_chip_pm_put(struct irq_data *data) | |
1317 | { | |
1318 | int retval = 0; | |
1319 | ||
1320 | if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) | |
1321 | retval = pm_runtime_put(data->chip->parent_device); | |
1322 | ||
1323 | return (retval < 0) ? retval : 0; | |
1324 | } |