/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/core-api/genericirq.rst
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>

#include <trace/events/irq.h>

#include "internals.h"

static irqreturn_t bad_chained_irq(int irq, void *dev_id)
{
	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
	return IRQ_NONE;
}

/*
 * Chained handlers should never call action on their IRQ. This default
 * action will emit a warning if such a thing happens.
 */
struct irqaction chained_action = {
	.handler = bad_chained_irq,
};

/**
 * irq_set_chip - set the irq chip for an irq
 * @irq:	irq number
 * @chip:	pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs.
	 */
	irq_mark_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);
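
/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * legacy board setup would typically pair irq_set_chip() with a trigger
 * type and a flow handler when wiring up a static irq number:
 *
 *	irq_set_chip(MY_IRQ, &my_irq_chip);
 *	irq_set_irq_type(MY_IRQ, IRQ_TYPE_LEVEL_HIGH);
 *	irq_set_handler(MY_IRQ, handle_level_irq);
 *
 * MY_IRQ and my_irq_chip are made-up names. Most code uses the combined
 * helper irq_set_chip_and_handler() instead of the individual calls.
 */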

/**
 * irq_set_irq_type - set the irq trigger type for an irq
 * @irq:	irq number
 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	ret = __irq_set_trigger(desc, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);

/**
 * irq_set_handler_data - set irq handler data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to interrupt specific data
 *
 * Set the interrupt handler data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 * @irq_base:	Interrupt number base
 * @irq_offset:	Interrupt number offset
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq:	Interrupt number
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}

/**
 * irq_set_chip_data - set irq chip data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_clr_started(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
}

static void irq_state_set_started(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
}

enum {
	IRQ_STARTUP_NORMAL,
	IRQ_STARTUP_MANAGED,
	IRQ_STARTUP_ABORT,
};

#ifdef CONFIG_SMP
static int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return IRQ_STARTUP_NORMAL;

	irqd_clr_managed_shutdown(d);

	if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * Catch code which fiddles with enable_irq() on a managed
		 * and potentially shutdown IRQ. Chained interrupt
		 * installment or irq auto probing should not happen on
		 * managed irqs either. Emit a warning, break the affinity
		 * and start it up as a normal interrupt.
		 */
		if (WARN_ON_ONCE(force))
			return IRQ_STARTUP_NORMAL;
		/*
		 * The interrupt was requested, but there is no online CPU
		 * in its affinity mask. Put it into managed shutdown
		 * state and let the cpu hotplug mechanism start it up once
		 * a CPU in the mask becomes available.
		 */
		irqd_set_managed_shutdown(d);
		return IRQ_STARTUP_ABORT;
	}
	return IRQ_STARTUP_MANAGED;
}
#else
static __always_inline int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
{
	return IRQ_STARTUP_NORMAL;
}
#endif

static int __irq_startup(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	int ret = 0;

	irq_domain_activate_irq(d);
	if (d->chip->irq_startup) {
		ret = d->chip->irq_startup(d);
		irq_state_clr_disabled(desc);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	irq_state_set_started(desc);
	return ret;
}

int irq_startup(struct irq_desc *desc, bool resend, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct cpumask *aff = irq_data_get_affinity_mask(d);
	int ret = 0;

	desc->depth = 0;

	if (irqd_is_started(d)) {
		irq_enable(desc);
	} else {
		switch (__irq_startup_managed(desc, aff, force)) {
		case IRQ_STARTUP_NORMAL:
			ret = __irq_startup(desc);
			irq_setup_affinity(desc);
			break;
		case IRQ_STARTUP_MANAGED:
			ret = __irq_startup(desc);
			irq_set_affinity_locked(d, aff, false);
			break;
		case IRQ_STARTUP_ABORT:
			return 0;
		}
	}
	if (resend)
		check_irq_resend(desc);

	return ret;
}

static void __irq_disable(struct irq_desc *desc, bool mask);

void irq_shutdown(struct irq_desc *desc)
{
	if (irqd_is_started(&desc->irq_data)) {
		desc->depth = 1;
		if (desc->irq_data.chip->irq_shutdown) {
			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
			irq_state_set_disabled(desc);
			irq_state_set_masked(desc);
		} else {
			__irq_disable(desc, true);
		}
		irq_state_clr_started(desc);
	}
	/*
	 * This must be called even if the interrupt was never started up,
	 * because the activation can happen before the interrupt is
	 * available for request/startup. It has its own state tracking so
	 * it's safe to call it unconditionally.
	 */
	irq_domain_deactivate_irq(&desc->irq_data);
}

void irq_enable(struct irq_desc *desc)
{
	if (!irqd_irq_disabled(&desc->irq_data)) {
		unmask_irq(desc);
	} else {
		irq_state_clr_disabled(desc);
		if (desc->irq_data.chip->irq_enable) {
			desc->irq_data.chip->irq_enable(&desc->irq_data);
			irq_state_clr_masked(desc);
		} else {
			unmask_irq(desc);
		}
	}
}

static void __irq_disable(struct irq_desc *desc, bool mask)
{
	if (irqd_irq_disabled(&desc->irq_data)) {
		if (mask)
			mask_irq(desc);
	} else {
		irq_state_set_disabled(desc);
		if (desc->irq_data.chip->irq_disable) {
			desc->irq_data.chip->irq_disable(&desc->irq_data);
			irq_state_set_masked(desc);
		} else if (mask) {
			mask_irq(desc);
		}
	}
}

/**
 * irq_disable - Mark interrupt disabled
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 *
 * If the interrupt chip does not implement the irq_disable callback,
 * a driver can disable the lazy approach for a particular irq line by
 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
 * be used for devices which cannot disable the interrupt at the
 * device level under certain circumstances and have to use
 * disable_irq[_nosync] instead.
 */
void irq_disable(struct irq_desc *desc)
{
	__irq_disable(desc, irq_settings_disable_unlazy(desc));
}
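
/*
 * Usage sketch (hypothetical driver code): a device which cannot be
 * quiesced at the device level and must not raise interrupts while
 * "disabled" can opt out of the lazy approach at setup time:
 *
 *	irq_set_status_flags(my_irq, IRQ_DISABLE_UNLAZY);
 *
 * my_irq is a made-up name. With this flag set, disable_irq[_nosync]()
 * masks the line in hardware immediately instead of only marking it
 * disabled.
 */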

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack) {
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
		irq_state_set_masked(desc);
	} else {
		mask_irq(desc);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
}

void mask_irq(struct irq_desc *desc)
{
	if (irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (!irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	unmask_irq(desc);
}

/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq:	the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = IRQ_NONE;
	for_each_action_of_desc(desc, action)
		action_ret |= action->thread_fn(action->irq, action->dev_id);

	if (!noirqdebug)
		note_interrupt(desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
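
/*
 * Usage sketch (hypothetical driver code): an I2C irq-chip whose child
 * interrupts were set up with irq_set_nested_thread() dispatches them
 * from its own threaded handler:
 *
 *	static irqreturn_t my_expander_thread(int irq, void *data)
 *	{
 *		struct my_expander *x = data;
 *		unsigned long pending = my_expander_read_status(x);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, x->nlines)
 *			handle_nested_irq(irq_find_mapping(x->domain, bit));
 *		return IRQ_HANDLED;
 *	}
 *
 * my_expander and my_expander_read_status() are made up; the point is
 * that handle_nested_irq() runs the child's thread_fn directly in this
 * thread's context instead of raising a hard interrupt.
 */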

static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

static bool irq_may_run(struct irq_desc *desc)
{
	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

	/*
	 * If the interrupt is not in progress and is not an armed
	 * wakeup interrupt, proceed.
	 */
	if (!irqd_has_set(&desc->irq_data, mask))
		return true;

	/*
	 * If the interrupt is an armed wakeup source, mark it pending
	 * and suspended, disable it and notify the pm core about the
	 * event.
	 */
	if (irq_pm_check_wakeup(desc))
		return false;

	/*
	 * Handle a potential concurrent poll on a different core.
	 */
	return irq_check_poll(desc);
}

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @desc:	the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void handle_simple_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
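
/*
 * Usage sketch (hypothetical): handle_simple_irq is the usual flow
 * handler for demultiplexed child interrupts which need no hardware
 * ack/mask/unmask handling, e.g. in an irq domain's map callback:
 *
 *	static int my_domain_map(struct irq_domain *d, unsigned int virq,
 *				 irq_hw_number_t hw)
 *	{
 *		irq_set_chip_and_handler(virq, &my_child_chip,
 *					 handle_simple_irq);
 *		return 0;
 *	}
 *
 * my_child_chip is a made-up name; the parent handler then invokes the
 * child via generic_handle_irq().
 */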

/**
 * handle_untracked_irq - Simple and software-decoded IRQs.
 * @desc:	the interrupt description structure for this irq
 *
 * Untracked interrupts are sent from a demultiplexing interrupt
 * handler when the demultiplexer does not know which device in its
 * multiplexed irq domain generated the interrupt. IRQs handled
 * through here are not subjected to stats tracking, randomness, or
 * spurious interrupt detection.
 *
 * Note: Like handle_simple_irq, the caller is expected to handle
 * the ack, clear, mask and unmask issues if necessary.
 */
void handle_untracked_irq(struct irq_desc *desc)
{
	unsigned int flags = 0;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	desc->istate &= ~IRQS_PENDING;
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock(&desc->lock);

	__handle_irq_event_percpu(desc, &flags);

	raw_spin_lock(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_untracked_irq);

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 * handle_level_irq - Level type irq handler
 * @desc:	the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require masking the interrupt and
 * unmasking it after the associated handler has acknowledged the
 * device, so the interrupt line is back to inactive.
 */
void handle_level_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here.
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
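
/*
 * Usage sketch (hypothetical): a controller with level triggered lines
 * installs this flow handler together with its chip:
 *
 *	irq_set_chip_and_handler(irq, &my_level_chip, handle_level_irq);
 *
 * my_level_chip is a made-up name; it would provide at least irq_mask
 * and irq_unmask (ideally irq_mask_ack), since this flow keeps the line
 * masked while the event handler runs.
 */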

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @desc:	the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void handle_fasteoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
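
/*
 * Usage sketch (hypothetical): a chip for such a transparent controller
 * often needs little more than an irq_eoi callback:
 *
 *	static struct irq_chip my_eoi_chip = {
 *		.name		= "my-eoi",
 *		.irq_eoi	= my_eoi,
 *		.irq_mask	= my_mask,
 *		.irq_unmask	= my_unmask,
 *	};
 *
 *	irq_set_chip_and_handler(irq, &my_eoi_chip, handle_fasteoi_irq);
 *
 * my_eoi/my_mask/my_unmask are made-up callbacks; mask/unmask are only
 * exercised on the disabled and oneshot paths above.
 */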

/**
 * handle_edge_irq - edge type IRQ handler
 * @desc:	the interrupt description structure for this irq
 *
 * An interrupt occurs on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be reenabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires re-enabling the interrupt inside
 * of the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void handle_edge_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	/*
	 * If it's disabled or no action is available then mask it and get
	 * out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 * handle_edge_eoi_irq - edge eoi type IRQ handler
 * @desc:	the interrupt description structure for this irq
 *
 * Similar to handle_edge_irq above, but using eoi and without the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	/*
	 * If it's disabled or no action is available then mark it
	 * pending and get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	kstat_incr_irqs_this_cpu(desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void handle_percpu_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	if (likely(action)) {
		trace_irq_handler_entry(irq, action);
		res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
		trace_irq_handler_exit(irq, action, res);
	} else {
		unsigned int cpu = smp_processor_id();
		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);

		if (enabled)
			irq_percpu_disable(desc, cpu);

		pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
			    enabled ? " and unmasked" : "", irq, cpu);
	}

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
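
/*
 * Usage sketch (hypothetical): per CPU devid interrupts are requested
 * with a percpu dev_id pointer, which this handler dereferences for the
 * local CPU:
 *
 *	static DEFINE_PER_CPU(struct my_pmu_cpu, my_pmu_cpu);
 *
 *	err = request_percpu_irq(irq, my_pmu_handler, "my-pmu",
 *				 &my_pmu_cpu);
 *
 * my_pmu_cpu and my_pmu_handler are made up; on each CPU the handler
 * receives raw_cpu_ptr(&my_pmu_cpu) as its dev_id.
 */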

static void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
		     int is_chained, const char *name)
{
	if (!handle) {
		handle = handle_bad_irq;
	} else {
		struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		/*
		 * With hierarchical domains we might run into a
		 * situation where the outermost chip is not yet set
		 * up, but the inner chips are there. Instead of
		 * bailing we install the handler, but obviously we
		 * cannot enable/startup the interrupt at this point.
		 */
		while (irq_data) {
			if (irq_data->chip != &no_irq_chip)
				break;
			/*
			 * Bail out if the outer chip is not set up
			 * and the interrupt is supposed to be started
			 * right away.
			 */
			if (WARN_ON(is_chained))
				return;
			/* Try the parent */
			irq_data = irq_data->parent_data;
		}
#endif
		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
			return;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		if (is_chained)
			desc->action = NULL;
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		unsigned int type = irqd_get_trigger_type(&desc->irq_data);

		/*
		 * We're about to start this interrupt immediately,
		 * hence the need to set the trigger configuration.
		 * But the .set_type callback may have overridden the
		 * flow handler, ignoring that we're dealing with a
		 * chained interrupt. Reset it immediately because we
		 * do know better.
		 */
		if (type != IRQ_TYPE_NONE) {
			__irq_set_trigger(desc, type);
			desc->handle_irq = handle;
		}

		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		desc->action = &chained_action;
		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
	}
}

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	__irq_do_set_handler(desc, handle, is_chained, name);
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
				 void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	desc->irq_common_data.handler_data = data;
	__irq_do_set_handler(desc, handle, 1, NULL);

	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);
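
/*
 * Usage sketch (hypothetical driver code): a GPIO controller hooks its
 * upstream parent interrupt to a chained flow handler and passes itself
 * as handler data:
 *
 *	static void my_gpio_demux(struct irq_desc *desc)
 *	{
 *		struct my_gpio *g = irq_desc_get_handler_data(desc);
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		unsigned long pending;
 *		int bit;
 *
 *		chained_irq_enter(chip, desc);
 *		pending = my_gpio_pending(g);
 *		for_each_set_bit(bit, &pending, g->ngpio)
 *			generic_handle_irq(irq_find_mapping(g->domain, bit));
 *		chained_irq_exit(chip, desc);
 *	}
 *
 *	irq_set_chained_handler_and_data(parent_irq, my_gpio_demux, g);
 *
 * my_gpio and my_gpio_pending() are made up. Note that a chained
 * handler never gets a request_irq() style action of its own; the
 * chained_action above exists only to catch misuse.
 */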

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;

	/*
	 * Warn when a driver sets the no autoenable flag on an already
	 * active interrupt.
	 */
	WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));

	irq_settings_clr_and_set(desc, clr, set);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
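
/*
 * Usage sketch (hypothetical): a driver that must not have its line
 * enabled automatically at request time can adjust the status bits
 * before calling request_irq():
 *
 *	irq_modify_status(my_irq, IRQ_NOREQUEST, IRQ_NOAUTOEN);
 *
 * my_irq is a made-up name. This clears IRQ_NOREQUEST (making the line
 * requestable) and sets IRQ_NOAUTOEN, so request_irq() leaves the
 * interrupt disabled until an explicit enable_irq().
 */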

/**
 * irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_online()
 * for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/**
 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
 * NULL)
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_enable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_enable)
		data->chip->irq_enable(data);
	else
		data->chip->irq_unmask(data);
}

/**
 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
 * NULL)
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_disable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_disable)
		data->chip->irq_disable(data);
	else
		data->chip->irq_mask(data);
}

/**
 * irq_chip_ack_parent - Acknowledge the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_ack_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_ack(data);
}
EXPORT_SYMBOL_GPL(irq_chip_ack_parent);

/**
 * irq_chip_mask_parent - Mask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_mask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_parent);

/**
 * irq_chip_unmask_parent - Unmask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_unmask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);

/**
 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_eoi_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_eoi(data);
}
EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);
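
/*
 * Usage sketch (hypothetical): in a hierarchical domain, a child chip
 * which adds nothing of its own to mask/unmask/eoi simply points those
 * callbacks at the parent helpers:
 *
 *	static struct irq_chip my_msi_chip = {
 *		.name		  = "my-msi",
 *		.irq_mask	  = irq_chip_mask_parent,
 *		.irq_unmask	  = irq_chip_unmask_parent,
 *		.irq_eoi	  = irq_chip_eoi_parent,
 *		.irq_set_affinity = irq_chip_set_affinity_parent,
 *	};
 *
 * my_msi_chip is a made-up name; each helper forwards the operation one
 * level up via irq_data->parent_data.
 */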

/**
 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @dest:	The affinity mask to set
 * @force:	Flag to enforce setting (disable online checks)
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_affinity_parent(struct irq_data *data,
				 const struct cpumask *dest, bool force)
{
	data = data->parent_data;
	if (data->chip->irq_set_affinity)
		return data->chip->irq_set_affinity(data, dest, force);

	return -ENOSYS;
}

/**
 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
{
	data = data->parent_data;

	if (data->chip->irq_set_type)
		return data->chip->irq_set_type(data, type);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);

/**
 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
 * @data:	Pointer to interrupt specific data
 *
 * Iterate through the domain hierarchy of the interrupt and check
 * whether a hw retrigger function exists. If yes, invoke it.
 */
int irq_chip_retrigger_hierarchy(struct irq_data *data)
{
	for (data = data->parent_data; data; data = data->parent_data)
		if (data->chip && data->chip->irq_retrigger)
			return data->chip->irq_retrigger(data);

	return 0;
}

/**
 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @vcpu_info:	The vcpu affinity information
 */
int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
{
	data = data->parent_data;
	if (data->chip->irq_set_vcpu_affinity)
		return data->chip->irq_set_vcpu_affinity(data, vcpu_info);

	return -ENOSYS;
}

/**
 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @on:		Whether to set or reset the wake-up capability of this irq
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
{
	data = data->parent_data;
	if (data->chip->irq_set_wake)
		return data->chip->irq_set_wake(data, on);

	return -ENOSYS;
}
#endif

/**
 * irq_chip_compose_msi_msg - Compose an MSI message for an irq chip
 * @data:	Pointer to interrupt specific data
 * @msg:	Pointer to the MSI message
 *
 * For hierarchical domains we find the first chip in the hierarchy
 * which implements the irq_compose_msi_msg callback. For
 * non-hierarchical domains we use the top level chip.
 */
int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct irq_data *pos = NULL;

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	for (; data; data = data->parent_data)
#endif
		if (data->chip && data->chip->irq_compose_msi_msg)
			pos = data;
	if (!pos)
		return -ENOSYS;

	pos->chip->irq_compose_msi_msg(pos, msg);

	return 0;
}

/**
 * irq_chip_pm_get - Enable power for an IRQ chip
 * @data:	Pointer to interrupt specific data
 *
 * Enable the power to the IRQ chip referenced by the interrupt data
 * structure.
 */
int irq_chip_pm_get(struct irq_data *data)
{
	int retval;

	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) {
		retval = pm_runtime_get_sync(data->chip->parent_device);
		if (retval < 0) {
			pm_runtime_put_noidle(data->chip->parent_device);
			return retval;
		}
	}

	return 0;
}
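
/*
 * Usage sketch (hypothetical): the core calls irq_chip_pm_get() and
 * irq_chip_pm_put() around request_irq()/free_irq() when the chip has a
 * parent_device, but code managing an irq chip's power explicitly would
 * pair them the same way:
 *
 *	ret = irq_chip_pm_get(irq_get_irq_data(my_irq));
 *	if (ret)
 *		return ret;
 *	...
 *	irq_chip_pm_put(irq_get_irq_data(my_irq));
 *
 * my_irq is a made-up name; calls must balance, as the underlying
 * runtime PM reference counts.
 */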

/**
 * irq_chip_pm_put - Disable power for an IRQ chip
 * @data:	Pointer to interrupt specific data
 *
 * Disable the power to the IRQ chip referenced by the interrupt data
 * structure. Note that power will only be disabled once this function
 * has been called for all IRQs that have called irq_chip_pm_get().
 */
int irq_chip_pm_put(struct irq_data *data)
{
	int retval = 0;

	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device)
		retval = pm_runtime_put(data->chip->parent_device);

	return (retval < 0) ? retval : 0;
}