/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>

#include <trace/events/irq.h>

#include "internals.h"

/**
 * irq_set_chip - set the irq chip for an irq
 * @irq: irq number
 * @chip: pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs.
	 */
	irq_mark_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);
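
/*
 * Usage sketch (illustrative only, not part of this file): a driver for a
 * hypothetical "foo" interrupt controller would typically install its chip
 * and a flow handler for each interrupt it owns:
 *
 *	static struct irq_chip foo_irq_chip = {
 *		.name		= "FOO",
 *		.irq_mask	= foo_irq_mask,
 *		.irq_unmask	= foo_irq_unmask,
 *		.irq_ack	= foo_irq_ack,
 *	};
 *
 *	irq_set_chip(irq, &foo_irq_chip);
 *	irq_set_handler(irq, handle_level_irq);
 *
 * or combine both steps with irq_set_chip_and_handler().
 */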

/**
 * irq_set_irq_type - set the irq trigger type for an irq
 * @irq: irq number
 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	type &= IRQ_TYPE_SENSE_MASK;
	ret = __irq_set_trigger(desc, irq, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);
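
/*
 * Usage sketch (illustrative only): a board fixup might force a line to a
 * specific trigger before it is requested, e.g.:
 *
 *	if (irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING))
 *		pr_warn("could not set trigger for irq %u\n", irq);
 *
 * Normally the trigger comes from the request_irq() flags or from the
 * firmware description instead of an explicit call like this.
 */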

/**
 * irq_set_handler_data - set irq handler data for an irq
 * @irq: Interrupt number
 * @data: Pointer to interrupt specific data
 *
 * Set the irq handler data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 * @irq_base: Interrupt number base
 * @irq_offset: Interrupt number offset
 * @entry: Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq: Interrupt number
 * @entry: Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}

/**
 * irq_set_chip_data - set irq chip data for an irq
 * @irq: Interrupt number
 * @data: Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);
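
/*
 * Usage sketch (illustrative only): chip data typically carries the per
 * controller state which the irq_chip callbacks need, e.g. a mapped register
 * base for a hypothetical "foo" controller:
 *
 *	struct foo_irq_priv {
 *		void __iomem *regs;
 *	};
 *
 *	irq_set_chip_data(irq, priv);
 *
 *	static void foo_irq_mask(struct irq_data *d)
 *	{
 *		struct foo_irq_priv *priv = irq_data_get_irq_chip_data(d);
 *
 *		writel(BIT(d->hwirq), priv->regs + FOO_MASK_SET);
 *	}
 *
 * where FOO_MASK_SET is a made-up register offset.
 */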

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

int irq_startup(struct irq_desc *desc, bool resend)
{
	int ret = 0;

	irq_state_clr_disabled(desc);
	desc->depth = 0;

	irq_domain_activate_irq(&desc->irq_data);
	if (desc->irq_data.chip->irq_startup) {
		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	if (resend)
		check_irq_resend(desc, desc->irq_data.irq);
	return ret;
}

void irq_shutdown(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	desc->depth = 1;
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	else if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	irq_domain_deactivate_irq(&desc->irq_data);
	irq_state_set_masked(desc);
}

void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}

/**
 * irq_disable - Mark interrupt disabled
 * @desc: irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 */
void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}
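
/*
 * Lazy disable in practice (illustrative only): a driver calling
 *
 *	disable_irq_nosync(irq);
 *	...
 *	enable_irq(irq);
 *
 * on a chip without an irq_disable callback never touches the hardware
 * unless the line actually fires while it is marked disabled. In that case
 * the flow handler masks it and sets IRQS_PENDING, and enable_irq() replays
 * the lost edge via check_irq_resend().
 */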

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	irq_state_set_masked(desc);
}

void mask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	if (chip->irq_unmask) {
		chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq: the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
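
/*
 * Usage sketch (illustrative only): an I2C GPIO expander has to talk to its
 * device over a sleeping bus, so its demultiplexing parent handler runs as a
 * thread and dispatches child interrupts with handle_nested_irq():
 *
 *	static irqreturn_t foo_expander_thread(int irq, void *data)
 *	{
 *		struct foo_expander *chip = data;
 *		unsigned long pending = foo_read_status(chip); // bus access, may sleep
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, chip->ngpio)
 *			handle_nested_irq(irq_find_mapping(chip->domain, bit));
 *
 *		return IRQ_HANDLED;
 *	}
 *
 * The child interrupts are marked with irq_set_nested_thread() so their
 * handlers run from this thread rather than from hard irq context.
 */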

static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

static bool irq_may_run(struct irq_desc *desc)
{
	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

	/*
	 * If the interrupt is not in progress and is not an armed
	 * wakeup interrupt, proceed.
	 */
	if (!irqd_has_set(&desc->irq_data, mask))
		return true;

	/*
	 * If the interrupt is an armed wakeup source, mark it pending
	 * and suspended, disable it and notify the pm core about the
	 * event.
	 */
	if (irq_pm_check_wakeup(desc))
		return false;

	/*
	 * Handle a potential concurrent poll on a different core.
	 */
	return irq_check_poll(desc);
}

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
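
/*
 * Usage sketch (illustrative only): a software demultiplexer whose children
 * need no per-line hardware control can install this handler for its child
 * interrupts and dispatch them from the parent handler:
 *
 *	irq_set_chip_and_handler(child_irq, &dummy_irq_chip, handle_simple_irq);
 *
 *	// in the parent handler, for each pending child:
 *	generic_handle_irq(child_irq);
 *
 * dummy_irq_chip is the no-op chip provided by the core for exactly this
 * kind of software-decoded interrupt.
 */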

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 * handle_level_irq - Level type irq handler
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require masking the interrupt and
 * unmasking it after the associated handler has acknowledged the
 * device, so the interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it is disabled or no action is available,
	 * keep it masked and get out of here.
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it is disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
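
/*
 * Usage sketch (illustrative only): a GIC-style controller that only needs a
 * single end-of-interrupt write per serviced interrupt would wire it up as:
 *
 *	static struct irq_chip foo_eoi_chip = {
 *		.name		= "FOO-EOI",
 *		.irq_mask	= foo_irq_mask,
 *		.irq_unmask	= foo_irq_unmask,
 *		.irq_eoi	= foo_irq_eoi,
 *	};
 *
 *	irq_set_chip_and_handler(irq, &foo_eoi_chip, handle_fasteoi_irq);
 *
 * The flow handler above then issues the ->irq_eoi() callback once the
 * interrupt has been serviced.
 */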

/**
 * handle_edge_irq - edge type IRQ handler
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Interrupts occur on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be re-enabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires re-enabling the interrupt inside
 * the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	/*
	 * If it is disabled or no action is available, then mask it
	 * and get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);
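
/*
 * Usage sketch (illustrative only): an edge triggered controller must at
 * least provide ack/mask/unmask so the flow handler can latch and replay
 * edges that arrive while one is being handled:
 *
 *	static struct irq_chip foo_edge_chip = {
 *		.name		= "FOO-EDGE",
 *		.irq_ack	= foo_irq_ack,
 *		.irq_mask	= foo_irq_mask,
 *		.irq_unmask	= foo_irq_unmask,
 *		.irq_set_type	= foo_irq_set_type,
 *	};
 *
 *	irq_set_chip_and_handler(irq, &foo_edge_chip, handle_edge_irq);
 */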

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 * handle_edge_eoi_irq - edge eoi type IRQ handler
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Similar to handle_edge_irq() above, but using eoi and without the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	/*
	 * If it is disabled or no action is available, then mark it
	 * pending and get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	kstat_incr_irqs_this_cpu(irq, desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc, desc->action);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	void *dev_id = raw_cpu_ptr(action->percpu_dev_id);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
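
/*
 * Usage sketch (illustrative only): per-CPU interrupts such as a per-cpu
 * timer are set up with a per-cpu dev_id and requested once for all CPUs:
 *
 *	static DEFINE_PER_CPU(struct foo_timer, foo_timer_evt);
 *
 *	irq_set_percpu_devid(irq);
 *	irq_set_chip_and_handler(irq, &foo_chip, handle_percpu_devid_irq);
 *	request_percpu_irq(irq, foo_timer_handler, "foo_timer", &foo_timer_evt);
 *
 * Each CPU then enables its own copy of the line with enable_percpu_irq().
 */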

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	if (!handle) {
		handle = handle_bad_irq;
	} else {
		struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		/*
		 * With hierarchical domains we might run into a
		 * situation where the outermost chip is not yet set
		 * up, but the inner chips are there. Instead of
		 * bailing we install the handler, but obviously we
		 * cannot enable/startup the interrupt at this point.
		 */
		while (irq_data) {
			if (irq_data->chip != &no_irq_chip)
				break;
			/*
			 * Bail out if the outer chip is not set up
			 * and the interrupt is supposed to be started
			 * right away.
			 */
			if (WARN_ON(is_chained))
				goto out;
			/* Try the parent */
			irq_data = irq_data->parent_data;
		}
#endif
		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
			goto out;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		irq_startup(desc, true);
	}
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);
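
/*
 * Usage sketch (illustrative only): __irq_set_handler() is rarely called
 * directly. Cascaded interrupt controllers normally go through the
 * irq_set_chained_handler() wrapper, which passes is_chained=1 so that the
 * parent line is started immediately and excluded from request_irq():
 *
 *	irq_set_chained_handler(parent_irq, foo_gpio_demux_handler);
 *	irq_set_handler_data(parent_irq, foo_gpio_controller);
 *
 * where foo_gpio_demux_handler() is a hypothetical handler that reads the
 * controller's pending register and calls generic_handle_irq() for each
 * asserted child line.
 */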

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;
	irq_settings_clr_and_set(desc, clr, set);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
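
/*
 * Usage sketch (illustrative only): drivers usually reach this through the
 * irq_set_status_flags()/irq_clear_status_flags() wrappers, e.g. to keep a
 * line from being autoprobed or requested:
 *
 *	irq_set_status_flags(irq, IRQ_NOPROBE | IRQ_NOREQUEST);
 *
 * which is equivalent to irq_modify_status(irq, 0, IRQ_NOPROBE | IRQ_NOREQUEST).
 */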

/**
 * irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_online()
 * for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/**
 * irq_chip_ack_parent - Acknowledge the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_ack_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_ack(data);
}

/**
 * irq_chip_mask_parent - Mask the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_mask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_mask(data);
}

/**
 * irq_chip_unmask_parent - Unmask the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_unmask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_unmask(data);
}

/**
 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_eoi_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_eoi(data);
}

/**
 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
 * @data: Pointer to interrupt specific data
 * @dest: The affinity mask to set
 * @force: Flag to enforce setting (disable online checks)
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_affinity_parent(struct irq_data *data,
				 const struct cpumask *dest, bool force)
{
	data = data->parent_data;
	if (data->chip->irq_set_affinity)
		return data->chip->irq_set_affinity(data, dest, force);

	return -ENOSYS;
}

/**
 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
 * @data: Pointer to interrupt specific data
 *
 * Iterate through the domain hierarchy of the interrupt and check
 * whether a hw retrigger function exists. If yes, invoke it.
 */
int irq_chip_retrigger_hierarchy(struct irq_data *data)
{
	for (data = data->parent_data; data; data = data->parent_data)
		if (data->chip && data->chip->irq_retrigger)
			return data->chip->irq_retrigger(data);

	return -ENOSYS;
}
#endif
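
/*
 * Usage sketch (illustrative only): in a hierarchical setup a child domain's
 * chip can simply forward the operations it does not modify to its parent,
 * e.g. a hypothetical MSI-style chip stacked on top of a remapping or GIC
 * parent:
 *
 *	static struct irq_chip foo_msi_chip = {
 *		.name			= "FOO-MSI",
 *		.irq_ack		= irq_chip_ack_parent,
 *		.irq_mask		= irq_chip_mask_parent,
 *		.irq_unmask		= irq_chip_unmask_parent,
 *		.irq_eoi		= irq_chip_eoi_parent,
 *		.irq_set_affinity	= irq_chip_set_affinity_parent,
 *		.irq_retrigger		= irq_chip_retrigger_hierarchy,
 *	};
 */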

/**
 * irq_chip_compose_msi_msg - Compose an MSI message for an irq chip
 * @data: Pointer to interrupt specific data
 * @msg: Pointer to the MSI message
 *
 * For hierarchical domains we find the first chip in the hierarchy
 * which implements the irq_compose_msi_msg callback. For non-hierarchical
 * domains we use the top level chip.
 */
int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct irq_data *pos = NULL;

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	for (; data; data = data->parent_data)
#endif
		if (data->chip && data->chip->irq_compose_msi_msg)
			pos = data;
	if (!pos)
		return -ENOSYS;

	pos->chip->irq_compose_msi_msg(pos, msg);

	return 0;
}