]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - kernel/irq/chip.c
genirq: Add force argument to irq_startup()
[mirror_ubuntu-artful-kernel.git] / kernel / irq / chip.c
CommitLineData
dd87eb3a
TG
1/*
2 * linux/kernel/irq/chip.c
3 *
4 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
6 *
7 * This file contains the core interrupt handling code, for irq-chip
8 * based architectures.
9 *
10 * Detailed information is available in Documentation/DocBook/genericirq
11 */
12
13#include <linux/irq.h>
7fe3730d 14#include <linux/msi.h>
dd87eb3a
TG
15#include <linux/module.h>
16#include <linux/interrupt.h>
17#include <linux/kernel_stat.h>
f8264e34 18#include <linux/irqdomain.h>
dd87eb3a 19
f069686e
SR
20#include <trace/events/irq.h>
21
dd87eb3a
TG
22#include "internals.h"
23
e509bd7d
MW
24static irqreturn_t bad_chained_irq(int irq, void *dev_id)
25{
26 WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
27 return IRQ_NONE;
28}
29
30/*
31 * Chained handlers should never call action on their IRQ. This default
32 * action will emit warning if such thing happens.
33 */
34struct irqaction chained_action = {
35 .handler = bad_chained_irq,
36};
37
/**
 *	irq_set_chip - set the irq chip for an irq
 *	@irq:	irq number
 *	@chip:	pointer to irq chip description structure
 *
 *	A NULL @chip installs the dummy &no_irq_chip. Returns 0 on success,
 *	-EINVAL if no descriptor exists for @irq.
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs.
	 */
	irq_mark_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);
dd87eb3a
TG
64
/**
 *	irq_set_irq_type - set the irq trigger type for an irq
 *	@irq:	irq number
 *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 *
 *	Returns 0 on success or a negative error code from the trigger
 *	setup. -EINVAL if no descriptor exists for @irq.
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	ret = __irq_set_trigger(desc, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);
dd87eb3a
TG
84
/**
 *	irq_set_handler_data - set irq handler data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to interrupt specific data
 *
 *	Set the hardware irq controller data for an irq. Returns 0 on
 *	success, -EINVAL if no descriptor exists for @irq.
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);
dd87eb3a 104
/**
 *	irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 *	@irq_base:	Interrupt number base
 *	@irq_offset:	Interrupt number offset
 *	@entry:		Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq at offset. Only the first
 *	irq of a multi-MSI block (offset 0) gets entry->irq linked back.
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
127
51906e77
AG
/**
 *	irq_set_msi_desc - set MSI descriptor data for an irq
 *	@irq:	Interrupt number
 *	@entry:	Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq. Convenience wrapper
 *	around irq_set_msi_desc_off() with a zero offset.
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}
139
/**
 *	irq_set_chip_data - set irq chip data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to chip specific data
 *
 *	Set the hardware irq chip data for an irq. Returns 0 on success,
 *	-EINVAL if no descriptor exists for @irq.
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);
dd87eb3a 159
f303a6dd
TG
160struct irq_data *irq_get_irq_data(unsigned int irq)
161{
162 struct irq_desc *desc = irq_to_desc(irq);
163
164 return desc ? &desc->irq_data : NULL;
165}
166EXPORT_SYMBOL_GPL(irq_get_irq_data);
167
c1594b77
TG
/* Helpers toggling the IRQD_IRQ_DISABLED / IRQD_IRQ_MASKED state bits. */

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}
187
201d7f47
TG
/* Helpers toggling the IRQD_IRQ_STARTED state bit. */

static void irq_state_clr_started(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
}

static void irq_state_set_started(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
}
197
708d174b
TG
/*
 * Activate the irq in its domain and start it up at the chip level.
 * Prefers the chip's irq_startup() callback; otherwise falls back to
 * irq_enable(). Marks the descriptor started in either case and
 * returns the chip's irq_startup() result (0 when absent).
 */
static int __irq_startup(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	int ret = 0;

	irq_domain_activate_irq(d);
	if (d->chip->irq_startup) {
		ret = d->chip->irq_startup(d);
		irq_state_clr_disabled(desc);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	irq_state_set_started(desc);
	return ret;
}
214
/*
 * Start up an interrupt: reset the disable depth, enable it if already
 * started, otherwise perform the full startup and set up affinity.
 * Optionally resends a pending interrupt.
 *
 * NOTE(review): @force is accepted but not consulted in this snapshot
 * of the code — confirm against the full tree before relying on it.
 */
int irq_startup(struct irq_desc *desc, bool resend, bool force)
{
	int ret = 0;

	desc->depth = 0;

	if (irqd_is_started(&desc->irq_data)) {
		irq_enable(desc);
	} else {
		ret = __irq_startup(desc);
		irq_setup_affinity(desc);
	}
	if (resend)
		check_irq_resend(desc);

	return ret;
}
232
201d7f47
TG
static void __irq_disable(struct irq_desc *desc, bool mask);

/*
 * Shut down an interrupt: if it was started, disable/mask it at the
 * chip level (preferring the chip's irq_shutdown() callback) and clear
 * the started state. The domain deactivation below runs regardless.
 */
void irq_shutdown(struct irq_desc *desc)
{
	if (irqd_is_started(&desc->irq_data)) {
		desc->depth = 1;
		if (desc->irq_data.chip->irq_shutdown) {
			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
			irq_state_set_disabled(desc);
			irq_state_set_masked(desc);
		} else {
			__irq_disable(desc, true);
		}
		irq_state_clr_started(desc);
	}
	/*
	 * This must be called even if the interrupt was never started up,
	 * because the activation can happen before the interrupt is
	 * available for request/startup. It has its own state tracking so
	 * it's safe to call it unconditionally.
	 */
	irq_domain_deactivate_irq(&desc->irq_data);
}
256
87923470
TG
/*
 * Enable an interrupt at the chip level and clear the disabled and
 * masked state bits. Falls back to irq_unmask() when the chip has no
 * dedicated irq_enable() callback.
 */
void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}
266
201d7f47
TG
/*
 * Mark an interrupt disabled. Uses the chip's irq_disable() callback
 * when available; otherwise masks the line only when @mask is set
 * (the lazy-disable optimization — see irq_disable() below).
 */
static void __irq_disable(struct irq_desc *desc, bool mask)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	} else if (mask) {
		mask_irq(desc);
	}
}
277
/**
 * irq_disable - Mark interrupt disabled
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 *
 * If the interrupt chip does not implement the irq_disable callback,
 * a driver can disable the lazy approach for a particular irq line by
 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
 * be used for devices which cannot disable the interrupt at the
 * device level under certain circumstances and have to use
 * disable_irq[_nosync] instead.
 */
void irq_disable(struct irq_desc *desc)
{
	__irq_disable(desc, irq_settings_disable_unlazy(desc));
}
302
31d9d9b6
MZ
/*
 * Enable a per-CPU interrupt on @cpu and record it in the descriptor's
 * percpu_enabled mask. Falls back to irq_unmask() when the chip has no
 * irq_enable() callback.
 */
void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}
311
/*
 * Disable a per-CPU interrupt on @cpu and clear it from the
 * descriptor's percpu_enabled mask. Falls back to irq_mask() when the
 * chip has no irq_disable() callback.
 */
void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}
320
/*
 * Mask and acknowledge an interrupt, using the chip's combined
 * irq_mask_ack() callback when available, otherwise separate mask and
 * (optional) ack callbacks. Sets the masked state bit.
 */
static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	irq_state_set_masked(desc);
}
332
/*
 * Mask an interrupt at the chip level and set the masked state bit.
 * A no-op if the chip provides no irq_mask() callback.
 */
void mask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}
340
/*
 * Unmask an interrupt at the chip level and clear the masked state
 * bit. A no-op if the chip provides no irq_unmask() callback.
 */
void unmask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}
348
328a4978
TG
/*
 * Unmask an interrupt whose threaded handler has completed. For chips
 * flagged IRQCHIP_EOI_THREADED, the deferred EOI is issued here before
 * the unmask.
 */
void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	if (chip->irq_unmask) {
		chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}
361
399b5da2
TG
362/*
363 * handle_nested_irq - Handle a nested irq from a irq thread
364 * @irq: the interrupt number
365 *
366 * Handle interrupts which are nested into a threaded interrupt
367 * handler. The handler function is called inside the calling
368 * threads context.
369 */
370void handle_nested_irq(unsigned int irq)
371{
372 struct irq_desc *desc = irq_to_desc(irq);
373 struct irqaction *action;
374 irqreturn_t action_ret;
375
376 might_sleep();
377
239007b8 378 raw_spin_lock_irq(&desc->lock);
399b5da2 379
293a7a0a 380 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
399b5da2
TG
381
382 action = desc->action;
23812b9d
NJ
383 if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
384 desc->istate |= IRQS_PENDING;
399b5da2 385 goto out_unlock;
23812b9d 386 }
399b5da2 387
a946e8c7 388 kstat_incr_irqs_this_cpu(desc);
32f4125e 389 irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
239007b8 390 raw_spin_unlock_irq(&desc->lock);
399b5da2 391
45e52022
CK
392 action_ret = IRQ_NONE;
393 for_each_action_of_desc(desc, action)
394 action_ret |= action->thread_fn(action->irq, action->dev_id);
395
399b5da2 396 if (!noirqdebug)
0dcdbc97 397 note_interrupt(desc, action_ret);
399b5da2 398
239007b8 399 raw_spin_lock_irq(&desc->lock);
32f4125e 400 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
399b5da2
TG
401
402out_unlock:
239007b8 403 raw_spin_unlock_irq(&desc->lock);
399b5da2
TG
404}
405EXPORT_SYMBOL_GPL(handle_nested_irq);
406
fe200ae4
TG
/*
 * If a spurious-irq poll is in progress on another CPU, wait for it to
 * finish; returns false when no poll is pending.
 */
static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}
413
c7bd3ec0
TG
/*
 * Decide whether a flow handler may proceed to run the interrupt.
 * Returns true when the irq is neither in progress nor an armed wakeup
 * source; otherwise handles the PM-wakeup and concurrent-poll cases.
 */
static bool irq_may_run(struct irq_desc *desc)
{
	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

	/*
	 * If the interrupt is not in progress and is not an armed
	 * wakeup interrupt, proceed.
	 */
	if (!irqd_has_set(&desc->irq_data, mask))
		return true;

	/*
	 * If the interrupt is an armed wakeup source, mark it pending
	 * and suspended, disable it and notify the pm core about the
	 * event.
	 */
	if (irq_pm_check_wakeup(desc))
		return false;

	/*
	 * Handle a potential concurrent poll on a different core.
	 */
	return irq_check_poll(desc);
}
438
dd87eb3a
TG
439/**
440 * handle_simple_irq - Simple and software-decoded IRQs.
dd87eb3a 441 * @desc: the interrupt description structure for this irq
dd87eb3a
TG
442 *
443 * Simple interrupts are either sent from a demultiplexing interrupt
444 * handler or come from hardware, where no interrupt hardware control
445 * is necessary.
446 *
447 * Note: The caller is expected to handle the ack, clear, mask and
448 * unmask issues if necessary.
449 */
bd0b9ac4 450void handle_simple_irq(struct irq_desc *desc)
dd87eb3a 451{
239007b8 452 raw_spin_lock(&desc->lock);
dd87eb3a 453
c7bd3ec0
TG
454 if (!irq_may_run(desc))
455 goto out_unlock;
fe200ae4 456
163ef309 457 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
dd87eb3a 458
23812b9d
NJ
459 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
460 desc->istate |= IRQS_PENDING;
dd87eb3a 461 goto out_unlock;
23812b9d 462 }
dd87eb3a 463
a946e8c7 464 kstat_incr_irqs_this_cpu(desc);
107781e7 465 handle_irq_event(desc);
dd87eb3a 466
dd87eb3a 467out_unlock:
239007b8 468 raw_spin_unlock(&desc->lock);
dd87eb3a 469}
edf76f83 470EXPORT_SYMBOL_GPL(handle_simple_irq);
dd87eb3a 471
edd14cfe
KB
472/**
473 * handle_untracked_irq - Simple and software-decoded IRQs.
474 * @desc: the interrupt description structure for this irq
475 *
476 * Untracked interrupts are sent from a demultiplexing interrupt
477 * handler when the demultiplexer does not know which device it its
478 * multiplexed irq domain generated the interrupt. IRQ's handled
479 * through here are not subjected to stats tracking, randomness, or
480 * spurious interrupt detection.
481 *
482 * Note: Like handle_simple_irq, the caller is expected to handle
483 * the ack, clear, mask and unmask issues if necessary.
484 */
485void handle_untracked_irq(struct irq_desc *desc)
486{
487 unsigned int flags = 0;
488
489 raw_spin_lock(&desc->lock);
490
491 if (!irq_may_run(desc))
492 goto out_unlock;
493
494 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
495
496 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
497 desc->istate |= IRQS_PENDING;
498 goto out_unlock;
499 }
500
501 desc->istate &= ~IRQS_PENDING;
502 irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
503 raw_spin_unlock(&desc->lock);
504
505 __handle_irq_event_percpu(desc, &flags);
506
507 raw_spin_lock(&desc->lock);
508 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
509
510out_unlock:
511 raw_spin_unlock(&desc->lock);
512}
513EXPORT_SYMBOL_GPL(handle_untracked_irq);
514
ac563761
TG
515/*
516 * Called unconditionally from handle_level_irq() and only for oneshot
517 * interrupts from handle_fasteoi_irq()
518 */
519static void cond_unmask_irq(struct irq_desc *desc)
520{
521 /*
522 * We need to unmask in the following cases:
523 * - Standard level irq (IRQF_ONESHOT is not set)
524 * - Oneshot irq which did not wake the thread (caused by a
525 * spurious interrupt or a primary handler handling it
526 * completely).
527 */
528 if (!irqd_irq_disabled(&desc->irq_data) &&
529 irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
530 unmask_irq(desc);
531}
532
dd87eb3a
TG
/**
 *	handle_level_irq - Level type irq handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require to mask the interrupt and unmask
 *	it after the associated handler has acknowledged the device, so the
 *	interrupt line is back to inactive.
 */
void handle_level_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	/* Mask first so a level-triggered line cannot re-fire while handling */
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If its disabled or no action available
	 * keep it masked and get out of here
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
dd87eb3a 570
78129576
TG
571#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
572static inline void preflow_handler(struct irq_desc *desc)
573{
574 if (desc->preflow_handler)
575 desc->preflow_handler(&desc->irq_data);
576}
577#else
578static inline void preflow_handler(struct irq_desc *desc) { }
579#endif
580
328a4978
TG
581static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
582{
583 if (!(desc->istate & IRQS_ONESHOT)) {
584 chip->irq_eoi(&desc->irq_data);
585 return;
586 }
587 /*
588 * We need to unmask in the following cases:
589 * - Oneshot irq which did not wake the thread (caused by a
590 * spurious interrupt or a primary handler handling it
591 * completely).
592 */
593 if (!irqd_irq_disabled(&desc->irq_data) &&
594 irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
595 chip->irq_eoi(&desc->irq_data);
596 unmask_irq(desc);
597 } else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
598 chip->irq_eoi(&desc->irq_data);
599 }
600}
601
/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@desc:	the interrupt description structure for this irq
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
void handle_fasteoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If its disabled or no action available
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	/* Still issue the EOI unless the chip only wants it when handled */
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
dd87eb3a
TG
649
/**
 *	handle_edge_irq - edge type IRQ handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Interrupt occurs on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be reenabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires to reenable the interrupt inside
 *	of the loop which handles the interrupts which have arrived while
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
void handle_edge_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	/*
	 * If its disabled or no action available then mask it and get
	 * out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);
dd87eb3a 718
0521c8fb
TG
719#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
720/**
721 * handle_edge_eoi_irq - edge eoi type IRQ handler
0521c8fb
TG
722 * @desc: the interrupt description structure for this irq
723 *
724 * Similar as the above handle_edge_irq, but using eoi and w/o the
725 * mask/unmask logic.
726 */
bd0b9ac4 727void handle_edge_eoi_irq(struct irq_desc *desc)
0521c8fb
TG
728{
729 struct irq_chip *chip = irq_desc_get_chip(desc);
730
731 raw_spin_lock(&desc->lock);
732
733 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
c3d7acd0 734
c7bd3ec0
TG
735 if (!irq_may_run(desc)) {
736 desc->istate |= IRQS_PENDING;
737 goto out_eoi;
0521c8fb 738 }
c3d7acd0 739
0521c8fb 740 /*
c3d7acd0
TG
741 * If its disabled or no action available then mask it and get
742 * out of here.
0521c8fb 743 */
c3d7acd0
TG
744 if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
745 desc->istate |= IRQS_PENDING;
746 goto out_eoi;
0521c8fb 747 }
c3d7acd0 748
b51bf95c 749 kstat_incr_irqs_this_cpu(desc);
0521c8fb
TG
750
751 do {
752 if (unlikely(!desc->action))
753 goto out_eoi;
754
755 handle_irq_event(desc);
756
757 } while ((desc->istate & IRQS_PENDING) &&
758 !irqd_irq_disabled(&desc->irq_data));
759
ac0e0447 760out_eoi:
0521c8fb
TG
761 chip->irq_eoi(&desc->irq_data);
762 raw_spin_unlock(&desc->lock);
763}
764#endif
765
/**
 *	handle_percpu_irq - Per CPU local irq handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Per CPU interrupts on SMP machines without locking requirements
 */
void handle_percpu_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
786
31d9d9b6
MZ
787/**
788 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
31d9d9b6
MZ
789 * @desc: the interrupt description structure for this irq
790 *
791 * Per CPU interrupts on SMP machines without locking requirements. Same as
792 * handle_percpu_irq() above but with the following extras:
793 *
794 * action->percpu_dev_id is a pointer to percpu variables which
795 * contain the real device id for the cpu on which this handler is
796 * called
797 */
bd0b9ac4 798void handle_percpu_devid_irq(struct irq_desc *desc)
31d9d9b6
MZ
799{
800 struct irq_chip *chip = irq_desc_get_chip(desc);
801 struct irqaction *action = desc->action;
bd0b9ac4 802 unsigned int irq = irq_desc_get_irq(desc);
31d9d9b6
MZ
803 irqreturn_t res;
804
b51bf95c 805 kstat_incr_irqs_this_cpu(desc);
31d9d9b6
MZ
806
807 if (chip->irq_ack)
808 chip->irq_ack(&desc->irq_data);
809
fc590c22
TG
810 if (likely(action)) {
811 trace_irq_handler_entry(irq, action);
812 res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
813 trace_irq_handler_exit(irq, action, res);
814 } else {
815 unsigned int cpu = smp_processor_id();
816 bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
817
818 if (enabled)
819 irq_percpu_disable(desc, cpu);
820
821 pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
822 enabled ? " and unmasked" : "", irq, cpu);
823 }
31d9d9b6
MZ
824
825 if (chip->irq_eoi)
826 chip->irq_eoi(&desc->irq_data);
827}
828
/*
 * Install a flow handler on a locked descriptor. A NULL @handle
 * uninstalls (reverts to handle_bad_irq and disables the line). When
 * @is_chained is set, the irq is configured as a chained demultiplexer
 * and started immediately.
 */
static void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
		     int is_chained, const char *name)
{
	if (!handle) {
		handle = handle_bad_irq;
	} else {
		struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		/*
		 * With hierarchical domains we might run into a
		 * situation where the outermost chip is not yet set
		 * up, but the inner chips are there. Instead of
		 * bailing we install the handler, but obviously we
		 * cannot enable/startup the interrupt at this point.
		 */
		while (irq_data) {
			if (irq_data->chip != &no_irq_chip)
				break;
			/*
			 * Bail out if the outer chip is not set up
			 * and the interrupt supposed to be started
			 * right away.
			 */
			if (WARN_ON(is_chained))
				return;
			/* Try the parent */
			irq_data = irq_data->parent_data;
		}
#endif
		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
			return;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		if (is_chained)
			desc->action = NULL;
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		unsigned int type = irqd_get_trigger_type(&desc->irq_data);

		/*
		 * We're about to start this interrupt immediately,
		 * hence the need to set the trigger configuration.
		 * But the .set_type callback may have overridden the
		 * flow handler, ignoring that we're dealing with a
		 * chained interrupt. Reset it immediately because we
		 * do know better.
		 */
		if (type != IRQ_TYPE_NONE) {
			__irq_set_trigger(desc, type);
			desc->handle_irq = handle;
		}

		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		desc->action = &chained_action;
		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
	}
}
898
/*
 * Public entry point: look up and bus-lock the descriptor for @irq,
 * then delegate to __irq_do_set_handler(). Silently returns when no
 * descriptor exists.
 */
void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	__irq_do_set_handler(desc, handle, is_chained, name);
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);
dd87eb3a 913
/*
 * Set a chained flow handler and its handler data in one step, under
 * the descriptor bus lock. The irq is always installed as chained
 * (is_chained = 1) with no name.
 */
void
irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
				 void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	desc->irq_common_data.handler_data = data;
	__irq_do_set_handler(desc, handle, 1, NULL);

	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);
930
/*
 * Convenience wrapper: install both the chip and the (non-chained)
 * flow handler for @irq in one call.
 */
void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
46f4f8f6 939
/*
 * Clear and set irq settings flags for @irq, then resynchronize the
 * derived irq_data state bits (balancing, per-cpu, move context,
 * level, trigger mask) with the new settings.
 */
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;

	/*
	 * Warn when a driver sets the no autoenable flag on an already
	 * active interrupt.
	 */
	WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));

	irq_settings_clr_and_set(desc, clr, set);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
0fdb4b25
DD
972
/**
 *	irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_online()
 *	for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		/* Skip disabled irqs unless the chip opts in via its flags */
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}
1002
1003/**
1004 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
1005 *
1006 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
1007 * for each.
1008 */
1009void irq_cpu_offline(void)
1010{
1011 struct irq_desc *desc;
1012 struct irq_chip *chip;
1013 unsigned long flags;
1014 unsigned int irq;
1015
1016 for_each_active_irq(irq) {
1017 desc = irq_to_desc(irq);
1018 if (!desc)
1019 continue;
1020
1021 raw_spin_lock_irqsave(&desc->lock, flags);
1022
1023 chip = irq_data_get_irq_chip(&desc->irq_data);
b3d42232
TG
1024 if (chip && chip->irq_cpu_offline &&
1025 (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
32f4125e 1026 !irqd_irq_disabled(&desc->irq_data)))
0fdb4b25
DD
1027 chip->irq_cpu_offline(&desc->irq_data);
1028
1029 raw_spin_unlock_irqrestore(&desc->lock, flags);
1030 }
1031}
85f08c17
JL
1032
1033#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
3cfeffc2
SA
1034/**
1035 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
1036 * NULL)
1037 * @data: Pointer to interrupt specific data
1038 */
1039void irq_chip_enable_parent(struct irq_data *data)
1040{
1041 data = data->parent_data;
1042 if (data->chip->irq_enable)
1043 data->chip->irq_enable(data);
1044 else
1045 data->chip->irq_unmask(data);
1046}
1047
1048/**
1049 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
1050 * NULL)
1051 * @data: Pointer to interrupt specific data
1052 */
1053void irq_chip_disable_parent(struct irq_data *data)
1054{
1055 data = data->parent_data;
1056 if (data->chip->irq_disable)
1057 data->chip->irq_disable(data);
1058 else
1059 data->chip->irq_mask(data);
1060}
1061
85f08c17
JL
1062/**
1063 * irq_chip_ack_parent - Acknowledge the parent interrupt
1064 * @data: Pointer to interrupt specific data
1065 */
1066void irq_chip_ack_parent(struct irq_data *data)
1067{
1068 data = data->parent_data;
1069 data->chip->irq_ack(data);
1070}
a4289dc2 1071EXPORT_SYMBOL_GPL(irq_chip_ack_parent);
85f08c17 1072
56e8abab
YC
1073/**
1074 * irq_chip_mask_parent - Mask the parent interrupt
1075 * @data: Pointer to interrupt specific data
1076 */
1077void irq_chip_mask_parent(struct irq_data *data)
1078{
1079 data = data->parent_data;
1080 data->chip->irq_mask(data);
1081}
52b2a05f 1082EXPORT_SYMBOL_GPL(irq_chip_mask_parent);
56e8abab
YC
1083
1084/**
1085 * irq_chip_unmask_parent - Unmask the parent interrupt
1086 * @data: Pointer to interrupt specific data
1087 */
1088void irq_chip_unmask_parent(struct irq_data *data)
1089{
1090 data = data->parent_data;
1091 data->chip->irq_unmask(data);
1092}
52b2a05f 1093EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);
56e8abab
YC
1094
1095/**
1096 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
1097 * @data: Pointer to interrupt specific data
1098 */
1099void irq_chip_eoi_parent(struct irq_data *data)
1100{
1101 data = data->parent_data;
1102 data->chip->irq_eoi(data);
1103}
52b2a05f 1104EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);
56e8abab
YC
1105
1106/**
1107 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
1108 * @data: Pointer to interrupt specific data
1109 * @dest: The affinity mask to set
1110 * @force: Flag to enforce setting (disable online checks)
1111 *
1112 * Conditinal, as the underlying parent chip might not implement it.
1113 */
1114int irq_chip_set_affinity_parent(struct irq_data *data,
1115 const struct cpumask *dest, bool force)
1116{
1117 data = data->parent_data;
1118 if (data->chip->irq_set_affinity)
1119 return data->chip->irq_set_affinity(data, dest, force);
b7560de1
GS
1120
1121 return -ENOSYS;
1122}
1123
1124/**
1125 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
1126 * @data: Pointer to interrupt specific data
1127 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
1128 *
1129 * Conditional, as the underlying parent chip might not implement it.
1130 */
1131int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
1132{
1133 data = data->parent_data;
1134
1135 if (data->chip->irq_set_type)
1136 return data->chip->irq_set_type(data, type);
56e8abab
YC
1137
1138 return -ENOSYS;
1139}
52b2a05f 1140EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);
56e8abab 1141
85f08c17
JL
1142/**
1143 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
1144 * @data: Pointer to interrupt specific data
1145 *
1146 * Iterate through the domain hierarchy of the interrupt and check
1147 * whether a hw retrigger function exists. If yes, invoke it.
1148 */
1149int irq_chip_retrigger_hierarchy(struct irq_data *data)
1150{
1151 for (data = data->parent_data; data; data = data->parent_data)
1152 if (data->chip && data->chip->irq_retrigger)
1153 return data->chip->irq_retrigger(data);
1154
6d4affea 1155 return 0;
85f08c17 1156}
08b55e2a 1157
0a4377de
JL
1158/**
1159 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
1160 * @data: Pointer to interrupt specific data
8505a81b 1161 * @vcpu_info: The vcpu affinity information
0a4377de
JL
1162 */
1163int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
1164{
1165 data = data->parent_data;
1166 if (data->chip->irq_set_vcpu_affinity)
1167 return data->chip->irq_set_vcpu_affinity(data, vcpu_info);
1168
1169 return -ENOSYS;
1170}
1171
08b55e2a
MZ
1172/**
1173 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
1174 * @data: Pointer to interrupt specific data
1175 * @on: Whether to set or reset the wake-up capability of this irq
1176 *
1177 * Conditional, as the underlying parent chip might not implement it.
1178 */
1179int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
1180{
1181 data = data->parent_data;
1182 if (data->chip->irq_set_wake)
1183 return data->chip->irq_set_wake(data, on);
1184
1185 return -ENOSYS;
1186}
85f08c17 1187#endif
515085ef
JL
1188
/**
 * irq_chip_compose_msi_msg - Compose msi message for an irq chip
 * @data: Pointer to interrupt specific data
 * @msg: Pointer to the MSI message
 *
 * For hierarchical domains we find the first chip in the hierarchy
 * which implements the irq_compose_msi_msg callback. For non
 * hierarchical we use the top level chip.
 *
 * Returns -ENOSYS when no chip in the (possibly single-level)
 * hierarchy provides the callback.
 */
int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct irq_data *pos = NULL;

	/*
	 * Without hierarchy support the loop header disappears and the
	 * conditional below applies to @data alone.
	 */
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	for (; data; data = data->parent_data)
#endif
		if (data->chip && data->chip->irq_compose_msi_msg)
			pos = data;
	if (!pos)
		return -ENOSYS;

	pos->chip->irq_compose_msi_msg(pos, msg);

	return 0;
}
be45beb2
JH
1214
1215/**
1216 * irq_chip_pm_get - Enable power for an IRQ chip
1217 * @data: Pointer to interrupt specific data
1218 *
1219 * Enable the power to the IRQ chip referenced by the interrupt data
1220 * structure.
1221 */
1222int irq_chip_pm_get(struct irq_data *data)
1223{
1224 int retval;
1225
1226 if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) {
1227 retval = pm_runtime_get_sync(data->chip->parent_device);
1228 if (retval < 0) {
1229 pm_runtime_put_noidle(data->chip->parent_device);
1230 return retval;
1231 }
1232 }
1233
1234 return 0;
1235}
1236
1237/**
1238 * irq_chip_pm_put - Disable power for an IRQ chip
1239 * @data: Pointer to interrupt specific data
1240 *
1241 * Disable the power to the IRQ chip referenced by the interrupt data
1242 * structure, belongs. Note that power will only be disabled, once this
1243 * function has been called for all IRQs that have called irq_chip_pm_get().
1244 */
1245int irq_chip_pm_put(struct irq_data *data)
1246{
1247 int retval = 0;
1248
1249 if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device)
1250 retval = pm_runtime_put(data->chip->parent_device);
1251
1252 return (retval < 0) ? retval : 0;
1253}