]>
Commit | Line | Data |
---|---|---|
dd87eb3a TG |
1 | /* |
2 | * linux/kernel/irq/chip.c | |
3 | * | |
4 | * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar | |
5 | * Copyright (C) 2005-2006, Thomas Gleixner, Russell King | |
6 | * | |
7 | * This file contains the core interrupt handling code, for irq-chip | |
8 | * based architectures. | |
9 | * | |
10 | * Detailed information is available in Documentation/DocBook/genericirq | |
11 | */ | |
12 | ||
13 | #include <linux/irq.h> | |
7fe3730d | 14 | #include <linux/msi.h> |
dd87eb3a TG |
15 | #include <linux/module.h> |
16 | #include <linux/interrupt.h> | |
17 | #include <linux/kernel_stat.h> | |
f8264e34 | 18 | #include <linux/irqdomain.h> |
dd87eb3a | 19 | |
f069686e SR |
20 | #include <trace/events/irq.h> |
21 | ||
dd87eb3a TG |
22 | #include "internals.h" |
23 | ||
e509bd7d MW |
24 | static irqreturn_t bad_chained_irq(int irq, void *dev_id) |
25 | { | |
26 | WARN_ONCE(1, "Chained irq %d should not call an action\n", irq); | |
27 | return IRQ_NONE; | |
28 | } | |
29 | ||
30 | /* | |
31 | * Chained handlers should never call action on their IRQ. This default | |
32 | * action will emit warning if such thing happens. | |
33 | */ | |
34 | struct irqaction chained_action = { | |
35 | .handler = bad_chained_irq, | |
36 | }; | |
37 | ||
dd87eb3a | 38 | /** |
a0cd9ca2 | 39 | * irq_set_chip - set the irq chip for an irq |
dd87eb3a TG |
40 | * @irq: irq number |
41 | * @chip: pointer to irq chip description structure | |
42 | */ | |
a0cd9ca2 | 43 | int irq_set_chip(unsigned int irq, struct irq_chip *chip) |
dd87eb3a | 44 | { |
dd87eb3a | 45 | unsigned long flags; |
31d9d9b6 | 46 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); |
dd87eb3a | 47 | |
02725e74 | 48 | if (!desc) |
dd87eb3a | 49 | return -EINVAL; |
dd87eb3a TG |
50 | |
51 | if (!chip) | |
52 | chip = &no_irq_chip; | |
53 | ||
6b8ff312 | 54 | desc->irq_data.chip = chip; |
02725e74 | 55 | irq_put_desc_unlock(desc, flags); |
d72274e5 DD |
56 | /* |
57 | * For !CONFIG_SPARSE_IRQ make the irq show up in | |
f63b6a05 | 58 | * allocated_irqs. |
d72274e5 | 59 | */ |
f63b6a05 | 60 | irq_mark_irq(irq); |
dd87eb3a TG |
61 | return 0; |
62 | } | |
a0cd9ca2 | 63 | EXPORT_SYMBOL(irq_set_chip); |
dd87eb3a TG |
64 | |
65 | /** | |
a0cd9ca2 | 66 | * irq_set_type - set the irq trigger type for an irq |
dd87eb3a | 67 | * @irq: irq number |
0c5d1eb7 | 68 | * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h |
dd87eb3a | 69 | */ |
a0cd9ca2 | 70 | int irq_set_irq_type(unsigned int irq, unsigned int type) |
dd87eb3a | 71 | { |
dd87eb3a | 72 | unsigned long flags; |
31d9d9b6 | 73 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); |
02725e74 | 74 | int ret = 0; |
dd87eb3a | 75 | |
02725e74 TG |
76 | if (!desc) |
77 | return -EINVAL; | |
dd87eb3a | 78 | |
f2b662da | 79 | type &= IRQ_TYPE_SENSE_MASK; |
a1ff541a | 80 | ret = __irq_set_trigger(desc, type); |
02725e74 | 81 | irq_put_desc_busunlock(desc, flags); |
dd87eb3a TG |
82 | return ret; |
83 | } | |
a0cd9ca2 | 84 | EXPORT_SYMBOL(irq_set_irq_type); |
dd87eb3a TG |
85 | |
86 | /** | |
a0cd9ca2 | 87 | * irq_set_handler_data - set irq handler data for an irq |
dd87eb3a TG |
88 | * @irq: Interrupt number |
89 | * @data: Pointer to interrupt specific data | |
90 | * | |
91 | * Set the hardware irq controller data for an irq | |
92 | */ | |
a0cd9ca2 | 93 | int irq_set_handler_data(unsigned int irq, void *data) |
dd87eb3a | 94 | { |
dd87eb3a | 95 | unsigned long flags; |
31d9d9b6 | 96 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); |
dd87eb3a | 97 | |
02725e74 | 98 | if (!desc) |
dd87eb3a | 99 | return -EINVAL; |
af7080e0 | 100 | desc->irq_common_data.handler_data = data; |
02725e74 | 101 | irq_put_desc_unlock(desc, flags); |
dd87eb3a TG |
102 | return 0; |
103 | } | |
a0cd9ca2 | 104 | EXPORT_SYMBOL(irq_set_handler_data); |
dd87eb3a | 105 | |
5b912c10 | 106 | /** |
51906e77 AG |
107 | * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset |
108 | * @irq_base: Interrupt number base | |
109 | * @irq_offset: Interrupt number offset | |
110 | * @entry: Pointer to MSI descriptor data | |
5b912c10 | 111 | * |
51906e77 | 112 | * Set the MSI descriptor entry for an irq at offset |
5b912c10 | 113 | */ |
51906e77 AG |
114 | int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset, |
115 | struct msi_desc *entry) | |
5b912c10 | 116 | { |
5b912c10 | 117 | unsigned long flags; |
51906e77 | 118 | struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL); |
5b912c10 | 119 | |
02725e74 | 120 | if (!desc) |
5b912c10 | 121 | return -EINVAL; |
b237721c | 122 | desc->irq_common_data.msi_desc = entry; |
51906e77 AG |
123 | if (entry && !irq_offset) |
124 | entry->irq = irq_base; | |
02725e74 | 125 | irq_put_desc_unlock(desc, flags); |
5b912c10 EB |
126 | return 0; |
127 | } | |
128 | ||
/**
 *	irq_set_msi_desc - set MSI descriptor data for an irq
 *	@irq:	Interrupt number
 *	@entry:	Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}
140 | ||
dd87eb3a | 141 | /** |
a0cd9ca2 | 142 | * irq_set_chip_data - set irq chip data for an irq |
dd87eb3a TG |
143 | * @irq: Interrupt number |
144 | * @data: Pointer to chip specific data | |
145 | * | |
146 | * Set the hardware irq chip data for an irq | |
147 | */ | |
a0cd9ca2 | 148 | int irq_set_chip_data(unsigned int irq, void *data) |
dd87eb3a | 149 | { |
dd87eb3a | 150 | unsigned long flags; |
31d9d9b6 | 151 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); |
dd87eb3a | 152 | |
02725e74 | 153 | if (!desc) |
dd87eb3a | 154 | return -EINVAL; |
6b8ff312 | 155 | desc->irq_data.chip_data = data; |
02725e74 | 156 | irq_put_desc_unlock(desc, flags); |
dd87eb3a TG |
157 | return 0; |
158 | } | |
a0cd9ca2 | 159 | EXPORT_SYMBOL(irq_set_chip_data); |
dd87eb3a | 160 | |
f303a6dd TG |
161 | struct irq_data *irq_get_irq_data(unsigned int irq) |
162 | { | |
163 | struct irq_desc *desc = irq_to_desc(irq); | |
164 | ||
165 | return desc ? &desc->irq_data : NULL; | |
166 | } | |
167 | EXPORT_SYMBOL_GPL(irq_get_irq_data); | |
168 | ||
c1594b77 TG |
169 | static void irq_state_clr_disabled(struct irq_desc *desc) |
170 | { | |
801a0e9a | 171 | irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED); |
c1594b77 TG |
172 | } |
173 | ||
174 | static void irq_state_set_disabled(struct irq_desc *desc) | |
175 | { | |
801a0e9a | 176 | irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED); |
c1594b77 TG |
177 | } |
178 | ||
6e40262e TG |
179 | static void irq_state_clr_masked(struct irq_desc *desc) |
180 | { | |
32f4125e | 181 | irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED); |
6e40262e TG |
182 | } |
183 | ||
184 | static void irq_state_set_masked(struct irq_desc *desc) | |
185 | { | |
32f4125e | 186 | irqd_set(&desc->irq_data, IRQD_IRQ_MASKED); |
6e40262e TG |
187 | } |
188 | ||
b4bc724e | 189 | int irq_startup(struct irq_desc *desc, bool resend) |
46999238 | 190 | { |
b4bc724e TG |
191 | int ret = 0; |
192 | ||
c1594b77 | 193 | irq_state_clr_disabled(desc); |
46999238 TG |
194 | desc->depth = 0; |
195 | ||
f8264e34 | 196 | irq_domain_activate_irq(&desc->irq_data); |
3aae994f | 197 | if (desc->irq_data.chip->irq_startup) { |
b4bc724e | 198 | ret = desc->irq_data.chip->irq_startup(&desc->irq_data); |
6e40262e | 199 | irq_state_clr_masked(desc); |
b4bc724e TG |
200 | } else { |
201 | irq_enable(desc); | |
3aae994f | 202 | } |
b4bc724e | 203 | if (resend) |
0798abeb | 204 | check_irq_resend(desc); |
b4bc724e | 205 | return ret; |
46999238 TG |
206 | } |
207 | ||
208 | void irq_shutdown(struct irq_desc *desc) | |
209 | { | |
c1594b77 | 210 | irq_state_set_disabled(desc); |
46999238 | 211 | desc->depth = 1; |
50f7c032 TG |
212 | if (desc->irq_data.chip->irq_shutdown) |
213 | desc->irq_data.chip->irq_shutdown(&desc->irq_data); | |
ed585a65 | 214 | else if (desc->irq_data.chip->irq_disable) |
50f7c032 TG |
215 | desc->irq_data.chip->irq_disable(&desc->irq_data); |
216 | else | |
217 | desc->irq_data.chip->irq_mask(&desc->irq_data); | |
f8264e34 | 218 | irq_domain_deactivate_irq(&desc->irq_data); |
6e40262e | 219 | irq_state_set_masked(desc); |
46999238 TG |
220 | } |
221 | ||
87923470 TG |
222 | void irq_enable(struct irq_desc *desc) |
223 | { | |
c1594b77 | 224 | irq_state_clr_disabled(desc); |
50f7c032 TG |
225 | if (desc->irq_data.chip->irq_enable) |
226 | desc->irq_data.chip->irq_enable(&desc->irq_data); | |
227 | else | |
228 | desc->irq_data.chip->irq_unmask(&desc->irq_data); | |
6e40262e | 229 | irq_state_clr_masked(desc); |
dd87eb3a TG |
230 | } |
231 | ||
d671a605 | 232 | /** |
f788e7bf | 233 | * irq_disable - Mark interrupt disabled |
d671a605 AF |
234 | * @desc: irq descriptor which should be disabled |
235 | * | |
236 | * If the chip does not implement the irq_disable callback, we | |
237 | * use a lazy disable approach. That means we mark the interrupt | |
238 | * disabled, but leave the hardware unmasked. That's an | |
239 | * optimization because we avoid the hardware access for the | |
240 | * common case where no interrupt happens after we marked it | |
241 | * disabled. If an interrupt happens, then the interrupt flow | |
242 | * handler masks the line at the hardware level and marks it | |
243 | * pending. | |
e9849777 TG |
244 | * |
245 | * If the interrupt chip does not implement the irq_disable callback, | |
246 | * a driver can disable the lazy approach for a particular irq line by | |
247 | * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can | |
248 | * be used for devices which cannot disable the interrupt at the | |
249 | * device level under certain circumstances and have to use | |
250 | * disable_irq[_nosync] instead. | |
d671a605 | 251 | */ |
50f7c032 | 252 | void irq_disable(struct irq_desc *desc) |
89d694b9 | 253 | { |
c1594b77 | 254 | irq_state_set_disabled(desc); |
50f7c032 TG |
255 | if (desc->irq_data.chip->irq_disable) { |
256 | desc->irq_data.chip->irq_disable(&desc->irq_data); | |
a61d8258 | 257 | irq_state_set_masked(desc); |
e9849777 TG |
258 | } else if (irq_settings_disable_unlazy(desc)) { |
259 | mask_irq(desc); | |
50f7c032 | 260 | } |
89d694b9 TG |
261 | } |
262 | ||
31d9d9b6 MZ |
263 | void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu) |
264 | { | |
265 | if (desc->irq_data.chip->irq_enable) | |
266 | desc->irq_data.chip->irq_enable(&desc->irq_data); | |
267 | else | |
268 | desc->irq_data.chip->irq_unmask(&desc->irq_data); | |
269 | cpumask_set_cpu(cpu, desc->percpu_enabled); | |
270 | } | |
271 | ||
272 | void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu) | |
273 | { | |
274 | if (desc->irq_data.chip->irq_disable) | |
275 | desc->irq_data.chip->irq_disable(&desc->irq_data); | |
276 | else | |
277 | desc->irq_data.chip->irq_mask(&desc->irq_data); | |
278 | cpumask_clear_cpu(cpu, desc->percpu_enabled); | |
279 | } | |
280 | ||
9205e31d | 281 | static inline void mask_ack_irq(struct irq_desc *desc) |
dd87eb3a | 282 | { |
9205e31d TG |
283 | if (desc->irq_data.chip->irq_mask_ack) |
284 | desc->irq_data.chip->irq_mask_ack(&desc->irq_data); | |
dd87eb3a | 285 | else { |
e2c0f8ff | 286 | desc->irq_data.chip->irq_mask(&desc->irq_data); |
22a49163 TG |
287 | if (desc->irq_data.chip->irq_ack) |
288 | desc->irq_data.chip->irq_ack(&desc->irq_data); | |
dd87eb3a | 289 | } |
6e40262e | 290 | irq_state_set_masked(desc); |
0b1adaa0 TG |
291 | } |
292 | ||
d4d5e089 | 293 | void mask_irq(struct irq_desc *desc) |
0b1adaa0 | 294 | { |
e2c0f8ff TG |
295 | if (desc->irq_data.chip->irq_mask) { |
296 | desc->irq_data.chip->irq_mask(&desc->irq_data); | |
6e40262e | 297 | irq_state_set_masked(desc); |
0b1adaa0 TG |
298 | } |
299 | } | |
300 | ||
d4d5e089 | 301 | void unmask_irq(struct irq_desc *desc) |
0b1adaa0 | 302 | { |
0eda58b7 TG |
303 | if (desc->irq_data.chip->irq_unmask) { |
304 | desc->irq_data.chip->irq_unmask(&desc->irq_data); | |
6e40262e | 305 | irq_state_clr_masked(desc); |
0b1adaa0 | 306 | } |
dd87eb3a TG |
307 | } |
308 | ||
328a4978 TG |
309 | void unmask_threaded_irq(struct irq_desc *desc) |
310 | { | |
311 | struct irq_chip *chip = desc->irq_data.chip; | |
312 | ||
313 | if (chip->flags & IRQCHIP_EOI_THREADED) | |
314 | chip->irq_eoi(&desc->irq_data); | |
315 | ||
316 | if (chip->irq_unmask) { | |
317 | chip->irq_unmask(&desc->irq_data); | |
318 | irq_state_clr_masked(desc); | |
319 | } | |
320 | } | |
321 | ||
399b5da2 TG |
322 | /* |
323 | * handle_nested_irq - Handle a nested irq from a irq thread | |
324 | * @irq: the interrupt number | |
325 | * | |
326 | * Handle interrupts which are nested into a threaded interrupt | |
327 | * handler. The handler function is called inside the calling | |
328 | * threads context. | |
329 | */ | |
330 | void handle_nested_irq(unsigned int irq) | |
331 | { | |
332 | struct irq_desc *desc = irq_to_desc(irq); | |
333 | struct irqaction *action; | |
334 | irqreturn_t action_ret; | |
335 | ||
336 | might_sleep(); | |
337 | ||
239007b8 | 338 | raw_spin_lock_irq(&desc->lock); |
399b5da2 | 339 | |
293a7a0a | 340 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
399b5da2 TG |
341 | |
342 | action = desc->action; | |
23812b9d NJ |
343 | if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) { |
344 | desc->istate |= IRQS_PENDING; | |
399b5da2 | 345 | goto out_unlock; |
23812b9d | 346 | } |
399b5da2 | 347 | |
a946e8c7 | 348 | kstat_incr_irqs_this_cpu(desc); |
32f4125e | 349 | irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); |
239007b8 | 350 | raw_spin_unlock_irq(&desc->lock); |
399b5da2 TG |
351 | |
352 | action_ret = action->thread_fn(action->irq, action->dev_id); | |
353 | if (!noirqdebug) | |
0dcdbc97 | 354 | note_interrupt(desc, action_ret); |
399b5da2 | 355 | |
239007b8 | 356 | raw_spin_lock_irq(&desc->lock); |
32f4125e | 357 | irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); |
399b5da2 TG |
358 | |
359 | out_unlock: | |
239007b8 | 360 | raw_spin_unlock_irq(&desc->lock); |
399b5da2 TG |
361 | } |
362 | EXPORT_SYMBOL_GPL(handle_nested_irq); | |
363 | ||
fe200ae4 TG |
364 | static bool irq_check_poll(struct irq_desc *desc) |
365 | { | |
6954b75b | 366 | if (!(desc->istate & IRQS_POLL_INPROGRESS)) |
fe200ae4 TG |
367 | return false; |
368 | return irq_wait_for_poll(desc); | |
369 | } | |
370 | ||
c7bd3ec0 TG |
371 | static bool irq_may_run(struct irq_desc *desc) |
372 | { | |
9ce7a258 TG |
373 | unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED; |
374 | ||
375 | /* | |
376 | * If the interrupt is not in progress and is not an armed | |
377 | * wakeup interrupt, proceed. | |
378 | */ | |
379 | if (!irqd_has_set(&desc->irq_data, mask)) | |
c7bd3ec0 | 380 | return true; |
9ce7a258 TG |
381 | |
382 | /* | |
383 | * If the interrupt is an armed wakeup source, mark it pending | |
384 | * and suspended, disable it and notify the pm core about the | |
385 | * event. | |
386 | */ | |
387 | if (irq_pm_check_wakeup(desc)) | |
388 | return false; | |
389 | ||
390 | /* | |
391 | * Handle a potential concurrent poll on a different core. | |
392 | */ | |
c7bd3ec0 TG |
393 | return irq_check_poll(desc); |
394 | } | |
395 | ||
dd87eb3a TG |
396 | /** |
397 | * handle_simple_irq - Simple and software-decoded IRQs. | |
dd87eb3a | 398 | * @desc: the interrupt description structure for this irq |
dd87eb3a TG |
399 | * |
400 | * Simple interrupts are either sent from a demultiplexing interrupt | |
401 | * handler or come from hardware, where no interrupt hardware control | |
402 | * is necessary. | |
403 | * | |
404 | * Note: The caller is expected to handle the ack, clear, mask and | |
405 | * unmask issues if necessary. | |
406 | */ | |
bd0b9ac4 | 407 | void handle_simple_irq(struct irq_desc *desc) |
dd87eb3a | 408 | { |
239007b8 | 409 | raw_spin_lock(&desc->lock); |
dd87eb3a | 410 | |
c7bd3ec0 TG |
411 | if (!irq_may_run(desc)) |
412 | goto out_unlock; | |
fe200ae4 | 413 | |
163ef309 | 414 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
dd87eb3a | 415 | |
23812b9d NJ |
416 | if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { |
417 | desc->istate |= IRQS_PENDING; | |
dd87eb3a | 418 | goto out_unlock; |
23812b9d | 419 | } |
dd87eb3a | 420 | |
a946e8c7 | 421 | kstat_incr_irqs_this_cpu(desc); |
107781e7 | 422 | handle_irq_event(desc); |
dd87eb3a | 423 | |
dd87eb3a | 424 | out_unlock: |
239007b8 | 425 | raw_spin_unlock(&desc->lock); |
dd87eb3a | 426 | } |
edf76f83 | 427 | EXPORT_SYMBOL_GPL(handle_simple_irq); |
dd87eb3a | 428 | |
ac563761 TG |
429 | /* |
430 | * Called unconditionally from handle_level_irq() and only for oneshot | |
431 | * interrupts from handle_fasteoi_irq() | |
432 | */ | |
433 | static void cond_unmask_irq(struct irq_desc *desc) | |
434 | { | |
435 | /* | |
436 | * We need to unmask in the following cases: | |
437 | * - Standard level irq (IRQF_ONESHOT is not set) | |
438 | * - Oneshot irq which did not wake the thread (caused by a | |
439 | * spurious interrupt or a primary handler handling it | |
440 | * completely). | |
441 | */ | |
442 | if (!irqd_irq_disabled(&desc->irq_data) && | |
443 | irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) | |
444 | unmask_irq(desc); | |
445 | } | |
446 | ||
dd87eb3a TG |
447 | /** |
448 | * handle_level_irq - Level type irq handler | |
dd87eb3a | 449 | * @desc: the interrupt description structure for this irq |
dd87eb3a TG |
450 | * |
451 | * Level type interrupts are active as long as the hardware line has | |
452 | * the active level. This may require to mask the interrupt and unmask | |
453 | * it after the associated handler has acknowledged the device, so the | |
454 | * interrupt line is back to inactive. | |
455 | */ | |
bd0b9ac4 | 456 | void handle_level_irq(struct irq_desc *desc) |
dd87eb3a | 457 | { |
239007b8 | 458 | raw_spin_lock(&desc->lock); |
9205e31d | 459 | mask_ack_irq(desc); |
dd87eb3a | 460 | |
c7bd3ec0 TG |
461 | if (!irq_may_run(desc)) |
462 | goto out_unlock; | |
fe200ae4 | 463 | |
163ef309 | 464 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
dd87eb3a TG |
465 | |
466 | /* | |
467 | * If its disabled or no action available | |
468 | * keep it masked and get out of here | |
469 | */ | |
d4dc0f90 TG |
470 | if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { |
471 | desc->istate |= IRQS_PENDING; | |
86998aa6 | 472 | goto out_unlock; |
d4dc0f90 | 473 | } |
dd87eb3a | 474 | |
a946e8c7 | 475 | kstat_incr_irqs_this_cpu(desc); |
1529866c | 476 | handle_irq_event(desc); |
b25c340c | 477 | |
ac563761 TG |
478 | cond_unmask_irq(desc); |
479 | ||
86998aa6 | 480 | out_unlock: |
239007b8 | 481 | raw_spin_unlock(&desc->lock); |
dd87eb3a | 482 | } |
14819ea1 | 483 | EXPORT_SYMBOL_GPL(handle_level_irq); |
dd87eb3a | 484 | |
/* Optional hook run before the fasteoi flow handler services the irq. */
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif
494 | ||
328a4978 TG |
495 | static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip) |
496 | { | |
497 | if (!(desc->istate & IRQS_ONESHOT)) { | |
498 | chip->irq_eoi(&desc->irq_data); | |
499 | return; | |
500 | } | |
501 | /* | |
502 | * We need to unmask in the following cases: | |
503 | * - Oneshot irq which did not wake the thread (caused by a | |
504 | * spurious interrupt or a primary handler handling it | |
505 | * completely). | |
506 | */ | |
507 | if (!irqd_irq_disabled(&desc->irq_data) && | |
508 | irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) { | |
509 | chip->irq_eoi(&desc->irq_data); | |
510 | unmask_irq(desc); | |
511 | } else if (!(chip->flags & IRQCHIP_EOI_THREADED)) { | |
512 | chip->irq_eoi(&desc->irq_data); | |
513 | } | |
514 | } | |
515 | ||
dd87eb3a | 516 | /** |
47c2a3aa | 517 | * handle_fasteoi_irq - irq handler for transparent controllers |
dd87eb3a | 518 | * @desc: the interrupt description structure for this irq |
dd87eb3a | 519 | * |
47c2a3aa | 520 | * Only a single callback will be issued to the chip: an ->eoi() |
dd87eb3a TG |
521 | * call when the interrupt has been serviced. This enables support |
522 | * for modern forms of interrupt handlers, which handle the flow | |
523 | * details in hardware, transparently. | |
524 | */ | |
bd0b9ac4 | 525 | void handle_fasteoi_irq(struct irq_desc *desc) |
dd87eb3a | 526 | { |
328a4978 TG |
527 | struct irq_chip *chip = desc->irq_data.chip; |
528 | ||
239007b8 | 529 | raw_spin_lock(&desc->lock); |
dd87eb3a | 530 | |
c7bd3ec0 TG |
531 | if (!irq_may_run(desc)) |
532 | goto out; | |
dd87eb3a | 533 | |
163ef309 | 534 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
dd87eb3a TG |
535 | |
536 | /* | |
537 | * If its disabled or no action available | |
76d21601 | 538 | * then mask it and get out of here: |
dd87eb3a | 539 | */ |
32f4125e | 540 | if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { |
2a0d6fb3 | 541 | desc->istate |= IRQS_PENDING; |
e2c0f8ff | 542 | mask_irq(desc); |
dd87eb3a | 543 | goto out; |
98bb244b | 544 | } |
c69e3758 | 545 | |
a946e8c7 | 546 | kstat_incr_irqs_this_cpu(desc); |
c69e3758 TG |
547 | if (desc->istate & IRQS_ONESHOT) |
548 | mask_irq(desc); | |
549 | ||
78129576 | 550 | preflow_handler(desc); |
a7ae4de5 | 551 | handle_irq_event(desc); |
77694b40 | 552 | |
328a4978 | 553 | cond_unmask_eoi_irq(desc, chip); |
ac563761 | 554 | |
239007b8 | 555 | raw_spin_unlock(&desc->lock); |
77694b40 TG |
556 | return; |
557 | out: | |
328a4978 TG |
558 | if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) |
559 | chip->irq_eoi(&desc->irq_data); | |
560 | raw_spin_unlock(&desc->lock); | |
dd87eb3a | 561 | } |
7cad45ee | 562 | EXPORT_SYMBOL_GPL(handle_fasteoi_irq); |
dd87eb3a TG |
563 | |
564 | /** | |
565 | * handle_edge_irq - edge type IRQ handler | |
dd87eb3a | 566 | * @desc: the interrupt description structure for this irq |
dd87eb3a TG |
567 | * |
568 | * Interrupt occures on the falling and/or rising edge of a hardware | |
25985edc | 569 | * signal. The occurrence is latched into the irq controller hardware |
dd87eb3a TG |
570 | * and must be acked in order to be reenabled. After the ack another |
571 | * interrupt can happen on the same source even before the first one | |
dfff0615 | 572 | * is handled by the associated event handler. If this happens it |
dd87eb3a TG |
573 | * might be necessary to disable (mask) the interrupt depending on the |
574 | * controller hardware. This requires to reenable the interrupt inside | |
575 | * of the loop which handles the interrupts which have arrived while | |
576 | * the handler was running. If all pending interrupts are handled, the | |
577 | * loop is left. | |
578 | */ | |
bd0b9ac4 | 579 | void handle_edge_irq(struct irq_desc *desc) |
dd87eb3a | 580 | { |
239007b8 | 581 | raw_spin_lock(&desc->lock); |
dd87eb3a | 582 | |
163ef309 | 583 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
c3d7acd0 | 584 | |
c7bd3ec0 TG |
585 | if (!irq_may_run(desc)) { |
586 | desc->istate |= IRQS_PENDING; | |
587 | mask_ack_irq(desc); | |
588 | goto out_unlock; | |
dd87eb3a | 589 | } |
c3d7acd0 | 590 | |
dd87eb3a | 591 | /* |
c3d7acd0 TG |
592 | * If its disabled or no action available then mask it and get |
593 | * out of here. | |
dd87eb3a | 594 | */ |
c3d7acd0 TG |
595 | if (irqd_irq_disabled(&desc->irq_data) || !desc->action) { |
596 | desc->istate |= IRQS_PENDING; | |
597 | mask_ack_irq(desc); | |
598 | goto out_unlock; | |
dd87eb3a | 599 | } |
c3d7acd0 | 600 | |
b51bf95c | 601 | kstat_incr_irqs_this_cpu(desc); |
dd87eb3a TG |
602 | |
603 | /* Start handling the irq */ | |
22a49163 | 604 | desc->irq_data.chip->irq_ack(&desc->irq_data); |
dd87eb3a | 605 | |
dd87eb3a | 606 | do { |
a60a5dc2 | 607 | if (unlikely(!desc->action)) { |
e2c0f8ff | 608 | mask_irq(desc); |
dd87eb3a TG |
609 | goto out_unlock; |
610 | } | |
611 | ||
612 | /* | |
613 | * When another irq arrived while we were handling | |
614 | * one, we could have masked the irq. | |
615 | * Renable it, if it was not disabled in meantime. | |
616 | */ | |
2a0d6fb3 | 617 | if (unlikely(desc->istate & IRQS_PENDING)) { |
32f4125e TG |
618 | if (!irqd_irq_disabled(&desc->irq_data) && |
619 | irqd_irq_masked(&desc->irq_data)) | |
c1594b77 | 620 | unmask_irq(desc); |
dd87eb3a TG |
621 | } |
622 | ||
a60a5dc2 | 623 | handle_irq_event(desc); |
dd87eb3a | 624 | |
2a0d6fb3 | 625 | } while ((desc->istate & IRQS_PENDING) && |
32f4125e | 626 | !irqd_irq_disabled(&desc->irq_data)); |
dd87eb3a | 627 | |
dd87eb3a | 628 | out_unlock: |
239007b8 | 629 | raw_spin_unlock(&desc->lock); |
dd87eb3a | 630 | } |
3911ff30 | 631 | EXPORT_SYMBOL(handle_edge_irq); |
dd87eb3a | 632 | |
#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 *	handle_edge_eoi_irq - edge eoi type IRQ handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Similar as the above handle_edge_irq, but using eoi and w/o the
 *	mask/unmask logic.
 */
void handle_edge_eoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	/*
	 * If its disabled or no action available then mask it and get
	 * out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	kstat_incr_irqs_this_cpu(desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif
679 | ||
dd87eb3a | 680 | /** |
24b26d42 | 681 | * handle_percpu_irq - Per CPU local irq handler |
dd87eb3a | 682 | * @desc: the interrupt description structure for this irq |
dd87eb3a TG |
683 | * |
684 | * Per CPU interrupts on SMP machines without locking requirements | |
685 | */ | |
bd0b9ac4 | 686 | void handle_percpu_irq(struct irq_desc *desc) |
dd87eb3a | 687 | { |
35e857cb | 688 | struct irq_chip *chip = irq_desc_get_chip(desc); |
dd87eb3a | 689 | |
b51bf95c | 690 | kstat_incr_irqs_this_cpu(desc); |
dd87eb3a | 691 | |
849f061c TG |
692 | if (chip->irq_ack) |
693 | chip->irq_ack(&desc->irq_data); | |
dd87eb3a | 694 | |
71f64340 | 695 | handle_irq_event_percpu(desc); |
dd87eb3a | 696 | |
849f061c TG |
697 | if (chip->irq_eoi) |
698 | chip->irq_eoi(&desc->irq_data); | |
dd87eb3a TG |
699 | } |
700 | ||
31d9d9b6 MZ |
701 | /** |
702 | * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids | |
31d9d9b6 MZ |
703 | * @desc: the interrupt description structure for this irq |
704 | * | |
705 | * Per CPU interrupts on SMP machines without locking requirements. Same as | |
706 | * handle_percpu_irq() above but with the following extras: | |
707 | * | |
708 | * action->percpu_dev_id is a pointer to percpu variables which | |
709 | * contain the real device id for the cpu on which this handler is | |
710 | * called | |
711 | */ | |
bd0b9ac4 | 712 | void handle_percpu_devid_irq(struct irq_desc *desc) |
31d9d9b6 MZ |
713 | { |
714 | struct irq_chip *chip = irq_desc_get_chip(desc); | |
715 | struct irqaction *action = desc->action; | |
532d0d06 | 716 | void *dev_id = raw_cpu_ptr(action->percpu_dev_id); |
bd0b9ac4 | 717 | unsigned int irq = irq_desc_get_irq(desc); |
31d9d9b6 MZ |
718 | irqreturn_t res; |
719 | ||
b51bf95c | 720 | kstat_incr_irqs_this_cpu(desc); |
31d9d9b6 MZ |
721 | |
722 | if (chip->irq_ack) | |
723 | chip->irq_ack(&desc->irq_data); | |
724 | ||
725 | trace_irq_handler_entry(irq, action); | |
726 | res = action->handler(irq, dev_id); | |
727 | trace_irq_handler_exit(irq, action, res); | |
728 | ||
729 | if (chip->irq_eoi) | |
730 | chip->irq_eoi(&desc->irq_data); | |
731 | } | |
732 | ||
dd87eb3a | 733 | void |
3b0f95be RK |
734 | __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle, |
735 | int is_chained, const char *name) | |
dd87eb3a | 736 | { |
091738a2 | 737 | if (!handle) { |
dd87eb3a | 738 | handle = handle_bad_irq; |
091738a2 | 739 | } else { |
f86eff22 MZ |
740 | struct irq_data *irq_data = &desc->irq_data; |
741 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | |
742 | /* | |
743 | * With hierarchical domains we might run into a | |
744 | * situation where the outermost chip is not yet set | |
745 | * up, but the inner chips are there. Instead of | |
746 | * bailing we install the handler, but obviously we | |
747 | * cannot enable/startup the interrupt at this point. | |
748 | */ | |
749 | while (irq_data) { | |
750 | if (irq_data->chip != &no_irq_chip) | |
751 | break; | |
752 | /* | |
753 | * Bail out if the outer chip is not set up | |
754 | * and the interrrupt supposed to be started | |
755 | * right away. | |
756 | */ | |
757 | if (WARN_ON(is_chained)) | |
3b0f95be | 758 | return; |
f86eff22 MZ |
759 | /* Try the parent */ |
760 | irq_data = irq_data->parent_data; | |
761 | } | |
762 | #endif | |
763 | if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip)) | |
3b0f95be | 764 | return; |
f8b5473f | 765 | } |
dd87eb3a | 766 | |
dd87eb3a TG |
767 | /* Uninstall? */ |
768 | if (handle == handle_bad_irq) { | |
6b8ff312 | 769 | if (desc->irq_data.chip != &no_irq_chip) |
9205e31d | 770 | mask_ack_irq(desc); |
801a0e9a | 771 | irq_state_set_disabled(desc); |
e509bd7d MW |
772 | if (is_chained) |
773 | desc->action = NULL; | |
dd87eb3a TG |
774 | desc->depth = 1; |
775 | } | |
776 | desc->handle_irq = handle; | |
a460e745 | 777 | desc->name = name; |
dd87eb3a TG |
778 | |
779 | if (handle != handle_bad_irq && is_chained) { | |
1ccb4e61 TG |
780 | irq_settings_set_noprobe(desc); |
781 | irq_settings_set_norequest(desc); | |
7f1b1244 | 782 | irq_settings_set_nothread(desc); |
e509bd7d | 783 | desc->action = &chained_action; |
b4bc724e | 784 | irq_startup(desc, true); |
dd87eb3a | 785 | } |
3b0f95be RK |
786 | } |
787 | ||
788 | void | |
789 | __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | |
790 | const char *name) | |
791 | { | |
792 | unsigned long flags; | |
793 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0); | |
794 | ||
795 | if (!desc) | |
796 | return; | |
797 | ||
798 | __irq_do_set_handler(desc, handle, is_chained, name); | |
02725e74 | 799 | irq_put_desc_busunlock(desc, flags); |
dd87eb3a | 800 | } |
3836ca08 | 801 | EXPORT_SYMBOL_GPL(__irq_set_handler); |
dd87eb3a | 802 | |
3b0f95be RK |
803 | void |
804 | irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle, | |
805 | void *data) | |
806 | { | |
807 | unsigned long flags; | |
808 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0); | |
809 | ||
810 | if (!desc) | |
811 | return; | |
812 | ||
813 | __irq_do_set_handler(desc, handle, 1, NULL); | |
af7080e0 | 814 | desc->irq_common_data.handler_data = data; |
3b0f95be RK |
815 | |
816 | irq_put_desc_busunlock(desc, flags); | |
817 | } | |
818 | EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data); | |
819 | ||
dd87eb3a | 820 | void |
3836ca08 | 821 | irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, |
a460e745 | 822 | irq_flow_handler_t handle, const char *name) |
dd87eb3a | 823 | { |
35e857cb | 824 | irq_set_chip(irq, chip); |
3836ca08 | 825 | __irq_set_handler(irq, handle, 0, name); |
dd87eb3a | 826 | } |
b3ae66f2 | 827 | EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name); |
46f4f8f6 | 828 | |
44247184 | 829 | void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) |
46f4f8f6 | 830 | { |
46f4f8f6 | 831 | unsigned long flags; |
31d9d9b6 | 832 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); |
46f4f8f6 | 833 | |
44247184 | 834 | if (!desc) |
46f4f8f6 | 835 | return; |
a005677b TG |
836 | irq_settings_clr_and_set(desc, clr, set); |
837 | ||
876dbd4c | 838 | irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU | |
e1ef8241 | 839 | IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT); |
a005677b TG |
840 | if (irq_settings_has_no_balance_set(desc)) |
841 | irqd_set(&desc->irq_data, IRQD_NO_BALANCING); | |
842 | if (irq_settings_is_per_cpu(desc)) | |
843 | irqd_set(&desc->irq_data, IRQD_PER_CPU); | |
e1ef8241 TG |
844 | if (irq_settings_can_move_pcntxt(desc)) |
845 | irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT); | |
0ef5ca1e TG |
846 | if (irq_settings_is_level(desc)) |
847 | irqd_set(&desc->irq_data, IRQD_LEVEL); | |
a005677b | 848 | |
876dbd4c TG |
849 | irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc)); |
850 | ||
02725e74 | 851 | irq_put_desc_unlock(desc, flags); |
46f4f8f6 | 852 | } |
edf76f83 | 853 | EXPORT_SYMBOL_GPL(irq_modify_status); |
0fdb4b25 DD |
854 | |
855 | /** | |
856 | * irq_cpu_online - Invoke all irq_cpu_online functions. | |
857 | * | |
858 | * Iterate through all irqs and invoke the chip.irq_cpu_online() | |
859 | * for each. | |
860 | */ | |
861 | void irq_cpu_online(void) | |
862 | { | |
863 | struct irq_desc *desc; | |
864 | struct irq_chip *chip; | |
865 | unsigned long flags; | |
866 | unsigned int irq; | |
867 | ||
868 | for_each_active_irq(irq) { | |
869 | desc = irq_to_desc(irq); | |
870 | if (!desc) | |
871 | continue; | |
872 | ||
873 | raw_spin_lock_irqsave(&desc->lock, flags); | |
874 | ||
875 | chip = irq_data_get_irq_chip(&desc->irq_data); | |
b3d42232 TG |
876 | if (chip && chip->irq_cpu_online && |
877 | (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || | |
32f4125e | 878 | !irqd_irq_disabled(&desc->irq_data))) |
0fdb4b25 DD |
879 | chip->irq_cpu_online(&desc->irq_data); |
880 | ||
881 | raw_spin_unlock_irqrestore(&desc->lock, flags); | |
882 | } | |
883 | } | |
884 | ||
885 | /** | |
886 | * irq_cpu_offline - Invoke all irq_cpu_offline functions. | |
887 | * | |
888 | * Iterate through all irqs and invoke the chip.irq_cpu_offline() | |
889 | * for each. | |
890 | */ | |
891 | void irq_cpu_offline(void) | |
892 | { | |
893 | struct irq_desc *desc; | |
894 | struct irq_chip *chip; | |
895 | unsigned long flags; | |
896 | unsigned int irq; | |
897 | ||
898 | for_each_active_irq(irq) { | |
899 | desc = irq_to_desc(irq); | |
900 | if (!desc) | |
901 | continue; | |
902 | ||
903 | raw_spin_lock_irqsave(&desc->lock, flags); | |
904 | ||
905 | chip = irq_data_get_irq_chip(&desc->irq_data); | |
b3d42232 TG |
906 | if (chip && chip->irq_cpu_offline && |
907 | (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || | |
32f4125e | 908 | !irqd_irq_disabled(&desc->irq_data))) |
0fdb4b25 DD |
909 | chip->irq_cpu_offline(&desc->irq_data); |
910 | ||
911 | raw_spin_unlock_irqrestore(&desc->lock, flags); | |
912 | } | |
913 | } | |
85f08c17 JL |
914 | |
915 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | |
3cfeffc2 SA |
916 | /** |
917 | * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if | |
918 | * NULL) | |
919 | * @data: Pointer to interrupt specific data | |
920 | */ | |
921 | void irq_chip_enable_parent(struct irq_data *data) | |
922 | { | |
923 | data = data->parent_data; | |
924 | if (data->chip->irq_enable) | |
925 | data->chip->irq_enable(data); | |
926 | else | |
927 | data->chip->irq_unmask(data); | |
928 | } | |
929 | ||
930 | /** | |
931 | * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if | |
932 | * NULL) | |
933 | * @data: Pointer to interrupt specific data | |
934 | */ | |
935 | void irq_chip_disable_parent(struct irq_data *data) | |
936 | { | |
937 | data = data->parent_data; | |
938 | if (data->chip->irq_disable) | |
939 | data->chip->irq_disable(data); | |
940 | else | |
941 | data->chip->irq_mask(data); | |
942 | } | |
943 | ||
85f08c17 JL |
944 | /** |
945 | * irq_chip_ack_parent - Acknowledge the parent interrupt | |
946 | * @data: Pointer to interrupt specific data | |
947 | */ | |
948 | void irq_chip_ack_parent(struct irq_data *data) | |
949 | { | |
950 | data = data->parent_data; | |
951 | data->chip->irq_ack(data); | |
952 | } | |
a4289dc2 | 953 | EXPORT_SYMBOL_GPL(irq_chip_ack_parent); |
85f08c17 | 954 | |
56e8abab YC |
955 | /** |
956 | * irq_chip_mask_parent - Mask the parent interrupt | |
957 | * @data: Pointer to interrupt specific data | |
958 | */ | |
959 | void irq_chip_mask_parent(struct irq_data *data) | |
960 | { | |
961 | data = data->parent_data; | |
962 | data->chip->irq_mask(data); | |
963 | } | |
52b2a05f | 964 | EXPORT_SYMBOL_GPL(irq_chip_mask_parent); |
56e8abab YC |
965 | |
966 | /** | |
967 | * irq_chip_unmask_parent - Unmask the parent interrupt | |
968 | * @data: Pointer to interrupt specific data | |
969 | */ | |
970 | void irq_chip_unmask_parent(struct irq_data *data) | |
971 | { | |
972 | data = data->parent_data; | |
973 | data->chip->irq_unmask(data); | |
974 | } | |
52b2a05f | 975 | EXPORT_SYMBOL_GPL(irq_chip_unmask_parent); |
56e8abab YC |
976 | |
977 | /** | |
978 | * irq_chip_eoi_parent - Invoke EOI on the parent interrupt | |
979 | * @data: Pointer to interrupt specific data | |
980 | */ | |
981 | void irq_chip_eoi_parent(struct irq_data *data) | |
982 | { | |
983 | data = data->parent_data; | |
984 | data->chip->irq_eoi(data); | |
985 | } | |
52b2a05f | 986 | EXPORT_SYMBOL_GPL(irq_chip_eoi_parent); |
56e8abab YC |
987 | |
988 | /** | |
989 | * irq_chip_set_affinity_parent - Set affinity on the parent interrupt | |
990 | * @data: Pointer to interrupt specific data | |
991 | * @dest: The affinity mask to set | |
992 | * @force: Flag to enforce setting (disable online checks) | |
993 | * | |
994 | * Conditinal, as the underlying parent chip might not implement it. | |
995 | */ | |
996 | int irq_chip_set_affinity_parent(struct irq_data *data, | |
997 | const struct cpumask *dest, bool force) | |
998 | { | |
999 | data = data->parent_data; | |
1000 | if (data->chip->irq_set_affinity) | |
1001 | return data->chip->irq_set_affinity(data, dest, force); | |
b7560de1 GS |
1002 | |
1003 | return -ENOSYS; | |
1004 | } | |
1005 | ||
1006 | /** | |
1007 | * irq_chip_set_type_parent - Set IRQ type on the parent interrupt | |
1008 | * @data: Pointer to interrupt specific data | |
1009 | * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h | |
1010 | * | |
1011 | * Conditional, as the underlying parent chip might not implement it. | |
1012 | */ | |
1013 | int irq_chip_set_type_parent(struct irq_data *data, unsigned int type) | |
1014 | { | |
1015 | data = data->parent_data; | |
1016 | ||
1017 | if (data->chip->irq_set_type) | |
1018 | return data->chip->irq_set_type(data, type); | |
56e8abab YC |
1019 | |
1020 | return -ENOSYS; | |
1021 | } | |
52b2a05f | 1022 | EXPORT_SYMBOL_GPL(irq_chip_set_type_parent); |
56e8abab | 1023 | |
85f08c17 JL |
1024 | /** |
1025 | * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware | |
1026 | * @data: Pointer to interrupt specific data | |
1027 | * | |
1028 | * Iterate through the domain hierarchy of the interrupt and check | |
1029 | * whether a hw retrigger function exists. If yes, invoke it. | |
1030 | */ | |
1031 | int irq_chip_retrigger_hierarchy(struct irq_data *data) | |
1032 | { | |
1033 | for (data = data->parent_data; data; data = data->parent_data) | |
1034 | if (data->chip && data->chip->irq_retrigger) | |
1035 | return data->chip->irq_retrigger(data); | |
1036 | ||
6d4affea | 1037 | return 0; |
85f08c17 | 1038 | } |
08b55e2a | 1039 | |
0a4377de JL |
1040 | /** |
1041 | * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt | |
1042 | * @data: Pointer to interrupt specific data | |
8505a81b | 1043 | * @vcpu_info: The vcpu affinity information |
0a4377de JL |
1044 | */ |
1045 | int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info) | |
1046 | { | |
1047 | data = data->parent_data; | |
1048 | if (data->chip->irq_set_vcpu_affinity) | |
1049 | return data->chip->irq_set_vcpu_affinity(data, vcpu_info); | |
1050 | ||
1051 | return -ENOSYS; | |
1052 | } | |
1053 | ||
08b55e2a MZ |
1054 | /** |
1055 | * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt | |
1056 | * @data: Pointer to interrupt specific data | |
1057 | * @on: Whether to set or reset the wake-up capability of this irq | |
1058 | * | |
1059 | * Conditional, as the underlying parent chip might not implement it. | |
1060 | */ | |
1061 | int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on) | |
1062 | { | |
1063 | data = data->parent_data; | |
1064 | if (data->chip->irq_set_wake) | |
1065 | return data->chip->irq_set_wake(data, on); | |
1066 | ||
1067 | return -ENOSYS; | |
1068 | } | |
85f08c17 | 1069 | #endif |
515085ef JL |
1070 | |
1071 | /** | |
1072 | * irq_chip_compose_msi_msg - Componse msi message for a irq chip | |
1073 | * @data: Pointer to interrupt specific data | |
1074 | * @msg: Pointer to the MSI message | |
1075 | * | |
1076 | * For hierarchical domains we find the first chip in the hierarchy | |
1077 | * which implements the irq_compose_msi_msg callback. For non | |
1078 | * hierarchical we use the top level chip. | |
1079 | */ | |
1080 | int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) | |
1081 | { | |
1082 | struct irq_data *pos = NULL; | |
1083 | ||
1084 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | |
1085 | for (; data; data = data->parent_data) | |
1086 | #endif | |
1087 | if (data->chip && data->chip->irq_compose_msi_msg) | |
1088 | pos = data; | |
1089 | if (!pos) | |
1090 | return -ENOSYS; | |
1091 | ||
1092 | pos->chip->irq_compose_msi_msg(pos, msg); | |
1093 | ||
1094 | return 0; | |
1095 | } |