1 /*
2 * linux/kernel/irq/manage.c
3 *
4 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5 * Copyright (C) 2005-2006 Thomas Gleixner
6 *
7 * This file contains driver APIs to the irq subsystem.
8 */
9
10 #define pr_fmt(fmt) "genirq: " fmt
11
12 #include <linux/irq.h>
13 #include <linux/kthread.h>
14 #include <linux/module.h>
15 #include <linux/random.h>
16 #include <linux/interrupt.h>
17 #include <linux/slab.h>
18 #include <linux/sched.h>
19 #include <linux/sched/rt.h>
20 #include <linux/sched/task.h>
21 #include <uapi/linux/sched/types.h>
22 #include <linux/task_work.h>
23
24 #include "internals.h"
25
26 #ifdef CONFIG_IRQ_FORCED_THREADING
27 __read_mostly bool force_irqthreads = IS_ENABLED(CONFIG_IRQ_FORCED_THREADING_DEFAULT);
28
29 static int __init setup_forced_irqthreads(char *arg)
30 {
31 force_irqthreads = true;
32 return 0;
33 }
34 static int __init setup_no_irqthreads(char *arg)
35 {
36 force_irqthreads = false;
37 return 0;
38 }
39 early_param("threadirqs", setup_forced_irqthreads);
40 early_param("nothreadirqs", setup_no_irqthreads);
41 #endif
42
43 static void __synchronize_hardirq(struct irq_desc *desc)
44 {
45 bool inprogress;
46
47 do {
48 unsigned long flags;
49
50 /*
51 * Wait until we're out of the critical section. This might
52 * give the wrong answer due to the lack of memory barriers.
53 */
54 while (irqd_irq_inprogress(&desc->irq_data))
55 cpu_relax();
56
57 /* Ok, that indicated we're done: double-check carefully. */
58 raw_spin_lock_irqsave(&desc->lock, flags);
59 inprogress = irqd_irq_inprogress(&desc->irq_data);
60 raw_spin_unlock_irqrestore(&desc->lock, flags);
61
62 /* Oops, that failed? */
63 } while (inprogress);
64 }
65
66 /**
67 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
68 * @irq: interrupt number to wait for
69 *
70 * This function waits for any pending hard IRQ handlers for this
71 * interrupt to complete before returning. If you use this
72 * function while holding a resource the IRQ handler may need you
73 * will deadlock. It does not take associated threaded handlers
74 * into account.
75 *
76 * Do not use this for shutdown scenarios where you must be sure
77 * that all parts (hardirq and threaded handler) have completed.
78 *
79 * Returns: false if a threaded handler is active.
80 *
81 * This function may be called - with care - from IRQ context.
82 */
83 bool synchronize_hardirq(unsigned int irq)
84 {
85 struct irq_desc *desc = irq_to_desc(irq);
86
87 if (desc) {
88 __synchronize_hardirq(desc);
89 return !atomic_read(&desc->threads_active);
90 }
91
92 return true;
93 }
94 EXPORT_SYMBOL(synchronize_hardirq);
95
96 /**
97 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
98 * @irq: interrupt number to wait for
99 *
100 * This function waits for any pending IRQ handlers for this interrupt
101 * to complete before returning. If you use this function while
102 * holding a resource the IRQ handler may need you will deadlock.
103 *
104 * This function may be called - with care - from IRQ context.
105 */
106 void synchronize_irq(unsigned int irq)
107 {
108 struct irq_desc *desc = irq_to_desc(irq);
109
110 if (desc) {
111 __synchronize_hardirq(desc);
112 /*
113 * We made sure that no hardirq handler is
114 * running. Now verify that no threaded handlers are
115 * active.
116 */
117 wait_event(desc->wait_for_threads,
118 !atomic_read(&desc->threads_active));
119 }
120 }
121 EXPORT_SYMBOL(synchronize_irq);
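
/*
 * Usage sketch (hypothetical driver code, not part of this file): quiescing
 * a device while keeping the interrupt requested. Once the hardware has
 * been told to stop raising interrupts, synchronize_irq() waits for hard
 * and threaded handlers still running on other CPUs; synchronize_hardirq()
 * would only cover the hard handler. All "foo" names are assumed.
 */
static void foo_quiesce(unsigned int irq)
{
	/* 1. Tell the device to stop raising interrupts (device specific). */

	/* 2. Wait for handler invocations already in flight on other CPUs. */
	synchronize_irq(irq);

	/* 3. Data shared with the handlers can now be torn down safely. */
}
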
122
123 #ifdef CONFIG_SMP
124 cpumask_var_t irq_default_affinity;
125
126 static bool __irq_can_set_affinity(struct irq_desc *desc)
127 {
128 if (!desc || !irqd_can_balance(&desc->irq_data) ||
129 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
130 return false;
131 return true;
132 }
133
134 /**
135 * irq_can_set_affinity - Check if the affinity of a given irq can be set
136 * @irq: Interrupt to check
137 *
138 */
139 int irq_can_set_affinity(unsigned int irq)
140 {
141 return __irq_can_set_affinity(irq_to_desc(irq));
142 }
143
144 /**
145 * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
146 * @irq: Interrupt to check
147 *
148 * Like irq_can_set_affinity() above, but additionally checks for the
149 * AFFINITY_MANAGED flag.
150 */
151 bool irq_can_set_affinity_usr(unsigned int irq)
152 {
153 struct irq_desc *desc = irq_to_desc(irq);
154
155 return __irq_can_set_affinity(desc) &&
156 !irqd_affinity_is_managed(&desc->irq_data);
157 }
158
159 /**
160 * irq_set_thread_affinity - Notify irq threads to adjust affinity
161 * @desc: irq descriptor which has affinity changed
162 *
163 * We just set IRQTF_AFFINITY and delegate the affinity setting
164 * to the interrupt thread itself. We can not call
165 * set_cpus_allowed_ptr() here as we hold desc->lock and this
166 * code can be called from hard interrupt context.
167 */
168 void irq_set_thread_affinity(struct irq_desc *desc)
169 {
170 struct irqaction *action;
171
172 for_each_action_of_desc(desc, action)
173 if (action->thread)
174 set_bit(IRQTF_AFFINITY, &action->thread_flags);
175 }
176
177 static void irq_validate_effective_affinity(struct irq_data *data)
178 {
179 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
180 const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
181 struct irq_chip *chip = irq_data_get_irq_chip(data);
182
183 if (!cpumask_empty(m))
184 return;
185 pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
186 chip->name, data->irq);
187 #endif
188 }
189
190 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
191 bool force)
192 {
193 struct irq_desc *desc = irq_data_to_desc(data);
194 struct irq_chip *chip = irq_data_get_irq_chip(data);
195 int ret;
196
197 if (!chip || !chip->irq_set_affinity)
198 return -EINVAL;
199
200 ret = chip->irq_set_affinity(data, mask, force);
201 switch (ret) {
202 case IRQ_SET_MASK_OK:
203 case IRQ_SET_MASK_OK_DONE:
204 cpumask_copy(desc->irq_common_data.affinity, mask);
205 case IRQ_SET_MASK_OK_NOCOPY:
206 irq_validate_effective_affinity(data);
207 irq_set_thread_affinity(desc);
208 ret = 0;
209 }
210
211 return ret;
212 }
213
214 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
215 bool force)
216 {
217 struct irq_chip *chip = irq_data_get_irq_chip(data);
218 struct irq_desc *desc = irq_data_to_desc(data);
219 int ret = 0;
220
221 if (!chip || !chip->irq_set_affinity)
222 return -EINVAL;
223
224 if (irq_can_move_pcntxt(data)) {
225 ret = irq_do_set_affinity(data, mask, force);
226 } else {
227 irqd_set_move_pending(data);
228 irq_copy_pending(desc, mask);
229 }
230
231 if (desc->affinity_notify) {
232 kref_get(&desc->affinity_notify->kref);
233 schedule_work(&desc->affinity_notify->work);
234 }
235 irqd_set(data, IRQD_AFFINITY_SET);
236
237 return ret;
238 }
239
240 int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
241 {
242 struct irq_desc *desc = irq_to_desc(irq);
243 unsigned long flags;
244 int ret;
245
246 if (!desc)
247 return -EINVAL;
248
249 raw_spin_lock_irqsave(&desc->lock, flags);
250 ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
251 raw_spin_unlock_irqrestore(&desc->lock, flags);
252 return ret;
253 }
254
255 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
256 {
257 unsigned long flags;
258 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
259
260 if (!desc)
261 return -EINVAL;
262 desc->affinity_hint = m;
263 irq_put_desc_unlock(desc, flags);
264 /* set the initial affinity to prevent every interrupt being on CPU0 */
265 if (m)
266 __irq_set_affinity(irq, m, false);
267 return 0;
268 }
269 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
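
/*
 * Usage sketch (hypothetical, not part of this file): a multi-queue driver
 * publishing one affinity hint per queue vector so that user space
 * (e.g. irqbalance) and the initial affinity set above spread the vectors
 * over the online CPUs. The hint pointer must stay valid and has to be
 * cleared with a NULL mask before free_irq(). "foo" names are assumed.
 */
static void foo_set_queue_hints(const unsigned int *queue_irqs,
				unsigned int nr_queues)
{
	unsigned int i, cpu = cpumask_first(cpu_online_mask);

	for (i = 0; i < nr_queues; i++) {
		irq_set_affinity_hint(queue_irqs[i], cpumask_of(cpu));

		cpu = cpumask_next(cpu, cpu_online_mask);
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first(cpu_online_mask);
	}
}
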
270
271 static void irq_affinity_notify(struct work_struct *work)
272 {
273 struct irq_affinity_notify *notify =
274 container_of(work, struct irq_affinity_notify, work);
275 struct irq_desc *desc = irq_to_desc(notify->irq);
276 cpumask_var_t cpumask;
277 unsigned long flags;
278
279 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
280 goto out;
281
282 raw_spin_lock_irqsave(&desc->lock, flags);
283 if (irq_move_pending(&desc->irq_data))
284 irq_get_pending(cpumask, desc);
285 else
286 cpumask_copy(cpumask, desc->irq_common_data.affinity);
287 raw_spin_unlock_irqrestore(&desc->lock, flags);
288
289 notify->notify(notify, cpumask);
290
291 free_cpumask_var(cpumask);
292 out:
293 kref_put(&notify->kref, notify->release);
294 }
295
296 /**
297 * irq_set_affinity_notifier - control notification of IRQ affinity changes
298 * @irq: Interrupt for which to enable/disable notification
299 * @notify: Context for notification, or %NULL to disable
300 * notification. Function pointers must be initialised;
301 * the other fields will be initialised by this function.
302 *
303 * Must be called in process context. Notification may only be enabled
304 * after the IRQ is allocated and must be disabled before the IRQ is
305 * freed using free_irq().
306 */
307 int
308 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
309 {
310 struct irq_desc *desc = irq_to_desc(irq);
311 struct irq_affinity_notify *old_notify;
312 unsigned long flags;
313
314 /* The release function is promised process context */
315 might_sleep();
316
317 if (!desc)
318 return -EINVAL;
319
320 /* Complete initialisation of *notify */
321 if (notify) {
322 notify->irq = irq;
323 kref_init(&notify->kref);
324 INIT_WORK(&notify->work, irq_affinity_notify);
325 }
326
327 raw_spin_lock_irqsave(&desc->lock, flags);
328 old_notify = desc->affinity_notify;
329 desc->affinity_notify = notify;
330 raw_spin_unlock_irqrestore(&desc->lock, flags);
331
332 if (old_notify)
333 kref_put(&old_notify->kref, old_notify->release);
334
335 return 0;
336 }
337 EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
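
/*
 * Usage sketch (hypothetical, not part of this file): tracking affinity
 * changes of a queue interrupt. Only the notify() and release() callbacks
 * need to be filled in; irq, kref and work are set up by
 * irq_set_affinity_notifier() itself. Unregister with a NULL notifier
 * before free_irq(). "foo" names are assumed.
 */
struct foo_queue {
	struct irq_affinity_notify affinity_notify;
	/* ... per queue state ... */
};

static void foo_affinity_changed(struct irq_affinity_notify *notify,
				 const cpumask_t *mask)
{
	struct foo_queue *q = container_of(notify, struct foo_queue,
					   affinity_notify);

	/* Re-steer per-queue resources to the CPUs in @mask (device specific). */
	(void)q;
}

static void foo_affinity_release(struct kref *ref)
{
	/* Nothing dynamically allocated to drop in this sketch. */
}

static int foo_register_notifier(struct foo_queue *q, unsigned int irq)
{
	q->affinity_notify.notify = foo_affinity_changed;
	q->affinity_notify.release = foo_affinity_release;
	return irq_set_affinity_notifier(irq, &q->affinity_notify);
}
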
338
339 #ifndef CONFIG_AUTO_IRQ_AFFINITY
340 /*
341 * Generic version of the affinity autoselector.
342 */
343 int irq_setup_affinity(struct irq_desc *desc)
344 {
345 struct cpumask *set = irq_default_affinity;
346 int ret, node = irq_desc_get_node(desc);
347 static DEFINE_RAW_SPINLOCK(mask_lock);
348 static struct cpumask mask;
349
350 /* Excludes PER_CPU and NO_BALANCE interrupts */
351 if (!__irq_can_set_affinity(desc))
352 return 0;
353
354 raw_spin_lock(&mask_lock);
355 /*
356 * Preserve the managed affinity setting and a userspace affinity
357 * setup, but make sure that one of the targets is online.
358 */
359 if (irqd_affinity_is_managed(&desc->irq_data) ||
360 irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
361 if (cpumask_intersects(desc->irq_common_data.affinity,
362 cpu_online_mask))
363 set = desc->irq_common_data.affinity;
364 else
365 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
366 }
367
368 cpumask_and(&mask, cpu_online_mask, set);
369 if (node != NUMA_NO_NODE) {
370 const struct cpumask *nodemask = cpumask_of_node(node);
371
372 /* make sure at least one of the cpus in nodemask is online */
373 if (cpumask_intersects(&mask, nodemask))
374 cpumask_and(&mask, &mask, nodemask);
375 }
376 ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
377 raw_spin_unlock(&mask_lock);
378 return ret;
379 }
380 #else
381 /* Wrapper for ALPHA specific affinity selector magic */
382 int irq_setup_affinity(struct irq_desc *desc)
383 {
384 return irq_select_affinity(irq_desc_get_irq(desc));
385 }
386 #endif
387
388 /*
389 * Called when a bogus affinity is set via /proc/irq
390 */
391 int irq_select_affinity_usr(unsigned int irq)
392 {
393 struct irq_desc *desc = irq_to_desc(irq);
394 unsigned long flags;
395 int ret;
396
397 raw_spin_lock_irqsave(&desc->lock, flags);
398 ret = irq_setup_affinity(desc);
399 raw_spin_unlock_irqrestore(&desc->lock, flags);
400 return ret;
401 }
402 #endif
403
404 /**
405 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
406 * @irq: interrupt number to set affinity
407 * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
408 * specific data for percpu_devid interrupts
409 *
410 * This function uses the vCPU specific data to set the vCPU
411 * affinity for an irq. The vCPU specific data is passed from
412 * outside, such as KVM. One example code path is as below:
413 * KVM -> IOMMU -> irq_set_vcpu_affinity().
414 */
415 int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
416 {
417 unsigned long flags;
418 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
419 struct irq_data *data;
420 struct irq_chip *chip;
421 int ret = -ENOSYS;
422
423 if (!desc)
424 return -EINVAL;
425
426 data = irq_desc_get_irq_data(desc);
427 do {
428 chip = irq_data_get_irq_chip(data);
429 if (chip && chip->irq_set_vcpu_affinity)
430 break;
431 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
432 data = data->parent_data;
433 #else
434 data = NULL;
435 #endif
436 } while (data);
437
438 if (data)
439 ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
440 irq_put_desc_unlock(desc, flags);
441
442 return ret;
443 }
444 EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
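
/*
 * Usage sketch (hypothetical, not part of this file): a hypervisor-side
 * caller handing vCPU posting data down to the irqchip/IOMMU layer and
 * clearing it again. The layout of @vcpu_info is defined by the irqchip
 * whose ->irq_set_vcpu_affinity() callback ends up handling it, not by
 * this API. "foo" names are assumed.
 */
static int foo_post_irq_to_vcpu(unsigned int host_irq, void *vcpu_info)
{
	return irq_set_vcpu_affinity(host_irq, vcpu_info);
}

static void foo_unpost_irq(unsigned int host_irq)
{
	irq_set_vcpu_affinity(host_irq, NULL);
}
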
445
446 void __disable_irq(struct irq_desc *desc)
447 {
448 if (!desc->depth++)
449 irq_disable(desc);
450 }
451
452 static int __disable_irq_nosync(unsigned int irq)
453 {
454 unsigned long flags;
455 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
456
457 if (!desc)
458 return -EINVAL;
459 __disable_irq(desc);
460 irq_put_desc_busunlock(desc, flags);
461 return 0;
462 }
463
464 /**
465 * disable_irq_nosync - disable an irq without waiting
466 * @irq: Interrupt to disable
467 *
468 * Disable the selected interrupt line. Disables and Enables are
469 * nested.
470 * Unlike disable_irq(), this function does not ensure existing
471 * instances of the IRQ handler have completed before returning.
472 *
473 * This function may be called from IRQ context.
474 */
475 void disable_irq_nosync(unsigned int irq)
476 {
477 __disable_irq_nosync(irq);
478 }
479 EXPORT_SYMBOL(disable_irq_nosync);
480
481 /**
482 * disable_irq - disable an irq and wait for completion
483 * @irq: Interrupt to disable
484 *
485 * Disable the selected interrupt line. Enables and Disables are
486 * nested.
487 * This function waits for any pending IRQ handlers for this interrupt
488 * to complete before returning. If you use this function while
489 * holding a resource the IRQ handler may need you will deadlock.
490 *
491 * This function may be called - with care - from IRQ context.
492 */
493 void disable_irq(unsigned int irq)
494 {
495 if (!__disable_irq_nosync(irq))
496 synchronize_irq(irq);
497 }
498 EXPORT_SYMBOL(disable_irq);
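
/*
 * Usage sketch (hypothetical, not part of this file): bracketing a
 * reconfiguration that must not race the handler. disable_irq() waits for
 * running handlers and therefore may sleep, so this pattern is for process
 * context; use disable_irq_nosync() where waiting is impossible or not
 * needed. "foo" names are assumed.
 */
static void foo_reconfigure(unsigned int irq)
{
	disable_irq(irq);

	/* Neither the hard nor the threaded handler runs here. */

	enable_irq(irq);
}
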
499
500 /**
501 * disable_hardirq - disables an irq and waits for hardirq completion
502 * @irq: Interrupt to disable
503 *
504 * Disable the selected interrupt line. Enables and Disables are
505 * nested.
506 * This function waits for any pending hard IRQ handlers for this
507 * interrupt to complete before returning. If you use this function while
508 * holding a resource the hard IRQ handler may need you will deadlock.
509 *
510 * When used to optimistically disable an interrupt from atomic context
511 * the return value must be checked.
512 *
513 * Returns: false if a threaded handler is active.
514 *
515 * This function may be called - with care - from IRQ context.
516 */
517 bool disable_hardirq(unsigned int irq)
518 {
519 if (!__disable_irq_nosync(irq))
520 return synchronize_hardirq(irq);
521
522 return false;
523 }
524 EXPORT_SYMBOL_GPL(disable_hardirq);
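
/*
 * Usage sketch (hypothetical, not part of this file): optimistically
 * disabling an interrupt from atomic context and honouring the return
 * value, as the kernel-doc above requires. The disable takes effect either
 * way, so it must always be balanced with enable_irq(). "foo" names are
 * assumed.
 */
static void foo_poll_atomic(unsigned int irq)
{
	if (disable_hardirq(irq)) {
		/*
		 * No hard handler is running and no threaded handler is
		 * active: safe to poke the hardware directly (device
		 * specific).
		 */
	}
	enable_irq(irq);
}
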
525
526 void __enable_irq(struct irq_desc *desc)
527 {
528 switch (desc->depth) {
529 case 0:
530 err_out:
531 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
532 irq_desc_get_irq(desc));
533 break;
534 case 1: {
535 if (desc->istate & IRQS_SUSPENDED)
536 goto err_out;
537 /* Prevent probing on this irq: */
538 irq_settings_set_noprobe(desc);
539 /*
540 * Call irq_startup() not irq_enable() here because the
541 * interrupt might be marked NOAUTOEN. So irq_startup()
542 * needs to be invoked when it gets enabled the first
543 * time. If it was already started up, then irq_startup()
544 * will invoke irq_enable() under the hood.
545 */
546 irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
547 break;
548 }
549 default:
550 desc->depth--;
551 }
552 }
553
554 /**
555 * enable_irq - enable handling of an irq
556 * @irq: Interrupt to enable
557 *
558 * Undoes the effect of one call to disable_irq(). If this
559 * matches the last disable, processing of interrupts on this
560 * IRQ line is re-enabled.
561 *
562 * This function may be called from IRQ context only when
563 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
564 */
565 void enable_irq(unsigned int irq)
566 {
567 unsigned long flags;
568 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
569
570 if (!desc)
571 return;
572 if (WARN(!desc->irq_data.chip,
573 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
574 goto out;
575
576 __enable_irq(desc);
577 out:
578 irq_put_desc_busunlock(desc, flags);
579 }
580 EXPORT_SYMBOL(enable_irq);
581
582 static int set_irq_wake_real(unsigned int irq, unsigned int on)
583 {
584 struct irq_desc *desc = irq_to_desc(irq);
585 int ret = -ENXIO;
586
587 if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
588 return 0;
589
590 if (desc->irq_data.chip->irq_set_wake)
591 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
592
593 return ret;
594 }
595
596 /**
597 * irq_set_irq_wake - control irq power management wakeup
598 * @irq: interrupt to control
599 * @on: enable/disable power management wakeup
600 *
601 * Enable/disable power management wakeup mode, which is
602 * disabled by default. Enables and disables must match,
603 * just as they match for non-wakeup mode support.
604 *
605 * Wakeup mode lets this IRQ wake the system from sleep
606 * states like "suspend to RAM".
607 */
608 int irq_set_irq_wake(unsigned int irq, unsigned int on)
609 {
610 unsigned long flags;
611 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
612 int ret = 0;
613
614 if (!desc)
615 return -EINVAL;
616
617 /* wakeup-capable irqs can be shared between drivers that
618 * don't need to have the same sleep mode behaviors.
619 */
620 if (on) {
621 if (desc->wake_depth++ == 0) {
622 ret = set_irq_wake_real(irq, on);
623 if (ret)
624 desc->wake_depth = 0;
625 else
626 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
627 }
628 } else {
629 if (desc->wake_depth == 0) {
630 WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
631 } else if (--desc->wake_depth == 0) {
632 ret = set_irq_wake_real(irq, on);
633 if (ret)
634 desc->wake_depth = 1;
635 else
636 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
637 }
638 }
639 irq_put_desc_busunlock(desc, flags);
640 return ret;
641 }
642 EXPORT_SYMBOL(irq_set_irq_wake);
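
/*
 * Usage sketch (hypothetical, not part of this file): arming an interrupt
 * as a system wakeup source around suspend. Calls nest, so every successful
 * enable must be balanced by a disable; enable_irq_wake()/disable_irq_wake()
 * are convenience wrappers around this function. "foo" names are assumed.
 */
static int foo_suspend(unsigned int wake_irq)
{
	return irq_set_irq_wake(wake_irq, 1);
}

static int foo_resume(unsigned int wake_irq)
{
	return irq_set_irq_wake(wake_irq, 0);
}
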
643
644 /*
645 * Internal function that tells the architecture code whether a
646 * particular irq has been exclusively allocated or is available
647 * for driver use.
648 */
649 int can_request_irq(unsigned int irq, unsigned long irqflags)
650 {
651 unsigned long flags;
652 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
653 int canrequest = 0;
654
655 if (!desc)
656 return 0;
657
658 if (irq_settings_can_request(desc)) {
659 if (!desc->action ||
660 irqflags & desc->action->flags & IRQF_SHARED)
661 canrequest = 1;
662 }
663 irq_put_desc_unlock(desc, flags);
664 return canrequest;
665 }
666
667 int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
668 {
669 struct irq_chip *chip = desc->irq_data.chip;
670 int ret, unmask = 0;
671
672 if (!chip || !chip->irq_set_type) {
673 /*
674 * IRQF_TRIGGER_* but the PIC does not support multiple
675 * flow-types?
676 */
677 pr_debug("No set_type function for IRQ %d (%s)\n",
678 irq_desc_get_irq(desc),
679 chip ? (chip->name ? : "unknown") : "unknown");
680 return 0;
681 }
682
683 if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
684 if (!irqd_irq_masked(&desc->irq_data))
685 mask_irq(desc);
686 if (!irqd_irq_disabled(&desc->irq_data))
687 unmask = 1;
688 }
689
690 /* Mask all flags except trigger mode */
691 flags &= IRQ_TYPE_SENSE_MASK;
692 ret = chip->irq_set_type(&desc->irq_data, flags);
693
694 switch (ret) {
695 case IRQ_SET_MASK_OK:
696 case IRQ_SET_MASK_OK_DONE:
697 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
698 irqd_set(&desc->irq_data, flags);
699
700 case IRQ_SET_MASK_OK_NOCOPY:
701 flags = irqd_get_trigger_type(&desc->irq_data);
702 irq_settings_set_trigger_mask(desc, flags);
703 irqd_clear(&desc->irq_data, IRQD_LEVEL);
704 irq_settings_clr_level(desc);
705 if (flags & IRQ_TYPE_LEVEL_MASK) {
706 irq_settings_set_level(desc);
707 irqd_set(&desc->irq_data, IRQD_LEVEL);
708 }
709
710 ret = 0;
711 break;
712 default:
713 pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
714 flags, irq_desc_get_irq(desc), chip->irq_set_type);
715 }
716 if (unmask)
717 unmask_irq(desc);
718 return ret;
719 }
720
721 #ifdef CONFIG_HARDIRQS_SW_RESEND
722 int irq_set_parent(int irq, int parent_irq)
723 {
724 unsigned long flags;
725 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
726
727 if (!desc)
728 return -EINVAL;
729
730 desc->parent_irq = parent_irq;
731
732 irq_put_desc_unlock(desc, flags);
733 return 0;
734 }
735 EXPORT_SYMBOL_GPL(irq_set_parent);
736 #endif
737
738 /*
739 * Default primary interrupt handler for threaded interrupts. Is
740 * assigned as primary handler when request_threaded_irq is called
741 * with handler == NULL. Useful for oneshot interrupts.
742 */
743 static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
744 {
745 return IRQ_WAKE_THREAD;
746 }
747
748 /*
749 * Primary handler for nested threaded interrupts. Should never be
750 * called.
751 */
752 static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
753 {
754 WARN(1, "Primary handler called for nested irq %d\n", irq);
755 return IRQ_NONE;
756 }
757
758 static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
759 {
760 WARN(1, "Secondary action handler called for irq %d\n", irq);
761 return IRQ_NONE;
762 }
763
764 static int irq_wait_for_interrupt(struct irqaction *action)
765 {
766 set_current_state(TASK_INTERRUPTIBLE);
767
768 while (!kthread_should_stop()) {
769
770 if (test_and_clear_bit(IRQTF_RUNTHREAD,
771 &action->thread_flags)) {
772 __set_current_state(TASK_RUNNING);
773 return 0;
774 }
775 schedule();
776 set_current_state(TASK_INTERRUPTIBLE);
777 }
778 __set_current_state(TASK_RUNNING);
779 return -1;
780 }
781
782 /*
783 * Oneshot interrupts keep the irq line masked until the threaded
784 * handler has finished. Unmask if the interrupt has not been disabled
785 * and is marked MASKED.
786 */
787 static void irq_finalize_oneshot(struct irq_desc *desc,
788 struct irqaction *action)
789 {
790 if (!(desc->istate & IRQS_ONESHOT) ||
791 action->handler == irq_forced_secondary_handler)
792 return;
793 again:
794 chip_bus_lock(desc);
795 raw_spin_lock_irq(&desc->lock);
796
797 /*
798 * Implausible though it may be, we need to protect ourselves against
799 * the following scenario:
800 *
801 * The thread may finish before the hard interrupt handler on the
802 * other CPU. If we unmask the irq line then the interrupt can come in
803 * again, mask the line, and leave due to IRQS_INPROGRESS, so the
804 * irq line stays masked forever.
805 *
806 * This also serializes the state of shared oneshot handlers
807 * versus "desc->threads_oneshot |= action->thread_mask;" in
808 * irq_wake_thread(). See the comment there which explains the
809 * serialization.
810 */
811 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
812 raw_spin_unlock_irq(&desc->lock);
813 chip_bus_sync_unlock(desc);
814 cpu_relax();
815 goto again;
816 }
817
818 /*
819 * Now check again, whether the thread should run. Otherwise
820 * we would clear the threads_oneshot bit of this thread which
821 * was just set.
822 */
823 if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
824 goto out_unlock;
825
826 desc->threads_oneshot &= ~action->thread_mask;
827
828 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
829 irqd_irq_masked(&desc->irq_data))
830 unmask_threaded_irq(desc);
831
832 out_unlock:
833 raw_spin_unlock_irq(&desc->lock);
834 chip_bus_sync_unlock(desc);
835 }
836
837 #ifdef CONFIG_SMP
838 /*
839 * Check whether we need to change the affinity of the interrupt thread.
840 */
841 static void
842 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
843 {
844 cpumask_var_t mask;
845 bool valid = true;
846
847 if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
848 return;
849
850 /*
851 * In case we are out of memory we set IRQTF_AFFINITY again and
852 * try again next time
853 */
854 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
855 set_bit(IRQTF_AFFINITY, &action->thread_flags);
856 return;
857 }
858
859 raw_spin_lock_irq(&desc->lock);
860 /*
861 * This code is triggered unconditionally. Check the affinity
862 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
863 */
864 if (cpumask_available(desc->irq_common_data.affinity))
865 cpumask_copy(mask, desc->irq_common_data.affinity);
866 else
867 valid = false;
868 raw_spin_unlock_irq(&desc->lock);
869
870 if (valid)
871 set_cpus_allowed_ptr(current, mask);
872 free_cpumask_var(mask);
873 }
874 #else
875 static inline void
876 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
877 #endif
878
879 /*
880 * Interrupts which are not explicitly requested as threaded
881 * interrupts rely on the implicit bh/preempt disable of the hard irq
882 * context. So we need to disable bh here to avoid deadlocks and other
883 * side effects.
884 */
885 static irqreturn_t
886 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
887 {
888 irqreturn_t ret;
889
890 local_bh_disable();
891 ret = action->thread_fn(action->irq, action->dev_id);
892 irq_finalize_oneshot(desc, action);
893 local_bh_enable();
894 return ret;
895 }
896
897 /*
898 * Interrupts explicitly requested as threaded interrupts want to be
899 * preemptible - many of them need to sleep and wait for slow buses to
900 * complete.
901 */
902 static irqreturn_t irq_thread_fn(struct irq_desc *desc,
903 struct irqaction *action)
904 {
905 irqreturn_t ret;
906
907 ret = action->thread_fn(action->irq, action->dev_id);
908 irq_finalize_oneshot(desc, action);
909 return ret;
910 }
911
912 static void wake_threads_waitq(struct irq_desc *desc)
913 {
914 if (atomic_dec_and_test(&desc->threads_active))
915 wake_up(&desc->wait_for_threads);
916 }
917
918 static void irq_thread_dtor(struct callback_head *unused)
919 {
920 struct task_struct *tsk = current;
921 struct irq_desc *desc;
922 struct irqaction *action;
923
924 if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
925 return;
926
927 action = kthread_data(tsk);
928
929 pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
930 tsk->comm, tsk->pid, action->irq);
931
932
933 desc = irq_to_desc(action->irq);
934 /*
935 * If IRQTF_RUNTHREAD is set, we need to decrement
936 * desc->threads_active and wake possible waiters.
937 */
938 if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
939 wake_threads_waitq(desc);
940
941 /* Prevent a stale desc->threads_oneshot */
942 irq_finalize_oneshot(desc, action);
943 }
944
945 static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
946 {
947 struct irqaction *secondary = action->secondary;
948
949 if (WARN_ON_ONCE(!secondary))
950 return;
951
952 raw_spin_lock_irq(&desc->lock);
953 __irq_wake_thread(desc, secondary);
954 raw_spin_unlock_irq(&desc->lock);
955 }
956
957 /*
958 * Interrupt handler thread
959 */
960 static int irq_thread(void *data)
961 {
962 struct callback_head on_exit_work;
963 struct irqaction *action = data;
964 struct irq_desc *desc = irq_to_desc(action->irq);
965 irqreturn_t (*handler_fn)(struct irq_desc *desc,
966 struct irqaction *action);
967
968 if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
969 &action->thread_flags))
970 handler_fn = irq_forced_thread_fn;
971 else
972 handler_fn = irq_thread_fn;
973
974 init_task_work(&on_exit_work, irq_thread_dtor);
975 task_work_add(current, &on_exit_work, false);
976
977 irq_thread_check_affinity(desc, action);
978
979 while (!irq_wait_for_interrupt(action)) {
980 irqreturn_t action_ret;
981
982 irq_thread_check_affinity(desc, action);
983
984 action_ret = handler_fn(desc, action);
985 if (action_ret == IRQ_HANDLED)
986 atomic_inc(&desc->threads_handled);
987 if (action_ret == IRQ_WAKE_THREAD)
988 irq_wake_secondary(desc, action);
989
990 wake_threads_waitq(desc);
991 }
992
993 /*
994 * This is the regular exit path. __free_irq() is stopping the
995 * thread via kthread_stop() after calling
996 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
997 * oneshot mask bit can be set. We cannot verify that as we
998 * cannot touch the oneshot mask at this point anymore as
999 * __setup_irq() might have given out current's thread_mask
1000 * again.
1001 */
1002 task_work_cancel(current, irq_thread_dtor);
1003 return 0;
1004 }
1005
1006 /**
1007 * irq_wake_thread - wake the irq thread for the action identified by dev_id
1008 * @irq: Interrupt line
1009 * @dev_id: Device identity for which the thread should be woken
1010 *
1011 */
1012 void irq_wake_thread(unsigned int irq, void *dev_id)
1013 {
1014 struct irq_desc *desc = irq_to_desc(irq);
1015 struct irqaction *action;
1016 unsigned long flags;
1017
1018 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1019 return;
1020
1021 raw_spin_lock_irqsave(&desc->lock, flags);
1022 for_each_action_of_desc(desc, action) {
1023 if (action->dev_id == dev_id) {
1024 if (action->thread)
1025 __irq_wake_thread(desc, action);
1026 break;
1027 }
1028 }
1029 raw_spin_unlock_irqrestore(&desc->lock, flags);
1030 }
1031 EXPORT_SYMBOL_GPL(irq_wake_thread);
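
/*
 * Usage sketch (hypothetical, not part of this file): waking the threaded
 * handler from a context other than the primary handler's IRQ_WAKE_THREAD
 * return, e.g. when another event source notices pending work for the same
 * device. @dev_id must be the cookie used when the interrupt was requested.
 * "foo" names are assumed.
 */
static void foo_kick_irq_thread(unsigned int irq, void *dev_id)
{
	irq_wake_thread(irq, dev_id);
}
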
1032
1033 static int irq_setup_forced_threading(struct irqaction *new)
1034 {
1035 if (!force_irqthreads)
1036 return 0;
1037 if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
1038 return 0;
1039
1040 new->flags |= IRQF_ONESHOT;
1041
1042 /*
1043 * Handle the case where we have a real primary handler and a
1044 * thread handler. We force thread them as well by creating a
1045 * secondary action.
1046 */
1047 if (new->handler != irq_default_primary_handler && new->thread_fn) {
1048 /* Allocate the secondary action */
1049 new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1050 if (!new->secondary)
1051 return -ENOMEM;
1052 new->secondary->handler = irq_forced_secondary_handler;
1053 new->secondary->thread_fn = new->thread_fn;
1054 new->secondary->dev_id = new->dev_id;
1055 new->secondary->irq = new->irq;
1056 new->secondary->name = new->name;
1057 }
1058 /* Deal with the primary handler */
1059 set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
1060 new->thread_fn = new->handler;
1061 new->handler = irq_default_primary_handler;
1062 return 0;
1063 }
1064
1065 static int irq_request_resources(struct irq_desc *desc)
1066 {
1067 struct irq_data *d = &desc->irq_data;
1068 struct irq_chip *c = d->chip;
1069
1070 return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1071 }
1072
1073 static void irq_release_resources(struct irq_desc *desc)
1074 {
1075 struct irq_data *d = &desc->irq_data;
1076 struct irq_chip *c = d->chip;
1077
1078 if (c->irq_release_resources)
1079 c->irq_release_resources(d);
1080 }
1081
1082 static int
1083 setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1084 {
1085 struct task_struct *t;
1086 struct sched_param param = {
1087 .sched_priority = MAX_USER_RT_PRIO/2,
1088 };
1089
1090 if (!secondary) {
1091 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1092 new->name);
1093 } else {
1094 t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
1095 new->name);
1096 param.sched_priority -= 1;
1097 }
1098
1099 if (IS_ERR(t))
1100 return PTR_ERR(t);
1101
1102 sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
1103
1104 /*
1105 * We keep the reference to the task struct even if
1106 * the thread dies so that the interrupt code does not
1107 * reference an already freed task_struct.
1108 */
1109 get_task_struct(t);
1110 new->thread = t;
1111 /*
1112 * Tell the thread to set its affinity. This is
1113 * important for shared interrupt handlers as we do
1114 * not invoke setup_affinity() for the secondary
1115 * handlers as everything is already set up. Even for
1116 * interrupts marked with IRQF_NO_BALANCE this is
1117 * correct as we want the thread to move to the cpu(s)
1118 * on which the requesting code placed the interrupt.
1119 */
1120 set_bit(IRQTF_AFFINITY, &new->thread_flags);
1121 return 0;
1122 }
1123
1124 /*
1125 * Internal function to register an irqaction - typically used to
1126 * allocate special interrupts that are part of the architecture.
1127 *
1128 * Locking rules:
1129 *
1130 * desc->request_mutex Provides serialization against a concurrent free_irq()
1131 * chip_bus_lock Provides serialization for slow bus operations
1132 * desc->lock Provides serialization against hard interrupts
1133 *
1134 * chip_bus_lock and desc->lock are sufficient for all other management and
1135 * interrupt related functions. desc->request_mutex solely serializes
1136 * request/free_irq().
1137 */
1138 static int
1139 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1140 {
1141 struct irqaction *old, **old_ptr;
1142 unsigned long flags, thread_mask = 0;
1143 int ret, nested, shared = 0;
1144
1145 if (!desc)
1146 return -EINVAL;
1147
1148 if (desc->irq_data.chip == &no_irq_chip)
1149 return -ENOSYS;
1150 if (!try_module_get(desc->owner))
1151 return -ENODEV;
1152
1153 new->irq = irq;
1154
1155 /*
1156 * If the trigger type is not specified by the caller,
1157 * then use the default for this interrupt.
1158 */
1159 if (!(new->flags & IRQF_TRIGGER_MASK))
1160 new->flags |= irqd_get_trigger_type(&desc->irq_data);
1161
1162 /*
1163 * Check whether the interrupt nests into another interrupt
1164 * thread.
1165 */
1166 nested = irq_settings_is_nested_thread(desc);
1167 if (nested) {
1168 if (!new->thread_fn) {
1169 ret = -EINVAL;
1170 goto out_mput;
1171 }
1172 /*
1173 * Replace the primary handler which was provided from
1174 * the driver for non nested interrupt handling by the
1175 * dummy function which warns when called.
1176 */
1177 new->handler = irq_nested_primary_handler;
1178 } else {
1179 if (irq_settings_can_thread(desc)) {
1180 ret = irq_setup_forced_threading(new);
1181 if (ret)
1182 goto out_mput;
1183 }
1184 }
1185
1186 /*
1187 * Create a handler thread when a thread function is supplied
1188 * and the interrupt does not nest into another interrupt
1189 * thread.
1190 */
1191 if (new->thread_fn && !nested) {
1192 ret = setup_irq_thread(new, irq, false);
1193 if (ret)
1194 goto out_mput;
1195 if (new->secondary) {
1196 ret = setup_irq_thread(new->secondary, irq, true);
1197 if (ret)
1198 goto out_thread;
1199 }
1200 }
1201
1202 /*
1203 * Drivers are often written to work w/o knowledge about the
1204 * underlying irq chip implementation, so a request for a
1205 * threaded irq without a primary hard irq context handler
1206 * requires the ONESHOT flag to be set. Some irq chips like
1207 * MSI based interrupts are per se one shot safe. Check the
1208 * chip flags, so we can avoid the unmask dance at the end of
1209 * the threaded handler for those.
1210 */
1211 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1212 new->flags &= ~IRQF_ONESHOT;
1213
1214 /*
1215 * Protects against a concurrent __free_irq() call which might wait
1216 * for synchronize_irq() to complete without holding the optional
1217 * chip bus lock and desc->lock.
1218 */
1219 mutex_lock(&desc->request_mutex);
1220
1221 /*
1222 * Acquire bus lock as the irq_request_resources() callback below
1223 * might rely on the serialization or the magic power management
1224 * functions which are abusing the irq_bus_lock() callback.
1225 */
1226 chip_bus_lock(desc);
1227
1228 /* First installed action requests resources. */
1229 if (!desc->action) {
1230 ret = irq_request_resources(desc);
1231 if (ret) {
1232 pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1233 new->name, irq, desc->irq_data.chip->name);
1234 goto out_bus_unlock;
1235 }
1236 }
1237
1238 /*
1239 * The following block of code has to be executed atomically
1240 * protected against a concurrent interrupt and any of the other
1241 * management calls which are not serialized via
1242 * desc->request_mutex or the optional bus lock.
1243 */
1244 raw_spin_lock_irqsave(&desc->lock, flags);
1245 old_ptr = &desc->action;
1246 old = *old_ptr;
1247 if (old) {
1248 /*
1249 * Can't share interrupts unless both agree to and are
1250 * the same type (level, edge, polarity). So both flag
1251 * fields must have IRQF_SHARED set and the bits which
1252 * set the trigger type must match. Also all must
1253 * agree on ONESHOT.
1254 */
1255 unsigned int oldtype;
1256
1257 /*
1258 * If nobody did set the configuration before, inherit
1259 * the one provided by the requester.
1260 */
1261 if (irqd_trigger_type_was_set(&desc->irq_data)) {
1262 oldtype = irqd_get_trigger_type(&desc->irq_data);
1263 } else {
1264 oldtype = new->flags & IRQF_TRIGGER_MASK;
1265 irqd_set_trigger_type(&desc->irq_data, oldtype);
1266 }
1267
1268 if (!((old->flags & new->flags) & IRQF_SHARED) ||
1269 (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
1270 ((old->flags ^ new->flags) & IRQF_ONESHOT))
1271 goto mismatch;
1272
1273 /* All handlers must agree on per-cpuness */
1274 if ((old->flags & IRQF_PERCPU) !=
1275 (new->flags & IRQF_PERCPU))
1276 goto mismatch;
1277
1278 /* add new interrupt at end of irq queue */
1279 do {
1280 /*
1281 * Or all existing action->thread_mask bits,
1282 * so we can find the next zero bit for this
1283 * new action.
1284 */
1285 thread_mask |= old->thread_mask;
1286 old_ptr = &old->next;
1287 old = *old_ptr;
1288 } while (old);
1289 shared = 1;
1290 }
1291
1292 /*
1293 * Setup the thread mask for this irqaction for ONESHOT. For
1294 * !ONESHOT irqs the thread mask is 0 so we can avoid a
1295 * conditional in irq_wake_thread().
1296 */
1297 if (new->flags & IRQF_ONESHOT) {
1298 /*
1299 * Unlikely to have 32 (resp. 64) irqs sharing one line,
1300 * but who knows.
1301 */
1302 if (thread_mask == ~0UL) {
1303 ret = -EBUSY;
1304 goto out_unlock;
1305 }
1306 /*
1307 * The thread_mask for the action is or'ed to
1308 * desc->threads_oneshot to indicate that the
1309 * IRQF_ONESHOT thread handler has been woken, but not
1310 * yet finished. The bit is cleared when a thread
1311 * completes. When all threads of a shared interrupt
1312 * line have completed desc->threads_oneshot becomes
1313 * zero and the interrupt line is unmasked. See
1314 * handle.c:irq_wake_thread() for further information.
1315 *
1316 * If no thread is woken by primary (hard irq context)
1317 * interrupt handlers, then desc->threads_oneshot is
1318 * also checked for zero to unmask the irq line in the
1319 * affected hard irq flow handlers
1320 * (handle_[fasteoi|level]_irq).
1321 *
1322 * The new action gets the first zero bit of
1323 * thread_mask assigned. See the loop above which or's
1324 * all existing action->thread_mask bits.
1325 */
1326 new->thread_mask = 1UL << ffz(thread_mask);
1327
1328 } else if (new->handler == irq_default_primary_handler &&
1329 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1330 /*
1331 * The interrupt was requested with handler = NULL, so
1332 * we use the default primary handler for it. But it
1333 * does not have the oneshot flag set. In combination
1334 * with level interrupts this is deadly, because the
1335 * default primary handler just wakes the thread, then
1336 * the irq line is reenabled, but the device still
1337 * has the level irq asserted. Rinse and repeat....
1338 *
1339 * While this works for edge type interrupts, we play
1340 * it safe and reject unconditionally because we can't
1341 * say for sure which type this interrupt really
1342 * has. The type flags are unreliable as the
1343 * underlying chip implementation can override them.
1344 */
1345 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
1346 irq);
1347 ret = -EINVAL;
1348 goto out_unlock;
1349 }
1350
1351 if (!shared) {
1352 init_waitqueue_head(&desc->wait_for_threads);
1353
1354 /* Setup the type (level, edge polarity) if configured: */
1355 if (new->flags & IRQF_TRIGGER_MASK) {
1356 ret = __irq_set_trigger(desc,
1357 new->flags & IRQF_TRIGGER_MASK);
1358
1359 if (ret)
1360 goto out_unlock;
1361 }
1362
1363 /*
1364 * Activate the interrupt. That activation must happen
1365 * independently of IRQ_NOAUTOEN. request_irq() can fail
1366 * and the callers are supposed to handle
1367 * that. enable_irq() of an interrupt requested with
1368 * IRQ_NOAUTOEN is not supposed to fail. The activation
1369 * keeps it in shutdown mode, it merely associates
1370 * resources if necessary and if that's not possible it
1371 * fails. Interrupts which are in managed shutdown mode
1372 * will simply ignore that activation request.
1373 */
1374 ret = irq_activate(desc);
1375 if (ret)
1376 goto out_unlock;
1377
1378 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1379 IRQS_ONESHOT | IRQS_WAITING);
1380 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1381
1382 if (new->flags & IRQF_PERCPU) {
1383 irqd_set(&desc->irq_data, IRQD_PER_CPU);
1384 irq_settings_set_per_cpu(desc);
1385 }
1386
1387 if (new->flags & IRQF_ONESHOT)
1388 desc->istate |= IRQS_ONESHOT;
1389
1390 /* Exclude IRQ from balancing if requested */
1391 if (new->flags & IRQF_NOBALANCING) {
1392 irq_settings_set_no_balancing(desc);
1393 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1394 }
1395
1396 if (irq_settings_can_autoenable(desc)) {
1397 irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
1398 } else {
1399 /*
1400 * Shared interrupts do not go well with disabling
1401 * auto enable. A sharing partner might request the
1402 * interrupt while it's still disabled and then wait for
1403 * interrupts forever.
1404 */
1405 WARN_ON_ONCE(new->flags & IRQF_SHARED);
1406 /* Undo nested disables: */
1407 desc->depth = 1;
1408 }
1409
1410 } else if (new->flags & IRQF_TRIGGER_MASK) {
1411 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1412 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1413
1414 if (nmsk != omsk)
1415 /* hope the handler works with current trigger mode */
1416 pr_warn("irq %d uses trigger mode %u; requested %u\n",
1417 irq, omsk, nmsk);
1418 }
1419
1420 *old_ptr = new;
1421
1422 irq_pm_install_action(desc, new);
1423
1424 /* Reset broken irq detection when installing new handler */
1425 desc->irq_count = 0;
1426 desc->irqs_unhandled = 0;
1427
1428 /*
1429 * Check whether we disabled the irq via the spurious handler
1430 * before. Reenable it and give it another chance.
1431 */
1432 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1433 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1434 __enable_irq(desc);
1435 }
1436
1437 raw_spin_unlock_irqrestore(&desc->lock, flags);
1438 chip_bus_sync_unlock(desc);
1439 mutex_unlock(&desc->request_mutex);
1440
1441 irq_setup_timings(desc, new);
1442
1443 /*
1444 * Strictly no need to wake it up, but hung_task complains
1445 * when no hard interrupt wakes the thread up.
1446 */
1447 if (new->thread)
1448 wake_up_process(new->thread);
1449 if (new->secondary)
1450 wake_up_process(new->secondary->thread);
1451
1452 register_irq_proc(irq, desc);
1453 new->dir = NULL;
1454 register_handler_proc(irq, new);
1455 return 0;
1456
1457 mismatch:
1458 if (!(new->flags & IRQF_PROBE_SHARED)) {
1459 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1460 irq, new->flags, new->name, old->flags, old->name);
1461 #ifdef CONFIG_DEBUG_SHIRQ
1462 dump_stack();
1463 #endif
1464 }
1465 ret = -EBUSY;
1466
1467 out_unlock:
1468 raw_spin_unlock_irqrestore(&desc->lock, flags);
1469
1470 if (!desc->action)
1471 irq_release_resources(desc);
1472 out_bus_unlock:
1473 chip_bus_sync_unlock(desc);
1474 mutex_unlock(&desc->request_mutex);
1475
1476 out_thread:
1477 if (new->thread) {
1478 struct task_struct *t = new->thread;
1479
1480 new->thread = NULL;
1481 kthread_stop(t);
1482 put_task_struct(t);
1483 }
1484 if (new->secondary && new->secondary->thread) {
1485 struct task_struct *t = new->secondary->thread;
1486
1487 new->secondary->thread = NULL;
1488 kthread_stop(t);
1489 put_task_struct(t);
1490 }
1491 out_mput:
1492 module_put(desc->owner);
1493 return ret;
1494 }
1495
1496 /**
1497 * setup_irq - setup an interrupt
1498 * @irq: Interrupt line to setup
1499 * @act: irqaction for the interrupt
1500 *
1501 * Used to statically setup interrupts in the early boot process.
1502 */
1503 int setup_irq(unsigned int irq, struct irqaction *act)
1504 {
1505 int retval;
1506 struct irq_desc *desc = irq_to_desc(irq);
1507
1508 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1509 return -EINVAL;
1510
1511 retval = irq_chip_pm_get(&desc->irq_data);
1512 if (retval < 0)
1513 return retval;
1514
1515 retval = __setup_irq(irq, desc, act);
1516
1517 if (retval)
1518 irq_chip_pm_put(&desc->irq_data);
1519
1520 return retval;
1521 }
1522 EXPORT_SYMBOL_GPL(setup_irq);
1523
1524 /*
1525 * Internal function to unregister an irqaction - used to free
1526 * regular and special interrupts that are part of the architecture.
1527 */
1528 static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1529 {
1530 struct irq_desc *desc = irq_to_desc(irq);
1531 struct irqaction *action, **action_ptr;
1532 unsigned long flags;
1533
1534 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1535
1536 if (!desc)
1537 return NULL;
1538
1539 mutex_lock(&desc->request_mutex);
1540 chip_bus_lock(desc);
1541 raw_spin_lock_irqsave(&desc->lock, flags);
1542
1543 /*
1544 * There can be multiple actions per IRQ descriptor, find the right
1545 * one based on the dev_id:
1546 */
1547 action_ptr = &desc->action;
1548 for (;;) {
1549 action = *action_ptr;
1550
1551 if (!action) {
1552 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1553 raw_spin_unlock_irqrestore(&desc->lock, flags);
1554 chip_bus_sync_unlock(desc);
1555 mutex_unlock(&desc->request_mutex);
1556 return NULL;
1557 }
1558
1559 if (action->dev_id == dev_id)
1560 break;
1561 action_ptr = &action->next;
1562 }
1563
1564 /* Found it - now remove it from the list of entries: */
1565 *action_ptr = action->next;
1566
1567 irq_pm_remove_action(desc, action);
1568
1569 /* If this was the last handler, shut down the IRQ line: */
1570 if (!desc->action) {
1571 irq_settings_clr_disable_unlazy(desc);
1572 irq_shutdown(desc);
1573 }
1574
1575 #ifdef CONFIG_SMP
1576 /* make sure affinity_hint is cleaned up */
1577 if (WARN_ON_ONCE(desc->affinity_hint))
1578 desc->affinity_hint = NULL;
1579 #endif
1580
1581 raw_spin_unlock_irqrestore(&desc->lock, flags);
1582 /*
1583 * Drop bus_lock here so the changes which were done in the chip
1584 * callbacks above are synced out to the irq chips which hang
1585 * behind a slow bus (I2C, SPI) before calling synchronize_irq().
1586 *
1587 * Aside from that, the bus_lock can also be taken from the threaded
1588 * handler in irq_finalize_oneshot() which results in a deadlock
1589 * because synchronize_irq() would wait forever for the thread to
1590 * complete, which is blocked on the bus lock.
1591 *
1592 * The still held desc->request_mutex protects against a
1593 * concurrent request_irq() of this irq so the release of resources
1594 * and timing data is properly serialized.
1595 */
1596 chip_bus_sync_unlock(desc);
1597
1598 unregister_handler_proc(irq, action);
1599
1600 /* Make sure it's not being used on another CPU: */
1601 synchronize_irq(irq);
1602
1603 #ifdef CONFIG_DEBUG_SHIRQ
1604 /*
1605 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1606 * event to happen even now that it's being freed, so let's make sure that
1607 * is so by doing an extra call to the handler ....
1608 *
1609 * ( We do this after actually deregistering it, to make sure that a
1610 * 'real' IRQ doesn't run in parallel with our fake. )
1611 */
1612 if (action->flags & IRQF_SHARED) {
1613 local_irq_save(flags);
1614 action->handler(irq, dev_id);
1615 local_irq_restore(flags);
1616 }
1617 #endif
1618
1619 if (action->thread) {
1620 kthread_stop(action->thread);
1621 put_task_struct(action->thread);
1622 if (action->secondary && action->secondary->thread) {
1623 kthread_stop(action->secondary->thread);
1624 put_task_struct(action->secondary->thread);
1625 }
1626 }
1627
1628 /* Last action releases resources */
1629 if (!desc->action) {
1630 /*
1631 * Reacquire bus lock as irq_release_resources() might
1632 * require it to deallocate resources over the slow bus.
1633 */
1634 chip_bus_lock(desc);
1635 irq_release_resources(desc);
1636 chip_bus_sync_unlock(desc);
1637 irq_remove_timings(desc);
1638 }
1639
1640 mutex_unlock(&desc->request_mutex);
1641
1642 irq_chip_pm_put(&desc->irq_data);
1643 module_put(desc->owner);
1644 kfree(action->secondary);
1645 return action;
1646 }
1647
1648 /**
1649 * remove_irq - free an interrupt
1650 * @irq: Interrupt line to free
1651 * @act: irqaction for the interrupt
1652 *
1653 * Used to remove interrupts statically setup by the early boot process.
1654 */
1655 void remove_irq(unsigned int irq, struct irqaction *act)
1656 {
1657 struct irq_desc *desc = irq_to_desc(irq);
1658
1659 if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1660 __free_irq(irq, act->dev_id);
1661 }
1662 EXPORT_SYMBOL_GPL(remove_irq);
1663
1664 /**
1665 * free_irq - free an interrupt allocated with request_irq
1666 * @irq: Interrupt line to free
1667 * @dev_id: Device identity to free
1668 *
1669 * Remove an interrupt handler. The handler is removed and if the
1670 * interrupt line is no longer in use by any driver it is disabled.
1671 * On a shared IRQ the caller must ensure the interrupt is disabled
1672 * on the card it drives before calling this function. The function
1673 * does not return until any executing interrupts for this IRQ
1674 * have completed.
1675 *
1676 * This function must not be called from interrupt context.
1677 *
1678 * Returns the devname argument passed to request_irq.
1679 */
1680 const void *free_irq(unsigned int irq, void *dev_id)
1681 {
1682 struct irq_desc *desc = irq_to_desc(irq);
1683 struct irqaction *action;
1684 const char *devname;
1685
1686 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1687 return NULL;
1688
1689 #ifdef CONFIG_SMP
1690 if (WARN_ON(desc->affinity_notify))
1691 desc->affinity_notify = NULL;
1692 #endif
1693
1694 action = __free_irq(irq, dev_id);
1695
1696 if (!action)
1697 return NULL;
1698
1699 devname = action->name;
1700 kfree(action);
1701 return devname;
1702 }
1703 EXPORT_SYMBOL(free_irq);
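
/*
 * Usage sketch (hypothetical, not part of this file): tearing down an
 * interrupt. The device is silenced first, any affinity hint or notifier
 * installed at setup time is removed, and only then is the line freed.
 * The return value is the devname string passed to request_*irq(). "foo"
 * names are assumed.
 */
static void foo_teardown_irq(unsigned int irq, void *dev_id)
{
	/* 1. Stop the device from raising this interrupt (device specific). */

	/* 2. Drop the hint/notifier registered earlier, if any. */
	irq_set_affinity_hint(irq, NULL);
	irq_set_affinity_notifier(irq, NULL);

	/* 3. Remove the handler; returns after running handlers complete. */
	free_irq(irq, dev_id);
}
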
1704
1705 /**
1706 * request_threaded_irq - allocate an interrupt line
1707 * @irq: Interrupt line to allocate
1708 * @handler: Function to be called when the IRQ occurs.
1709 * Primary handler for threaded interrupts
1710 * If NULL and thread_fn != NULL the default
1711 * primary handler is installed
1712 * @thread_fn: Function called from the irq handler thread
1713 * If NULL, no irq thread is created
1714 * @irqflags: Interrupt type flags
1715 * @devname: An ascii name for the claiming device
1716 * @dev_id: A cookie passed back to the handler function
1717 *
1718 * This call allocates interrupt resources and enables the
1719 * interrupt line and IRQ handling. From the point this
1720 * call is made your handler function may be invoked. Since
1721 * your handler function must clear any interrupt the board
1722 * raises, you must take care both to initialise your hardware
1723 * and to set up the interrupt handler in the right order.
1724 *
1725 * If you want to set up a threaded irq handler for your device
1726 * then you need to supply @handler and @thread_fn. @handler is
1727 * still called in hard interrupt context and has to check
1728 * whether the interrupt originates from the device. If yes it
1729 * needs to disable the interrupt on the device and return
1730 * IRQ_WAKE_THREAD which will wake up the handler thread and run
1731 * @thread_fn. This split handler design is necessary to support
1732 * shared interrupts.
1733 *
1734 * Dev_id must be globally unique. Normally the address of the
1735 * device data structure is used as the cookie. Since the handler
1736 * receives this value it makes sense to use it.
1737 *
1738 * If your interrupt is shared you must pass a non NULL dev_id
1739 * as this is required when freeing the interrupt.
1740 *
1741 * Flags:
1742 *
1743 * IRQF_SHARED Interrupt is shared
1744 * IRQF_TRIGGER_* Specify active edge(s) or level
1745 *
1746 */
1747 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1748 irq_handler_t thread_fn, unsigned long irqflags,
1749 const char *devname, void *dev_id)
1750 {
1751 struct irqaction *action;
1752 struct irq_desc *desc;
1753 int retval;
1754
1755 if (irq == IRQ_NOTCONNECTED)
1756 return -ENOTCONN;
1757
1758 /*
1759 * Sanity-check: shared interrupts must pass in a real dev-ID,
1760 * otherwise we'll have trouble later trying to figure out
1761 * which interrupt is which (messes up the interrupt freeing
1762 * logic etc).
1763 *
1764 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
1765 * it cannot be set along with IRQF_NO_SUSPEND.
1766 */
1767 if (((irqflags & IRQF_SHARED) && !dev_id) ||
1768 (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
1769 ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
1770 return -EINVAL;
1771
1772 desc = irq_to_desc(irq);
1773 if (!desc)
1774 return -EINVAL;
1775
1776 if (!irq_settings_can_request(desc) ||
1777 WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1778 return -EINVAL;
1779
1780 if (!handler) {
1781 if (!thread_fn)
1782 return -EINVAL;
1783 handler = irq_default_primary_handler;
1784 }
1785
1786 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1787 if (!action)
1788 return -ENOMEM;
1789
1790 action->handler = handler;
1791 action->thread_fn = thread_fn;
1792 action->flags = irqflags;
1793 action->name = devname;
1794 action->dev_id = dev_id;
1795
1796 retval = irq_chip_pm_get(&desc->irq_data);
1797 if (retval < 0) {
1798 kfree(action);
1799 return retval;
1800 }
1801
1802 retval = __setup_irq(irq, desc, action);
1803
1804 if (retval) {
1805 irq_chip_pm_put(&desc->irq_data);
1806 kfree(action->secondary);
1807 kfree(action);
1808 }
1809
1810 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
1811 if (!retval && (irqflags & IRQF_SHARED)) {
1812 /*
1813 * It's a shared IRQ -- the driver ought to be prepared for it
1814 * to happen immediately, so let's make sure....
1815 * We disable the irq to make sure that a 'real' IRQ doesn't
1816 * run in parallel with our fake.
1817 */
1818 unsigned long flags;
1819
1820 disable_irq(irq);
1821 local_irq_save(flags);
1822
1823 handler(irq, dev_id);
1824
1825 local_irq_restore(flags);
1826 enable_irq(irq);
1827 }
1828 #endif
1829 return retval;
1830 }
1831 EXPORT_SYMBOL(request_threaded_irq);
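
/*
 * Usage sketch (hypothetical, not part of this file): the split handler
 * design described above. The primary handler runs in hard interrupt
 * context, checks and silences the device, and defers the slow work to the
 * thread; the threaded handler may sleep. All "foo" names are assumed.
 */
static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
	/*
	 * Check the device interrupt status via dev_id (device specific).
	 * On a shared line, return IRQ_NONE when this device did not raise
	 * the interrupt. Otherwise quiet the device and wake the thread:
	 */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	/* Preemptible kernel thread: may sleep and talk to slow buses. */
	return IRQ_HANDLED;
}

static int foo_setup_irq(unsigned int irq, void *foo)
{
	/*
	 * With handler == NULL the default primary handler would be used
	 * and IRQF_ONESHOT would be required (unless the irqchip is oneshot
	 * safe); here a real primary handler is supplied because the line
	 * is shared.
	 */
	return request_threaded_irq(irq, foo_hardirq, foo_thread_fn,
				    IRQF_SHARED, "foo", foo);
}
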
1832
1833 /**
1834 * request_any_context_irq - allocate an interrupt line
1835 * @irq: Interrupt line to allocate
1836 * @handler: Function to be called when the IRQ occurs.
1837 * Threaded handler for threaded interrupts.
1838 * @flags: Interrupt type flags
1839 * @name: An ascii name for the claiming device
1840 * @dev_id: A cookie passed back to the handler function
1841 *
1842 * This call allocates interrupt resources and enables the
1843 * interrupt line and IRQ handling. It selects either a
1844 * hardirq or threaded handling method depending on the
1845 * context.
1846 *
1847 * On failure, it returns a negative value. On success,
1848 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
1849 */
1850 int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1851 unsigned long flags, const char *name, void *dev_id)
1852 {
1853 struct irq_desc *desc;
1854 int ret;
1855
1856 if (irq == IRQ_NOTCONNECTED)
1857 return -ENOTCONN;
1858
1859 desc = irq_to_desc(irq);
1860 if (!desc)
1861 return -EINVAL;
1862
1863 if (irq_settings_is_nested_thread(desc)) {
1864 ret = request_threaded_irq(irq, NULL, handler,
1865 flags, name, dev_id);
1866 return !ret ? IRQC_IS_NESTED : ret;
1867 }
1868
1869 ret = request_irq(irq, handler, flags, name, dev_id);
1870 return !ret ? IRQC_IS_HARDIRQ : ret;
1871 }
1872 EXPORT_SYMBOL_GPL(request_any_context_irq);
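
/*
 * Usage sketch (hypothetical, not part of this file): requesting an
 * interrupt whose controller may be nested behind a slow bus (e.g. a GPIO
 * expander on I2C) and interpreting the positive return code. "foo" names
 * are assumed.
 */
static int foo_request_any(unsigned int irq, irq_handler_t handler, void *dev)
{
	int ret = request_any_context_irq(irq, handler, IRQF_TRIGGER_FALLING,
					  "foo", dev);
	if (ret < 0)
		return ret;

	/*
	 * ret is IRQC_IS_NESTED when @handler will run in the thread of the
	 * parent interrupt, IRQC_IS_HARDIRQ when it runs in hard irq context.
	 */
	return 0;
}
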
1873
1874 void enable_percpu_irq(unsigned int irq, unsigned int type)
1875 {
1876 unsigned int cpu = smp_processor_id();
1877 unsigned long flags;
1878 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1879
1880 if (!desc)
1881 return;
1882
1883 /*
1884 * If the trigger type is not specified by the caller, then
1885 * use the default for this interrupt.
1886 */
1887 type &= IRQ_TYPE_SENSE_MASK;
1888 if (type == IRQ_TYPE_NONE)
1889 type = irqd_get_trigger_type(&desc->irq_data);
1890
1891 if (type != IRQ_TYPE_NONE) {
1892 int ret;
1893
1894 ret = __irq_set_trigger(desc, type);
1895
1896 if (ret) {
1897 WARN(1, "failed to set type for IRQ%d\n", irq);
1898 goto out;
1899 }
1900 }
1901
1902 irq_percpu_enable(desc, cpu);
1903 out:
1904 irq_put_desc_unlock(desc, flags);
1905 }
1906 EXPORT_SYMBOL_GPL(enable_percpu_irq);
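/*
 * Illustrative usage sketch: a per-CPU interrupt must be enabled on every
 * CPU that should receive it, typically from code running on that CPU such
 * as a CPU hotplug "online" callback. "baz_irq" is a placeholder.
 */
#include <linux/interrupt.h>
#include <linux/irq.h>

static unsigned int baz_irq;

static int baz_cpu_online(unsigned int cpu)
{
	/* Runs on @cpu; IRQ_TYPE_NONE keeps the trigger type already set. */
	enable_percpu_irq(baz_irq, IRQ_TYPE_NONE);
	return 0;
}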
1907
1908 /**
1909 * irq_percpu_is_enabled - Check whether the per-cpu irq is enabled
1910 * @irq: Linux irq number to check for
1911 *
1912 * Must be called from a non-migratable context. Returns the enable
1913 * state of a per-cpu interrupt on the current CPU.
1914 */
1915 bool irq_percpu_is_enabled(unsigned int irq)
1916 {
1917 unsigned int cpu = smp_processor_id();
1918 struct irq_desc *desc;
1919 unsigned long flags;
1920 bool is_enabled;
1921
1922 desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1923 if (!desc)
1924 return false;
1925
1926 is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
1927 irq_put_desc_unlock(desc, flags);
1928
1929 return is_enabled;
1930 }
1931 EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
1932
1933 void disable_percpu_irq(unsigned int irq)
1934 {
1935 unsigned int cpu = smp_processor_id();
1936 unsigned long flags;
1937 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1938
1939 if (!desc)
1940 return;
1941
1942 irq_percpu_disable(desc, cpu);
1943 irq_put_desc_unlock(desc, flags);
1944 }
1945 EXPORT_SYMBOL_GPL(disable_percpu_irq);
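/*
 * Illustrative usage sketch, continuing the hotplug example above: the
 * matching "offline" callback runs on the CPU that is going down and
 * disables the per-CPU interrupt there. "baz_irq" is a placeholder.
 */
static int baz_cpu_offline(unsigned int cpu)
{
	/* Both calls operate on the current CPU only. */
	if (irq_percpu_is_enabled(baz_irq))
		disable_percpu_irq(baz_irq);
	return 0;
}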
1946
1947 /*
1948 * Internal function to unregister a percpu irqaction.
1949 */
1950 static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1951 {
1952 struct irq_desc *desc = irq_to_desc(irq);
1953 struct irqaction *action;
1954 unsigned long flags;
1955
1956 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1957
1958 if (!desc)
1959 return NULL;
1960
1961 raw_spin_lock_irqsave(&desc->lock, flags);
1962
1963 action = desc->action;
1964 if (!action || action->percpu_dev_id != dev_id) {
1965 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1966 goto bad;
1967 }
1968
1969 if (!cpumask_empty(desc->percpu_enabled)) {
1970 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
1971 irq, cpumask_first(desc->percpu_enabled));
1972 goto bad;
1973 }
1974
1975 /* Found it - now remove it from the list of entries: */
1976 desc->action = NULL;
1977
1978 raw_spin_unlock_irqrestore(&desc->lock, flags);
1979
1980 unregister_handler_proc(irq, action);
1981
1982 irq_chip_pm_put(&desc->irq_data);
1983 module_put(desc->owner);
1984 return action;
1985
1986 bad:
1987 raw_spin_unlock_irqrestore(&desc->lock, flags);
1988 return NULL;
1989 }
1990
1991 /**
1992 * remove_percpu_irq - free a per-cpu interrupt
1993 * @irq: Interrupt line to free
1994 * @act: irqaction for the interrupt
1995 *
1996 * Used to remove interrupts statically set up by the early boot process.
1997 */
1998 void remove_percpu_irq(unsigned int irq, struct irqaction *act)
1999 {
2000 struct irq_desc *desc = irq_to_desc(irq);
2001
2002 if (desc && irq_settings_is_per_cpu_devid(desc))
2003 __free_percpu_irq(irq, act->percpu_dev_id);
2004 }
2005
2006 /**
2007 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
2008 * @irq: Interrupt line to free
2009 * @dev_id: Device identity to free
2010 *
2011 * Remove a percpu interrupt handler. The handler is removed, but
2012 * the interrupt line is not disabled. This must be done on each
2013 * CPU before calling this function. The function does not return
2014 * until any executing interrupts for this IRQ have completed.
2015 *
2016 * This function must not be called from interrupt context.
2017 */
2018 void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2019 {
2020 struct irq_desc *desc = irq_to_desc(irq);
2021
2022 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2023 return;
2024
2025 chip_bus_lock(desc);
2026 kfree(__free_percpu_irq(irq, dev_id));
2027 chip_bus_sync_unlock(desc);
2028 }
2029 EXPORT_SYMBOL_GPL(free_percpu_irq);
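/*
 * Illustrative teardown sketch: free_percpu_irq() does not disable the
 * line, so it has to be disabled on every CPU first; one way is an
 * on_each_cpu() helper as below. "baz_irq", "baz_pcpu" and the helper names
 * are placeholders, and whether doing the disable from IPI context is
 * appropriate depends on the driver.
 */
#include <linux/interrupt.h>
#include <linux/smp.h>

static void baz_disable_this_cpu(void *info)
{
	disable_percpu_irq(baz_irq);
}

static void baz_teardown(void __percpu *baz_pcpu)
{
	/* Disable on all online CPUs, then remove the handler. */
	on_each_cpu(baz_disable_this_cpu, NULL, 1);
	free_percpu_irq(baz_irq, baz_pcpu);
}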
2030
2031 /**
2032 * setup_percpu_irq - setup a per-cpu interrupt
2033 * @irq: Interrupt line to setup
2034 * @act: irqaction for the interrupt
2035 *
2036 * Used to statically set up per-cpu interrupts in the early boot process.
2037 */
2038 int setup_percpu_irq(unsigned int irq, struct irqaction *act)
2039 {
2040 struct irq_desc *desc = irq_to_desc(irq);
2041 int retval;
2042
2043 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2044 return -EINVAL;
2045
2046 retval = irq_chip_pm_get(&desc->irq_data);
2047 if (retval < 0)
2048 return retval;
2049
2050 retval = __setup_irq(irq, desc, act);
2051
2052 if (retval)
2053 irq_chip_pm_put(&desc->irq_data);
2054
2055 return retval;
2056 }
2057
2058 /**
2059 * __request_percpu_irq - allocate a percpu interrupt line
2060 * @irq: Interrupt line to allocate
2061 * @handler: Function to be called when the IRQ occurs.
2062 * @flags: Interrupt type flags (IRQF_TIMER only)
2063 * @devname: An ASCII name for the claiming device
2064 * @dev_id: A percpu cookie passed back to the handler function
2065 *
2066 * This call allocates interrupt resources and enables the
2067 * interrupt on the local CPU. If the interrupt is supposed to be
2068 * enabled on other CPUs, it has to be done on each CPU using
2069 * enable_percpu_irq().
2070 *
2071 * Dev_id must be globally unique. It is a per-cpu variable, and
2072 * the handler gets called with the interrupted CPU's instance of
2073 * that variable.
2074 */
2075 int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2076 unsigned long flags, const char *devname,
2077 void __percpu *dev_id)
2078 {
2079 struct irqaction *action;
2080 struct irq_desc *desc;
2081 int retval;
2082
2083 if (!dev_id)
2084 return -EINVAL;
2085
2086 desc = irq_to_desc(irq);
2087 if (!desc || !irq_settings_can_request(desc) ||
2088 !irq_settings_is_per_cpu_devid(desc))
2089 return -EINVAL;
2090
2091 if (flags && flags != IRQF_TIMER)
2092 return -EINVAL;
2093
2094 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2095 if (!action)
2096 return -ENOMEM;
2097
2098 action->handler = handler;
2099 action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
2100 action->name = devname;
2101 action->percpu_dev_id = dev_id;
2102
2103 retval = irq_chip_pm_get(&desc->irq_data);
2104 if (retval < 0) {
2105 kfree(action);
2106 return retval;
2107 }
2108
2109 retval = __setup_irq(irq, desc, action);
2110
2111 if (retval) {
2112 irq_chip_pm_put(&desc->irq_data);
2113 kfree(action);
2114 }
2115
2116 return retval;
2117 }
2118 EXPORT_SYMBOL_GPL(__request_percpu_irq);
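/*
 * Illustrative usage sketch: the dev_id of a per-CPU interrupt is a per-CPU
 * variable, and each invocation of the handler receives the interrupted
 * CPU's instance. Most drivers go through the request_percpu_irq() wrapper
 * in <linux/interrupt.h>, which calls this function with flags == 0.
 * "struct baz_state", "baz_pcpu_state" and "baz_handler" are placeholders.
 */
#include <linux/interrupt.h>
#include <linux/percpu.h>

struct baz_state {
	unsigned long count;
};

static DEFINE_PER_CPU(struct baz_state, baz_pcpu_state);

static irqreturn_t baz_handler(int irq, void *dev_id)
{
	struct baz_state *st = dev_id;	/* this CPU's instance */

	st->count++;
	return IRQ_HANDLED;
}

static int baz_request(unsigned int irq)
{
	return request_percpu_irq(irq, baz_handler, "baz", &baz_pcpu_state);
}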
2119
2120 /**
2121 * irq_get_irqchip_state - returns the irqchip state of an interrupt.
2122 * @irq: Interrupt line that is forwarded to a VM
2123 * @which: One of IRQCHIP_STATE_* the caller wants to know about
2124 * @state: a pointer to a boolean where the state is to be stored
2125 *
2126 * This call snapshots the internal irqchip state of an
2127 * interrupt, returning into @state the bit corresponding to
2128 * state @which.
2129 *
2130 * This function should be called with preemption disabled if the
2131 * interrupt controller has per-cpu registers.
2132 */
2133 int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2134 bool *state)
2135 {
2136 struct irq_desc *desc;
2137 struct irq_data *data;
2138 struct irq_chip *chip;
2139 unsigned long flags;
2140 int err = -EINVAL;
2141
2142 desc = irq_get_desc_buslock(irq, &flags, 0);
2143 if (!desc)
2144 return err;
2145
2146 data = irq_desc_get_irq_data(desc);
2147
2148 do {
2149 chip = irq_data_get_irq_chip(data);
2150 if (chip->irq_get_irqchip_state)
2151 break;
2152 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2153 data = data->parent_data;
2154 #else
2155 data = NULL;
2156 #endif
2157 } while (data);
2158
2159 if (data)
2160 err = chip->irq_get_irqchip_state(data, which, state);
2161
2162 irq_put_desc_busunlock(desc, flags);
2163 return err;
2164 }
2165 EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
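/*
 * Illustrative usage sketch: query whether a forwarded interrupt is pending
 * at the irqchip level. IRQCHIP_STATE_PENDING is one of the states in
 * enum irqchip_irq_state; "host_irq" is a placeholder.
 */
static bool forwarded_irq_is_pending(unsigned int host_irq)
{
	bool pending = false;

	if (irq_get_irqchip_state(host_irq, IRQCHIP_STATE_PENDING, &pending))
		return false;	/* treat a lookup failure as "not pending" */

	return pending;
}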
2166
2167 /**
2168 * irq_set_irqchip_state - set the state of a forwarded interrupt.
2169 * @irq: Interrupt line that is forwarded to a VM
2170 * @which: State to be restored (one of IRQCHIP_STATE_*)
2171 * @val: Value corresponding to @which
2172 *
2173 * This call sets the internal irqchip state of an interrupt,
2174 * depending on the value of @which.
2175 *
2176 * This function should be called with preemption disabled if the
2177 * interrupt controller has per-cpu registers.
2178 */
2179 int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2180 bool val)
2181 {
2182 struct irq_desc *desc;
2183 struct irq_data *data;
2184 struct irq_chip *chip;
2185 unsigned long flags;
2186 int err = -EINVAL;
2187
2188 desc = irq_get_desc_buslock(irq, &flags, 0);
2189 if (!desc)
2190 return err;
2191
2192 data = irq_desc_get_irq_data(desc);
2193
2194 do {
2195 chip = irq_data_get_irq_chip(data);
2196 if (chip->irq_set_irqchip_state)
2197 break;
2198 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2199 data = data->parent_data;
2200 #else
2201 data = NULL;
2202 #endif
2203 } while (data);
2204
2205 if (data)
2206 err = chip->irq_set_irqchip_state(data, which, val);
2207
2208 irq_put_desc_busunlock(desc, flags);
2209 return err;
2210 }
2211 EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
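/*
 * Illustrative usage sketch: re-inject a previously saved pending bit into
 * the irqchip, e.g. when restoring the state of an interrupt forwarded to
 * a VM. "host_irq" and "was_pending" are placeholders.
 */
static int forwarded_irq_restore(unsigned int host_irq, bool was_pending)
{
	return irq_set_irqchip_state(host_irq, IRQCHIP_STATE_PENDING,
				     was_pending);
}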