1 /*
2 * linux/kernel/irq/manage.c
3 *
4 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5 * Copyright (C) 2005-2006 Thomas Gleixner
6 *
7 * This file contains driver APIs to the irq subsystem.
8 */
9
10 #define pr_fmt(fmt) "genirq: " fmt
11
12 #include <linux/irq.h>
13 #include <linux/kthread.h>
14 #include <linux/module.h>
15 #include <linux/random.h>
16 #include <linux/interrupt.h>
17 #include <linux/irqdomain.h>
18 #include <linux/slab.h>
19 #include <linux/sched.h>
20 #include <linux/sched/rt.h>
21 #include <linux/sched/task.h>
22 #include <uapi/linux/sched/types.h>
23 #include <linux/task_work.h>
24
25 #include "internals.h"
26
27 #ifdef CONFIG_IRQ_FORCED_THREADING
28 __read_mostly bool force_irqthreads = IS_ENABLED(CONFIG_IRQ_FORCED_THREADING_DEFAULT);
29
30 static int __init setup_forced_irqthreads(char *arg)
31 {
32 force_irqthreads = true;
33 return 0;
34 }
35 static int __init setup_no_irqthreads(char *arg)
36 {
37 force_irqthreads = false;
38 return 0;
39 }
40 early_param("threadirqs", setup_forced_irqthreads);
41 early_param("nothreadirqs", setup_no_irqthreads);
42 #endif
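/*
 * Usage sketch: with this block, forced irq threading is controlled from the
 * kernel command line.  Booting with "threadirqs" threads most handlers
 * (irq_setup_forced_threading() below skips IRQF_NO_THREAD, IRQF_PERCPU and
 * already-threaded IRQF_ONESHOT requests), "nothreadirqs" turns it off again,
 * and the built-in default follows CONFIG_IRQ_FORCED_THREADING_DEFAULT.
 */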
43
44 static void __synchronize_hardirq(struct irq_desc *desc)
45 {
46 bool inprogress;
47
48 do {
49 unsigned long flags;
50
51 /*
52 * Wait until we're out of the critical section. This might
53 * give the wrong answer due to the lack of memory barriers.
54 */
55 while (irqd_irq_inprogress(&desc->irq_data))
56 cpu_relax();
57
58 /* Ok, that indicated we're done: double-check carefully. */
59 raw_spin_lock_irqsave(&desc->lock, flags);
60 inprogress = irqd_irq_inprogress(&desc->irq_data);
61 raw_spin_unlock_irqrestore(&desc->lock, flags);
62
63 /* Oops, that failed? */
64 } while (inprogress);
65 }
66
67 /**
68 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
69 * @irq: interrupt number to wait for
70 *
71 * This function waits for any pending hard IRQ handlers for this
72 * interrupt to complete before returning. If you use this
73 * function while holding a resource the IRQ handler may need you
74 * will deadlock. It does not take associated threaded handlers
75 * into account.
76 *
77 * Do not use this for shutdown scenarios where you must be sure
78 * that all parts (hardirq and threaded handler) have completed.
79 *
80 * Returns: false if a threaded handler is active.
81 *
82 * This function may be called - with care - from IRQ context.
83 */
84 bool synchronize_hardirq(unsigned int irq)
85 {
86 struct irq_desc *desc = irq_to_desc(irq);
87
88 if (desc) {
89 __synchronize_hardirq(desc);
90 return !atomic_read(&desc->threads_active);
91 }
92
93 return true;
94 }
95 EXPORT_SYMBOL(synchronize_hardirq);
96
97 /**
98 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
99 * @irq: interrupt number to wait for
100 *
101 * This function waits for any pending IRQ handlers for this interrupt
102 * to complete before returning. If you use this function while
103 * holding a resource the IRQ handler may need you will deadlock.
104 *
105 * This function may be called - with care - from IRQ context.
106 */
107 void synchronize_irq(unsigned int irq)
108 {
109 struct irq_desc *desc = irq_to_desc(irq);
110
111 if (desc) {
112 __synchronize_hardirq(desc);
113 /*
114 * We made sure that no hardirq handler is
115 * running. Now verify that no threaded handlers are
116 * active.
117 */
118 wait_event(desc->wait_for_threads,
119 !atomic_read(&desc->threads_active));
120 }
121 }
122 EXPORT_SYMBOL(synchronize_irq);
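/*
 * Usage sketch (illustrative only; foo_dev and its members are invented):
 * once a driver has told its hardware to stop raising the interrupt, it can
 * use synchronize_irq() to wait for handlers still running on other CPUs
 * before tearing down the data those handlers touch.
 *
 *	static void foo_stop(struct foo_dev *foo)
 *	{
 *		foo_hw_mask_interrupts(foo);	// device no longer raises the IRQ
 *		synchronize_irq(foo->irq);	// wait for hardirq and threaded handlers
 *		kfree(foo->rx_ring);		// now safe: no handler can use it
 *	}
 */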
123
124 #ifdef CONFIG_SMP
125 cpumask_var_t irq_default_affinity;
126
127 static bool __irq_can_set_affinity(struct irq_desc *desc)
128 {
129 if (!desc || !irqd_can_balance(&desc->irq_data) ||
130 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
131 return false;
132 return true;
133 }
134
135 /**
136 * irq_can_set_affinity - Check if the affinity of a given irq can be set
137 * @irq: Interrupt to check
138 *
139 */
140 int irq_can_set_affinity(unsigned int irq)
141 {
142 return __irq_can_set_affinity(irq_to_desc(irq));
143 }
144
145 /**
146 * irq_can_set_affinity_usr - Check if affinity of a irq can be set from user space
147 * @irq: Interrupt to check
148 *
149 * Like irq_can_set_affinity() above, but additionally checks for the
150 * AFFINITY_MANAGED flag.
151 */
152 bool irq_can_set_affinity_usr(unsigned int irq)
153 {
154 struct irq_desc *desc = irq_to_desc(irq);
155
156 return __irq_can_set_affinity(desc) &&
157 !irqd_affinity_is_managed(&desc->irq_data);
158 }
159
160 /**
161 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 162 * @desc: irq descriptor whose affinity has changed
163 *
164 * We just set IRQTF_AFFINITY and delegate the affinity setting
165 * to the interrupt thread itself. We can not call
166 * set_cpus_allowed_ptr() here as we hold desc->lock and this
167 * code can be called from hard interrupt context.
168 */
169 void irq_set_thread_affinity(struct irq_desc *desc)
170 {
171 struct irqaction *action;
172
173 for_each_action_of_desc(desc, action)
174 if (action->thread)
175 set_bit(IRQTF_AFFINITY, &action->thread_flags);
176 }
177
178 static void irq_validate_effective_affinity(struct irq_data *data)
179 {
180 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
181 const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
182 struct irq_chip *chip = irq_data_get_irq_chip(data);
183
184 if (!cpumask_empty(m))
185 return;
186 pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
187 chip->name, data->irq);
188 #endif
189 }
190
191 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
192 bool force)
193 {
194 struct irq_desc *desc = irq_data_to_desc(data);
195 struct irq_chip *chip = irq_data_get_irq_chip(data);
196 int ret;
197
198 if (!chip || !chip->irq_set_affinity)
199 return -EINVAL;
200
201 ret = chip->irq_set_affinity(data, mask, force);
202 switch (ret) {
203 case IRQ_SET_MASK_OK:
204 case IRQ_SET_MASK_OK_DONE:
205 cpumask_copy(desc->irq_common_data.affinity, mask);
206 case IRQ_SET_MASK_OK_NOCOPY:
207 irq_validate_effective_affinity(data);
208 irq_set_thread_affinity(desc);
209 ret = 0;
210 }
211
212 return ret;
213 }
214
215 #ifdef CONFIG_GENERIC_PENDING_IRQ
216 static inline int irq_set_affinity_pending(struct irq_data *data,
217 const struct cpumask *dest)
218 {
219 struct irq_desc *desc = irq_data_to_desc(data);
220
221 irqd_set_move_pending(data);
222 irq_copy_pending(desc, dest);
223 return 0;
224 }
225 #else
226 static inline int irq_set_affinity_pending(struct irq_data *data,
227 const struct cpumask *dest)
228 {
229 return -EBUSY;
230 }
231 #endif
232
233 static int irq_try_set_affinity(struct irq_data *data,
234 const struct cpumask *dest, bool force)
235 {
236 int ret = irq_do_set_affinity(data, dest, force);
237
238 /*
239 * In case that the underlying vector management is busy and the
240 * architecture supports the generic pending mechanism then utilize
241 * this to avoid returning an error to user space.
242 */
243 if (ret == -EBUSY && !force)
244 ret = irq_set_affinity_pending(data, dest);
245 return ret;
246 }
247
248 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
249 bool force)
250 {
251 struct irq_chip *chip = irq_data_get_irq_chip(data);
252 struct irq_desc *desc = irq_data_to_desc(data);
253 int ret = 0;
254
255 if (!chip || !chip->irq_set_affinity)
256 return -EINVAL;
257
258 if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
259 ret = irq_try_set_affinity(data, mask, force);
260 } else {
261 irqd_set_move_pending(data);
262 irq_copy_pending(desc, mask);
263 }
264
265 if (desc->affinity_notify) {
266 kref_get(&desc->affinity_notify->kref);
267 schedule_work(&desc->affinity_notify->work);
268 }
269 irqd_set(data, IRQD_AFFINITY_SET);
270
271 return ret;
272 }
273
274 int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
275 {
276 struct irq_desc *desc = irq_to_desc(irq);
277 unsigned long flags;
278 int ret;
279
280 if (!desc)
281 return -EINVAL;
282
283 raw_spin_lock_irqsave(&desc->lock, flags);
284 ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
285 raw_spin_unlock_irqrestore(&desc->lock, flags);
286 return ret;
287 }
288
289 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
290 {
291 unsigned long flags;
292 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
293
294 if (!desc)
295 return -EINVAL;
296 desc->affinity_hint = m;
297 irq_put_desc_unlock(desc, flags);
298 /* set the initial affinity to prevent every interrupt being on CPU0 */
299 if (m)
300 __irq_set_affinity(irq, m, false);
301 return 0;
302 }
303 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
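/*
 * Usage sketch (hypothetical multi-queue driver; vec[] is invented): publish
 * a per-vector hint so userspace (e.g. irqbalance) can keep each vector on
 * the intended CPU - the call above also applies the mask as the initial
 * affinity - and clear the hint again before the interrupt is freed.
 *
 *	for (i = 0; i < nvec; i++)
 *		irq_set_affinity_hint(vec[i].irq, cpumask_of(i % num_online_cpus()));
 *
 *	// on teardown, before free_irq():
 *	for (i = 0; i < nvec; i++)
 *		irq_set_affinity_hint(vec[i].irq, NULL);
 */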
304
305 static void irq_affinity_notify(struct work_struct *work)
306 {
307 struct irq_affinity_notify *notify =
308 container_of(work, struct irq_affinity_notify, work);
309 struct irq_desc *desc = irq_to_desc(notify->irq);
310 cpumask_var_t cpumask;
311 unsigned long flags;
312
313 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
314 goto out;
315
316 raw_spin_lock_irqsave(&desc->lock, flags);
317 if (irq_move_pending(&desc->irq_data))
318 irq_get_pending(cpumask, desc);
319 else
320 cpumask_copy(cpumask, desc->irq_common_data.affinity);
321 raw_spin_unlock_irqrestore(&desc->lock, flags);
322
323 notify->notify(notify, cpumask);
324
325 free_cpumask_var(cpumask);
326 out:
327 kref_put(&notify->kref, notify->release);
328 }
329
330 /**
331 * irq_set_affinity_notifier - control notification of IRQ affinity changes
332 * @irq: Interrupt for which to enable/disable notification
333 * @notify: Context for notification, or %NULL to disable
334 * notification. Function pointers must be initialised;
335 * the other fields will be initialised by this function.
336 *
337 * Must be called in process context. Notification may only be enabled
338 * after the IRQ is allocated and must be disabled before the IRQ is
339 * freed using free_irq().
340 */
341 int
342 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
343 {
344 struct irq_desc *desc = irq_to_desc(irq);
345 struct irq_affinity_notify *old_notify;
346 unsigned long flags;
347
348 /* The release function is promised process context */
349 might_sleep();
350
351 if (!desc)
352 return -EINVAL;
353
354 /* Complete initialisation of *notify */
355 if (notify) {
356 notify->irq = irq;
357 kref_init(&notify->kref);
358 INIT_WORK(&notify->work, irq_affinity_notify);
359 }
360
361 raw_spin_lock_irqsave(&desc->lock, flags);
362 old_notify = desc->affinity_notify;
363 desc->affinity_notify = notify;
364 raw_spin_unlock_irqrestore(&desc->lock, flags);
365
366 if (old_notify) {
367 cancel_work_sync(&old_notify->work);
368 kref_put(&old_notify->kref, old_notify->release);
369 }
370
371 return 0;
372 }
373 EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
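/*
 * Usage sketch (names invented): the caller fills in only the two callbacks;
 * as documented above, ->irq, ->kref and ->work are initialised here.  A
 * driver embedding the notify block in its own structure might do roughly:
 *
 *	static void foo_affinity_changed(struct irq_affinity_notify *n,
 *					 const cpumask_t *mask)
 *	{
 *		struct foo_dev *foo = container_of(n, struct foo_dev, notify);
 *
 *		foo_retarget_dma(foo, mask);	// runs in process context
 *	}
 *
 *	static void foo_affinity_release(struct kref *ref)
 *	{
 *		// pairs with the kref_get() done before the work is scheduled
 *	}
 *
 *	foo->notify.notify = foo_affinity_changed;
 *	foo->notify.release = foo_affinity_release;
 *	irq_set_affinity_notifier(foo->irq, &foo->notify);
 *	...
 *	irq_set_affinity_notifier(foo->irq, NULL);	// before free_irq()
 */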
374
375 #ifndef CONFIG_AUTO_IRQ_AFFINITY
376 /*
377 * Generic version of the affinity autoselector.
378 */
379 int irq_setup_affinity(struct irq_desc *desc)
380 {
381 struct cpumask *set = irq_default_affinity;
382 int ret, node = irq_desc_get_node(desc);
383 static DEFINE_RAW_SPINLOCK(mask_lock);
384 static struct cpumask mask;
385
386 /* Excludes PER_CPU and NO_BALANCE interrupts */
387 if (!__irq_can_set_affinity(desc))
388 return 0;
389
390 raw_spin_lock(&mask_lock);
391 /*
392 * Preserve the managed affinity setting and a userspace affinity
393 * setup, but make sure that one of the targets is online.
394 */
395 if (irqd_affinity_is_managed(&desc->irq_data) ||
396 irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
397 if (cpumask_intersects(desc->irq_common_data.affinity,
398 cpu_online_mask))
399 set = desc->irq_common_data.affinity;
400 else
401 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
402 }
403
404 cpumask_and(&mask, cpu_online_mask, set);
405 if (cpumask_empty(&mask))
406 cpumask_copy(&mask, cpu_online_mask);
407
408 if (node != NUMA_NO_NODE) {
409 const struct cpumask *nodemask = cpumask_of_node(node);
410
411 /* make sure at least one of the cpus in nodemask is online */
412 if (cpumask_intersects(&mask, nodemask))
413 cpumask_and(&mask, &mask, nodemask);
414 }
415 ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
416 raw_spin_unlock(&mask_lock);
417 return ret;
418 }
419 #else
420 /* Wrapper for ALPHA specific affinity selector magic */
421 int irq_setup_affinity(struct irq_desc *desc)
422 {
423 return irq_select_affinity(irq_desc_get_irq(desc));
424 }
425 #endif
426
427 /*
428 * Called when a bogus affinity is set via /proc/irq
429 */
430 int irq_select_affinity_usr(unsigned int irq)
431 {
432 struct irq_desc *desc = irq_to_desc(irq);
433 unsigned long flags;
434 int ret;
435
436 raw_spin_lock_irqsave(&desc->lock, flags);
437 ret = irq_setup_affinity(desc);
438 raw_spin_unlock_irqrestore(&desc->lock, flags);
439 return ret;
440 }
441 #endif
442
443 /**
444 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
445 * @irq: interrupt number to set affinity
446 * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
447 * specific data for percpu_devid interrupts
448 *
449 * This function uses the vCPU specific data to set the vCPU
450 * affinity for an irq. The vCPU specific data is passed from
451 * outside, such as KVM. One example code path is as below:
452 * KVM -> IOMMU -> irq_set_vcpu_affinity().
453 */
454 int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
455 {
456 unsigned long flags;
457 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
458 struct irq_data *data;
459 struct irq_chip *chip;
460 int ret = -ENOSYS;
461
462 if (!desc)
463 return -EINVAL;
464
465 data = irq_desc_get_irq_data(desc);
466 do {
467 chip = irq_data_get_irq_chip(data);
468 if (chip && chip->irq_set_vcpu_affinity)
469 break;
470 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
471 data = data->parent_data;
472 #else
473 data = NULL;
474 #endif
475 } while (data);
476
477 if (data)
478 ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
479 irq_put_desc_unlock(desc, flags);
480
481 return ret;
482 }
483 EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
484
485 void __disable_irq(struct irq_desc *desc)
486 {
487 if (!desc->depth++)
488 irq_disable(desc);
489 }
490
491 static int __disable_irq_nosync(unsigned int irq)
492 {
493 unsigned long flags;
494 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
495
496 if (!desc)
497 return -EINVAL;
498 __disable_irq(desc);
499 irq_put_desc_busunlock(desc, flags);
500 return 0;
501 }
502
503 /**
504 * disable_irq_nosync - disable an irq without waiting
505 * @irq: Interrupt to disable
506 *
507 * Disable the selected interrupt line. Disables and Enables are
508 * nested.
509 * Unlike disable_irq(), this function does not ensure existing
510 * instances of the IRQ handler have completed before returning.
511 *
512 * This function may be called from IRQ context.
513 */
514 void disable_irq_nosync(unsigned int irq)
515 {
516 __disable_irq_nosync(irq);
517 }
518 EXPORT_SYMBOL(disable_irq_nosync);
519
520 /**
521 * disable_irq - disable an irq and wait for completion
522 * @irq: Interrupt to disable
523 *
524 * Disable the selected interrupt line. Enables and Disables are
525 * nested.
526 * This function waits for any pending IRQ handlers for this interrupt
527 * to complete before returning. If you use this function while
528 * holding a resource the IRQ handler may need you will deadlock.
529 *
530 * This function may be called - with care - from IRQ context.
531 */
532 void disable_irq(unsigned int irq)
533 {
534 if (!__disable_irq_nosync(irq))
535 synchronize_irq(irq);
536 }
537 EXPORT_SYMBOL(disable_irq);
538
539 /**
540 * disable_hardirq - disables an irq and waits for hardirq completion
541 * @irq: Interrupt to disable
542 *
543 * Disable the selected interrupt line. Enables and Disables are
544 * nested.
545 * This function waits for any pending hard IRQ handlers for this
546 * interrupt to complete before returning. If you use this function while
547 * holding a resource the hard IRQ handler may need you will deadlock.
548 *
549 * When used to optimistically disable an interrupt from atomic context
550 * the return value must be checked.
551 *
552 * Returns: false if a threaded handler is active.
553 *
554 * This function may be called - with care - from IRQ context.
555 */
556 bool disable_hardirq(unsigned int irq)
557 {
558 if (!__disable_irq_nosync(irq))
559 return synchronize_hardirq(irq);
560
561 return false;
562 }
563 EXPORT_SYMBOL_GPL(disable_hardirq);
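/*
 * Usage sketch: when disabling optimistically from atomic context the return
 * value must be checked - false means a threaded handler is still active and
 * the caller cannot assume the interrupt is fully quiesced.
 *
 *	if (disable_hardirq(irq)) {
 *		// no hardirq and no threaded handler running: safe to poke hw
 *	} else {
 *		// a threaded handler may still be running: defer the work
 *	}
 */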
564
565 void __enable_irq(struct irq_desc *desc)
566 {
567 switch (desc->depth) {
568 case 0:
569 err_out:
570 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
571 irq_desc_get_irq(desc));
572 break;
573 case 1: {
574 if (desc->istate & IRQS_SUSPENDED)
575 goto err_out;
576 /* Prevent probing on this irq: */
577 irq_settings_set_noprobe(desc);
578 /*
579 * Call irq_startup() not irq_enable() here because the
580 * interrupt might be marked NOAUTOEN. So irq_startup()
581 * needs to be invoked when it gets enabled the first
582 * time. If it was already started up, then irq_startup()
583 * will invoke irq_enable() under the hood.
584 */
585 irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
586 break;
587 }
588 default:
589 desc->depth--;
590 }
591 }
592
593 /**
594 * enable_irq - enable handling of an irq
595 * @irq: Interrupt to enable
596 *
597 * Undoes the effect of one call to disable_irq(). If this
598 * matches the last disable, processing of interrupts on this
599 * IRQ line is re-enabled.
600 *
601 * This function may be called from IRQ context only when
602 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
603 */
604 void enable_irq(unsigned int irq)
605 {
606 unsigned long flags;
607 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
608
609 if (!desc)
610 return;
611 if (WARN(!desc->irq_data.chip,
612 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
613 goto out;
614
615 __enable_irq(desc);
616 out:
617 irq_put_desc_busunlock(desc, flags);
618 }
619 EXPORT_SYMBOL(enable_irq);
620
621 static int set_irq_wake_real(unsigned int irq, unsigned int on)
622 {
623 struct irq_desc *desc = irq_to_desc(irq);
624 int ret = -ENXIO;
625
626 if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
627 return 0;
628
629 if (desc->irq_data.chip->irq_set_wake)
630 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
631
632 return ret;
633 }
634
635 /**
636 * irq_set_irq_wake - control irq power management wakeup
637 * @irq: interrupt to control
638 * @on: enable/disable power management wakeup
639 *
640 * Enable/disable power management wakeup mode, which is
641 * disabled by default. Enables and disables must match,
642 * just as they match for non-wakeup mode support.
643 *
644 * Wakeup mode lets this IRQ wake the system from sleep
645 * states like "suspend to RAM".
646 */
647 int irq_set_irq_wake(unsigned int irq, unsigned int on)
648 {
649 unsigned long flags;
650 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
651 int ret = 0;
652
653 if (!desc)
654 return -EINVAL;
655
656 /* wakeup-capable irqs can be shared between drivers that
657 * don't need to have the same sleep mode behaviors.
658 */
659 if (on) {
660 if (desc->wake_depth++ == 0) {
661 ret = set_irq_wake_real(irq, on);
662 if (ret)
663 desc->wake_depth = 0;
664 else
665 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
666 }
667 } else {
668 if (desc->wake_depth == 0) {
669 WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
670 } else if (--desc->wake_depth == 0) {
671 ret = set_irq_wake_real(irq, on);
672 if (ret)
673 desc->wake_depth = 1;
674 else
675 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
676 }
677 }
678 irq_put_desc_busunlock(desc, flags);
679 return ret;
680 }
681 EXPORT_SYMBOL(irq_set_irq_wake);
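/*
 * Usage sketch (hypothetical driver PM callbacks): enables and disables nest,
 * so each suspend-time enable needs a matching disable on resume.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			irq_set_irq_wake(foo->irq, 1);	// arm as wakeup source
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			irq_set_irq_wake(foo->irq, 0);	// balance the enable
 *		return 0;
 *	}
 */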
682
683 /*
684 * Internal function that tells the architecture code whether a
685 * particular irq has been exclusively allocated or is available
686 * for driver use.
687 */
688 int can_request_irq(unsigned int irq, unsigned long irqflags)
689 {
690 unsigned long flags;
691 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
692 int canrequest = 0;
693
694 if (!desc)
695 return 0;
696
697 if (irq_settings_can_request(desc)) {
698 if (!desc->action ||
699 irqflags & desc->action->flags & IRQF_SHARED)
700 canrequest = 1;
701 }
702 irq_put_desc_unlock(desc, flags);
703 return canrequest;
704 }
705
706 int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
707 {
708 struct irq_chip *chip = desc->irq_data.chip;
709 int ret, unmask = 0;
710
711 if (!chip || !chip->irq_set_type) {
712 /*
713 * IRQF_TRIGGER_* but the PIC does not support multiple
714 * flow-types?
715 */
716 pr_debug("No set_type function for IRQ %d (%s)\n",
717 irq_desc_get_irq(desc),
718 chip ? (chip->name ? : "unknown") : "unknown");
719 return 0;
720 }
721
722 if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
723 if (!irqd_irq_masked(&desc->irq_data))
724 mask_irq(desc);
725 if (!irqd_irq_disabled(&desc->irq_data))
726 unmask = 1;
727 }
728
729 /* Mask all flags except trigger mode */
730 flags &= IRQ_TYPE_SENSE_MASK;
731 ret = chip->irq_set_type(&desc->irq_data, flags);
732
733 switch (ret) {
734 case IRQ_SET_MASK_OK:
735 case IRQ_SET_MASK_OK_DONE:
736 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
737 irqd_set(&desc->irq_data, flags);
738
739 case IRQ_SET_MASK_OK_NOCOPY:
740 flags = irqd_get_trigger_type(&desc->irq_data);
741 irq_settings_set_trigger_mask(desc, flags);
742 irqd_clear(&desc->irq_data, IRQD_LEVEL);
743 irq_settings_clr_level(desc);
744 if (flags & IRQ_TYPE_LEVEL_MASK) {
745 irq_settings_set_level(desc);
746 irqd_set(&desc->irq_data, IRQD_LEVEL);
747 }
748
749 ret = 0;
750 break;
751 default:
752 pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
753 flags, irq_desc_get_irq(desc), chip->irq_set_type);
754 }
755 if (unmask)
756 unmask_irq(desc);
757 return ret;
758 }
759
760 #ifdef CONFIG_HARDIRQS_SW_RESEND
761 int irq_set_parent(int irq, int parent_irq)
762 {
763 unsigned long flags;
764 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
765
766 if (!desc)
767 return -EINVAL;
768
769 desc->parent_irq = parent_irq;
770
771 irq_put_desc_unlock(desc, flags);
772 return 0;
773 }
774 EXPORT_SYMBOL_GPL(irq_set_parent);
775 #endif
776
777 /*
778 * Default primary interrupt handler for threaded interrupts. Is
779 * assigned as primary handler when request_threaded_irq is called
780 * with handler == NULL. Useful for oneshot interrupts.
781 */
782 static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
783 {
784 return IRQ_WAKE_THREAD;
785 }
786
787 /*
788 * Primary handler for nested threaded interrupts. Should never be
789 * called.
790 */
791 static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
792 {
793 WARN(1, "Primary handler called for nested irq %d\n", irq);
794 return IRQ_NONE;
795 }
796
797 static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
798 {
799 WARN(1, "Secondary action handler called for irq %d\n", irq);
800 return IRQ_NONE;
801 }
802
803 static int irq_wait_for_interrupt(struct irqaction *action)
804 {
805 set_current_state(TASK_INTERRUPTIBLE);
806
807 while (!kthread_should_stop()) {
808
809 if (test_and_clear_bit(IRQTF_RUNTHREAD,
810 &action->thread_flags)) {
811 __set_current_state(TASK_RUNNING);
812 return 0;
813 }
814 schedule();
815 set_current_state(TASK_INTERRUPTIBLE);
816 }
817 __set_current_state(TASK_RUNNING);
818 return -1;
819 }
820
821 /*
822 * Oneshot interrupts keep the irq line masked until the threaded
 823 * handler has finished. Unmask if the interrupt has not been disabled and
824 * is marked MASKED.
825 */
826 static void irq_finalize_oneshot(struct irq_desc *desc,
827 struct irqaction *action)
828 {
829 if (!(desc->istate & IRQS_ONESHOT) ||
830 action->handler == irq_forced_secondary_handler)
831 return;
832 again:
833 chip_bus_lock(desc);
834 raw_spin_lock_irq(&desc->lock);
835
836 /*
 837 * Implausible though it may be, we need to protect ourselves
 838 * against the following scenario:
 839 *
 840 * The thread finishes before the hard interrupt handler on the
 841 * other CPU. If we then unmask the irq line, the interrupt can
 842 * come in again, mask the line and leave due to IRQS_INPROGRESS,
 843 * so the irq line stays masked forever.
 844 *
 845 * This also serializes the state of shared oneshot handlers
 846 * versus "desc->threads_oneshot |= action->thread_mask;" in
847 * irq_wake_thread(). See the comment there which explains the
848 * serialization.
849 */
850 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
851 raw_spin_unlock_irq(&desc->lock);
852 chip_bus_sync_unlock(desc);
853 cpu_relax();
854 goto again;
855 }
856
857 /*
858 * Now check again, whether the thread should run. Otherwise
859 * we would clear the threads_oneshot bit of this thread which
860 * was just set.
861 */
862 if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
863 goto out_unlock;
864
865 desc->threads_oneshot &= ~action->thread_mask;
866
867 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
868 irqd_irq_masked(&desc->irq_data))
869 unmask_threaded_irq(desc);
870
871 out_unlock:
872 raw_spin_unlock_irq(&desc->lock);
873 chip_bus_sync_unlock(desc);
874 }
875
876 #ifdef CONFIG_SMP
877 /*
878 * Check whether we need to change the affinity of the interrupt thread.
879 */
880 static void
881 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
882 {
883 cpumask_var_t mask;
884 bool valid = true;
885
886 if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
887 return;
888
889 /*
890 * In case we are out of memory we set IRQTF_AFFINITY again and
891 * try again next time
892 */
893 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
894 set_bit(IRQTF_AFFINITY, &action->thread_flags);
895 return;
896 }
897
898 raw_spin_lock_irq(&desc->lock);
899 /*
900 * This code is triggered unconditionally. Check the affinity
901 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
902 */
903 if (cpumask_available(desc->irq_common_data.affinity))
904 cpumask_copy(mask, desc->irq_common_data.affinity);
905 else
906 valid = false;
907 raw_spin_unlock_irq(&desc->lock);
908
909 if (valid)
910 set_cpus_allowed_ptr(current, mask);
911 free_cpumask_var(mask);
912 }
913 #else
914 static inline void
915 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
916 #endif
917
918 /*
 919 * Interrupts which are not explicitly requested as threaded
920 * interrupts rely on the implicit bh/preempt disable of the hard irq
921 * context. So we need to disable bh here to avoid deadlocks and other
922 * side effects.
923 */
924 static irqreturn_t
925 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
926 {
927 irqreturn_t ret;
928
929 local_bh_disable();
930 ret = action->thread_fn(action->irq, action->dev_id);
931 if (ret == IRQ_HANDLED)
932 atomic_inc(&desc->threads_handled);
933
934 irq_finalize_oneshot(desc, action);
935 local_bh_enable();
936 return ret;
937 }
938
939 /*
940 * Interrupts explicitly requested as threaded interrupts want to be
 941 * preemptible - many of them need to sleep and wait for slow buses to
942 * complete.
943 */
944 static irqreturn_t irq_thread_fn(struct irq_desc *desc,
945 struct irqaction *action)
946 {
947 irqreturn_t ret;
948
949 ret = action->thread_fn(action->irq, action->dev_id);
950 if (ret == IRQ_HANDLED)
951 atomic_inc(&desc->threads_handled);
952
953 irq_finalize_oneshot(desc, action);
954 return ret;
955 }
956
957 static void wake_threads_waitq(struct irq_desc *desc)
958 {
959 if (atomic_dec_and_test(&desc->threads_active))
960 wake_up(&desc->wait_for_threads);
961 }
962
963 static void irq_thread_dtor(struct callback_head *unused)
964 {
965 struct task_struct *tsk = current;
966 struct irq_desc *desc;
967 struct irqaction *action;
968
969 if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
970 return;
971
972 action = kthread_data(tsk);
973
974 pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
975 tsk->comm, tsk->pid, action->irq);
976
977
978 desc = irq_to_desc(action->irq);
979 /*
980 * If IRQTF_RUNTHREAD is set, we need to decrement
981 * desc->threads_active and wake possible waiters.
982 */
983 if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
984 wake_threads_waitq(desc);
985
986 /* Prevent a stale desc->threads_oneshot */
987 irq_finalize_oneshot(desc, action);
988 }
989
990 static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
991 {
992 struct irqaction *secondary = action->secondary;
993
994 if (WARN_ON_ONCE(!secondary))
995 return;
996
997 raw_spin_lock_irq(&desc->lock);
998 __irq_wake_thread(desc, secondary);
999 raw_spin_unlock_irq(&desc->lock);
1000 }
1001
1002 /*
1003 * Interrupt handler thread
1004 */
1005 static int irq_thread(void *data)
1006 {
1007 struct callback_head on_exit_work;
1008 struct irqaction *action = data;
1009 struct irq_desc *desc = irq_to_desc(action->irq);
1010 irqreturn_t (*handler_fn)(struct irq_desc *desc,
1011 struct irqaction *action);
1012
1013 if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
1014 &action->thread_flags))
1015 handler_fn = irq_forced_thread_fn;
1016 else
1017 handler_fn = irq_thread_fn;
1018
1019 init_task_work(&on_exit_work, irq_thread_dtor);
1020 task_work_add(current, &on_exit_work, false);
1021
1022 irq_thread_check_affinity(desc, action);
1023
1024 while (!irq_wait_for_interrupt(action)) {
1025 irqreturn_t action_ret;
1026
1027 irq_thread_check_affinity(desc, action);
1028
1029 action_ret = handler_fn(desc, action);
1030 if (action_ret == IRQ_WAKE_THREAD)
1031 irq_wake_secondary(desc, action);
1032
1033 wake_threads_waitq(desc);
1034 }
1035
1036 /*
1037 * This is the regular exit path. __free_irq() is stopping the
1038 * thread via kthread_stop() after calling
1039 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
1040 * oneshot mask bit can be set. We cannot verify that as we
1041 * cannot touch the oneshot mask at this point anymore as
 1042 * __setup_irq() might have given out current's thread_mask
1043 * again.
1044 */
1045 task_work_cancel(current, irq_thread_dtor);
1046 return 0;
1047 }
1048
1049 /**
1050 * irq_wake_thread - wake the irq thread for the action identified by dev_id
1051 * @irq: Interrupt line
1052 * @dev_id: Device identity for which the thread should be woken
1053 *
1054 */
1055 void irq_wake_thread(unsigned int irq, void *dev_id)
1056 {
1057 struct irq_desc *desc = irq_to_desc(irq);
1058 struct irqaction *action;
1059 unsigned long flags;
1060
1061 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1062 return;
1063
1064 raw_spin_lock_irqsave(&desc->lock, flags);
1065 for_each_action_of_desc(desc, action) {
1066 if (action->dev_id == dev_id) {
1067 if (action->thread)
1068 __irq_wake_thread(desc, action);
1069 break;
1070 }
1071 }
1072 raw_spin_unlock_irqrestore(&desc->lock, flags);
1073 }
1074 EXPORT_SYMBOL_GPL(irq_wake_thread);
1075
1076 static int irq_setup_forced_threading(struct irqaction *new)
1077 {
1078 if (!force_irqthreads)
1079 return 0;
1080 if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
1081 return 0;
1082
1083 /*
1084 * No further action required for interrupts which are requested as
1085 * threaded interrupts already
1086 */
1087 if (new->handler == irq_default_primary_handler)
1088 return 0;
1089
1090 new->flags |= IRQF_ONESHOT;
1091
1092 /*
1093 * Handle the case where we have a real primary handler and a
1094 * thread handler. We force thread them as well by creating a
1095 * secondary action.
1096 */
1097 if (new->handler && new->thread_fn) {
1098 /* Allocate the secondary action */
1099 new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1100 if (!new->secondary)
1101 return -ENOMEM;
1102 new->secondary->handler = irq_forced_secondary_handler;
1103 new->secondary->thread_fn = new->thread_fn;
1104 new->secondary->dev_id = new->dev_id;
1105 new->secondary->irq = new->irq;
1106 new->secondary->name = new->name;
1107 }
1108 /* Deal with the primary handler */
1109 set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
1110 new->thread_fn = new->handler;
1111 new->handler = irq_default_primary_handler;
1112 return 0;
1113 }
1114
1115 static int irq_request_resources(struct irq_desc *desc)
1116 {
1117 struct irq_data *d = &desc->irq_data;
1118 struct irq_chip *c = d->chip;
1119
1120 return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1121 }
1122
1123 static void irq_release_resources(struct irq_desc *desc)
1124 {
1125 struct irq_data *d = &desc->irq_data;
1126 struct irq_chip *c = d->chip;
1127
1128 if (c->irq_release_resources)
1129 c->irq_release_resources(d);
1130 }
1131
1132 static int
1133 setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1134 {
1135 struct task_struct *t;
1136 struct sched_param param = {
1137 .sched_priority = MAX_USER_RT_PRIO/2,
1138 };
1139
1140 if (!secondary) {
1141 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1142 new->name);
1143 } else {
1144 t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
1145 new->name);
1146 param.sched_priority -= 1;
1147 }
1148
1149 if (IS_ERR(t))
1150 return PTR_ERR(t);
1151
1152 sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
1153
1154 /*
1155 * We keep the reference to the task struct even if
1156 * the thread dies to avoid that the interrupt code
1157 * references an already freed task_struct.
1158 */
1159 get_task_struct(t);
1160 new->thread = t;
1161 /*
1162 * Tell the thread to set its affinity. This is
1163 * important for shared interrupt handlers as we do
1164 * not invoke setup_affinity() for the secondary
1165 * handlers as everything is already set up. Even for
 1166 * interrupts marked with IRQF_NOBALANCING this is
1167 * correct as we want the thread to move to the cpu(s)
1168 * on which the requesting code placed the interrupt.
1169 */
1170 set_bit(IRQTF_AFFINITY, &new->thread_flags);
1171 return 0;
1172 }
1173
1174 /*
1175 * Internal function to register an irqaction - typically used to
1176 * allocate special interrupts that are part of the architecture.
1177 *
1178 * Locking rules:
1179 *
1180 * desc->request_mutex Provides serialization against a concurrent free_irq()
1181 * chip_bus_lock Provides serialization for slow bus operations
1182 * desc->lock Provides serialization against hard interrupts
1183 *
1184 * chip_bus_lock and desc->lock are sufficient for all other management and
1185 * interrupt related functions. desc->request_mutex solely serializes
1186 * request/free_irq().
1187 */
1188 static int
1189 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1190 {
1191 struct irqaction *old, **old_ptr;
1192 unsigned long flags, thread_mask = 0;
1193 int ret, nested, shared = 0;
1194
1195 if (!desc)
1196 return -EINVAL;
1197
1198 if (desc->irq_data.chip == &no_irq_chip)
1199 return -ENOSYS;
1200 if (!try_module_get(desc->owner))
1201 return -ENODEV;
1202
1203 new->irq = irq;
1204
1205 /*
1206 * If the trigger type is not specified by the caller,
1207 * then use the default for this interrupt.
1208 */
1209 if (!(new->flags & IRQF_TRIGGER_MASK))
1210 new->flags |= irqd_get_trigger_type(&desc->irq_data);
1211
1212 /*
1213 * Check whether the interrupt nests into another interrupt
1214 * thread.
1215 */
1216 nested = irq_settings_is_nested_thread(desc);
1217 if (nested) {
1218 if (!new->thread_fn) {
1219 ret = -EINVAL;
1220 goto out_mput;
1221 }
1222 /*
1223 * Replace the primary handler which was provided from
1224 * the driver for non nested interrupt handling by the
1225 * dummy function which warns when called.
1226 */
1227 new->handler = irq_nested_primary_handler;
1228 } else {
1229 if (irq_settings_can_thread(desc)) {
1230 ret = irq_setup_forced_threading(new);
1231 if (ret)
1232 goto out_mput;
1233 }
1234 }
1235
1236 /*
1237 * Create a handler thread when a thread function is supplied
1238 * and the interrupt does not nest into another interrupt
1239 * thread.
1240 */
1241 if (new->thread_fn && !nested) {
1242 ret = setup_irq_thread(new, irq, false);
1243 if (ret)
1244 goto out_mput;
1245 if (new->secondary) {
1246 ret = setup_irq_thread(new->secondary, irq, true);
1247 if (ret)
1248 goto out_thread;
1249 }
1250 }
1251
1252 /*
1253 * Drivers are often written to work w/o knowledge about the
1254 * underlying irq chip implementation, so a request for a
1255 * threaded irq without a primary hard irq context handler
1256 * requires the ONESHOT flag to be set. Some irq chips like
1257 * MSI based interrupts are per se one shot safe. Check the
1258 * chip flags, so we can avoid the unmask dance at the end of
1259 * the threaded handler for those.
1260 */
1261 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1262 new->flags &= ~IRQF_ONESHOT;
1263
1264 /*
1265 * Protects against a concurrent __free_irq() call which might wait
1266 * for synchronize_irq() to complete without holding the optional
1267 * chip bus lock and desc->lock.
1268 */
1269 mutex_lock(&desc->request_mutex);
1270
1271 /*
1272 * Acquire bus lock as the irq_request_resources() callback below
1273 * might rely on the serialization or the magic power management
 1274 * functions which are abusing the irq_bus_lock() callback.
1275 */
1276 chip_bus_lock(desc);
1277
1278 /* First installed action requests resources. */
1279 if (!desc->action) {
1280 ret = irq_request_resources(desc);
1281 if (ret) {
1282 pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1283 new->name, irq, desc->irq_data.chip->name);
1284 goto out_bus_unlock;
1285 }
1286 }
1287
1288 /*
1289 * The following block of code has to be executed atomically
1290 * protected against a concurrent interrupt and any of the other
1291 * management calls which are not serialized via
1292 * desc->request_mutex or the optional bus lock.
1293 */
1294 raw_spin_lock_irqsave(&desc->lock, flags);
1295 old_ptr = &desc->action;
1296 old = *old_ptr;
1297 if (old) {
1298 /*
1299 * Can't share interrupts unless both agree to and are
1300 * the same type (level, edge, polarity). So both flag
1301 * fields must have IRQF_SHARED set and the bits which
1302 * set the trigger type must match. Also all must
1303 * agree on ONESHOT.
1304 */
1305 unsigned int oldtype;
1306
1307 /*
1308 * If nobody did set the configuration before, inherit
1309 * the one provided by the requester.
1310 */
1311 if (irqd_trigger_type_was_set(&desc->irq_data)) {
1312 oldtype = irqd_get_trigger_type(&desc->irq_data);
1313 } else {
1314 oldtype = new->flags & IRQF_TRIGGER_MASK;
1315 irqd_set_trigger_type(&desc->irq_data, oldtype);
1316 }
1317
1318 if (!((old->flags & new->flags) & IRQF_SHARED) ||
1319 (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
1320 ((old->flags ^ new->flags) & IRQF_ONESHOT))
1321 goto mismatch;
1322
1323 /* All handlers must agree on per-cpuness */
1324 if ((old->flags & IRQF_PERCPU) !=
1325 (new->flags & IRQF_PERCPU))
1326 goto mismatch;
1327
1328 /* add new interrupt at end of irq queue */
1329 do {
1330 /*
1331 * Or all existing action->thread_mask bits,
1332 * so we can find the next zero bit for this
1333 * new action.
1334 */
1335 thread_mask |= old->thread_mask;
1336 old_ptr = &old->next;
1337 old = *old_ptr;
1338 } while (old);
1339 shared = 1;
1340 }
1341
1342 /*
1343 * Setup the thread mask for this irqaction for ONESHOT. For
1344 * !ONESHOT irqs the thread mask is 0 so we can avoid a
1345 * conditional in irq_wake_thread().
1346 */
1347 if (new->flags & IRQF_ONESHOT) {
1348 /*
1349 * Unlikely to have 32 resp 64 irqs sharing one line,
1350 * but who knows.
1351 */
1352 if (thread_mask == ~0UL) {
1353 ret = -EBUSY;
1354 goto out_unlock;
1355 }
1356 /*
 1357 * The thread_mask for the action is or'ed into
 1358 * desc->threads_oneshot to indicate that the
 1359 * IRQF_ONESHOT thread handler has been woken, but not
 1360 * yet finished. The bit is cleared when a thread
 1361 * completes. When all threads of a shared interrupt
 1362 * line have completed desc->threads_oneshot becomes
1363 * zero and the interrupt line is unmasked. See
1364 * handle.c:irq_wake_thread() for further information.
1365 *
1366 * If no thread is woken by primary (hard irq context)
1367 * interrupt handlers, then desc->threads_active is
1368 * also checked for zero to unmask the irq line in the
1369 * affected hard irq flow handlers
1370 * (handle_[fasteoi|level]_irq).
1371 *
1372 * The new action gets the first zero bit of
1373 * thread_mask assigned. See the loop above which or's
1374 * all existing action->thread_mask bits.
1375 */
1376 new->thread_mask = 1UL << ffz(thread_mask);
1377
1378 } else if (new->handler == irq_default_primary_handler &&
1379 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1380 /*
1381 * The interrupt was requested with handler = NULL, so
1382 * we use the default primary handler for it. But it
1383 * does not have the oneshot flag set. In combination
1384 * with level interrupts this is deadly, because the
1385 * default primary handler just wakes the thread, then
 1386 * the irq line is reenabled, but the device still
1387 * has the level irq asserted. Rinse and repeat....
1388 *
1389 * While this works for edge type interrupts, we play
1390 * it safe and reject unconditionally because we can't
1391 * say for sure which type this interrupt really
1392 * has. The type flags are unreliable as the
1393 * underlying chip implementation can override them.
1394 */
1395 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
1396 irq);
1397 ret = -EINVAL;
1398 goto out_unlock;
1399 }
1400
1401 if (!shared) {
1402 init_waitqueue_head(&desc->wait_for_threads);
1403
1404 /* Setup the type (level, edge polarity) if configured: */
1405 if (new->flags & IRQF_TRIGGER_MASK) {
1406 ret = __irq_set_trigger(desc,
1407 new->flags & IRQF_TRIGGER_MASK);
1408
1409 if (ret)
1410 goto out_unlock;
1411 }
1412
1413 /*
1414 * Activate the interrupt. That activation must happen
1415 * independently of IRQ_NOAUTOEN. request_irq() can fail
1416 * and the callers are supposed to handle
1417 * that. enable_irq() of an interrupt requested with
1418 * IRQ_NOAUTOEN is not supposed to fail. The activation
 1419 * keeps it in shutdown mode, it merely associates
1420 * resources if necessary and if that's not possible it
1421 * fails. Interrupts which are in managed shutdown mode
1422 * will simply ignore that activation request.
1423 */
1424 ret = irq_activate(desc);
1425 if (ret)
1426 goto out_unlock;
1427
1428 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1429 IRQS_ONESHOT | IRQS_WAITING);
1430 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1431
1432 if (new->flags & IRQF_PERCPU) {
1433 irqd_set(&desc->irq_data, IRQD_PER_CPU);
1434 irq_settings_set_per_cpu(desc);
1435 }
1436
1437 if (new->flags & IRQF_ONESHOT)
1438 desc->istate |= IRQS_ONESHOT;
1439
1440 /* Exclude IRQ from balancing if requested */
1441 if (new->flags & IRQF_NOBALANCING) {
1442 irq_settings_set_no_balancing(desc);
1443 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1444 }
1445
1446 if (irq_settings_can_autoenable(desc)) {
1447 irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
1448 } else {
1449 /*
1450 * Shared interrupts do not go well with disabling
1451 * auto enable. The sharing interrupt might request
1452 * it while it's still disabled and then wait for
1453 * interrupts forever.
1454 */
1455 WARN_ON_ONCE(new->flags & IRQF_SHARED);
1456 /* Undo nested disables: */
1457 desc->depth = 1;
1458 }
1459
1460 } else if (new->flags & IRQF_TRIGGER_MASK) {
1461 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1462 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1463
1464 if (nmsk != omsk)
1465 /* hope the handler works with current trigger mode */
1466 pr_warn("irq %d uses trigger mode %u; requested %u\n",
1467 irq, omsk, nmsk);
1468 }
1469
1470 *old_ptr = new;
1471
1472 irq_pm_install_action(desc, new);
1473
1474 /* Reset broken irq detection when installing new handler */
1475 desc->irq_count = 0;
1476 desc->irqs_unhandled = 0;
1477
1478 /*
1479 * Check whether we disabled the irq via the spurious handler
1480 * before. Reenable it and give it another chance.
1481 */
1482 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1483 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1484 __enable_irq(desc);
1485 }
1486
1487 raw_spin_unlock_irqrestore(&desc->lock, flags);
1488 chip_bus_sync_unlock(desc);
1489 mutex_unlock(&desc->request_mutex);
1490
1491 irq_setup_timings(desc, new);
1492
1493 /*
1494 * Strictly no need to wake it up, but hung_task complains
1495 * when no hard interrupt wakes the thread up.
1496 */
1497 if (new->thread)
1498 wake_up_process(new->thread);
1499 if (new->secondary)
1500 wake_up_process(new->secondary->thread);
1501
1502 register_irq_proc(irq, desc);
1503 new->dir = NULL;
1504 register_handler_proc(irq, new);
1505 return 0;
1506
1507 mismatch:
1508 if (!(new->flags & IRQF_PROBE_SHARED)) {
1509 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1510 irq, new->flags, new->name, old->flags, old->name);
1511 #ifdef CONFIG_DEBUG_SHIRQ
1512 dump_stack();
1513 #endif
1514 }
1515 ret = -EBUSY;
1516
1517 out_unlock:
1518 raw_spin_unlock_irqrestore(&desc->lock, flags);
1519
1520 if (!desc->action)
1521 irq_release_resources(desc);
1522 out_bus_unlock:
1523 chip_bus_sync_unlock(desc);
1524 mutex_unlock(&desc->request_mutex);
1525
1526 out_thread:
1527 if (new->thread) {
1528 struct task_struct *t = new->thread;
1529
1530 new->thread = NULL;
1531 kthread_stop(t);
1532 put_task_struct(t);
1533 }
1534 if (new->secondary && new->secondary->thread) {
1535 struct task_struct *t = new->secondary->thread;
1536
1537 new->secondary->thread = NULL;
1538 kthread_stop(t);
1539 put_task_struct(t);
1540 }
1541 out_mput:
1542 module_put(desc->owner);
1543 return ret;
1544 }
1545
1546 /**
1547 * setup_irq - setup an interrupt
1548 * @irq: Interrupt line to setup
1549 * @act: irqaction for the interrupt
1550 *
1551 * Used to statically setup interrupts in the early boot process.
1552 */
1553 int setup_irq(unsigned int irq, struct irqaction *act)
1554 {
1555 int retval;
1556 struct irq_desc *desc = irq_to_desc(irq);
1557
1558 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1559 return -EINVAL;
1560
1561 retval = irq_chip_pm_get(&desc->irq_data);
1562 if (retval < 0)
1563 return retval;
1564
1565 retval = __setup_irq(irq, desc, act);
1566
1567 if (retval)
1568 irq_chip_pm_put(&desc->irq_data);
1569
1570 return retval;
1571 }
1572 EXPORT_SYMBOL_GPL(setup_irq);
1573
1574 /*
1575 * Internal function to unregister an irqaction - used to free
1576 * regular and special interrupts that are part of the architecture.
1577 */
1578 static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1579 {
1580 struct irq_desc *desc = irq_to_desc(irq);
1581 struct irqaction *action, **action_ptr;
1582 unsigned long flags;
1583
1584 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1585
1586 if (!desc)
1587 return NULL;
1588
1589 mutex_lock(&desc->request_mutex);
1590 chip_bus_lock(desc);
1591 raw_spin_lock_irqsave(&desc->lock, flags);
1592
1593 /*
1594 * There can be multiple actions per IRQ descriptor, find the right
1595 * one based on the dev_id:
1596 */
1597 action_ptr = &desc->action;
1598 for (;;) {
1599 action = *action_ptr;
1600
1601 if (!action) {
1602 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1603 raw_spin_unlock_irqrestore(&desc->lock, flags);
1604 chip_bus_sync_unlock(desc);
1605 mutex_unlock(&desc->request_mutex);
1606 return NULL;
1607 }
1608
1609 if (action->dev_id == dev_id)
1610 break;
1611 action_ptr = &action->next;
1612 }
1613
1614 /* Found it - now remove it from the list of entries: */
1615 *action_ptr = action->next;
1616
1617 irq_pm_remove_action(desc, action);
1618
1619 /* If this was the last handler, shut down the IRQ line: */
1620 if (!desc->action) {
1621 irq_settings_clr_disable_unlazy(desc);
1622 /* Only shutdown. Deactivate after synchronize_hardirq() */
1623 irq_shutdown(desc);
1624 }
1625
1626 #ifdef CONFIG_SMP
1627 /* make sure affinity_hint is cleaned up */
1628 if (WARN_ON_ONCE(desc->affinity_hint))
1629 desc->affinity_hint = NULL;
1630 #endif
1631
1632 raw_spin_unlock_irqrestore(&desc->lock, flags);
1633 /*
1634 * Drop bus_lock here so the changes which were done in the chip
1635 * callbacks above are synced out to the irq chips which hang
1636 * behind a slow bus (I2C, SPI) before calling synchronize_irq().
1637 *
1638 * Aside of that the bus_lock can also be taken from the threaded
1639 * handler in irq_finalize_oneshot() which results in a deadlock
1640 * because synchronize_irq() would wait forever for the thread to
1641 * complete, which is blocked on the bus lock.
1642 *
1643 * The still held desc->request_mutex() protects against a
1644 * concurrent request_irq() of this irq so the release of resources
1645 * and timing data is properly serialized.
1646 */
1647 chip_bus_sync_unlock(desc);
1648
1649 unregister_handler_proc(irq, action);
1650
1651 /* Make sure it's not being used on another CPU: */
1652 synchronize_irq(irq);
1653
1654 #ifdef CONFIG_DEBUG_SHIRQ
1655 /*
1656 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
 1657 * event to happen even while it is being freed, so let's make sure that
1658 * is so by doing an extra call to the handler ....
1659 *
1660 * ( We do this after actually deregistering it, to make sure that a
 1661 * 'real' IRQ doesn't run in parallel with our fake. )
1662 */
1663 if (action->flags & IRQF_SHARED) {
1664 local_irq_save(flags);
1665 action->handler(irq, dev_id);
1666 local_irq_restore(flags);
1667 }
1668 #endif
1669
1670 if (action->thread) {
1671 kthread_stop(action->thread);
1672 put_task_struct(action->thread);
1673 if (action->secondary && action->secondary->thread) {
1674 kthread_stop(action->secondary->thread);
1675 put_task_struct(action->secondary->thread);
1676 }
1677 }
1678
1679 /* Last action releases resources */
1680 if (!desc->action) {
1681 /*
 1682 * Reacquire bus lock as irq_release_resources() might
1683 * require it to deallocate resources over the slow bus.
1684 */
1685 chip_bus_lock(desc);
1686 /*
1687 * There is no interrupt on the fly anymore. Deactivate it
1688 * completely.
1689 */
1690 raw_spin_lock_irqsave(&desc->lock, flags);
1691 irq_domain_deactivate_irq(&desc->irq_data);
1692 raw_spin_unlock_irqrestore(&desc->lock, flags);
1693
1694 irq_release_resources(desc);
1695 chip_bus_sync_unlock(desc);
1696 irq_remove_timings(desc);
1697 }
1698
1699 mutex_unlock(&desc->request_mutex);
1700
1701 irq_chip_pm_put(&desc->irq_data);
1702 module_put(desc->owner);
1703 kfree(action->secondary);
1704 return action;
1705 }
1706
1707 /**
1708 * remove_irq - free an interrupt
1709 * @irq: Interrupt line to free
1710 * @act: irqaction for the interrupt
1711 *
1712 * Used to remove interrupts statically setup by the early boot process.
1713 */
1714 void remove_irq(unsigned int irq, struct irqaction *act)
1715 {
1716 struct irq_desc *desc = irq_to_desc(irq);
1717
1718 if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1719 __free_irq(irq, act->dev_id);
1720 }
1721 EXPORT_SYMBOL_GPL(remove_irq);
1722
1723 /**
1724 * free_irq - free an interrupt allocated with request_irq
1725 * @irq: Interrupt line to free
1726 * @dev_id: Device identity to free
1727 *
1728 * Remove an interrupt handler. The handler is removed and if the
1729 * interrupt line is no longer in use by any driver it is disabled.
1730 * On a shared IRQ the caller must ensure the interrupt is disabled
1731 * on the card it drives before calling this function. The function
1732 * does not return until any executing interrupts for this IRQ
1733 * have completed.
1734 *
1735 * This function must not be called from interrupt context.
1736 *
1737 * Returns the devname argument passed to request_irq.
1738 */
1739 const void *free_irq(unsigned int irq, void *dev_id)
1740 {
1741 struct irq_desc *desc = irq_to_desc(irq);
1742 struct irqaction *action;
1743 const char *devname;
1744
1745 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1746 return NULL;
1747
1748 #ifdef CONFIG_SMP
1749 if (WARN_ON(desc->affinity_notify))
1750 desc->affinity_notify = NULL;
1751 #endif
1752
1753 action = __free_irq(irq, dev_id);
1754
1755 if (!action)
1756 return NULL;
1757
1758 devname = action->name;
1759 kfree(action);
1760 return devname;
1761 }
1762 EXPORT_SYMBOL(free_irq);
1763
1764 /**
1765 * request_threaded_irq - allocate an interrupt line
1766 * @irq: Interrupt line to allocate
1767 * @handler: Function to be called when the IRQ occurs.
1768 * Primary handler for threaded interrupts
1769 * If NULL and thread_fn != NULL the default
1770 * primary handler is installed
1771 * @thread_fn: Function called from the irq handler thread
1772 * If NULL, no irq thread is created
1773 * @irqflags: Interrupt type flags
1774 * @devname: An ascii name for the claiming device
1775 * @dev_id: A cookie passed back to the handler function
1776 *
1777 * This call allocates interrupt resources and enables the
1778 * interrupt line and IRQ handling. From the point this
1779 * call is made your handler function may be invoked. Since
1780 * your handler function must clear any interrupt the board
1781 * raises, you must take care both to initialise your hardware
1782 * and to set up the interrupt handler in the right order.
1783 *
1784 * If you want to set up a threaded irq handler for your device
1785 * then you need to supply @handler and @thread_fn. @handler is
1786 * still called in hard interrupt context and has to check
1787 * whether the interrupt originates from the device. If yes it
1788 * needs to disable the interrupt on the device and return
1789 * IRQ_WAKE_THREAD which will wake up the handler thread and run
1790 * @thread_fn. This split handler design is necessary to support
1791 * shared interrupts.
1792 *
1793 * Dev_id must be globally unique. Normally the address of the
1794 * device data structure is used as the cookie. Since the handler
1795 * receives this value it makes sense to use it.
1796 *
1797 * If your interrupt is shared you must pass a non NULL dev_id
1798 * as this is required when freeing the interrupt.
1799 *
1800 * Flags:
1801 *
1802 * IRQF_SHARED Interrupt is shared
1803 * IRQF_TRIGGER_* Specify active edge(s) or level
1804 *
1805 */
1806 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1807 irq_handler_t thread_fn, unsigned long irqflags,
1808 const char *devname, void *dev_id)
1809 {
1810 struct irqaction *action;
1811 struct irq_desc *desc;
1812 int retval;
1813
1814 if (irq == IRQ_NOTCONNECTED)
1815 return -ENOTCONN;
1816
1817 /*
1818 * Sanity-check: shared interrupts must pass in a real dev-ID,
1819 * otherwise we'll have trouble later trying to figure out
1820 * which interrupt is which (messes up the interrupt freeing
1821 * logic etc).
1822 *
1823 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
1824 * it cannot be set along with IRQF_NO_SUSPEND.
1825 */
1826 if (((irqflags & IRQF_SHARED) && !dev_id) ||
1827 (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
1828 ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
1829 return -EINVAL;
1830
1831 desc = irq_to_desc(irq);
1832 if (!desc)
1833 return -EINVAL;
1834
1835 if (!irq_settings_can_request(desc) ||
1836 WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1837 return -EINVAL;
1838
1839 if (!handler) {
1840 if (!thread_fn)
1841 return -EINVAL;
1842 handler = irq_default_primary_handler;
1843 }
1844
1845 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1846 if (!action)
1847 return -ENOMEM;
1848
1849 action->handler = handler;
1850 action->thread_fn = thread_fn;
1851 action->flags = irqflags;
1852 action->name = devname;
1853 action->dev_id = dev_id;
1854
1855 retval = irq_chip_pm_get(&desc->irq_data);
1856 if (retval < 0) {
1857 kfree(action);
1858 return retval;
1859 }
1860
1861 retval = __setup_irq(irq, desc, action);
1862
1863 if (retval) {
1864 irq_chip_pm_put(&desc->irq_data);
1865 kfree(action->secondary);
1866 kfree(action);
1867 }
1868
1869 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
1870 if (!retval && (irqflags & IRQF_SHARED)) {
1871 /*
1872 * It's a shared IRQ -- the driver ought to be prepared for it
1873 * to happen immediately, so let's make sure....
1874 * We disable the irq to make sure that a 'real' IRQ doesn't
1875 * run in parallel with our fake.
1876 */
1877 unsigned long flags;
1878
1879 disable_irq(irq);
1880 local_irq_save(flags);
1881
1882 handler(irq, dev_id);
1883
1884 local_irq_restore(flags);
1885 enable_irq(irq);
1886 }
1887 #endif
1888 return retval;
1889 }
1890 EXPORT_SYMBOL(request_threaded_irq);
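
/*
 * Example usage (illustrative sketch, not part of this file): a driver for
 * a hypothetical "foo" device; FOO_STATUS/FOO_MASK and the register layout
 * are invented for the example. The primary handler only checks and
 * silences the device, the threaded handler does the slow work.
 */
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/io.h>

#define FOO_STATUS	0x00	/* hypothetical status register */
#define FOO_MASK	0x04	/* hypothetical interrupt mask register */

struct foo_dev {
	void __iomem	*regs;
	int		irq;
};

static irqreturn_t foo_primary_handler(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	/* Shared line: report IRQ_NONE when the interrupt is not ours. */
	if (!(readl(foo->regs + FOO_STATUS) & BIT(0)))
		return IRQ_NONE;

	/* Silence the device, then defer the real work to the irq thread. */
	writel(BIT(0), foo->regs + FOO_MASK);
	return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_thread_handler(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	/* Process context: sleeping APIs are allowed here. */
	writel(0, foo->regs + FOO_MASK);	/* re-enable device interrupts */
	return IRQ_HANDLED;
}

static int foo_request_irq(struct foo_dev *foo)
{
	/* The device structure doubles as the unique dev_id cookie. */
	return request_threaded_irq(foo->irq, foo_primary_handler,
				    foo_thread_handler, IRQF_SHARED,
				    "foo", foo);
}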
1891
1892 /**
1893 * request_any_context_irq - allocate an interrupt line
1894 * @irq: Interrupt line to allocate
1895 * @handler: Function to be called when the IRQ occurs.
1896 * Threaded handler for threaded interrupts.
1897 * @flags: Interrupt type flags
1898 * @name: An ascii name for the claiming device
1899 * @dev_id: A cookie passed back to the handler function
1900 *
1901 * This call allocates interrupt resources and enables the
1902 * interrupt line and IRQ handling. It selects either a
1903 * hardirq or threaded handling method depending on the
1904 * context.
1905 *
1906 * On failure, it returns a negative value. On success,
1907 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
1908 */
1909 int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1910 unsigned long flags, const char *name, void *dev_id)
1911 {
1912 struct irq_desc *desc;
1913 int ret;
1914
1915 if (irq == IRQ_NOTCONNECTED)
1916 return -ENOTCONN;
1917
1918 desc = irq_to_desc(irq);
1919 if (!desc)
1920 return -EINVAL;
1921
1922 if (irq_settings_is_nested_thread(desc)) {
1923 ret = request_threaded_irq(irq, NULL, handler,
1924 flags, name, dev_id);
1925 return !ret ? IRQC_IS_NESTED : ret;
1926 }
1927
1928 ret = request_irq(irq, handler, flags, name, dev_id);
1929 return !ret ? IRQC_IS_HARDIRQ : ret;
1930 }
1931 EXPORT_SYMBOL_GPL(request_any_context_irq);
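
/*
 * Example usage (illustrative sketch; the "bar" device is hypothetical):
 * the same handler may run either in hard interrupt context or in a
 * nested thread, and the positive return value tells the driver which.
 */
#include <linux/interrupt.h>
#include <linux/printk.h>

static irqreturn_t bar_handler(int irq, void *dev_id)
{
	/* Must be written to be safe in both hardirq and threaded context. */
	return IRQ_HANDLED;
}

static int bar_request_irq(unsigned int irq, void *bar)
{
	int ret = request_any_context_irq(irq, bar_handler, 0, "bar", bar);

	if (ret < 0)
		return ret;

	pr_info("bar: IRQ%u handled %s\n", irq,
		ret == IRQC_IS_NESTED ? "in a nested thread" : "as a hardirq");
	return 0;
}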
1932
1933 void enable_percpu_irq(unsigned int irq, unsigned int type)
1934 {
1935 unsigned int cpu = smp_processor_id();
1936 unsigned long flags;
1937 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1938
1939 if (!desc)
1940 return;
1941
1942 /*
1943 * If the trigger type is not specified by the caller, then
1944 * use the default for this interrupt.
1945 */
1946 type &= IRQ_TYPE_SENSE_MASK;
1947 if (type == IRQ_TYPE_NONE)
1948 type = irqd_get_trigger_type(&desc->irq_data);
1949
1950 if (type != IRQ_TYPE_NONE) {
1951 int ret;
1952
1953 ret = __irq_set_trigger(desc, type);
1954
1955 if (ret) {
1956 WARN(1, "failed to set type for IRQ%d\n", irq);
1957 goto out;
1958 }
1959 }
1960
1961 irq_percpu_enable(desc, cpu);
1962 out:
1963 irq_put_desc_unlock(desc, flags);
1964 }
1965 EXPORT_SYMBOL_GPL(enable_percpu_irq);
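
/*
 * Example usage (illustrative sketch; "my_ppi" is a hypothetical per-CPU
 * interrupt): enable_percpu_irq() only affects the CPU it runs on, so a
 * driver typically runs it on every CPU, here via on_each_cpu().
 */
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/smp.h>

static void my_ppi_enable_on_this_cpu(void *info)
{
	unsigned int irq = *(unsigned int *)info;

	/* IRQ_TYPE_NONE keeps whatever trigger type is already configured. */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
}

static void my_ppi_enable_everywhere(unsigned int irq)
{
	/* Run on all online CPUs and wait until every call has finished. */
	on_each_cpu(my_ppi_enable_on_this_cpu, &irq, 1);
}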
1966
1967 /**
1968 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
1969 * @irq: Linux irq number to check for
1970 *
1971 * Must be called from a non-migratable context. Returns the enable
1972 * state of a per cpu interrupt on the current cpu.
1973 */
1974 bool irq_percpu_is_enabled(unsigned int irq)
1975 {
1976 unsigned int cpu = smp_processor_id();
1977 struct irq_desc *desc;
1978 unsigned long flags;
1979 bool is_enabled;
1980
1981 desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1982 if (!desc)
1983 return false;
1984
1985 is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
1986 irq_put_desc_unlock(desc, flags);
1987
1988 return is_enabled;
1989 }
1990 EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
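
/*
 * Example usage (illustrative sketch): the answer is only meaningful for
 * the CPU the check runs on, so keep the caller from migrating around it.
 */
#include <linux/interrupt.h>
#include <linux/preempt.h>

static bool my_ppi_enabled_here(unsigned int irq)
{
	bool enabled;

	preempt_disable();		/* no migration while we ask */
	enabled = irq_percpu_is_enabled(irq);
	preempt_enable();

	return enabled;
}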
1991
1992 void disable_percpu_irq(unsigned int irq)
1993 {
1994 unsigned int cpu = smp_processor_id();
1995 unsigned long flags;
1996 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1997
1998 if (!desc)
1999 return;
2000
2001 irq_percpu_disable(desc, cpu);
2002 irq_put_desc_unlock(desc, flags);
2003 }
2004 EXPORT_SYMBOL_GPL(disable_percpu_irq);
2005
2006 /*
2007 * Internal function to unregister a percpu irqaction.
2008 */
2009 static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2010 {
2011 struct irq_desc *desc = irq_to_desc(irq);
2012 struct irqaction *action;
2013 unsigned long flags;
2014
2015 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
2016
2017 if (!desc)
2018 return NULL;
2019
2020 raw_spin_lock_irqsave(&desc->lock, flags);
2021
2022 action = desc->action;
2023 if (!action || action->percpu_dev_id != dev_id) {
2024 WARN(1, "Trying to free already-free IRQ %d\n", irq);
2025 goto bad;
2026 }
2027
2028 if (!cpumask_empty(desc->percpu_enabled)) {
2029 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
2030 irq, cpumask_first(desc->percpu_enabled));
2031 goto bad;
2032 }
2033
2034 /* Found it - now remove it from the list of entries: */
2035 desc->action = NULL;
2036
2037 raw_spin_unlock_irqrestore(&desc->lock, flags);
2038
2039 unregister_handler_proc(irq, action);
2040
2041 irq_chip_pm_put(&desc->irq_data);
2042 module_put(desc->owner);
2043 return action;
2044
2045 bad:
2046 raw_spin_unlock_irqrestore(&desc->lock, flags);
2047 return NULL;
2048 }
2049
2050 /**
2051 * remove_percpu_irq - free a per-cpu interrupt
2052 * @irq: Interrupt line to free
2053 * @act: irqaction for the interrupt
2054 *
2055 * Used to remove interrupts statically set up by the early boot process.
2056 */
2057 void remove_percpu_irq(unsigned int irq, struct irqaction *act)
2058 {
2059 struct irq_desc *desc = irq_to_desc(irq);
2060
2061 if (desc && irq_settings_is_per_cpu_devid(desc))
2062 __free_percpu_irq(irq, act->percpu_dev_id);
2063 }
2064
2065 /**
2066 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
2067 * @irq: Interrupt line to free
2068 * @dev_id: Device identity to free
2069 *
2070 * Remove a percpu interrupt handler. The handler is removed, but
2071 * the interrupt line is not disabled. Disabling it must be done on each
2072 * CPU before calling this function. The function does not return
2073 * until any executing interrupts for this IRQ have completed.
2074 *
2075 * This function must not be called from interrupt context.
2076 */
2077 void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2078 {
2079 struct irq_desc *desc = irq_to_desc(irq);
2080
2081 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2082 return;
2083
2084 chip_bus_lock(desc);
2085 kfree(__free_percpu_irq(irq, dev_id));
2086 chip_bus_sync_unlock(desc);
2087 }
2088 EXPORT_SYMBOL_GPL(free_percpu_irq);
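
/*
 * Example usage (illustrative sketch; "my_ppi_state" is a hypothetical
 * per-CPU cookie obtained from alloc_percpu()): the line must already be
 * disabled on every CPU, so the teardown mirrors the enable path before
 * the handler itself is freed.
 */
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>

struct my_ppi_state {
	unsigned long	count;
};

static void my_ppi_disable_on_this_cpu(void *info)
{
	disable_percpu_irq(*(unsigned int *)info);
}

static void my_ppi_teardown(unsigned int irq,
			    struct my_ppi_state __percpu *state)
{
	/* Disable the per-CPU line everywhere first... */
	on_each_cpu(my_ppi_disable_on_this_cpu, &irq, 1);

	/* ...then remove the handler and release the per-CPU cookie. */
	free_percpu_irq(irq, state);
	free_percpu(state);
}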
2089
2090 /**
2091 * setup_percpu_irq - setup a per-cpu interrupt
2092 * @irq: Interrupt line to setup
2093 * @act: irqaction for the interrupt
2094 *
2095 * Used to statically set up per-cpu interrupts in the early boot process.
2096 */
2097 int setup_percpu_irq(unsigned int irq, struct irqaction *act)
2098 {
2099 struct irq_desc *desc = irq_to_desc(irq);
2100 int retval;
2101
2102 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2103 return -EINVAL;
2104
2105 retval = irq_chip_pm_get(&desc->irq_data);
2106 if (retval < 0)
2107 return retval;
2108
2109 retval = __setup_irq(irq, desc, act);
2110
2111 if (retval)
2112 irq_chip_pm_put(&desc->irq_data);
2113
2114 return retval;
2115 }
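
/*
 * Example usage (illustrative sketch, loosely modelled on per-CPU timer
 * drivers; all "my_timer" names and data are hypothetical): early boot
 * code registers a statically allocated irqaction for a per-CPU interrupt
 * and enables it on the boot CPU.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/printk.h>

struct my_timer_data {
	unsigned long	ticks;
};

static DEFINE_PER_CPU(struct my_timer_data, my_timer_evt);

static irqreturn_t my_timer_interrupt(int irq, void *dev_id)
{
	struct my_timer_data *data = dev_id;	/* this CPU's instance */

	data->ticks++;
	return IRQ_HANDLED;
}

static struct irqaction my_timer_irqaction = {
	.handler	= my_timer_interrupt,
	.flags		= IRQF_PERCPU | IRQF_TIMER,
	.name		= "my_timer",
	.percpu_dev_id	= &my_timer_evt,
};

static void __init my_timer_register(unsigned int irq)
{
	if (setup_percpu_irq(irq, &my_timer_irqaction))
		pr_err("my_timer: failed to set up IRQ%u\n", irq);
	else
		enable_percpu_irq(irq, IRQ_TYPE_NONE);	/* boot CPU only */
}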
2116
2117 /**
2118 * __request_percpu_irq - allocate a percpu interrupt line
2119 * @irq: Interrupt line to allocate
2120 * @handler: Function to be called when the IRQ occurs.
2121 * @flags: Interrupt type flags (IRQF_TIMER only)
2122 * @devname: An ascii name for the claiming device
2123 * @dev_id: A percpu cookie passed back to the handler function
2124 *
2125 * This call allocates interrupt resources and enables the
2126 * interrupt on the local CPU. If the interrupt is supposed to be
2127 * enabled on other CPUs, it has to be done on each CPU using
2128 * enable_percpu_irq().
2129 *
2130 * Dev_id must be globally unique. It is a per-cpu variable, and
2131 * the handler gets called with the interrupted CPU's instance of
2132 * that variable.
2133 */
2134 int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2135 unsigned long flags, const char *devname,
2136 void __percpu *dev_id)
2137 {
2138 struct irqaction *action;
2139 struct irq_desc *desc;
2140 int retval;
2141
2142 if (!dev_id)
2143 return -EINVAL;
2144
2145 desc = irq_to_desc(irq);
2146 if (!desc || !irq_settings_can_request(desc) ||
2147 !irq_settings_is_per_cpu_devid(desc))
2148 return -EINVAL;
2149
2150 if (flags && flags != IRQF_TIMER)
2151 return -EINVAL;
2152
2153 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2154 if (!action)
2155 return -ENOMEM;
2156
2157 action->handler = handler;
2158 action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
2159 action->name = devname;
2160 action->percpu_dev_id = dev_id;
2161
2162 retval = irq_chip_pm_get(&desc->irq_data);
2163 if (retval < 0) {
2164 kfree(action);
2165 return retval;
2166 }
2167
2168 retval = __setup_irq(irq, desc, action);
2169
2170 if (retval) {
2171 irq_chip_pm_put(&desc->irq_data);
2172 kfree(action);
2173 }
2174
2175 return retval;
2176 }
2177 EXPORT_SYMBOL_GPL(__request_percpu_irq);
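
/*
 * Example usage (illustrative sketch; all "my_ppi" names are hypothetical):
 * most drivers use the request_percpu_irq() wrapper, which calls this
 * function with flags == 0. The per-CPU cookie comes from alloc_percpu()
 * and the handler receives this CPU's instance of it as dev_id.
 */
#include <linux/interrupt.h>
#include <linux/percpu.h>

struct my_ppi_stats {
	unsigned long	count;
};

static struct my_ppi_stats __percpu *ppi_stats;

static irqreturn_t my_ppi_handler(int irq, void *dev_id)
{
	struct my_ppi_stats *stats = dev_id;	/* this CPU's instance */

	stats->count++;
	return IRQ_HANDLED;
}

static int my_ppi_setup(unsigned int irq)
{
	int ret;

	ppi_stats = alloc_percpu(struct my_ppi_stats);
	if (!ppi_stats)
		return -ENOMEM;

	ret = request_percpu_irq(irq, my_ppi_handler, "my_ppi", ppi_stats);
	if (ret)
		free_percpu(ppi_stats);

	/* The line still has to be enabled per CPU via enable_percpu_irq(). */
	return ret;
}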
2178
2179 /**
2180 * irq_get_irqchip_state - returns the irqchip state of an interrupt.
2181 * @irq: Interrupt line that is forwarded to a VM
2182 * @which: One of IRQCHIP_STATE_* the caller wants to know about
2183 * @state: a pointer to a boolean where the state is to be stored
2184 *
2185 * This call snapshots the internal irqchip state of an
2186 * interrupt, returning into @state the bit corresponding to
2187 * state @which.
2188 *
2189 * This function should be called with preemption disabled if the
2190 * interrupt controller has per-cpu registers.
2191 */
2192 int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2193 bool *state)
2194 {
2195 struct irq_desc *desc;
2196 struct irq_data *data;
2197 struct irq_chip *chip;
2198 unsigned long flags;
2199 int err = -EINVAL;
2200
2201 desc = irq_get_desc_buslock(irq, &flags, 0);
2202 if (!desc)
2203 return err;
2204
2205 data = irq_desc_get_irq_data(desc);
2206
2207 do {
2208 chip = irq_data_get_irq_chip(data);
2209 if (chip->irq_get_irqchip_state)
2210 break;
2211 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2212 data = data->parent_data;
2213 #else
2214 data = NULL;
2215 #endif
2216 } while (data);
2217
2218 if (data)
2219 err = chip->irq_get_irqchip_state(data, which, state);
2220
2221 irq_put_desc_busunlock(desc, flags);
2222 return err;
2223 }
2224 EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
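
/*
 * Example usage (illustrative sketch): poll whether a forwarded interrupt
 * is still pending at the irqchip level; a hypervisor-style caller might
 * do this when saving guest interrupt state.
 */
#include <linux/interrupt.h>
#include <linux/irq.h>

static bool my_irq_is_pending(unsigned int irq)
{
	bool pending = false;

	/* On chips without the callback this fails and "false" is kept. */
	irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
	return pending;
}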
2225
2226 /**
2227 * irq_set_irqchip_state - set the state of a forwarded interrupt.
2228 * @irq: Interrupt line that is forwarded to a VM
2229 * @which: State to be restored (one of IRQCHIP_STATE_*)
2230 * @val: Value corresponding to @which
2231 *
2232 * This call sets the internal irqchip state of an interrupt,
2233 * depending on the value of @which.
2234 *
2235 * This function should be called with preemption disabled if the
2236 * interrupt controller has per-cpu registers.
2237 */
2238 int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2239 bool val)
2240 {
2241 struct irq_desc *desc;
2242 struct irq_data *data;
2243 struct irq_chip *chip;
2244 unsigned long flags;
2245 int err = -EINVAL;
2246
2247 desc = irq_get_desc_buslock(irq, &flags, 0);
2248 if (!desc)
2249 return err;
2250
2251 data = irq_desc_get_irq_data(desc);
2252
2253 do {
2254 chip = irq_data_get_irq_chip(data);
2255 if (chip->irq_set_irqchip_state)
2256 break;
2257 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2258 data = data->parent_data;
2259 #else
2260 data = NULL;
2261 #endif
2262 } while (data);
2263
2264 if (data)
2265 err = chip->irq_set_irqchip_state(data, which, val);
2266
2267 irq_put_desc_busunlock(desc, flags);
2268 return err;
2269 }
2270 EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
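
/*
 * Example usage (illustrative sketch): the counterpart to the read side
 * above; a hypervisor-style caller re-injects a saved pending bit into
 * the irqchip when restoring a forwarded interrupt.
 */
#include <linux/interrupt.h>
#include <linux/irq.h>

static int my_restore_forwarded_irq(unsigned int host_irq, bool was_pending)
{
	return irq_set_irqchip_state(host_irq, IRQCHIP_STATE_PENDING,
				     was_pending);
}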