/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <uapi/linux/sched/types.h>
#include <linux/task_work.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads = IS_ENABLED(CONFIG_IRQ_FORCED_THREADING_DEFAULT);

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
static int __init setup_no_irqthreads(char *arg)
{
	force_irqthreads = false;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
early_param("nothreadirqs", setup_no_irqthreads);
#endif

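/*
 * Usage sketch (illustrative): with CONFIG_IRQ_FORCED_THREADING=y the
 * handlers of most interrupts can be forced into threads from the kernel
 * command line via "threadirqs"; "nothreadirqs" selects the opposite when
 * forced threading is the compiled-in default.
 */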
static void __synchronize_hardirq(struct irq_desc *desc)
{
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section. This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}

/**
 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this function
 * while holding a resource the IRQ handler may need, you will
 * deadlock. It does not take associated threaded handlers into
 * account.
 *
 * Do not use this for shutdown scenarios where you must be sure
 * that all parts (hardirq and threaded handler) have completed.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 */
bool synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		return !atomic_read(&desc->threads_active);
	}

	return true;
}
EXPORT_SYMBOL(synchronize_hardirq);

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need, you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
EXPORT_SYMBOL(synchronize_irq);

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

static bool __irq_can_set_affinity(struct irq_desc *desc)
{
	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return false;
	return true;
}

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq:	Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	return __irq_can_set_affinity(irq_to_desc(irq));
}

/**
 * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
 * @irq:	Interrupt to check
 *
 * Like irq_can_set_affinity() above, but additionally checks for the
 * AFFINITY_MANAGED flag.
 */
bool irq_can_set_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __irq_can_set_affinity(desc) &&
		!irqd_affinity_is_managed(&desc->irq_data);
}

/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc:	irq descriptor whose affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We can not call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action;

	for_each_action_of_desc(desc, action)
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
}

static void irq_validate_effective_affinity(struct irq_data *data)
{
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

	if (!cpumask_empty(m))
		return;
	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
		     chip->name, data->irq);
#endif
}

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	ret = chip->irq_set_affinity(data, mask, force);
	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(desc->irq_common_data.affinity, mask);
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_validate_effective_affinity(data);
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	irqd_set_move_pending(data);
	irq_copy_pending(desc, dest);
	return 0;
}
#else
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	return -EBUSY;
}
#endif

static int irq_try_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	int ret = irq_do_set_affinity(data, dest, force);

	/*
	 * In case that the underlying vector management is busy and the
	 * architecture supports the generic pending mechanism then utilize
	 * this to avoid returning an error to user space.
	 */
	if (ret == -EBUSY && !force)
		ret = irq_set_affinity_pending(data, dest);
	return ret;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
		ret = irq_try_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	/* set the initial affinity to prevent every interrupt being on CPU0 */
	if (m)
		__irq_set_affinity(irq, m, false);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
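/*
 * Usage sketch (illustrative; pdev and nvec are hypothetical): multi-queue
 * drivers spread their vectors, publish the target via the hint, and clear
 * it again before free_irq():
 *
 *	for (i = 0; i < nvec; i++)
 *		irq_set_affinity_hint(pci_irq_vector(pdev, i),
 *				      cpumask_of(i % num_online_cpus()));
 *	...
 *	irq_set_affinity_hint(irq, NULL);
 */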

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_common_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq:	Interrupt for which to enable/disable notification
 * @notify:	Context for notification, or %NULL to disable
 *		notification. Function pointers must be initialised;
 *		the other fields will be initialised by this function.
 *
 * Must be called in process context. Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
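/*
 * Usage sketch (illustrative; the mydev_* names are hypothetical): a driver
 * embeds struct irq_affinity_notify and gets called back in process context
 * whenever the interrupt is moved.
 *
 *	static void mydev_notify(struct irq_affinity_notify *notify,
 *				 const cpumask_t *mask)
 *	{
 *		mydev_requeue(container_of(notify, struct mydev, notify));
 *	}
 *
 *	static void mydev_release(struct kref *ref) { }
 *
 *	dev->notify.notify = mydev_notify;
 *	dev->notify.release = mydev_release;
 *	irq_set_affinity_notifier(dev->irq, &dev->notify);
 *	...
 *	irq_set_affinity_notifier(dev->irq, NULL);	before free_irq()
 */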

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
int irq_setup_affinity(struct irq_desc *desc)
{
	struct cpumask *set = irq_default_affinity;
	int ret, node = irq_desc_get_node(desc);
	static DEFINE_RAW_SPINLOCK(mask_lock);
	static struct cpumask mask;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!__irq_can_set_affinity(desc))
		return 0;

	raw_spin_lock(&mask_lock);
	/*
	 * Preserve the managed affinity setting and a userspace affinity
	 * setup, but make sure that one of the targets is online.
	 */
	if (irqd_affinity_is_managed(&desc->irq_data) ||
	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_common_data.affinity,
				       cpu_online_mask))
			set = desc->irq_common_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(&mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(&mask, nodemask))
			cpumask_and(&mask, &mask, nodemask);
	}
	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
	raw_spin_unlock(&mask_lock);
	return ret;
}
#else
/* Wrapper for ALPHA specific affinity selector magic */
int irq_setup_affinity(struct irq_desc *desc)
{
	return irq_select_affinity(irq_desc_get_irq(desc));
}
#endif

/*
 * Called when a bogus affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_setup_affinity(desc);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
#endif

/**
 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 * @irq: interrupt number to set affinity
 * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
 *	       specific data for percpu_devid interrupts
 *
 * This function uses the vCPU specific data to set the vCPU
 * affinity for an irq. The vCPU specific data is passed from
 * outside, such as KVM. One example code path is as below:
 * KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -ENOSYS;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	do {
		chip = irq_data_get_irq_chip(data);
		if (chip && chip->irq_set_vcpu_affinity)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);

void __disable_irq(struct irq_desc *desc)
{
	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need, you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

/**
 * disable_hardirq - disables an irq and waits for hardirq completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this function while
 * holding a resource the hard IRQ handler may need, you will deadlock.
 *
 * When used to optimistically disable an interrupt from atomic context
 * the return value must be checked.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 */
bool disable_hardirq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		return synchronize_hardirq(irq);

	return false;
}
EXPORT_SYMBOL_GPL(disable_hardirq);
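/*
 * Usage sketch (illustrative; the mydev_* helpers are hypothetical):
 * optimistic disable from atomic context must check the result, because
 * false means a threaded handler is still running.
 *
 *	if (disable_hardirq(dev->irq))
 *		mydev_handle_now(dev);
 *	else
 *		mydev_defer(dev);
 *	enable_irq(dev->irq);
 */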

void __enable_irq(struct irq_desc *desc)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
		     irq_desc_get_irq(desc));
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		/*
		 * Call irq_startup() not irq_enable() here because the
		 * interrupt might be marked NOAUTOEN. So irq_startup()
		 * needs to be invoked when it gets enabled the first
		 * time. If it was already started up, then irq_startup()
		 * will invoke irq_enable() under the hood.
		 */
		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
		break;
	}
	default:
		desc->depth--;
	}
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);
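/*
 * Usage sketch (illustrative): disables and enables nest, so every
 * disable_irq() needs a matching enable_irq().
 *
 *	disable_irq(irq);	depth 0 -> 1, line masked
 *	disable_irq(irq);	depth 1 -> 2
 *	enable_irq(irq);	depth 2 -> 1, still masked
 *	enable_irq(irq);	depth 1 -> 0, handling resumes
 */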

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
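/*
 * Usage sketch (illustrative; the mydev_* names are hypothetical): a driver
 * whose interrupt should wake the system typically flips wake mode in its
 * suspend/resume callbacks via the enable_irq_wake()/disable_irq_wake()
 * wrappers around this function.
 *
 *	static int mydev_suspend(struct device *d)
 *	{
 *		if (device_may_wakeup(d))
 *			enable_irq_wake(dev->irq);
 *		return 0;
 *	}
 *
 *	static int mydev_resume(struct device *d)
 *	{
 *		if (device_may_wakeup(d))
 *			disable_irq_wake(dev->irq);
 *		return 0;
 *	}
 */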

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n",
			 irq_desc_get_irq(desc),
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* Mask all flags except trigger mode */
	flags &= IRQ_TYPE_SENSE_MASK;
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);

	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_parent);
#endif

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
{
	WARN(1, "Secondary action handler called for irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return -1;
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask if the interrupt has not been disabled
 * and is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT) ||
	    action->handler == irq_forced_secondary_handler)
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be, we need to protect ourselves
	 * against the following scenario:
	 *
	 * The thread finishes faster than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and masks the line, leaves due
	 * to IRQS_INPROGRESS and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */
	if (cpumask_available(desc->irq_common_data.affinity))
		cpumask_copy(mask, desc->irq_common_data.affinity);
	else
		valid = false;
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow buses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
		struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	return ret;
}

static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}

static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
{
	struct irqaction *secondary = action->secondary;

	if (WARN_ON_ONCE(!secondary))
		return;

	raw_spin_lock_irq(&desc->lock);
	__irq_wake_thread(desc, secondary);
	raw_spin_unlock_irq(&desc->lock);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
			struct irqaction *action);

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					&action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, false);

	irq_thread_check_affinity(desc, action);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (action_ret == IRQ_HANDLED)
			atomic_inc(&desc->threads_handled);
		if (action_ret == IRQ_WAKE_THREAD)
			irq_wake_secondary(desc, action);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set. We cannot verify that as we
	 * cannot touch the oneshot mask at this point anymore as
	 * __setup_irq() might have given out current's thread_mask
	 * again.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}

/**
 * irq_wake_thread - wake the irq thread for the action identified by dev_id
 * @irq:	Interrupt line
 * @dev_id:	Device identity for which the thread should be woken
 *
 */
void irq_wake_thread(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if (action->dev_id == dev_id) {
			if (action->thread)
				__irq_wake_thread(desc, action);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(irq_wake_thread);
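/*
 * Usage sketch (illustrative; the mydev names are hypothetical): a driver
 * can kick its own threaded handler without a hardware interrupt, e.g.
 * from a polling timer when the interrupt line is unreliable.
 *
 *	static void mydev_poll(struct timer_list *t)
 *	{
 *		struct mydev *dev = from_timer(dev, t, poll_timer);
 *
 *		irq_wake_thread(dev->irq, dev);
 *		mod_timer(&dev->poll_timer, jiffies + HZ);
 *	}
 */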

static int irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return 0;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return 0;

	new->flags |= IRQF_ONESHOT;

	/*
	 * Handle the case where we have a real primary handler and a
	 * thread handler. We force thread them as well by creating a
	 * secondary action.
	 */
	if (new->handler != irq_default_primary_handler && new->thread_fn) {
		/* Allocate the secondary action */
		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
		if (!new->secondary)
			return -ENOMEM;
		new->secondary->handler = irq_forced_secondary_handler;
		new->secondary->thread_fn = new->thread_fn;
		new->secondary->dev_id = new->dev_id;
		new->secondary->irq = new->irq;
		new->secondary->name = new->name;
	}
	/* Deal with the primary handler */
	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
	new->thread_fn = new->handler;
	new->handler = irq_default_primary_handler;
	return 0;
}

static int irq_request_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
}

static void irq_release_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	if (c->irq_release_resources)
		c->irq_release_resources(d);
}

static int
setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
{
	struct task_struct *t;
	struct sched_param param = {
		.sched_priority = MAX_USER_RT_PRIO/2,
	};

	if (!secondary) {
		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
	} else {
		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
				   new->name);
		param.sched_priority -= 1;
	}

	if (IS_ERR(t))
		return PTR_ERR(t);

	sched_setscheduler_nocheck(t, SCHED_FIFO, &param);

	/*
	 * We keep the reference to the task struct even if
	 * the thread dies to avoid that the interrupt code
	 * references an already freed task_struct.
	 */
	get_task_struct(t);
	new->thread = t;
	/*
	 * Tell the thread to set its affinity. This is
	 * important for shared interrupt handlers as we do
	 * not invoke setup_affinity() for the secondary
	 * handlers as everything is already set up. Even for
	 * interrupts marked with IRQF_NO_BALANCE this is
	 * correct as we want the thread to move to the cpu(s)
	 * on which the requesting code placed the interrupt.
	 */
	set_bit(IRQTF_AFFINITY, &new->thread_flags);
	return 0;
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 *
 * Locking rules:
 *
 * desc->request_mutex	Provides serialization against a concurrent free_irq()
 *   chip_bus_lock	Provides serialization for slow bus operations
 *     desc->lock	Provides serialization against hard interrupts
 *
 * chip_bus_lock and desc->lock are sufficient for all other management and
 * interrupt related functions. desc->request_mutex solely serializes
 * request/free_irq().
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;

	new->irq = irq;

	/*
	 * If the trigger type is not specified by the caller,
	 * then use the default for this interrupt.
	 */
	if (!(new->flags & IRQF_TRIGGER_MASK))
		new->flags |= irqd_get_trigger_type(&desc->irq_data);

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc)) {
			ret = irq_setup_forced_threading(new);
			if (ret)
				goto out_mput;
		}
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		ret = setup_irq_thread(new, irq, false);
		if (ret)
			goto out_mput;
		if (new->secondary) {
			ret = setup_irq_thread(new->secondary, irq, true);
			if (ret)
				goto out_thread;
		}
	}

	/*
	 * Drivers are often written to work w/o knowledge about the
	 * underlying irq chip implementation, so a request for a
	 * threaded irq without a primary hard irq context handler
	 * requires the ONESHOT flag to be set. Some irq chips like
	 * MSI based interrupts are per se one shot safe. Check the
	 * chip flags, so we can avoid the unmask dance at the end of
	 * the threaded handler for those.
	 */
	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
		new->flags &= ~IRQF_ONESHOT;

	/*
	 * Protects against a concurrent __free_irq() call which might wait
	 * for synchronize_irq() to complete without holding the optional
	 * chip bus lock and desc->lock.
	 */
	mutex_lock(&desc->request_mutex);

	/*
	 * Acquire bus lock as the irq_request_resources() callback below
	 * might rely on the serialization or the magic power management
	 * functions which are abusing the irq_bus_lock() callback.
	 */
	chip_bus_lock(desc);

	/* First installed action requests resources. */
	if (!desc->action) {
		ret = irq_request_resources(desc);
		if (ret) {
			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
			       new->name, irq, desc->irq_data.chip->name);
			goto out_bus_unlock;
		}
	}

	/*
	 * The following block of code has to be executed atomically
	 * protected against a concurrent interrupt and any of the other
	 * management calls which are not serialized via
	 * desc->request_mutex or the optional bus lock.
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 */
		unsigned int oldtype;

		/*
		 * If nobody did set the configuration before, inherit
		 * the one provided by the requester.
		 */
		if (irqd_trigger_type_was_set(&desc->irq_data)) {
			oldtype = irqd_get_trigger_type(&desc->irq_data);
		} else {
			oldtype = new->flags & IRQF_TRIGGER_MASK;
			irqd_set_trigger_type(&desc->irq_data, oldtype);
		}

		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
			goto mismatch;

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 or 64 irqs sharing one line,
		 * but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_unlock;
		}
		/*
		 * The thread_mask for the action is or'ed to
		 * desc->threads_active to indicate that the
		 * IRQF_ONESHOT thread handler has been woken, but not
		 * yet finished. The bit is cleared when a thread
		 * completes. When all threads of a shared interrupt
		 * line have completed desc->threads_active becomes
		 * zero and the interrupt line is unmasked. See
		 * handle.c:irq_wake_thread() for further information.
		 *
		 * If no thread is woken by primary (hard irq context)
		 * interrupt handlers, then desc->threads_active is
		 * also checked for zero to unmask the irq line in the
		 * affected hard irq flow handlers
		 * (handle_[fasteoi|level]_irq).
		 *
		 * The new action gets the first zero bit of
		 * thread_mask assigned. See the loop above which or's
		 * all existing action->thread_mask bits.
		 */
		new->thread_mask = 1UL << ffz(thread_mask);

	} else if (new->handler == irq_default_primary_handler &&
		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq line is reenabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
		       irq);
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!shared) {
		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc,
						new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_unlock;
		}

		/*
		 * Activate the interrupt. That activation must happen
		 * independently of IRQ_NOAUTOEN. request_irq() can fail
		 * and the callers are supposed to handle
		 * that. enable_irq() of an interrupt requested with
		 * IRQ_NOAUTOEN is not supposed to fail. The activation
		 * keeps it in shutdown mode, it merely associates
		 * resources if necessary and if that's not possible it
		 * fails. Interrupts which are in managed shutdown mode
		 * will simply ignore that activation request.
		 */
		ret = irq_activate(desc);
		if (ret)
			goto out_unlock;

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
		}

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		if (irq_settings_can_autoenable(desc)) {
			irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		} else {
			/*
			 * Shared interrupts do not go well with disabling
			 * auto enable. The sharing interrupt might request
			 * it while it's still disabled and then wait for
			 * interrupts forever.
			 */
			WARN_ON_ONCE(new->flags & IRQF_SHARED);
			/* Undo nested disables: */
			desc->depth = 1;
		}

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warn("irq %d uses trigger mode %u; requested %u\n",
				irq, omsk, nmsk);
	}

	*old_ptr = new;

	irq_pm_install_action(desc, new);

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
	mutex_unlock(&desc->request_mutex);

	irq_setup_timings(desc, new);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);
	if (new->secondary)
		wake_up_process(new->secondary->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	return 0;

mismatch:
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
		       irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
	}
	ret = -EBUSY;

out_unlock:
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (!desc->action)
		irq_release_resources(desc);
out_bus_unlock:
	chip_bus_sync_unlock(desc);
	mutex_unlock(&desc->request_mutex);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
	if (new->secondary && new->secondary->thread) {
		struct task_struct *t = new->secondary->thread;

		new->secondary->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}

/**
 * setup_irq - setup an interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		return retval;

	retval = __setup_irq(irq, desc, act);

	if (retval)
		irq_chip_pm_put(&desc->irq_data);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);
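/*
 * Usage sketch (illustrative; TIMER_IRQ and timer_interrupt are
 * hypothetical): architecture code traditionally registers its timer tick
 * this way, before the allocator needed by request_irq() is available.
 *
 *	static struct irqaction timer_irqaction = {
 *		.handler	= timer_interrupt,
 *		.flags		= IRQF_TIMER,
 *		.name		= "timer",
 *	};
 *
 *	setup_irq(TIMER_IRQ, &timer_irqaction);
 */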

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	mutex_lock(&desc->request_mutex);
	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);
			chip_bus_sync_unlock(desc);
			mutex_unlock(&desc->request_mutex);
			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	irq_pm_remove_action(desc, action);

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		irq_settings_clr_disable_unlazy(desc);
		irq_shutdown(desc);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	/*
	 * Drop bus_lock here so the changes which were done in the chip
	 * callbacks above are synced out to the irq chips which hang
	 * behind a slow bus (I2C, SPI) before calling synchronize_irq().
	 *
	 * Aside of that the bus_lock can also be taken from the threaded
	 * handler in irq_finalize_oneshot() which results in a deadlock
	 * because synchronize_irq() would wait forever for the thread to
	 * complete, which is blocked on the bus lock.
	 *
	 * The still held desc->request_mutex() protects against a
	 * concurrent request_irq() of this irq so the release of resources
	 * and timing data is properly serialized.
	 */
	chip_bus_sync_unlock(desc);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	if (action->thread) {
		kthread_stop(action->thread);
		put_task_struct(action->thread);
		if (action->secondary && action->secondary->thread) {
			kthread_stop(action->secondary->thread);
			put_task_struct(action->secondary->thread);
		}
	}

	/* Last action releases resources */
	if (!desc->action) {
		/*
		 * Reacquire bus lock as irq_release_resources() might
		 * require it to deallocate resources over the slow bus.
		 */
		chip_bus_lock(desc);
		irq_release_resources(desc);
		chip_bus_sync_unlock(desc);
		irq_remove_timings(desc);
	}

	mutex_unlock(&desc->request_mutex);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	kfree(action->secondary);
	return action;
}
1680
cbf94f06
MD
1681/**
1682 * remove_irq - free an interrupt
1683 * @irq: Interrupt line to free
1684 * @act: irqaction for the interrupt
1685 *
1686 * Used to remove interrupts statically setup by the early boot process.
1687 */
1688void remove_irq(unsigned int irq, struct irqaction *act)
1689{
31d9d9b6
MZ
1690 struct irq_desc *desc = irq_to_desc(irq);
1691
1692 if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
a7e60e55 1693 __free_irq(irq, act->dev_id);
cbf94f06 1694}
eb53b4e8 1695EXPORT_SYMBOL_GPL(remove_irq);
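
/*
 * A hypothetical usage sketch, not part of this file: remove_irq() pairs
 * with setup_irq() for interrupts wired up with a static struct irqaction
 * before the allocators are ready. EXAMPLE_TIMER_IRQ and the example_*
 * names are assumptions for illustration only.
 */
static irqreturn_t example_timer_handler(int irq, void *dev_id)
{
	example_timer_ack();		/* hypothetical device ack */
	return IRQ_HANDLED;
}

static struct irqaction example_timer_irqaction = {
	.handler	= example_timer_handler,
	.flags		= IRQF_TIMER,
	.name		= "example-timer",
};

/* Early boot:	setup_irq(EXAMPLE_TIMER_IRQ, &example_timer_irqaction);
 * Teardown:	remove_irq(EXAMPLE_TIMER_IRQ, &example_timer_irqaction);
 */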
cbf94f06 1696
f21cfb25
MD
1697/**
1698 * free_irq - free an interrupt allocated with request_irq
1699 * @irq: Interrupt line to free
1700 * @dev_id: Device identity to free
1701 *
1702 * Remove an interrupt handler. The handler is removed and if the
1703 * interrupt line is no longer in use by any driver it is disabled.
1704 * On a shared IRQ the caller must ensure the interrupt is disabled
1705 * on the card it drives before calling this function. The function
1706 * does not return until any executing interrupts for this IRQ
1707 * have completed.
1708 *
1709 * This function must not be called from interrupt context.
25ce4be7
CH
1710 *
1711 * Returns the devname argument passed to request_irq.
f21cfb25 1712 */
25ce4be7 1713const void *free_irq(unsigned int irq, void *dev_id)
f21cfb25 1714{
70aedd24 1715 struct irq_desc *desc = irq_to_desc(irq);
25ce4be7
CH
1716 struct irqaction *action;
1717 const char *devname;
70aedd24 1718
31d9d9b6 1719 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
25ce4be7 1720 return NULL;
70aedd24 1721
cd7eab44
BH
1722#ifdef CONFIG_SMP
1723 if (WARN_ON(desc->affinity_notify))
1724 desc->affinity_notify = NULL;
1725#endif
1726
25ce4be7 1727 action = __free_irq(irq, dev_id);
2827a418
AM
1728
1729 if (!action)
1730 return NULL;
1731
25ce4be7
CH
1732 devname = action->name;
1733 kfree(action);
1734 return devname;
1da177e4 1735}
1da177e4
LT
1736EXPORT_SYMBOL(free_irq);
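
/*
 * A hypothetical teardown sketch: the dev_id cookie must match the one
 * given to request_irq(), and the returned devname lets callers that
 * allocated the name dynamically release it. struct example_dev and the
 * example_* helpers are assumptions.
 */
static void example_shutdown(struct example_dev *ed)
{
	const void *name;

	example_mask_device_irq(ed);	/* quiesce the card first (shared line) */
	name = free_irq(ed->irq, ed);	/* waits for running handlers */
	kfree(name);			/* only if the name was kmalloc'ed */
}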
1737
1738/**
3aa551c9 1739 * request_threaded_irq - allocate an interrupt line
1da177e4 1740 * @irq: Interrupt line to allocate
3aa551c9
TG
1741 * @handler: Function to be called when the IRQ occurs.
1742 * Primary handler for threaded interrupts
b25c340c
TG
1743 * If NULL and thread_fn != NULL the default
1744 * primary handler is installed
f48fe81e
TG
1745 * @thread_fn: Function called from the irq handler thread
1746 * If NULL, no irq thread is created
1da177e4
LT
1747 * @irqflags: Interrupt type flags
1748 * @devname: An ascii name for the claiming device
1749 * @dev_id: A cookie passed back to the handler function
1750 *
1751 * This call allocates interrupt resources and enables the
1752 * interrupt line and IRQ handling. From the point this
 1753 * call is made, your handler function may be invoked. Since
1754 * your handler function must clear any interrupt the board
1755 * raises, you must take care both to initialise your hardware
1756 * and to set up the interrupt handler in the right order.
1757 *
3aa551c9 1758 * If you want to set up a threaded irq handler for your device
6d21af4f 1759 * then you need to supply @handler and @thread_fn. @handler is
3aa551c9
TG
1760 * still called in hard interrupt context and has to check
 1761 * whether the interrupt originates from the device. If so, it
1762 * needs to disable the interrupt on the device and return
39a2eddb 1763 * IRQ_WAKE_THREAD which will wake up the handler thread and run
3aa551c9
TG
1764 * @thread_fn. This split handler design is necessary to support
1765 * shared interrupts.
1766 *
1da177e4
LT
1767 * Dev_id must be globally unique. Normally the address of the
1768 * device data structure is used as the cookie. Since the handler
1769 * receives this value it makes sense to use it.
1770 *
 1771 * If your interrupt is shared, you must pass a non-NULL dev_id
1772 * as this is required when freeing the interrupt.
1773 *
1774 * Flags:
1775 *
3cca53b0 1776 * IRQF_SHARED Interrupt is shared
0c5d1eb7 1777 * IRQF_TRIGGER_* Specify active edge(s) or level
1da177e4
LT
1778 *
1779 */
3aa551c9
TG
1780int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1781 irq_handler_t thread_fn, unsigned long irqflags,
1782 const char *devname, void *dev_id)
1da177e4 1783{
06fcb0c6 1784 struct irqaction *action;
08678b08 1785 struct irq_desc *desc;
d3c60047 1786 int retval;
1da177e4 1787
e237a551
CF
1788 if (irq == IRQ_NOTCONNECTED)
1789 return -ENOTCONN;
1790
1da177e4
LT
1791 /*
1792 * Sanity-check: shared interrupts must pass in a real dev-ID,
1793 * otherwise we'll have trouble later trying to figure out
1794 * which interrupt is which (messes up the interrupt freeing
1795 * logic etc).
17f48034
RW
1796 *
1797 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
1798 * it cannot be set along with IRQF_NO_SUSPEND.
1da177e4 1799 */
17f48034
RW
1800 if (((irqflags & IRQF_SHARED) && !dev_id) ||
1801 (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
1802 ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
1da177e4 1803 return -EINVAL;
7d94f7ca 1804
cb5bc832 1805 desc = irq_to_desc(irq);
7d94f7ca 1806 if (!desc)
1da177e4 1807 return -EINVAL;
7d94f7ca 1808
31d9d9b6
MZ
1809 if (!irq_settings_can_request(desc) ||
1810 WARN_ON(irq_settings_is_per_cpu_devid(desc)))
6550c775 1811 return -EINVAL;
b25c340c
TG
1812
1813 if (!handler) {
1814 if (!thread_fn)
1815 return -EINVAL;
1816 handler = irq_default_primary_handler;
1817 }
1da177e4 1818
45535732 1819 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1da177e4
LT
1820 if (!action)
1821 return -ENOMEM;
1822
1823 action->handler = handler;
3aa551c9 1824 action->thread_fn = thread_fn;
1da177e4 1825 action->flags = irqflags;
1da177e4 1826 action->name = devname;
1da177e4
LT
1827 action->dev_id = dev_id;
1828
be45beb2 1829 retval = irq_chip_pm_get(&desc->irq_data);
4396f46c
SL
1830 if (retval < 0) {
1831 kfree(action);
be45beb2 1832 return retval;
4396f46c 1833 }
be45beb2 1834
d3c60047 1835 retval = __setup_irq(irq, desc, action);
70aedd24 1836
2a1d3ab8 1837 if (retval) {
be45beb2 1838 irq_chip_pm_put(&desc->irq_data);
2a1d3ab8 1839 kfree(action->secondary);
377bf1e4 1840 kfree(action);
2a1d3ab8 1841 }
377bf1e4 1842
6d83f94d 1843#ifdef CONFIG_DEBUG_SHIRQ_FIXME
6ce51c43 1844 if (!retval && (irqflags & IRQF_SHARED)) {
a304e1b8
DW
1845 /*
1846 * It's a shared IRQ -- the driver ought to be prepared for it
1847 * to happen immediately, so let's make sure....
377bf1e4
AV
1848 * We disable the irq to make sure that a 'real' IRQ doesn't
1849 * run in parallel with our fake.
a304e1b8 1850 */
59845b1f 1851 unsigned long flags;
a304e1b8 1852
377bf1e4 1853 disable_irq(irq);
59845b1f 1854 local_irq_save(flags);
377bf1e4 1855
59845b1f 1856 handler(irq, dev_id);
377bf1e4 1857
59845b1f 1858 local_irq_restore(flags);
377bf1e4 1859 enable_irq(irq);
a304e1b8
DW
1860 }
1861#endif
1da177e4
LT
1862 return retval;
1863}
3aa551c9 1864EXPORT_SYMBOL(request_threaded_irq);
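
/*
 * A sketch of the split-handler design described above; all example_*
 * names, registers and EXAMPLE_* bits are hypothetical.
 */
static irqreturn_t example_hardirq(int irq, void *dev_id)
{
	struct example_dev *ed = dev_id;

	/* Shared line: report IRQ_NONE if our device did not raise it. */
	if (!(readl(ed->regs + EXAMPLE_STAT) & EXAMPLE_STAT_IRQ))
		return IRQ_NONE;

	/* Silence the device, then defer the real work to the thread. */
	writel(0, ed->regs + EXAMPLE_IRQ_EN);
	return IRQ_WAKE_THREAD;
}

static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
	struct example_dev *ed = dev_id;

	example_drain_fifo(ed);			/* may sleep here */
	writel(1, ed->regs + EXAMPLE_IRQ_EN);	/* unmask on the device */
	return IRQ_HANDLED;
}

/* In probe():
 *	err = request_threaded_irq(ed->irq, example_hardirq,
 *				   example_thread_fn, IRQF_SHARED,
 *				   "example", ed);
 */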
ae731f8d
MZ
1865
1866/**
1867 * request_any_context_irq - allocate an interrupt line
1868 * @irq: Interrupt line to allocate
1869 * @handler: Function to be called when the IRQ occurs.
1870 * Threaded handler for threaded interrupts.
1871 * @flags: Interrupt type flags
1872 * @name: An ascii name for the claiming device
1873 * @dev_id: A cookie passed back to the handler function
1874 *
1875 * This call allocates interrupt resources and enables the
1876 * interrupt line and IRQ handling. It selects either a
1877 * hardirq or threaded handling method depending on the
1878 * context.
1879 *
1880 * On failure, it returns a negative value. On success,
1881 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
1882 */
1883int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1884 unsigned long flags, const char *name, void *dev_id)
1885{
e237a551 1886 struct irq_desc *desc;
ae731f8d
MZ
1887 int ret;
1888
e237a551
CF
1889 if (irq == IRQ_NOTCONNECTED)
1890 return -ENOTCONN;
1891
1892 desc = irq_to_desc(irq);
ae731f8d
MZ
1893 if (!desc)
1894 return -EINVAL;
1895
1ccb4e61 1896 if (irq_settings_is_nested_thread(desc)) {
ae731f8d
MZ
1897 ret = request_threaded_irq(irq, NULL, handler,
1898 flags, name, dev_id);
1899 return !ret ? IRQC_IS_NESTED : ret;
1900 }
1901
1902 ret = request_irq(irq, handler, flags, name, dev_id);
1903 return !ret ? IRQC_IS_HARDIRQ : ret;
1904}
1905EXPORT_SYMBOL_GPL(request_any_context_irq);
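
/*
 * A hypothetical caller that cannot know up front whether the line is
 * nested/threaded: negative values are errors, otherwise the IRQC_*
 * value reports which handling method was selected.
 */
static int example_probe_irq(struct example_dev *ed)
{
	int ret;

	ret = request_any_context_irq(ed->irq, example_thread_fn, 0,
				      "example", ed);
	if (ret < 0)
		return ret;

	ed->nested = (ret == IRQC_IS_NESTED);
	return 0;
}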
31d9d9b6 1906
1e7c5fd2 1907void enable_percpu_irq(unsigned int irq, unsigned int type)
31d9d9b6
MZ
1908{
1909 unsigned int cpu = smp_processor_id();
1910 unsigned long flags;
1911 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1912
1913 if (!desc)
1914 return;
1915
f35ad083
MZ
1916 /*
1917 * If the trigger type is not specified by the caller, then
1918 * use the default for this interrupt.
1919 */
1e7c5fd2 1920 type &= IRQ_TYPE_SENSE_MASK;
f35ad083
MZ
1921 if (type == IRQ_TYPE_NONE)
1922 type = irqd_get_trigger_type(&desc->irq_data);
1923
1e7c5fd2
MZ
1924 if (type != IRQ_TYPE_NONE) {
1925 int ret;
1926
a1ff541a 1927 ret = __irq_set_trigger(desc, type);
1e7c5fd2
MZ
1928
1929 if (ret) {
32cffdde 1930 WARN(1, "failed to set type for IRQ%d\n", irq);
1e7c5fd2
MZ
1931 goto out;
1932 }
1933 }
1934
31d9d9b6 1935 irq_percpu_enable(desc, cpu);
1e7c5fd2 1936out:
31d9d9b6
MZ
1937 irq_put_desc_unlock(desc, flags);
1938}
36a5df85 1939EXPORT_SYMBOL_GPL(enable_percpu_irq);
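
/*
 * Per-cpu interrupts must be enabled on each CPU individually; a common
 * shape (names hypothetical) is a CPU hotplug startup callback.
 */
static int example_starting_cpu(unsigned int cpu)
{
	/* IRQ_TYPE_NONE keeps the already-configured trigger type, per
	 * the fallback documented above. */
	enable_percpu_irq(example_irq, IRQ_TYPE_NONE);
	return 0;
}

/* Registered with something like:
 *	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:starting",
 *			  example_starting_cpu, example_dying_cpu);
 */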
31d9d9b6 1940
f0cb3220
TP
1941/**
1942 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
1943 * @irq: Linux irq number to check for
1944 *
 1945 * Must be called from a non-migratable context. Returns the enable
1946 * state of a per cpu interrupt on the current cpu.
1947 */
1948bool irq_percpu_is_enabled(unsigned int irq)
1949{
1950 unsigned int cpu = smp_processor_id();
1951 struct irq_desc *desc;
1952 unsigned long flags;
1953 bool is_enabled;
1954
1955 desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1956 if (!desc)
1957 return false;
1958
1959 is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
1960 irq_put_desc_unlock(desc, flags);
1961
1962 return is_enabled;
1963}
1964EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
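
/*
 * Sketch of the non-migratable requirement: pin to the current CPU for
 * the duration of the check; the answer describes that CPU only.
 */
static bool example_irq_enabled_here(unsigned int irq)
{
	bool enabled;

	preempt_disable();
	enabled = irq_percpu_is_enabled(irq);
	preempt_enable();

	return enabled;	/* may be stale once migration is possible again */
}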
1965
31d9d9b6
MZ
1966void disable_percpu_irq(unsigned int irq)
1967{
1968 unsigned int cpu = smp_processor_id();
1969 unsigned long flags;
1970 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1971
1972 if (!desc)
1973 return;
1974
1975 irq_percpu_disable(desc, cpu);
1976 irq_put_desc_unlock(desc, flags);
1977}
36a5df85 1978EXPORT_SYMBOL_GPL(disable_percpu_irq);
31d9d9b6
MZ
1979
1980/*
1981 * Internal function to unregister a percpu irqaction.
1982 */
1983static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1984{
1985 struct irq_desc *desc = irq_to_desc(irq);
1986 struct irqaction *action;
1987 unsigned long flags;
1988
1989 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1990
1991 if (!desc)
1992 return NULL;
1993
1994 raw_spin_lock_irqsave(&desc->lock, flags);
1995
1996 action = desc->action;
1997 if (!action || action->percpu_dev_id != dev_id) {
1998 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1999 goto bad;
2000 }
2001
2002 if (!cpumask_empty(desc->percpu_enabled)) {
2003 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
2004 irq, cpumask_first(desc->percpu_enabled));
2005 goto bad;
2006 }
2007
2008 /* Found it - now remove it from the list of entries: */
2009 desc->action = NULL;
2010
2011 raw_spin_unlock_irqrestore(&desc->lock, flags);
2012
2013 unregister_handler_proc(irq, action);
2014
be45beb2 2015 irq_chip_pm_put(&desc->irq_data);
31d9d9b6
MZ
2016 module_put(desc->owner);
2017 return action;
2018
2019bad:
2020 raw_spin_unlock_irqrestore(&desc->lock, flags);
2021 return NULL;
2022}
2023
2024/**
2025 * remove_percpu_irq - free a per-cpu interrupt
2026 * @irq: Interrupt line to free
2027 * @act: irqaction for the interrupt
2028 *
 2029 * Used to remove interrupts statically set up by the early boot process.
2030 */
2031void remove_percpu_irq(unsigned int irq, struct irqaction *act)
2032{
2033 struct irq_desc *desc = irq_to_desc(irq);
2034
2035 if (desc && irq_settings_is_per_cpu_devid(desc))
2036 __free_percpu_irq(irq, act->percpu_dev_id);
2037}
2038
2039/**
2040 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
2041 * @irq: Interrupt line to free
2042 * @dev_id: Device identity to free
2043 *
2044 * Remove a percpu interrupt handler. The handler is removed, but
2045 * the interrupt line is not disabled. This must be done on each
2046 * CPU before calling this function. The function does not return
2047 * until any executing interrupts for this IRQ have completed.
2048 *
2049 * This function must not be called from interrupt context.
2050 */
2051void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2052{
2053 struct irq_desc *desc = irq_to_desc(irq);
2054
2055 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2056 return;
2057
2058 chip_bus_lock(desc);
2059 kfree(__free_percpu_irq(irq, dev_id));
2060 chip_bus_sync_unlock(desc);
2061}
aec2e2ad 2062EXPORT_SYMBOL_GPL(free_percpu_irq);
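
/*
 * The documented teardown order, with hypothetical names: disable the
 * interrupt on every CPU first, then free the handler.
 */
static void example_disable_on_cpu(void *info)
{
	disable_percpu_irq(example_irq);
}

static void example_remove(void)
{
	/* Runs the disable on each CPU and waits for completion. */
	on_each_cpu(example_disable_on_cpu, NULL, 1);
	free_percpu_irq(example_irq, &example_pcpu_state);
}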
31d9d9b6
MZ
2063
2064/**
2065 * setup_percpu_irq - setup a per-cpu interrupt
2066 * @irq: Interrupt line to setup
2067 * @act: irqaction for the interrupt
2068 *
 2069 * Used to statically set up per-cpu interrupts in the early boot process.
2070 */
2071int setup_percpu_irq(unsigned int irq, struct irqaction *act)
2072{
2073 struct irq_desc *desc = irq_to_desc(irq);
2074 int retval;
2075
2076 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2077 return -EINVAL;
be45beb2
JH
2078
2079 retval = irq_chip_pm_get(&desc->irq_data);
2080 if (retval < 0)
2081 return retval;
2082
31d9d9b6 2083 retval = __setup_irq(irq, desc, act);
31d9d9b6 2084
be45beb2
JH
2085 if (retval)
2086 irq_chip_pm_put(&desc->irq_data);
2087
31d9d9b6
MZ
2088 return retval;
2089}
2090
2091/**
c80081b9 2092 * __request_percpu_irq - allocate a percpu interrupt line
31d9d9b6
MZ
2093 * @irq: Interrupt line to allocate
2094 * @handler: Function to be called when the IRQ occurs.
c80081b9 2095 * @flags: Interrupt type flags (IRQF_TIMER only)
31d9d9b6
MZ
2096 * @devname: An ascii name for the claiming device
2097 * @dev_id: A percpu cookie passed back to the handler function
2098 *
a1b7febd
MR
2099 * This call allocates interrupt resources and enables the
2100 * interrupt on the local CPU. If the interrupt is supposed to be
2101 * enabled on other CPUs, it has to be done on each CPU using
2102 * enable_percpu_irq().
31d9d9b6
MZ
2103 *
2104 * Dev_id must be globally unique. It is a per-cpu variable, and
2105 * the handler gets called with the interrupted CPU's instance of
2106 * that variable.
2107 */
c80081b9
DL
2108int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2109 unsigned long flags, const char *devname,
2110 void __percpu *dev_id)
31d9d9b6
MZ
2111{
2112 struct irqaction *action;
2113 struct irq_desc *desc;
2114 int retval;
2115
2116 if (!dev_id)
2117 return -EINVAL;
2118
2119 desc = irq_to_desc(irq);
2120 if (!desc || !irq_settings_can_request(desc) ||
2121 !irq_settings_is_per_cpu_devid(desc))
2122 return -EINVAL;
2123
c80081b9
DL
2124 if (flags && flags != IRQF_TIMER)
2125 return -EINVAL;
2126
31d9d9b6
MZ
2127 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2128 if (!action)
2129 return -ENOMEM;
2130
2131 action->handler = handler;
c80081b9 2132 action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
31d9d9b6
MZ
2133 action->name = devname;
2134 action->percpu_dev_id = dev_id;
2135
be45beb2 2136 retval = irq_chip_pm_get(&desc->irq_data);
4396f46c
SL
2137 if (retval < 0) {
2138 kfree(action);
be45beb2 2139 return retval;
4396f46c 2140 }
be45beb2 2141
31d9d9b6 2142 retval = __setup_irq(irq, desc, action);
31d9d9b6 2143
be45beb2
JH
2144 if (retval) {
2145 irq_chip_pm_put(&desc->irq_data);
31d9d9b6 2146 kfree(action);
be45beb2 2147 }
31d9d9b6
MZ
2148
2149 return retval;
2150}
c80081b9 2151EXPORT_SYMBOL_GPL(__request_percpu_irq);
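
/*
 * In-tree users normally go through the request_percpu_irq() wrapper
 * (flags == 0). A hypothetical sketch with a per-cpu cookie; the handler
 * receives the interrupted CPU's instance of the variable.
 */
struct example_pcpu {
	unsigned long count;
};
static DEFINE_PER_CPU(struct example_pcpu, example_pcpu_state);

static irqreturn_t example_percpu_handler(int irq, void *dev_id)
{
	struct example_pcpu *st = dev_id;	/* this CPU's instance */

	st->count++;
	return IRQ_HANDLED;
}

static int example_request(unsigned int irq)
{
	/* The line still has to be switched on per CPU afterwards with
	 * enable_percpu_irq(). */
	return request_percpu_irq(irq, example_percpu_handler,
				  "example", &example_pcpu_state);
}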
1b7047ed
MZ
2152
2153/**
 2154 * irq_get_irqchip_state - returns the irqchip state of an interrupt.
2155 * @irq: Interrupt line that is forwarded to a VM
2156 * @which: One of IRQCHIP_STATE_* the caller wants to know about
 2157 * @state: a pointer to a boolean where the state is to be stored
2158 *
2159 * This call snapshots the internal irqchip state of an
2160 * interrupt, returning into @state the bit corresponding to
 2161 * state @which.
2162 *
2163 * This function should be called with preemption disabled if the
2164 * interrupt controller has per-cpu registers.
2165 */
2166int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2167 bool *state)
2168{
2169 struct irq_desc *desc;
2170 struct irq_data *data;
2171 struct irq_chip *chip;
2172 unsigned long flags;
2173 int err = -EINVAL;
2174
2175 desc = irq_get_desc_buslock(irq, &flags, 0);
2176 if (!desc)
2177 return err;
2178
2179 data = irq_desc_get_irq_data(desc);
2180
2181 do {
2182 chip = irq_data_get_irq_chip(data);
2183 if (chip->irq_get_irqchip_state)
2184 break;
2185#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2186 data = data->parent_data;
2187#else
2188 data = NULL;
2189#endif
2190 } while (data);
2191
2192 if (data)
2193 err = chip->irq_get_irqchip_state(data, which, state);
2194
2195 irq_put_desc_busunlock(desc, flags);
2196 return err;
2197}
1ee4fb3e 2198EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
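
/*
 * A hypothetical VM save path: snapshot whether the forwarded line is
 * pending at the irqchip level. struct example_vm is an assumption.
 */
static int example_save_line(struct example_vm *vm, unsigned int irq)
{
	bool pending;
	int err;

	err = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
	if (err)
		return err;

	vm->saved_pending = pending;
	return 0;
}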
1b7047ed
MZ
2199
2200/**
2201 * irq_set_irqchip_state - set the state of a forwarded interrupt.
2202 * @irq: Interrupt line that is forwarded to a VM
2203 * @which: State to be restored (one of IRQCHIP_STATE_*)
2204 * @val: Value corresponding to @which
2205 *
2206 * This call sets the internal irqchip state of an interrupt,
2207 * depending on the value of @which.
2208 *
2209 * This function should be called with preemption disabled if the
2210 * interrupt controller has per-cpu registers.
2211 */
2212int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2213 bool val)
2214{
2215 struct irq_desc *desc;
2216 struct irq_data *data;
2217 struct irq_chip *chip;
2218 unsigned long flags;
2219 int err = -EINVAL;
2220
2221 desc = irq_get_desc_buslock(irq, &flags, 0);
2222 if (!desc)
2223 return err;
2224
2225 data = irq_desc_get_irq_data(desc);
2226
2227 do {
2228 chip = irq_data_get_irq_chip(data);
2229 if (chip->irq_set_irqchip_state)
2230 break;
2231#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2232 data = data->parent_data;
2233#else
2234 data = NULL;
2235#endif
2236 } while (data);
2237
2238 if (data)
2239 err = chip->irq_set_irqchip_state(data, which, val);
2240
2241 irq_put_desc_busunlock(desc, flags);
2242 return err;
2243}
1ee4fb3e 2244EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
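
/*
 * The matching hypothetical restore path: write the saved pending bit
 * back into the irqchip before resuming the VM.
 */
static int example_restore_line(struct example_vm *vm, unsigned int irq)
{
	return irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING,
				     vm->saved_pending);
}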