/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

#include "internals.h"

#ifdef CONFIG_SMP

cpumask_t irq_default_affinity = CPU_MASK_ALL;

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned int status;

        if (!desc)
                return;

        do {
                unsigned long flags;

                /*
                 * Wait until we're out of the critical section. This might
                 * give the wrong answer due to the lack of memory barriers.
                 */
                while (desc->status & IRQ_INPROGRESS)
                        cpu_relax();

                /* Ok, that indicated we're done: double-check carefully. */
                spin_lock_irqsave(&desc->lock, flags);
                status = desc->status;
                spin_unlock_irqrestore(&desc->lock, flags);

                /* Oops, that failed? */
        } while (status & IRQ_INPROGRESS);
}
EXPORT_SYMBOL(synchronize_irq);
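
/*
 * Example (illustrative sketch only, not part of this file): a typical
 * teardown sequence in a hypothetical driver.  The device is told to
 * stop raising interrupts first, then synchronize_irq() guarantees that
 * no handler is still running on another CPU before the driver's data
 * is torn down.  "my_dev" and its helpers are made-up names:
 *
 *	my_dev_mask_interrupts(dev);
 *	synchronize_irq(dev->irq);
 *	kfree(dev->dma_buffer);
 *
 * Per the warning above, this must not be done while holding a lock
 * that the IRQ handler itself may take.
 */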

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq: Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip ||
            !desc->chip->set_affinity)
                return 0;

        return 1;
}

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq: Interrupt to set affinity
 * @cpumask: cpumask
 *
 */
int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc->chip->set_affinity)
                return -EINVAL;

        spin_lock_irqsave(&desc->lock, flags);

#ifdef CONFIG_GENERIC_PENDING_IRQ
        if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
                desc->affinity = cpumask;
                desc->chip->set_affinity(irq, cpumask);
        } else {
                desc->status |= IRQ_MOVE_PENDING;
                desc->pending_mask = cpumask;
        }
#else
        desc->affinity = cpumask;
        desc->chip->set_affinity(irq, cpumask);
#endif
        desc->status |= IRQ_AFFINITY_SET;
        spin_unlock_irqrestore(&desc->lock, flags);
        return 0;
}
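
/*
 * Example (sketch, using the cpumask_t-by-value API of this kernel
 * version): pin an interrupt to CPU 0, provided the chip supports it:
 *
 *	if (irq_can_set_affinity(irq))
 *		irq_set_affinity(irq, cpumask_of_cpu(0));
 *
 * With CONFIG_GENERIC_PENDING_IRQ the move may be deferred until the
 * next interrupt arrives rather than taking effect immediately.
 */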

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
{
        cpumask_t mask;

        if (!irq_can_set_affinity(irq))
                return 0;

        cpus_and(mask, cpu_online_map, irq_default_affinity);

        /*
         * Preserve a userspace affinity setup, but make sure that
         * one of the targets is online.
         */
        if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
                if (cpus_intersects(desc->affinity, cpu_online_map))
                        mask = desc->affinity;
                else
                        desc->status &= ~IRQ_AFFINITY_SET;
        }

        desc->affinity = mask;
        desc->chip->set_affinity(irq, mask);

        return 0;
}
#else
static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d)
{
        return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&desc->lock, flags);
        ret = do_irq_select_affinity(irq, desc);
        spin_unlock_irqrestore(&desc->lock, flags);

        return ret;
}

#else
static inline int do_irq_select_affinity(int irq, struct irq_desc *desc)
{
        return 0;
}
#endif

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc)
                return;

        spin_lock_irqsave(&desc->lock, flags);
        if (!desc->depth++) {
                desc->status |= IRQ_DISABLED;
                desc->chip->disable(irq);
        }
        spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc)
                return;

        disable_irq_nosync(irq);
        if (desc->action)
                synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

static void __enable_irq(struct irq_desc *desc, unsigned int irq)
{
        switch (desc->depth) {
        case 0:
                WARN(1, "Unbalanced enable for IRQ %d\n", irq);
                break;
        case 1: {
                unsigned int status = desc->status & ~IRQ_DISABLED;

                /* Prevent probing on this irq: */
                desc->status = status | IRQ_NOPROBE;
                check_irq_resend(desc, irq);
                /* fall-through */
        }
        default:
                desc->depth--;
        }
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc)
                return;

        spin_lock_irqsave(&desc->lock, flags);
        __enable_irq(desc, irq);
        spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(enable_irq);
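
/*
 * Example (sketch with hypothetical names): because disables nest, a
 * section that must run with the line quiesced can simply bracket
 * itself, regardless of what its callers have done:
 *
 *	disable_irq(dev->irq);		(depth 0 -> 1, line masked)
 *	disable_irq(dev->irq);		(depth 1 -> 2, no hardware change)
 *	enable_irq(dev->irq);		(depth 2 -> 1, still masked)
 *	enable_irq(dev->irq);		(depth 1 -> 0, line unmasked)
 *
 * One extra enable_irq() here would trigger the "Unbalanced enable"
 * warning in __enable_irq().
 */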

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int ret = -ENXIO;

        if (desc->chip->set_wake)
                ret = desc->chip->set_wake(irq, on);

        return ret;
}

/**
 * set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
int set_irq_wake(unsigned int irq, unsigned int on)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
        int ret = 0;

        /* wakeup-capable irqs can be shared between drivers that
         * don't need to have the same sleep mode behaviors.
         */
        spin_lock_irqsave(&desc->lock, flags);
        if (on) {
                if (desc->wake_depth++ == 0) {
                        ret = set_irq_wake_real(irq, on);
                        if (ret)
                                desc->wake_depth = 0;
                        else
                                desc->status |= IRQ_WAKEUP;
                }
        } else {
                if (desc->wake_depth == 0) {
                        WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
                } else if (--desc->wake_depth == 0) {
                        ret = set_irq_wake_real(irq, on);
                        if (ret)
                                desc->wake_depth = 1;
                        else
                                desc->status &= ~IRQ_WAKEUP;
                }
        }

        spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
}
EXPORT_SYMBOL(set_irq_wake);
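
/*
 * Example (sketch, hypothetical driver): a device that should wake the
 * system marks its interrupt as a wakeup source in its suspend hook and
 * drops the reference again on resume, keeping the calls balanced:
 *
 *	static int my_dev_suspend(struct platform_device *pdev,
 *				  pm_message_t state)
 *	{
 *		struct my_dev *dev = platform_get_drvdata(pdev);
 *
 *		if (device_may_wakeup(&pdev->dev))
 *			set_irq_wake(dev->irq, 1);
 *		return 0;
 *	}
 *
 * The matching resume hook calls set_irq_wake(dev->irq, 0) under the
 * same condition.  enable_irq_wake()/disable_irq_wake() are the usual
 * wrappers around these calls.
 */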

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action;

        if (!desc)
                return 0;

        if (desc->status & IRQ_NOREQUEST)
                return 0;

        action = desc->action;
        if (action)
                if (irqflags & action->flags & IRQF_SHARED)
                        action = NULL;

        return !action;
}

void compat_irq_chip_set_default_handler(struct irq_desc *desc)
{
        /*
         * If the architecture still has not overridden
         * the flow handler then zap the default. This
         * should catch incorrect flow-type setting.
         */
        if (desc->handle_irq == &handle_bad_irq)
                desc->handle_irq = NULL;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
                unsigned long flags)
{
        int ret;
        struct irq_chip *chip = desc->chip;

        if (!chip || !chip->set_type) {
                /*
                 * IRQF_TRIGGER_* but the PIC does not support multiple
                 * flow-types?
                 */
                pr_debug("No set_type function for IRQ %d (%s)\n", irq,
                                chip ? (chip->name ? : "unknown") : "unknown");
                return 0;
        }

        ret = chip->set_type(irq, flags & IRQF_TRIGGER_MASK);

        if (ret)
                pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
                                (int)(flags & IRQF_TRIGGER_MASK),
                                irq, chip->set_type);
        else {
                /* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
                desc->status &= ~IRQ_TYPE_SENSE_MASK;
                desc->status |= flags & IRQ_TYPE_SENSE_MASK;
        }

        return ret;
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
        struct irqaction *old, **p;
        const char *old_name = NULL;
        unsigned long flags;
        int shared = 0;
        int ret;

        if (!desc)
                return -EINVAL;

        if (desc->chip == &no_irq_chip)
                return -ENOSYS;
        /*
         * Some drivers like serial.c use request_irq() heavily,
         * so we have to be careful not to interfere with a
         * running system.
         */
        if (new->flags & IRQF_SAMPLE_RANDOM) {
                /*
                 * This function might sleep, so we want to call it first,
                 * outside of the atomic block.
                 * Yes, this might clear the entropy pool if the wrong
                 * driver is attempted to be loaded without actually
                 * installing a new handler, but that is not really a
                 * problem, since only the sysadmin is able to do this.
                 */
                rand_initialize_irq(irq);
        }

        /*
         * The following block of code has to be executed atomically
         */
        spin_lock_irqsave(&desc->lock, flags);
        p = &desc->action;
        old = *p;
        if (old) {
                /*
                 * Can't share interrupts unless both agree to and are
                 * the same type (level, edge, polarity). So both flag
                 * fields must have IRQF_SHARED set and the bits which
                 * set the trigger type must match.
                 */
                if (!((old->flags & new->flags) & IRQF_SHARED) ||
                    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
                        old_name = old->name;
                        goto mismatch;
                }

#if defined(CONFIG_IRQ_PER_CPU)
                /* All handlers must agree on per-cpuness */
                if ((old->flags & IRQF_PERCPU) !=
                    (new->flags & IRQF_PERCPU))
                        goto mismatch;
#endif

                /* add new interrupt at end of irq queue */
                do {
                        p = &old->next;
                        old = *p;
                } while (old);
                shared = 1;
        }

        if (!shared) {
                irq_chip_set_defaults(desc->chip);

                /* Setup the type (level, edge polarity) if configured: */
                if (new->flags & IRQF_TRIGGER_MASK) {
                        ret = __irq_set_trigger(desc, irq, new->flags);

                        if (ret) {
                                spin_unlock_irqrestore(&desc->lock, flags);
                                return ret;
                        }
                } else
                        compat_irq_chip_set_default_handler(desc);
#if defined(CONFIG_IRQ_PER_CPU)
                if (new->flags & IRQF_PERCPU)
                        desc->status |= IRQ_PER_CPU;
#endif

                desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING |
                                  IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);

                if (!(desc->status & IRQ_NOAUTOEN)) {
                        desc->depth = 0;
                        desc->status &= ~IRQ_DISABLED;
                        desc->chip->startup(irq);
                } else
                        /* Undo nested disables: */
                        desc->depth = 1;

                /* Exclude IRQ from balancing if requested */
                if (new->flags & IRQF_NOBALANCING)
                        desc->status |= IRQ_NO_BALANCING;

                /* Set default affinity mask once everything is setup */
                do_irq_select_affinity(irq, desc);

        } else if ((new->flags & IRQF_TRIGGER_MASK)
                        && (new->flags & IRQF_TRIGGER_MASK)
                        != (desc->status & IRQ_TYPE_SENSE_MASK)) {
                /* hope the handler works with the actual trigger mode... */
                pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
                                irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
                                (int)(new->flags & IRQF_TRIGGER_MASK));
        }

        *p = new;

        /* Reset broken irq detection when installing new handler */
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;

        /*
         * Check whether we disabled the irq via the spurious handler
         * before. Reenable it and give it another chance.
         */
        if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
                desc->status &= ~IRQ_SPURIOUS_DISABLED;
                __enable_irq(desc, irq);
        }

        spin_unlock_irqrestore(&desc->lock, flags);

        new->irq = irq;
        register_irq_proc(irq, desc);
        new->dir = NULL;
        register_handler_proc(irq, new);

        return 0;

mismatch:
#ifdef CONFIG_DEBUG_SHIRQ
        if (!(new->flags & IRQF_PROBE_SHARED)) {
                printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
                if (old_name)
                        printk(KERN_ERR "current handler: %s\n", old_name);
                dump_stack();
        }
#endif
        spin_unlock_irqrestore(&desc->lock, flags);
        return -EBUSY;
}

/**
 * setup_irq - setup an interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return __setup_irq(irq, desc, act);
}
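
/*
 * Example (sketch): architecture code typically uses a statically
 * allocated irqaction for interrupts that must exist before the
 * allocators are up, e.g. the timer tick.  The names below, including
 * TIMER_IRQ and timer_interrupt, are illustrative only:
 *
 *	static struct irqaction timer_irqaction = {
 *		.handler = timer_interrupt,
 *		.flags	 = IRQF_DISABLED | IRQF_TIMER,
 *		.name	 = "timer",
 *	};
 *
 *	setup_irq(TIMER_IRQ, &timer_irqaction);
 */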

/**
 * free_irq - free an interrupt
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction **p;
        unsigned long flags;

        WARN_ON(in_interrupt());

        if (!desc)
                return;

        spin_lock_irqsave(&desc->lock, flags);
        p = &desc->action;
        for (;;) {
                struct irqaction *action = *p;

                if (action) {
                        struct irqaction **pp = p;

                        p = &action->next;
                        if (action->dev_id != dev_id)
                                continue;

                        /* Found it - now remove it from the list of entries */
                        *pp = action->next;

                        /* Currently used only by UML, might disappear one day. */
#ifdef CONFIG_IRQ_RELEASE_METHOD
                        if (desc->chip->release)
                                desc->chip->release(irq, dev_id);
#endif

                        if (!desc->action) {
                                desc->status |= IRQ_DISABLED;
                                if (desc->chip->shutdown)
                                        desc->chip->shutdown(irq);
                                else
                                        desc->chip->disable(irq);
                        }
                        spin_unlock_irqrestore(&desc->lock, flags);
                        unregister_handler_proc(irq, action);

                        /* Make sure it's not being used on another CPU */
                        synchronize_irq(irq);
#ifdef CONFIG_DEBUG_SHIRQ
                        /*
                         * It's a shared IRQ -- the driver ought to be
                         * prepared for it to happen even now that it is
                         * being freed, so let's make sure.  We do this
                         * after actually deregistering it, to make sure
                         * that a 'real' IRQ doesn't run in parallel with
                         * our fake one.
                         */
                        if (action->flags & IRQF_SHARED) {
                                local_irq_save(flags);
                                action->handler(irq, dev_id);
                                local_irq_restore(flags);
                        }
#endif
                        kfree(action);
                        return;
                }
                printk(KERN_ERR "Trying to free already-free IRQ %d\n", irq);
#ifdef CONFIG_DEBUG_SHIRQ
                dump_stack();
#endif
                spin_unlock_irqrestore(&desc->lock, flags);
                return;
        }
}
EXPORT_SYMBOL(free_irq);

/**
 * request_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_DISABLED		Disable local interrupts while processing
 *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *
 */
int request_irq(unsigned int irq, irq_handler_t handler,
                unsigned long irqflags, const char *devname, void *dev_id)
{
        struct irqaction *action;
        struct irq_desc *desc;
        int retval;

#ifdef CONFIG_LOCKDEP
        /*
         * Lockdep wants atomic interrupt handlers:
         */
        irqflags |= IRQF_DISABLED;
#endif
        /*
         * Sanity-check: shared interrupts must pass in a real dev-ID,
         * otherwise we'll have trouble later trying to figure out
         * which interrupt is which (messes up the interrupt freeing
         * logic etc).
         */
        if ((irqflags & IRQF_SHARED) && !dev_id)
                return -EINVAL;

        desc = irq_to_desc(irq);
        if (!desc)
                return -EINVAL;

        if (desc->status & IRQ_NOREQUEST)
                return -EINVAL;
        if (!handler)
                return -EINVAL;

        action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
        if (!action)
                return -ENOMEM;

        action->handler = handler;
        action->flags = irqflags;
        cpus_clear(action->mask);
        action->name = devname;
        action->next = NULL;
        action->dev_id = dev_id;

        retval = __setup_irq(irq, desc, action);
        if (retval)
                kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ
        if (irqflags & IRQF_SHARED) {
                /*
                 * It's a shared IRQ -- the driver ought to be prepared for it
                 * to happen immediately, so let's make sure....
                 * We disable the irq to make sure that a 'real' IRQ doesn't
                 * run in parallel with our fake.
                 */
                unsigned long flags;

                disable_irq(irq);
                local_irq_save(flags);

                handler(irq, dev_id);

                local_irq_restore(flags);
                enable_irq(irq);
        }
#endif
        return retval;
}
EXPORT_SYMBOL(request_irq);
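
/*
 * Example (sketch, hypothetical device): requesting a shared interrupt
 * and releasing it again.  A shared handler must check whether its own
 * device actually raised the interrupt and return IRQ_NONE if not:
 *
 *	static irqreturn_t my_dev_interrupt(int irq, void *dev_id)
 *	{
 *		struct my_dev *dev = dev_id;
 *
 *		if (!my_dev_irq_pending(dev))
 *			return IRQ_NONE;
 *		my_dev_ack_irq(dev);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_irq(dev->irq, my_dev_interrupt, IRQF_SHARED,
 *			  "my_dev", dev);
 *	...
 *	free_irq(dev->irq, dev);
 *
 * The non-NULL dev_id passed to request_irq() is the same cookie that
 * must later be passed to free_irq(), which is what lets free_irq()
 * pick the right action off a shared line.
 */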