kernel/irq/migration.c (mirror_ubuntu-artful-kernel.git, Ubuntu-4.13.0-45.50)
#include <linux/irq.h>
#include <linux/interrupt.h>

#include "internals.h"

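/*
 * Overview (a hedged editorial summary, not text from the original file):
 * the helpers below implement the deferred ("pending") irq affinity
 * update. When an affinity change cannot be applied immediately, it is
 * parked in desc->pending_mask and replayed later, either from the
 * interrupt ack path or from CPU hotplug. Assumption: this translation
 * unit is built only when CONFIG_GENERIC_PENDING_IRQ is enabled.
 */
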
/**
 * irq_fixup_move_pending - Cleanup irq move pending from a dying CPU
 * @desc:	Interrupt descriptor to clean up
 * @force_clear:	If set, clear the move pending bit unconditionally.
 *			If not set, clear it only when the dying CPU is the
 *			last one in the pending mask.
 *
 * Returns true if the pending bit was set and the pending mask contains an
 * online CPU other than the dying CPU.
 */
bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);

	if (!irqd_is_setaffinity_pending(data))
		return false;

	/*
	 * The outgoing CPU might be the last online target in a pending
	 * interrupt move. If that's the case clear the pending move bit.
	 */
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) >= nr_cpu_ids) {
		irqd_clr_move_pending(data);
		return false;
	}
	if (force_clear)
		irqd_clr_move_pending(data);
	return true;
}

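/*
 * Usage sketch: a minimal illustration of how the CPU hotplug migration
 * path might consume irq_fixup_move_pending(). The surrounding context
 * is an assumption based on kernel/irq/cpuhotplug.c of this era, not
 * part of this file:
 *
 *	// with desc->lock held, while evacuating a dying CPU
 *	const struct cpumask *affinity;
 *
 *	if (irq_fixup_move_pending(desc, true))
 *		affinity = desc->pending_mask;
 *	else
 *		affinity = irq_data_get_affinity_mask(irq_desc_get_irq_data(desc));
 */
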
/*
 * irq_move_masked_irq - Carry out a pending affinity change for an
 * already masked interrupt. Must be called with desc->lock held.
 */
void irq_move_masked_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	struct irq_chip *chip = desc->irq_data.chip;

	if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
		return;

	irqd_clr_move_pending(&desc->irq_data);

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (irqd_is_per_cpu(&desc->irq_data)) {
		WARN_ON(1);
		return;
	}

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there was a valid mask to work with, do the disable,
	 * re-program, enable sequence.
	 * This is *not* particularly important for level triggered
	 * interrupts, but in an edge trigger case, we might be setting
	 * the rte when an active trigger is coming in. This could
	 * cause some ioapics to malfunction.
	 * Being paranoid, I guess!
	 *
	 * For correct operation this depends on the caller
	 * masking the irqs.
	 */
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
		irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);

	cpumask_clear(desc->pending_mask);
}
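
/*
 * Caller sketch (illustrative, under the assumption that the chip's
 * handler has already masked the line): irq_move_masked_irq() requires
 * desc->lock to be held (see the assert above) and the line to be
 * masked, e.g.:
 *
 *	raw_spin_lock(&desc->lock);
 *	mask_irq(desc);
 *	irq_move_masked_irq(irq_desc_get_irq_data(desc));
 *	unmask_irq(desc);
 *	raw_spin_unlock(&desc->lock);
 */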

/*
 * irq_move_irq - Carry out a pending affinity change, masking the
 * interrupt around the move if it is not already masked.
 */
void irq_move_irq(struct irq_data *idata)
{
	bool masked;

	/*
	 * Get top level irq_data when CONFIG_IRQ_DOMAIN_HIERARCHY is enabled,
	 * and it should be optimized away when CONFIG_IRQ_DOMAIN_HIERARCHY is
	 * disabled. So we avoid an "#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY" here.
	 */
	idata = irq_desc_get_irq_data(irq_data_to_desc(idata));

	if (likely(!irqd_is_setaffinity_pending(idata)))
		return;

	if (unlikely(irqd_irq_disabled(idata)))
		return;

	/*
	 * Be careful vs. already masked interrupts. If this is a
	 * threaded interrupt with ONESHOT set, we can end up with an
	 * interrupt storm.
	 */
	masked = irqd_irq_masked(idata);
	if (!masked)
		idata->chip->irq_mask(idata);
	irq_move_masked_irq(idata);
	if (!masked)
		idata->chip->irq_unmask(idata);
}
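
/*
 * Usage sketch: on x86, the deferred move is kicked from the edge ack
 * path while the next interrupt on the line is being handled. The
 * excerpt below follows the shape of arch/x86/kernel/apic/vector.c in
 * this kernel generation and should be read as illustrative, not
 * authoritative:
 *
 *	static void apic_ack_edge(struct irq_data *data)
 *	{
 *		irq_complete_move(irqd_cfg(data));
 *		irq_move_irq(data);
 *		ack_APIC_irq();
 *	}
 */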