/*
 * Generic cpu hotunplug interrupt migration code copied from the
 * arch/arm implementation
 *
 * Copyright (C) Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>

#include "internals.h"

/* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK this looks at the general affinity mask */
static inline bool irq_needs_fixup(struct irq_data *d)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
	unsigned int cpu = smp_processor_id();

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	/*
	 * The cpumask_empty() check is a workaround for interrupt chips
	 * which do not implement effective affinity, but the architecture
	 * has enabled the config switch. Use the general affinity mask
	 * instead.
	 */
	if (cpumask_empty(m))
		m = irq_data_get_affinity_mask(d);

	/*
	 * Sanity check. If the mask is not empty when excluding the outgoing
	 * CPU then it must contain at least one online CPU. The outgoing CPU
	 * has been removed from the online mask already.
	 */
	if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
	    cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * If this happens then there was a missed IRQ fixup at some
		 * point. Warn about it and enforce the fixup.
		 */
		pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n",
			cpumask_pr_args(m), d->irq, cpu);
		return true;
	}
#endif
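	/* A fixup is needed if the outgoing CPU is part of the (effective) mask */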
	return cpumask_test_cpu(cpu, m);
}

static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	bool maskchip = !irq_can_move_pcntxt(d) && !irqd_irq_masked(d);
	const struct cpumask *affinity;
	bool brokeaff = false;
	int err;

	/*
	 * The IRQ chip might already be torn down, but the irq descriptor
	 * is still in the radix tree. Also, if the chip has no affinity
	 * setter, nothing can be done here.
	 */
	if (!chip || !chip->irq_set_affinity) {
		pr_debug("IRQ %u: Unable to migrate away\n", d->irq);
		return false;
	}

	/*
	 * No move is required if:
	 * - the interrupt is per cpu,
	 * - the interrupt is not started, or
	 * - the affinity mask does not include this CPU.
	 *
	 * Note: Do not check desc->action as this might be a chained
	 * interrupt.
	 */
	if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) {
		/*
		 * If an irq move is pending, abort it if the dying CPU is
		 * the sole target.
		 */
		irq_fixup_move_pending(desc, false);
		return false;
	}

	/*
	 * Complete a possibly pending irq move cleanup. If this interrupt
	 * was moved in hard irq context, then the vectors need to be
	 * cleaned up. It can't wait until this interrupt actually happens
	 * and this CPU was involved.
	 */
	irq_force_complete_move(desc);

	/*
	 * If there is a setaffinity pending, then try to reuse the pending
	 * mask, so the last change of the affinity does not get lost. If
	 * there is no move pending or the pending mask does not contain
	 * any online CPU, use the current affinity mask.
	 */
	if (irq_fixup_move_pending(desc, true))
		affinity = irq_desc_get_pending_mask(desc);
	else
		affinity = irq_data_get_affinity_mask(d);

	/* Mask the chip for interrupts which cannot move in process context */
	if (maskchip && chip->irq_mask)
		chip->irq_mask(d);

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * If the interrupt is managed, then shut it down and leave
		 * the affinity untouched.
		 */
		if (irqd_affinity_is_managed(d)) {
			irqd_set_managed_shutdown(d);
			irq_shutdown(desc);
			return false;
		}
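		/* Fall back to the online mask; note that affinity is broken */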
		affinity = cpu_online_mask;
		brokeaff = true;
	}

	/*
	 * Do not set the force argument of irq_do_set_affinity() as this
	 * disables the masking of offline CPUs from the supplied affinity
	 * mask and therefore might keep/reassign the irq to the outgoing
	 * CPU.
	 */
	err = irq_do_set_affinity(d, affinity, false);
	if (err) {
		pr_warn_ratelimited("IRQ %u: set affinity failed (%d)\n",
				    d->irq, err);
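		/* The affinity was left untouched, so it was not broken */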
		brokeaff = false;
	}

	if (maskchip && chip->irq_unmask)
		chip->irq_unmask(d);

	return brokeaff;
}

/**
 * irq_migrate_all_off_this_cpu - Migrate irqs away from an offline cpu
 *
 * The current CPU has been marked offline. Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void irq_migrate_all_off_this_cpu(void)
{
	struct irq_desc *desc;
	unsigned int irq;

	for_each_active_irq(irq) {
		bool affinity_broken;

		desc = irq_to_desc(irq);
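		/*
		 * This runs on the dying CPU in the teardown path with
		 * interrupts disabled, so a plain raw_spin_lock() is
		 * sufficient for desc->lock here.
		 */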
		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken) {
			pr_warn_ratelimited("IRQ %u: no longer affine to CPU%u\n",
					    irq, smp_processor_id());
		}
	}
}

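/*
 * Restore the affinity of a managed interrupt for an upcoming CPU: start
 * up interrupts which were shut down in managed-shutdown state during CPU
 * unplug and re-evaluate the affinity of multi-target interrupts.
 */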
static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = irq_data_get_affinity_mask(data);

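	/*
	 * Only managed interrupts with an installed action, a valid chip
	 * and the upcoming CPU in their affinity mask are of interest.
	 */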
	if (!irqd_affinity_is_managed(data) || !desc->action ||
	    !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
		return;

	if (irqd_is_managed_and_shutdown(data)) {
		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		return;
	}

	/*
	 * If the interrupt can only be directed to a single target
	 * CPU then it is already assigned to a CPU in the affinity
	 * mask. No point in trying to move it around.
	 */
	if (!irqd_is_single_target(data))
		irq_set_affinity_locked(data, affinity, false);
}

/**
 * irq_affinity_online_cpu - Restore affinity for managed interrupts
 * @cpu: Upcoming CPU for which interrupts should be restored
 */
int irq_affinity_online_cpu(unsigned int cpu)
{
	struct irq_desc *desc;
	unsigned int irq;

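	/* Serialize against concurrent allocation/free of irq descriptors */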
	irq_lock_sparse();
	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		raw_spin_lock_irq(&desc->lock);
		irq_restore_affinity_of_irq(desc, cpu);
		raw_spin_unlock_irq(&desc->lock);
	}
	irq_unlock_sparse();

	return 0;
}
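
/*
 * Note (illustrative, based on the arch/arm origin mentioned above, not
 * part of the original file): irq_migrate_all_off_this_cpu() is expected
 * to be called from an architecture's __cpu_disable() path on the dying
 * CPU, while irq_affinity_online_cpu() is meant to run as a CPU hotplug
 * online callback so that managed interrupts are restored when the CPU
 * comes back online.
 */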