/*
 * Generic cpu hotunplug interrupt migration code copied from the
 * arch/arm implementation
 *
 * Copyright (C) Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>

#include "internals.h"

/* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK this looks at general affinity mask */
static inline bool irq_needs_fixup(struct irq_data *d)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(d);

	return cpumask_test_cpu(smp_processor_id(), m);
}
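
/*
 * migrate_one_irq() runs on the outgoing CPU with desc->lock held and
 * returns true when the interrupt had to be forced onto cpu_online_mask,
 * i.e. its configured affinity was broken; the caller uses that to emit
 * the ratelimited "no longer affine" warning below.
 */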
static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	bool maskchip = !irq_can_move_pcntxt(d) && !irqd_irq_masked(d);
	const struct cpumask *affinity;
	bool brokeaff = false;
	int err;

	/*
	 * IRQ chip might be already torn down, but the irq descriptor is
	 * still in the radix tree. Also if the chip has no affinity setter,
	 * nothing can be done here.
	 */
	if (!chip || !chip->irq_set_affinity) {
		pr_debug("IRQ %u: Unable to migrate away\n", d->irq);
		return false;
	}

	/*
	 * No move required, if:
	 *  - Interrupt is per cpu
	 *  - Interrupt is not started
	 *  - Affinity mask does not include this CPU.
	 *
	 * Note: Do not check desc->action as this might be a chained
	 * interrupt.
	 */
	if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) {
		/*
		 * If an irq move is pending, abort it if the dying CPU is
		 * the sole target.
		 */
		irq_fixup_move_pending(desc, false);
		return false;
	}

	/*
	 * Complete an eventually pending irq move cleanup. If this
	 * interrupt was moved in hard irq context, then the vectors need
	 * to be cleaned up. It can't wait until this interrupt actually
	 * happens and this CPU was involved.
	 */
	irq_force_complete_move(desc);

	/*
	 * If there is a setaffinity pending, then try to reuse the pending
	 * mask, so the last change of the affinity does not get lost. If
	 * there is no move pending or the pending mask does not contain
	 * any online CPU, use the current affinity mask.
	 */
	if (irq_fixup_move_pending(desc, true))
		affinity = irq_desc_get_pending_mask(desc);
	else
		affinity = irq_data_get_affinity_mask(d);

	/* Mask the chip for interrupts which cannot move in process context */
	if (maskchip && chip->irq_mask)
		chip->irq_mask(d);
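
	/*
	 * Masking matters for chips which cannot change affinity from
	 * process context (irq_can_move_pcntxt() is false): it keeps the
	 * interrupt from firing while the affinity change below is in
	 * flight. The chip is unmasked again after irq_do_set_affinity().
	 */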

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * If the interrupt is managed, then shut it down and leave
		 * the affinity untouched.
		 */
		if (irqd_affinity_is_managed(d)) {
			irqd_set_managed_shutdown(d);
			irq_shutdown_and_deactivate(desc);
			return false;
		}
		affinity = cpu_online_mask;
		brokeaff = true;
	}

	/*
	 * Do not set the force argument of irq_do_set_affinity() as this
	 * disables the masking of offline CPUs from the supplied affinity
	 * mask and therefore might keep/reassign the irq to the outgoing
	 * CPU.
	 */
	err = irq_do_set_affinity(d, affinity, false);
	if (err) {
		pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
				    d->irq, err);
		brokeaff = false;
	}

	if (maskchip && chip->irq_unmask)
		chip->irq_unmask(d);

	return brokeaff;
}

/**
 * irq_migrate_all_off_this_cpu - Migrate irqs away from offline cpu
 *
 * The current CPU has been marked offline. Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void irq_migrate_all_off_this_cpu(void)
{
	struct irq_desc *desc;
	unsigned int irq;

	for_each_active_irq(irq) {
		bool affinity_broken;

		desc = irq_to_desc(irq);
		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken) {
			pr_warn_ratelimited("IRQ %u: no longer affine to CPU%u\n",
					    irq, smp_processor_id());
		}
	}
}
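
/*
 * Note: irq_migrate_all_off_this_cpu() is invoked on the dying CPU with
 * interrupts disabled, typically from an architecture's __cpu_disable()
 * path (e.g. on arm/arm64), which is why plain raw_spin_lock() without
 * the irqsave variant is sufficient above.
 */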

static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = irq_data_get_affinity_mask(data);

	if (!irqd_affinity_is_managed(data) || !desc->action ||
	    !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
		return;

	if (irqd_is_managed_and_shutdown(data)) {
		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		return;
	}
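
	/*
	 * IRQ_RESEND and IRQ_START_COND above are boolean aliases defined
	 * in kernel/irq/internals.h: replay an interrupt that was pending
	 * while the line was shut down, and start it up conditionally
	 * rather than forcing the startup.
	 */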

	/*
	 * If the interrupt can only be directed to a single target
	 * CPU then it is already assigned to a CPU in the affinity
	 * mask. No point in trying to move it around.
	 */
	if (!irqd_is_single_target(data))
		irq_set_affinity_locked(data, affinity, false);
}

/**
 * irq_affinity_online_cpu - Restore affinity for managed interrupts
 * @cpu: Upcoming CPU for which interrupts should be restored
 */
int irq_affinity_online_cpu(unsigned int cpu)
{
	struct irq_desc *desc;
	unsigned int irq;

	irq_lock_sparse();
	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		raw_spin_lock_irq(&desc->lock);
		irq_restore_affinity_of_irq(desc, cpu);
		raw_spin_unlock_irq(&desc->lock);
	}
	irq_unlock_sparse();

	return 0;
}
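
/*
 * irq_affinity_online_cpu() is registered in kernel/cpu.c as the startup
 * callback of the CPUHP_AP_IRQ_AFFINITY_ONLINE hotplug state, so it runs
 * on each CPU as it comes online and restarts managed interrupts that
 * were shut down when the last CPU in their affinity mask went offline.
 */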