/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <asm/processor.h>
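
/*
 * Per-CPU pending lists: work on raised_list is run from the next
 * hardirq (typically the self-interrupt raised via
 * arch_irq_work_raise()), while work on lazy_list waits for the next
 * timer tick unless the tick is stopped.
 */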
static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
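	/*
	 * Set PENDING and BUSY (together: IRQ_WORK_FLAGS) in one atomic
	 * step. If another CPU already owns PENDING, back off: the work
	 * is queued somewhere and its owner will run it.
	 */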
	unsigned long flags, oflags, nflags;

	/*
	 * Start from an optimistic guess at the flags, but only trust
	 * a value once cmpxchg() has confirmed it.
	 */
	flags = work->flags & ~IRQ_WORK_PENDING;
	for (;;) {
		nflags = flags | IRQ_WORK_FLAGS;
		oflags = cmpxchg(&work->flags, flags, nflags);
		if (oflags == flags)
			break;
		if (oflags & IRQ_WORK_PENDING)
			return false;
		flags = oflags;
		cpu_relax();
	}

	return true;
}
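
/*
 * Architectures that can fire a self-interrupt override this weak
 * stub. On the others, pending work is only picked up from the timer
 * tick, as the comment below notes.
 */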
void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

/*
 * Enqueue the irq_work @work unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();
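
	/*
	 * llist_add() returns true only when the list was previously
	 * empty, so just the first enqueuer raises the IPI; lazy work
	 * needs it only while the tick is stopped.
	 */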
	/* If the work is "lazy", handle it from next tick if any */
	if (work->flags & IRQ_WORK_LAZY) {
		if (llist_add(&work->llnode, &__get_cpu_var(lazy_list)) &&
		    tick_nohz_tick_stopped())
			arch_irq_work_raise();
	} else {
		if (llist_add(&work->llnode, &__get_cpu_var(raised_list)))
			arch_irq_work_raise();
	}

	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
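
/*
 * Example usage, as a minimal sketch: "wake_me" and its callback are
 * illustrative names, not part of this file. The callback runs from
 * hardirq context and must not sleep, while queueing is NMI-safe:
 *
 *	static void wake_me_func(struct irq_work *work)
 *	{
 *		pr_info("irq_work callback ran\n");
 *	}
 *
 *	static struct irq_work wake_me = { .func = wake_me_func };
 *
 *	irq_work_queue(&wake_me);
 *
 * Leaving the remaining fields zeroed is the free state;
 * init_irq_work() in <linux/irq_work.h> does the equivalent at
 * run time.
 */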
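
/*
 * Tell the nohz code whether irq_work still needs the tick on this
 * CPU: if either list is non-empty, pending work may still rely on
 * the tick to run, so the tick cannot be stopped yet.
 */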
bool irq_work_needs_cpu(void)
{
	struct llist_head *raised, *lazy;

	raised = &__get_cpu_var(raised_list);
	lazy = &__get_cpu_var(lazy_list);
	if (llist_empty(raised) && llist_empty(lazy))
		return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}

static void irq_work_run_list(struct llist_head *list)
{
	unsigned long flags;
	struct irq_work *work;
	struct llist_node *llnode;

	BUG_ON(!irqs_disabled());

	if (llist_empty(list))
		return;
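
	/*
	 * Atomically detach the whole list. New work can be queued
	 * concurrently (raising a fresh IPI) without disturbing this
	 * walk.
	 */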
	llnode = llist_del_all(list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit; after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their
		 * data while we are in the middle of the function.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}
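
/* Flush both lists: raised work first, then anything lazy. */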
static void __irq_work_run(void)
{
	irq_work_run_list(&__get_cpu_var(raised_list));
	irq_work_run_list(&__get_cpu_var(lazy_list));
}

/*
 * Run the irq_work entries on this cpu. Must be called from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	BUG_ON(!in_irq());
	__irq_work_run();
}
EXPORT_SYMBOL_GPL(irq_work_run);

/*
 * Synchronize against the irq_work @work: ensure the entry is not
 * currently in use.
 */
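/*
 * Note this is a busy wait on IRQ_WORK_BUSY; calling it with IRQs
 * disabled could spin forever if the work is pending on this very
 * CPU, hence the WARN_ON_ONCE() below.
 */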
void irq_work_sync(struct irq_work *work)
{
	WARN_ON_ONCE(irqs_disabled());

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);

#ifdef CONFIG_HOTPLUG_CPU
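
/*
 * CPU_DYING runs on the dying CPU itself (from stop_machine), so any
 * remaining work is flushed here before the CPU goes offline; compare
 * the WARN_ON_ONCE() in irq_work_needs_cpu().
 */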
static int irq_work_cpu_notify(struct notifier_block *self,
			       unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_DYING:
		/* Called from stop_machine */
		if (WARN_ON_ONCE(cpu != smp_processor_id()))
			break;
		__irq_work_run();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block cpu_notify;

static __init int irq_work_init_cpu_notifier(void)
{
	cpu_notify.notifier_call = irq_work_cpu_notify;
	cpu_notify.priority = 0;
	register_cpu_notifier(&cpu_notify);
	return 0;
}
device_initcall(irq_work_init_cpu_notifier);

#endif /* CONFIG_HOTPLUG_CPU */