/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */
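
/*
 * Usage sketch (illustrative only, not part of this file): a hypothetical
 * consumer embeds a struct irq_work, queues it from NMI or hardirq context
 * and waits for it to finish before the object goes away.  The demo_* names
 * below are made up for illustration.
 *
 *	static void demo_func(struct irq_work *work)
 *	{
 *		// Runs from hardirq context (or the next tick for lazy work).
 *	}
 *
 *	static DEFINE_IRQ_WORK(demo_work, demo_func);
 *
 *	// From NMI/hardirq context; returns false if already pending:
 *	irq_work_queue(&demo_work);
 *
 *	// Before freeing/unloading, from a context with IRQs enabled:
 *	irq_work_sync(&demo_work);
 */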

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <asm/processor.h>


static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, oflags, nflags;

	/*
	 * Start with our best wish as a premise but only trust any
	 * flag value after cmpxchg() result.
	 */
	flags = work->flags & ~IRQ_WORK_PENDING;
	for (;;) {
		nflags = flags | IRQ_WORK_FLAGS;
		oflags = cmpxchg(&work->flags, flags, nflags);
		if (oflags == flags)
			break;
		if (oflags & IRQ_WORK_PENDING)
			return false;
		flags = oflags;
		cpu_relax();
	}

	return true;
}
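
/*
 * Illustrative flag lifecycle (a sketch, assuming the usual definition
 * IRQ_WORK_FLAGS == IRQ_WORK_PENDING | IRQ_WORK_BUSY):
 *
 *	0		-> claim succeeds, work becomes PENDING | BUSY
 *	PENDING | BUSY	-> claim fails, the work is already queued
 *	BUSY		-> the callback is running; claim succeeds and the
 *			   work is re-queued
 *
 * The runner clears PENDING just before invoking the callback and clears
 * BUSY afterwards, unless someone re-claimed the work in the meantime
 * (see irq_work_run_list()).
 */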

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

/* Enqueue on current CPU, work must already be claimed and preempt disabled */
static void __irq_work_queue_local(struct irq_work *work)
{
	/* If the work is "lazy", handle it from next tick if any */
	if (work->flags & IRQ_WORK_LAZY) {
		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
		    tick_nohz_tick_stopped())
			arch_irq_work_raise();
	} else {
		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
			arch_irq_work_raise();
	}
}

/* Enqueue the irq_work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();
	__irq_work_queue_local(work);
	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);

/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
#ifndef CONFIG_SMP
	return irq_work_queue(work);

#else /* CONFIG_SMP: */
	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(cpu));

	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	preempt_disable();
	if (cpu != smp_processor_id()) {
		/* Arch remote IPI send/receive backends aren't NMI safe */
		WARN_ON_ONCE(in_nmi());
		if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
			arch_send_call_function_single_ipi(cpu);
	} else {
		__irq_work_queue_local(work);
	}
	preempt_enable();

	return true;
#endif /* CONFIG_SMP */
}
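
/*
 * Example (hypothetical caller): from hardirq context on one CPU, kick a
 * callback on another CPU.  A false return only means the work is still
 * pending from an earlier request and will run anyway.  "demo_work" and
 * "target_cpu" are placeholders for illustration.
 *
 *	if (!irq_work_queue_on(&demo_work, target_cpu))
 *		; // Already claimed/queued somewhere, nothing to do.
 */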


bool irq_work_needs_cpu(void)
{
	struct llist_head *raised, *lazy;

	raised = this_cpu_ptr(&raised_list);
	lazy = this_cpu_ptr(&lazy_list);

	if (llist_empty(raised) || arch_irq_work_has_interrupt())
		if (llist_empty(lazy))
			return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}

static void irq_work_run_list(struct llist_head *list)
{
	struct irq_work *work, *tmp;
	struct llist_node *llnode;
	unsigned long flags;

	BUG_ON(!irqs_disabled());

	if (llist_empty(list))
		return;

	llnode = llist_del_all(list);
	llist_for_each_entry_safe(work, tmp, llnode, llnode) {
		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}

/*
 * hotplug calls this through:
 *  hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
	irq_work_run_list(this_cpu_ptr(&raised_list));
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);

void irq_work_tick(void)
{
	struct llist_head *raised = this_cpu_ptr(&raised_list);

	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
		irq_work_run_list(raised);
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}

/*
 * Synchronize against the irq_work @work, ensuring the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	lockdep_assert_irqs_enabled();

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);