/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
static LIST_HEAD(call_function_queue);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(call_function_lock);

enum {
	CSD_FLAG_WAIT	= 0x01,
	CSD_FLAG_ALLOC	= 0x02,
	CSD_FLAG_LOCK	= 0x04,
};

struct call_function_data {
	struct call_single_data csd;
	spinlock_t lock;
	unsigned int refs;
	struct rcu_head rcu_head;
	unsigned long cpumask_bits[];
};

struct call_single_queue {
	struct list_head list;
	spinlock_t lock;
};

static int __cpuinit init_call_single_data(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct call_single_queue *q = &per_cpu(call_single_queue, i);

		spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->list);
	}
	return 0;
}
early_initcall(init_call_single_data);

static void csd_flag_wait(struct call_single_data *data)
{
	/* Wait for response */
	do {
		if (!(data->flags & CSD_FLAG_WAIT))
			break;
		cpu_relax();
	} while (1);
}

/*
 * Insert a previously allocated call_single_data element for execution
 * on the given CPU. data must already have ->func, ->info, and ->flags set.
 */
static void generic_exec_single(int cpu, struct call_single_data *data)
{
	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
	int wait = data->flags & CSD_FLAG_WAIT, ipi;
	unsigned long flags;

	spin_lock_irqsave(&dst->lock, flags);
	ipi = list_empty(&dst->list);
	list_add_tail(&data->list, &dst->list);
	spin_unlock_irqrestore(&dst->lock, flags);

	/*
	 * The list addition should be visible before we send the IPI:
	 * when the handler locks the list to pull the entry off it, the
	 * normal cache coherency rules implied by spinlocks make the
	 * addition visible by then.
	 *
	 * If IPIs can go out of order with respect to the cache coherency
	 * protocol on an architecture, sufficient synchronisation should
	 * be added to the arch code to make IPIs appear to obey cache
	 * coherency WRT locking and barrier primitives. Generic code isn't
	 * really equipped to do the right thing...
	 */
	if (ipi)
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_flag_wait(data);
}

static void rcu_free_call_data(struct rcu_head *head)
{
	struct call_function_data *data;

	data = container_of(head, struct call_function_data, rcu_head);

	kfree(data);
}

/*
 * Invoked by arch to handle an IPI for call function. Must be called with
 * interrupts disabled.
 */
void generic_smp_call_function_interrupt(void)
{
	struct call_function_data *data;
	int cpu = get_cpu();

	/*
	 * Ensure entry is visible on call_function_queue after we have
	 * entered the IPI. See comment in smp_call_function_many.
	 * If we don't have this, then we may miss an entry on the list
	 * and never get another IPI to process it.
	 */
	smp_mb();

	/*
	 * It's ok to use list_for_each_entry_rcu() here even though we may
	 * delete 'data', since list_del_rcu() doesn't clear ->next.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(data, &call_function_queue, csd.list) {
		int refs;

		if (!cpumask_test_cpu(cpu, to_cpumask(data->cpumask_bits)))
			continue;

		data->csd.func(data->csd.info);

		spin_lock(&data->lock);
		cpumask_clear_cpu(cpu, to_cpumask(data->cpumask_bits));
		WARN_ON(data->refs == 0);
		data->refs--;
		refs = data->refs;
		spin_unlock(&data->lock);

		if (refs)
			continue;

		spin_lock(&call_function_lock);
		list_del_rcu(&data->csd.list);
		spin_unlock(&call_function_lock);

		if (data->csd.flags & CSD_FLAG_WAIT) {
			/*
			 * Serialize stores to data with the flag clear
			 * and wakeup.
			 */
			smp_wmb();
			data->csd.flags &= ~CSD_FLAG_WAIT;
		}
		if (data->csd.flags & CSD_FLAG_ALLOC)
			call_rcu(&data->rcu_head, rcu_free_call_data);
	}
	rcu_read_unlock();

	put_cpu();
}

/*
 * Invoked by arch to handle an IPI for call function single. Must be called
 * from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
	LIST_HEAD(list);
	unsigned int data_flags;

	spin_lock(&q->lock);
	list_replace_init(&q->list, &list);
	spin_unlock(&q->lock);

	while (!list_empty(&list)) {
		struct call_single_data *data;

		data = list_entry(list.next, struct call_single_data, list);
		list_del(&data->list);

		/*
		 * 'data' can be invalid after this call if flags == 0
		 * (when called through generic_exec_single()), so save
		 * the flags away before making the call.
		 */
		data_flags = data->flags;

		data->func(data->info);

		if (data_flags & CSD_FLAG_WAIT) {
			smp_wmb();
			data->flags &= ~CSD_FLAG_WAIT;
		} else if (data_flags & CSD_FLAG_LOCK) {
			smp_wmb();
			data->flags &= ~CSD_FLAG_LOCK;
		} else if (data_flags & CSD_FLAG_ALLOC)
			kfree(data);
	}
}

static DEFINE_PER_CPU(struct call_single_data, csd_data);

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the other CPU.
 *
 * Returns 0 on success, else a negative status code. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	struct call_single_data d;
	unsigned long flags;
	/*
	 * Prevent preemption and reschedule on another processor,
	 * as well as CPU removal.
	 */
	int me = get_cpu();
	int err = 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	if (cpu == me) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	} else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
		struct call_single_data *data;

		if (!wait) {
			/*
			 * We are calling a function on a single CPU
			 * and we are not going to wait for it to finish.
			 * We first try to allocate the data, but if we
			 * fail, we fall back to using per cpu data to pass
			 * the information to that CPU. Since all callers
			 * of this code will use the same data, we must
			 * synchronize the callers to prevent a new caller
			 * from corrupting the data before the callee
			 * can access it.
			 *
			 * CSD_FLAG_LOCK is used to let us know when
			 * the IPI handler is done with the data.
			 * The first caller will set it, and the callee
			 * will clear it. The next caller must wait for
			 * it to clear before we set it again. This
			 * makes sure the callee is done with the
			 * data before a new caller will use it.
			 */
			data = kmalloc(sizeof(*data), GFP_ATOMIC);
			if (data)
				data->flags = CSD_FLAG_ALLOC;
			else {
				data = &per_cpu(csd_data, me);
				while (data->flags & CSD_FLAG_LOCK)
					cpu_relax();
				data->flags = CSD_FLAG_LOCK;
			}
		} else {
			data = &d;
			data->flags = CSD_FLAG_WAIT;
		}

		data->func = func;
		data->info = info;
		generic_exec_single(cpu, data);
	} else {
		err = -ENXIO;	/* CPU not online */
	}

	put_cpu();
	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
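/*
 * Example usage, as a minimal sketch (count_hit() and 'hits' are
 * hypothetical names invented for illustration, not part of this file).
 * The callback runs in IPI context on the target CPU, so it must be
 * fast, non-blocking and must not sleep:
 *
 *	static atomic_t hits = ATOMIC_INIT(0);
 *
 *	static void count_hit(void *info)
 *	{
 *		atomic_inc((atomic_t *)info);
 *	}
 *
 *	Run count_hit() on CPU 1 and wait for it to finish:
 *
 *	int err = smp_call_function_single(1, count_hit, &hits, 1);
 */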

/**
 * __smp_call_function_single(): Run a function on another CPU
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but allow caller to pass in a
 * pre-allocated data structure. Useful for embedding @data inside
 * other structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data)
{
	/* Can deadlock when called with interrupts disabled */
	WARN_ON((data->flags & CSD_FLAG_WAIT) && irqs_disabled());

	generic_exec_single(cpu, data);
}
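
/*
 * Example usage, as a minimal sketch (struct my_req and my_handler() are
 * hypothetical names invented for illustration). Embedding the
 * call_single_data inside a caller-owned structure avoids any allocation
 * on the call path; the handler recovers the containing object through
 * the ->info pointer:
 *
 *	struct my_req {
 *		struct call_single_data csd;
 *		int value;
 *	};
 *
 *	static void my_handler(void *info)
 *	{
 *		struct my_req *req = info;
 *		pr_info("got %d\n", req->value);
 *	}
 *
 *	static struct my_req req = {
 *		.csd = { .func = my_handler, .info = &req },
 *		.value = 42,
 *	};
 *
 *	__smp_call_function_single(3, &req.csd);
 */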

/* FIXME: Shim for archs using old arch_send_call_function_ipi API. */
#ifndef arch_send_call_function_ipi_mask
#define arch_send_call_function_ipi_mask(maskp) \
	arch_send_call_function_ipi(*(maskp))
#endif
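
/*
 * An architecture normally provides the mask-based hook itself; a rough
 * sketch of what that looks like (send_ipi() and IPI_CALL_FUNC are
 * hypothetical arch-internal names, not defined here):
 *
 *	void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 *	{
 *		int cpu;
 *
 *		for_each_cpu(cpu, mask)
 *			send_ipi(cpu, IPI_CALL_FUNC);
 *	}
 */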

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * If @wait is true, then returns once @func has returned. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    void (*func)(void *), void *info, bool wait)
{
	struct call_function_data *data;
	unsigned long flags;
	int cpu, next_cpu;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	/* So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == smp_processor_id())
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus? We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == smp_processor_id())
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	data = kmalloc(sizeof(*data) + cpumask_size(), GFP_ATOMIC);
	if (unlikely(!data)) {
		/* Slow path. */
		for_each_online_cpu(cpu) {
			if (cpu == smp_processor_id())
				continue;
			if (cpumask_test_cpu(cpu, mask))
				smp_call_function_single(cpu, func, info, wait);
		}
		return;
	}

	spin_lock_init(&data->lock);
	data->csd.flags = CSD_FLAG_ALLOC;
	if (wait)
		data->csd.flags |= CSD_FLAG_WAIT;
	data->csd.func = func;
	data->csd.info = info;
	cpumask_and(to_cpumask(data->cpumask_bits), mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), to_cpumask(data->cpumask_bits));
	data->refs = cpumask_weight(to_cpumask(data->cpumask_bits));

	spin_lock_irqsave(&call_function_lock, flags);
	list_add_tail_rcu(&data->csd.list, &call_function_queue);
	spin_unlock_irqrestore(&call_function_lock, flags);

	/*
	 * Make the list addition visible before sending the ipi.
	 * (IPIs must obey or appear to obey normal Linux cache coherency
	 * rules -- see comment in generic_exec_single).
	 */
	smp_mb();

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(to_cpumask(data->cpumask_bits));

	/* Optionally wait for the CPUs to complete */
	if (wait)
		csd_flag_wait(&data->csd);
}
EXPORT_SYMBOL(smp_call_function_many);
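
/*
 * Example usage, as a minimal sketch (drain_local() is a hypothetical
 * callback invented for illustration). Note that preemption must be
 * disabled across the call:
 *
 *	static void drain_local(void *unused)
 *	{
 *		pr_info("CPU %d drained\n", smp_processor_id());
 *	}
 *
 *	cpumask_var_t mask;
 *
 *	if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
 *		cpumask_copy(mask, cpu_online_mask);
 *		cpumask_clear_cpu(0, mask);	(leave the boot CPU alone)
 *		preempt_disable();
 *		smp_call_function_many(mask, drain_local, NULL, true);
 *		preempt_enable();
 *		free_cpumask_var(mask);
 *	}
 */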

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func. In case of allocation
 * failure, @wait will be implicitly turned on.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *), void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(smp_call_function);
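
/*
 * Example usage, as a minimal sketch (the per-cpu 'my_gen' counter and
 * bump_gen() are hypothetical names invented for illustration). Note
 * that the calling CPU is not included, so it must handle itself:
 *
 *	static DEFINE_PER_CPU(int, my_gen);
 *
 *	static void bump_gen(void *unused)
 *	{
 *		__get_cpu_var(my_gen)++;
 *	}
 *
 *	preempt_disable();
 *	smp_call_function(bump_gen, NULL, 1);	(every other online CPU)
 *	bump_gen(NULL);				(and this one by hand)
 *	preempt_enable();
 */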

void ipi_call_lock(void)
{
	spin_lock(&call_function_lock);
}

void ipi_call_unlock(void)
{
	spin_unlock(&call_function_lock);
}

void ipi_call_lock_irq(void)
{
	spin_lock_irq(&call_function_lock);
}

void ipi_call_unlock_irq(void)
{
	spin_unlock_irq(&call_function_lock);
}