/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static struct {
	struct list_head	queue;
	raw_spinlock_t		lock;
} call_function __cacheline_aligned_in_smp =
	{
		.queue		= LIST_HEAD_INIT(call_function.queue),
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
	};

enum {
	CSD_FLAG_LOCK		= 0x01,
};

struct call_function_data {
	struct call_single_data	csd;
	atomic_t		refs;
	cpumask_var_t		cpumask;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

struct call_single_queue {
	struct list_head	list;
	raw_spinlock_t		lock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);

static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				cpu_to_node(cpu)))
			return notifier_from_errno(-ENOMEM);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:

	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		free_cpumask_var(cfd->cpumask);
		break;
#endif
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
	.notifier_call		= hotplug_cfd,
};

static int __cpuinit init_call_single_data(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int i;

	for_each_possible_cpu(i) {
		struct call_single_queue *q = &per_cpu(call_single_queue, i);

		raw_spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->list);
	}

	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
	register_cpu_notifier(&hotplug_cfd_notifier);

	return 0;
}
early_initcall(init_call_single_data);

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void csd_lock_wait(struct call_single_data *data)
{
	while (data->flags & CSD_FLAG_LOCK)
		cpu_relax();
}

static void csd_lock(struct call_single_data *data)
{
	csd_lock_wait(data);
	data->flags = CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data structure:
	 */
	smp_mb();
}

static void csd_unlock(struct call_single_data *data)
{
	WARN_ON(!(data->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_mb();

	data->flags &= ~CSD_FLAG_LOCK;
}

/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static
void generic_exec_single(int cpu, struct call_single_data *data, int wait)
{
	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
	unsigned long flags;
	int ipi;

	raw_spin_lock_irqsave(&dst->lock, flags);
	ipi = list_empty(&dst->list);
	list_add_tail(&data->list, &dst->list);
	raw_spin_unlock_irqrestore(&dst->lock, flags);

	/*
	 * The list addition should be visible before we send the IPI:
	 * the handler locks the list to pull the entry off it, and the
	 * normal cache coherency rules implied by spinlocks make the
	 * addition visible by then.
	 *
	 * If IPIs can go out of order with respect to the cache coherency
	 * protocol in an architecture, sufficient synchronisation should
	 * be added to arch code to make it appear to obey cache coherency
	 * WRT locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (ipi)
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_lock_wait(data);
}

/*
 * Invoked by arch to handle an IPI for call function. Must be called with
 * interrupts disabled.
 */
void generic_smp_call_function_interrupt(void)
{
	struct call_function_data *data;
	int cpu = smp_processor_id();

	/*
	 * Shouldn't receive this interrupt on a cpu that is not yet online.
	 */
	WARN_ON_ONCE(!cpu_online(cpu));

	/*
	 * Ensure entry is visible on call_function_queue after we have
	 * entered the IPI. See comment in smp_call_function_many.
	 * If we don't have this, then we may miss an entry on the list
	 * and never get another IPI to process it.
	 */
	smp_mb();

	/*
	 * It's ok to use list_for_each_entry_rcu() here even though we may
	 * delete 'data', since list_del_rcu() doesn't clear ->next
	 */
	list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
		int refs;

		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
			continue;

		data->csd.func(data->csd.info);

		refs = atomic_dec_return(&data->refs);
		WARN_ON(refs < 0);
		if (!refs) {
			raw_spin_lock(&call_function.lock);
			list_del_rcu(&data->csd.list);
			raw_spin_unlock(&call_function.lock);
		}

		if (refs)
			continue;

		csd_unlock(&data->csd);
	}
}
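
/*
 * Illustrative sketch (not part of this file): architecture code wires the
 * two generic handlers in this file into its IPI entry points, roughly as
 * below. The handler name is hypothetical; real arch code also acks the
 * interrupt and differs in detail, but it must run with irqs disabled.
 *
 *	void smp_call_function_ipi_handler(void)
 *	{
 *		irq_enter();
 *		generic_smp_call_function_interrupt();
 *		irq_exit();
 *	}
 */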

/*
 * Invoked by arch to handle an IPI for call function single. Must be
 * called from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
	unsigned int data_flags;
	LIST_HEAD(list);

	/*
	 * Shouldn't receive this interrupt on a cpu that is not yet online.
	 */
	WARN_ON_ONCE(!cpu_online(smp_processor_id()));

	raw_spin_lock(&q->lock);
	list_replace_init(&q->list, &list);
	raw_spin_unlock(&q->lock);

	while (!list_empty(&list)) {
		struct call_single_data *data;

		data = list_entry(list.next, struct call_single_data, list);
		list_del(&data->list);

		/*
		 * 'data' can be invalid after this call if flags == 0
		 * (when called through generic_exec_single()),
		 * so save the flags away before making the call:
		 */
		data_flags = data->flags;

		data->func(data->info);

		/*
		 * Unlocked CSDs are valid through generic_exec_single():
		 */
		if (data_flags & CSD_FLAG_LOCK)
			csd_unlock(data);
	}
}

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/**
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @wait: If true, wait until the function has completed on the specified CPU.
 * @info: An arbitrary pointer to pass to the function.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	struct call_single_data d = {
		.flags = 0,
	};
	unsigned long flags;
	int this_cpu;
	int err = 0;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	if (cpu == this_cpu) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	} else {
		if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
			struct call_single_data *data = &d;

			if (!wait)
				data = &__get_cpu_var(csd_data);

			csd_lock(data);

			data->func = func;
			data->info = info;
			generic_exec_single(cpu, data, wait);
		} else {
			err = -ENXIO;	/* CPU not online */
		}
	}

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
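
/*
 * Usage sketch (not part of this file): run a fast, non-blocking callback
 * on one remote CPU and wait for it to finish. remote_fn and result are
 * hypothetical names; the callback runs in interrupt context on the target.
 *
 *	static void remote_fn(void *info)
 *	{
 *		*(int *)info = smp_processor_id();	// runs on the target cpu
 *	}
 *
 *	int result, err;
 *
 *	err = smp_call_function_single(1, remote_fn, &result, 1);
 *	if (err)
 *		...;	// e.g. -ENXIO when cpu 1 is offline
 */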

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 * Note that @wait will be implicitly turned on in case of allocation failures,
 * since we fall back to on-stack allocation.
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
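
/*
 * Usage sketch (not part of this file): let the kernel pick the cheapest,
 * preferably node-local CPU from a caller-owned mask. my_allowed_cpus is
 * hypothetical; remote_fn and result are as in the sketch above.
 *
 *	err = smp_call_function_any(my_allowed_cpus, remote_fn, &result, 1);
 */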

/**
 * __smp_call_function_single(): Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and setup data structure
 * @wait: If true, wait until function has completed on specified CPU.
 *
 * Like smp_call_function_single(), but allow caller to pass in a
 * pre-allocated data structure. Useful for embedding @data inside
 * other structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data,
				int wait)
{
	unsigned int this_cpu;
	unsigned long flags;

	this_cpu = get_cpu();
	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
		     && !oops_in_progress);

	if (cpu == this_cpu) {
		local_irq_save(flags);
		data->func(data->info);
		local_irq_restore(flags);
	} else {
		csd_lock(data);
		generic_exec_single(cpu, data, wait);
	}
	put_cpu();
}
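
/*
 * Sketch of the embedding use case mentioned above (not part of this
 * file): a subsystem can place the call_single_data inside long-lived
 * state of its own, so nothing needs to be allocated at call time.
 * struct my_work and my_work_fn are hypothetical. With wait == 0 the
 * structure must stay valid until the target cpu has unlocked the csd,
 * which csd_lock() enforces on the next use.
 *
 *	struct my_work {
 *		struct call_single_data	csd;
 *		unsigned long		payload;
 *	};
 *
 *	static void my_work_fn(void *info)
 *	{
 *		struct my_work *w = info;
 *		// ... consume w->payload on the target cpu ...
 *	}
 *
 *	w->csd.func = my_work_fn;
 *	w->csd.info = w;
 *	w->csd.flags = 0;
 *	__smp_call_function_single(cpu, &w->csd, 0);
 */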

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	struct call_function_data *data;
	unsigned long flags;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	/* So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus? We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	data = &__get_cpu_var(cfd_data);
	csd_lock(&data->csd);

	data->csd.func = func;
	data->csd.info = info;
	cpumask_and(data->cpumask, mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, data->cpumask);
	atomic_set(&data->refs, cpumask_weight(data->cpumask));

	raw_spin_lock_irqsave(&call_function.lock, flags);
	/*
	 * Place entry at the _HEAD_ of the list, so that any cpu still
	 * observing the entry in generic_smp_call_function_interrupt()
	 * will not miss any other list entries:
	 */
	list_add_rcu(&data->csd.list, &call_function.queue);
	raw_spin_unlock_irqrestore(&call_function.lock, flags);

	/*
	 * Make the list addition visible before sending the ipi.
	 * (IPIs must obey or appear to obey normal Linux cache
	 * coherency rules -- see comment in generic_exec_single).
	 */
	smp_mb();

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(data->cpumask);

	/* Optionally wait for the CPUs to complete */
	if (wait)
		csd_lock_wait(&data->csd);
}
EXPORT_SYMBOL(smp_call_function_many);
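
/*
 * Usage sketch (not part of this file): run a callback on every other
 * online CPU and wait for all of them. Preemption must be disabled
 * around the call; flush_fn is a hypothetical name.
 *
 *	preempt_disable();
 *	smp_call_function_many(cpu_online_mask, flush_fn, NULL, true);
 *	preempt_enable();
 */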

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);

void ipi_call_lock(void)
{
	raw_spin_lock(&call_function.lock);
}

void ipi_call_unlock(void)
{
	raw_spin_unlock(&call_function.lock);
}

void ipi_call_lock_irq(void)
{
	raw_spin_lock_irq(&call_function.lock);
}

void ipi_call_unlock_irq(void)
{
	raw_spin_unlock_irq(&call_function.lock);
}
#endif /* USE_GENERIC_SMP_HELPERS */

/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_disable();
	func(info);
	local_irq_enable();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
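
/*
 * Usage sketch (not part of this file): unlike smp_call_function(), this
 * also runs the callback on the calling cpu (with irqs disabled), and may
 * not be called with interrupts off. drain_fn is a hypothetical name.
 *
 *	on_each_cpu(drain_fn, NULL, 1);
 */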