kernel/smp.c
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 *
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
static LIST_HEAD(call_function_queue);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(call_function_lock);

enum {
	CSD_FLAG_WAIT		= 0x01,
	CSD_FLAG_ALLOC		= 0x02,
};

struct call_function_data {
	struct call_single_data csd;
	spinlock_t lock;
	unsigned int refs;
	struct rcu_head rcu_head;
	unsigned long cpumask_bits[];
};

struct call_single_queue {
	struct list_head list;
	spinlock_t lock;
};

static int __cpuinit init_call_single_data(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct call_single_queue *q = &per_cpu(call_single_queue, i);

		spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->list);
	}
	return 0;
}
early_initcall(init_call_single_data);

static void csd_flag_wait(struct call_single_data *data)
{
	/* Wait for response */
	do {
		if (!(data->flags & CSD_FLAG_WAIT))
			break;
		cpu_relax();
	} while (1);
}

/*
 * Insert a previously allocated call_single_data element for execution
 * on the given CPU. data must already have ->func, ->info, and ->flags set.
 */
static void generic_exec_single(int cpu, struct call_single_data *data)
{
	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
	int wait = data->flags & CSD_FLAG_WAIT, ipi;
	unsigned long flags;

	spin_lock_irqsave(&dst->lock, flags);
	ipi = list_empty(&dst->list);
	list_add_tail(&data->list, &dst->list);
	spin_unlock_irqrestore(&dst->lock, flags);

	/*
	 * Make the list addition visible before sending the ipi.
	 */
	smp_mb();

	if (ipi)
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_flag_wait(data);
}

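/*
 * Illustrative sketch (not from the original file): how a caller is
 * expected to prepare a call_single_data before handing it to
 * generic_exec_single().  do_something() and some_arg are assumptions
 * made up purely for this example; the real entry points are the
 * smp_call_function_single() and __smp_call_function_single() wrappers
 * below.
 *
 *	struct call_single_data csd;
 *
 *	csd.flags = CSD_FLAG_WAIT;	(block until the callback has run)
 *	csd.func  = do_something;
 *	csd.info  = &some_arg;
 *	generic_exec_single(target_cpu, &csd);
 */
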
static void rcu_free_call_data(struct rcu_head *head)
{
	struct call_function_data *data;

	data = container_of(head, struct call_function_data, rcu_head);

	kfree(data);
}

/*
 * Invoked by arch to handle an IPI for call function. Must be called with
 * interrupts disabled.
 */
void generic_smp_call_function_interrupt(void)
{
	struct call_function_data *data;
	int cpu = get_cpu();

	/*
	 * It's ok to use list_for_each_rcu() here even though we may delete
	 * 'pos', since list_del_rcu() doesn't clear ->next
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(data, &call_function_queue, csd.list) {
		int refs;

		if (!cpumask_test_cpu(cpu, to_cpumask(data->cpumask_bits)))
			continue;

		data->csd.func(data->csd.info);

		spin_lock(&data->lock);
		cpumask_clear_cpu(cpu, to_cpumask(data->cpumask_bits));
		WARN_ON(data->refs == 0);
		data->refs--;
		refs = data->refs;
		spin_unlock(&data->lock);

		if (refs)
			continue;

		spin_lock(&call_function_lock);
		list_del_rcu(&data->csd.list);
		spin_unlock(&call_function_lock);

		if (data->csd.flags & CSD_FLAG_WAIT) {
			/*
			 * serialize stores to data with the flag clear
			 * and wakeup
			 */
			smp_wmb();
			data->csd.flags &= ~CSD_FLAG_WAIT;
		}
		if (data->csd.flags & CSD_FLAG_ALLOC)
			call_rcu(&data->rcu_head, rcu_free_call_data);
	}
	rcu_read_unlock();

	put_cpu();
}

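/*
 * Illustrative sketch (not from the original file): an architecture is
 * expected to deliver its "call function" and "call function single"
 * IPIs to the two generic handlers with interrupts disabled.  The
 * handler names below are assumptions modelled on a typical arch IPI
 * entry path, not code from this file:
 *
 *	void arch_call_function_ipi_handler(void)
 *	{
 *		irq_enter();
 *		generic_smp_call_function_interrupt();
 *		irq_exit();
 *	}
 *
 *	void arch_call_function_single_ipi_handler(void)
 *	{
 *		irq_enter();
 *		generic_smp_call_function_single_interrupt();
 *		irq_exit();
 *	}
 */
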
/*
 * Invoked by arch to handle an IPI for call function single. Must be called
 * from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
	LIST_HEAD(list);

	/*
	 * Need to see other stores to list head for checking whether
	 * list is empty without holding q->lock
	 */
	smp_read_barrier_depends();
	while (!list_empty(&q->list)) {
		unsigned int data_flags;

		spin_lock(&q->lock);
		list_replace_init(&q->list, &list);
		spin_unlock(&q->lock);

		while (!list_empty(&list)) {
			struct call_single_data *data;

			data = list_entry(list.next, struct call_single_data,
					  list);
			list_del(&data->list);

			/*
			 * 'data' can be invalid after this call if
			 * flags == 0 (when called through
			 * generic_exec_single()), so save them away before
			 * making the call.
			 */
			data_flags = data->flags;

			data->func(data->info);

			if (data_flags & CSD_FLAG_WAIT) {
				smp_wmb();
				data->flags &= ~CSD_FLAG_WAIT;
			} else if (data_flags & CSD_FLAG_ALLOC)
				kfree(data);
		}
		/*
		 * See comment on outer loop
		 */
		smp_read_barrier_depends();
	}
}

/**
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run @func on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the target CPU.
 *
 * Returns 0 on success, else a negative status code. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	struct call_single_data d;
	unsigned long flags;
	/* prevent preemption and reschedule on another processor,
	   as well as CPU removal */
	int me = get_cpu();
	int err = 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	if (cpu == me) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	} else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
		struct call_single_data *data = NULL;

		if (!wait) {
			data = kmalloc(sizeof(*data), GFP_ATOMIC);
			if (data)
				data->flags = CSD_FLAG_ALLOC;
		}
		if (!data) {
			data = &d;
			data->flags = CSD_FLAG_WAIT;
		}

		data->func = func;
		data->info = info;
		generic_exec_single(cpu, data);
	} else {
		err = -ENXIO;	/* CPU not online */
	}

	put_cpu();
	return err;
}
EXPORT_SYMBOL(smp_call_function_single);

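/*
 * Illustrative sketch (not from the original file): a typical caller of
 * smp_call_function_single().  drain_local_stats() and the stats pointer
 * are assumptions made up for this example.
 *
 *	static void drain_local_stats(void *info)
 *	{
 *		struct stats *s = info;
 *
 *		...	(runs in IPI context on the target CPU, so it
 *			 must be fast and must not sleep)
 *	}
 *
 *	err = smp_call_function_single(cpu, drain_local_stats, s, 1);
 *
 * With wait == 1 the call returns only after drain_local_stats() has run
 * on @cpu (or -ENXIO if that CPU is not online).
 */
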
/**
 * __smp_call_function_single(): Run a function on another CPU
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but allows the caller to pass in a
 * pre-allocated data structure. Useful for embedding @data inside other
 * structures, for instance.
 *
 */
void __smp_call_function_single(int cpu, struct call_single_data *data)
{
	/* Can deadlock when called with interrupts disabled */
	WARN_ON((data->flags & CSD_FLAG_WAIT) && irqs_disabled());

	generic_exec_single(cpu, data);
}

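/*
 * Illustrative sketch (not from the original file): embedding a
 * call_single_data in a caller-owned object so no allocation is needed
 * on the IPI path.  struct my_request and complete_request() are
 * assumptions made up for this example.
 *
 *	struct my_request {
 *		struct call_single_data	csd;
 *		...
 *	};
 *
 *	req->csd.flags = 0;			(fire and forget)
 *	req->csd.func  = complete_request;
 *	req->csd.info  = req;
 *	__smp_call_function_single(cpu, &req->csd);
 *
 * With flags == 0 the caller must guarantee that req stays valid until
 * complete_request() has run on the target CPU.
 */
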
/* FIXME: Shim for archs using old arch_send_call_function_ipi API. */
#ifndef arch_send_call_function_ipi_mask
#define arch_send_call_function_ipi_mask(maskp) \
	arch_send_call_function_ipi(*(maskp))
#endif

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * If @wait is true, then returns once @func has returned. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    void (*func)(void *), void *info,
			    bool wait)
{
	struct call_function_data *data;
	unsigned long flags;
	int cpu, next_cpu;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	/* So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == smp_processor_id())
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	/* No online cpus? We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == smp_processor_id())
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	data = kmalloc(sizeof(*data) + cpumask_size(), GFP_ATOMIC);
	if (unlikely(!data)) {
		/* Slow path. */
		for_each_online_cpu(cpu) {
			if (cpu == smp_processor_id())
				continue;
			if (cpumask_test_cpu(cpu, mask))
				smp_call_function_single(cpu, func, info, wait);
		}
		return;
	}

	spin_lock_init(&data->lock);
	data->csd.flags = CSD_FLAG_ALLOC;
	if (wait)
		data->csd.flags |= CSD_FLAG_WAIT;
	data->csd.func = func;
	data->csd.info = info;
	cpumask_and(to_cpumask(data->cpumask_bits), mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), to_cpumask(data->cpumask_bits));
	data->refs = cpumask_weight(to_cpumask(data->cpumask_bits));

	spin_lock_irqsave(&call_function_lock, flags);
	list_add_tail_rcu(&data->csd.list, &call_function_queue);
	spin_unlock_irqrestore(&call_function_lock, flags);

	/*
	 * Make the list addition visible before sending the ipi.
	 */
	smp_mb();

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(to_cpumask(data->cpumask_bits));

	/* optionally wait for the CPUs to complete */
	if (wait)
		csd_flag_wait(&data->csd);
}
EXPORT_SYMBOL(smp_call_function_many);

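/*
 * Illustrative sketch (not from the original file): invoking a callback
 * on every online CPU of a NUMA node.  flush_node_caches() and the node
 * variable are assumptions made up for this example; cpumask_of_node()
 * is assumed to be the usual way of obtaining such a mask.
 *
 *	preempt_disable();
 *	smp_call_function_many(cpumask_of_node(node),
 *			       flush_node_caches, NULL, true);
 *	preempt_enable();
 *
 * Note that the calling CPU is always skipped; if it is part of @mask
 * the caller has to run @func locally itself.
 */
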
/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func. In case of allocation
 * failure, @wait will be implicitly turned on.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *), void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(smp_call_function);

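/*
 * Illustrative sketch (not from the original file): the classic "kick
 * every other CPU" pattern.  sync_counters() is an assumption made up
 * for this example.
 *
 *	smp_call_function(sync_counters, NULL, 1);
 *	sync_counters(NULL);
 *
 * With wait == 1 the first line only returns after sync_counters() has
 * finished on all other online CPUs; the second line covers the local
 * CPU, which smp_call_function() never calls.
 */
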
void ipi_call_lock(void)
{
	spin_lock(&call_function_lock);
}

void ipi_call_unlock(void)
{
	spin_unlock(&call_function_lock);
}

void ipi_call_lock_irq(void)
{
	spin_lock_irq(&call_function_lock);
}

void ipi_call_unlock_irq(void)
{
	spin_unlock_irq(&call_function_lock);
}