/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
static LIST_HEAD(call_function_queue);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(call_function_lock);

enum {
        CSD_FLAG_WAIT   = 0x01,
        CSD_FLAG_ALLOC  = 0x02,
};

struct call_function_data {
        struct call_single_data csd;
        spinlock_t lock;
        unsigned int refs;
        cpumask_t cpumask;
        struct rcu_head rcu_head;
};

struct call_single_queue {
        struct list_head list;
        spinlock_t lock;
};

void __cpuinit init_call_single_data(void)
{
        int i;

        for_each_possible_cpu(i) {
                struct call_single_queue *q = &per_cpu(call_single_queue, i);

                spin_lock_init(&q->lock);
                INIT_LIST_HEAD(&q->list);
        }
}

static void csd_flag_wait(struct call_single_data *data)
{
        /* Wait for response */
        do {
                /*
                 * We need to see the flags store in the IPI handler
                 */
                smp_mb();
                if (!(data->flags & CSD_FLAG_WAIT))
                        break;
                cpu_relax();
        } while (1);
}

/*
 * Insert a previously allocated call_single_data element for execution
 * on the given CPU. data must already have ->func, ->info, and ->flags set.
 */
static void generic_exec_single(int cpu, struct call_single_data *data)
{
        struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
        int wait = data->flags & CSD_FLAG_WAIT, ipi;
        unsigned long flags;

        spin_lock_irqsave(&dst->lock, flags);
        ipi = list_empty(&dst->list);
        list_add_tail(&data->list, &dst->list);
        spin_unlock_irqrestore(&dst->lock, flags);

        if (ipi)
                arch_send_call_function_single_ipi(cpu);

        if (wait)
                csd_flag_wait(data);
}

static void rcu_free_call_data(struct rcu_head *head)
{
        struct call_function_data *data;

        data = container_of(head, struct call_function_data, rcu_head);

        kfree(data);
}

/*
 * Invoked by arch to handle an IPI for call function. Must be called with
 * interrupts disabled.
 */
void generic_smp_call_function_interrupt(void)
{
        struct call_function_data *data;
        int cpu = get_cpu();

        /*
         * It's ok to use list_for_each_entry_rcu() here even though we may
         * delete 'pos', since list_del_rcu() doesn't clear ->next
         */
        rcu_read_lock();
        list_for_each_entry_rcu(data, &call_function_queue, csd.list) {
                int refs;

                if (!cpu_isset(cpu, data->cpumask))
                        continue;

                data->csd.func(data->csd.info);

                spin_lock(&data->lock);
                cpu_clear(cpu, data->cpumask);
                WARN_ON(data->refs == 0);
                data->refs--;
                refs = data->refs;
                spin_unlock(&data->lock);

                if (refs)
                        continue;

                spin_lock(&call_function_lock);
                list_del_rcu(&data->csd.list);
                spin_unlock(&call_function_lock);

                if (data->csd.flags & CSD_FLAG_WAIT) {
                        /*
                         * serialize stores to data with the flag clear
                         * and wakeup
                         */
                        smp_wmb();
                        data->csd.flags &= ~CSD_FLAG_WAIT;
                } else
                        call_rcu(&data->rcu_head, rcu_free_call_data);
        }
        rcu_read_unlock();

        put_cpu();
}

/*
 * Invoked by arch to handle an IPI for call function single. Must be called
 * from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
        struct call_single_queue *q = &__get_cpu_var(call_single_queue);
        LIST_HEAD(list);

        /*
         * Need to see other stores to list head for checking whether
         * list is empty without holding q->lock
         */
        smp_mb();
        while (!list_empty(&q->list)) {
                unsigned int data_flags;

                spin_lock(&q->lock);
                list_replace_init(&q->list, &list);
                spin_unlock(&q->lock);

                while (!list_empty(&list)) {
                        struct call_single_data *data;

                        data = list_entry(list.next, struct call_single_data,
                                          list);
                        list_del(&data->list);

                        /*
                         * 'data' can be invalid after this call if
                         * flags == 0 (when called through
                         * generic_exec_single()), so save the flags
                         * away before making the call.
                         */
                        data_flags = data->flags;

                        data->func(data->info);

                        if (data_flags & CSD_FLAG_WAIT) {
                                smp_wmb();
                                data->flags &= ~CSD_FLAG_WAIT;
                        } else if (data_flags & CSD_FLAG_ALLOC)
                                kfree(data);
                }
                /*
                 * See comment on outer loop
                 */
                smp_mb();
        }
}
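
/*
 * Illustrative sketch (not part of this file): architecture code is
 * expected to wire its IPI vectors into the two handlers above, and to
 * provide arch_send_call_function_single_ipi() and
 * arch_send_call_function_ipi() for raising them.  The vector handler
 * names below are made up; each arch uses its own.
 *
 *      void handle_call_function_ipi(void)
 *      {
 *              irq_enter();
 *              generic_smp_call_function_interrupt();
 *              irq_exit();
 *      }
 *
 *      void handle_call_function_single_ipi(void)
 *      {
 *              irq_enter();
 *              generic_smp_call_function_single_interrupt();
 *              irq_exit();
 *      }
 */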

/**
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the target CPU.
 *
 * Returns 0 on success, else a negative status code. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
                             int wait)
{
        struct call_single_data d;
        unsigned long flags;
        /* prevent preemption and reschedule on another processor */
        int me = get_cpu();

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        if (cpu == me) {
                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
        } else {
                struct call_single_data *data = NULL;

                if (!wait) {
                        data = kmalloc(sizeof(*data), GFP_ATOMIC);
                        if (data)
                                data->flags = CSD_FLAG_ALLOC;
                }
                if (!data) {
                        data = &d;
                        data->flags = CSD_FLAG_WAIT;
                }

                data->func = func;
                data->info = info;
                generic_exec_single(cpu, data);
        }

        put_cpu();
        return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
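
/*
 * Example use (illustrative only; bump_counter, counter and poke_cpu are
 * made-up names, not part of this API): run a function on one CPU and
 * wait for it to finish.
 *
 *      static atomic_t counter;
 *
 *      static void bump_counter(void *info)
 *      {
 *              atomic_inc(info);
 *      }
 *
 *      static int poke_cpu(int cpu)
 *      {
 *              return smp_call_function_single(cpu, bump_counter,
 *                                              &counter, 1);
 *      }
 */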

/**
 * __smp_call_function_single(): Run a function on another CPU
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but allows the caller to pass in a
 * pre-allocated data structure. Useful for embedding @data inside other
 * structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data)
{
        /* Can deadlock when called with interrupts disabled */
        WARN_ON((data->flags & CSD_FLAG_WAIT) && irqs_disabled());

        generic_exec_single(cpu, data);
}
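
/*
 * Illustrative sketch of the embedding case mentioned above (all names
 * hypothetical): a caller embeds the call_single_data in its own state,
 * so the submission path needs no allocation.  With flags == 0 the caller
 * keeps ownership and nothing waits, so the csd must not be resubmitted
 * until the function has run.
 *
 *      struct my_work {
 *              struct call_single_data csd;
 *              int pending;
 *      };
 *
 *      static void my_work_func(void *info)
 *      {
 *              struct my_work *w = info;
 *
 *              w->pending = 0;
 *      }
 *
 *      static void kick(struct my_work *w, int cpu)
 *      {
 *              w->csd.func = my_work_func;
 *              w->csd.info = w;
 *              w->csd.flags = 0;
 *              w->pending = 1;
 *              __smp_call_function_single(cpu, &w->csd);
 *      }
 */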

/**
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
                           int wait)
{
        struct call_function_data d;
        struct call_function_data *data = NULL;
        cpumask_t allbutself;
        unsigned long flags;
        int cpu, num_cpus;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        cpu = smp_processor_id();
        allbutself = cpu_online_map;
        cpu_clear(cpu, allbutself);
        cpus_and(mask, mask, allbutself);
        num_cpus = cpus_weight(mask);

        /*
         * If zero CPUs, return. If just a single CPU, turn this request
         * into a targeted single call instead since it's faster.
         */
        if (!num_cpus)
                return 0;
        else if (num_cpus == 1) {
                cpu = first_cpu(mask);
                return smp_call_function_single(cpu, func, info, wait);
        }

        if (!wait) {
                data = kmalloc(sizeof(*data), GFP_ATOMIC);
                if (data)
                        data->csd.flags = CSD_FLAG_ALLOC;
        }
        if (!data) {
                data = &d;
                data->csd.flags = CSD_FLAG_WAIT;
        }

        spin_lock_init(&data->lock);
        data->csd.func = func;
        data->csd.info = info;
        data->refs = num_cpus;
        data->cpumask = mask;

        spin_lock_irqsave(&call_function_lock, flags);
        list_add_tail_rcu(&data->csd.list, &call_function_queue);
        spin_unlock_irqrestore(&call_function_lock, flags);

        /* Send a message to all CPUs in the map */
        arch_send_call_function_ipi(mask);

        /* optionally wait for the CPUs to complete */
        if (wait)
                csd_flag_wait(&data->csd);

        return 0;
}
EXPORT_SYMBOL(smp_call_function_mask);
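
/*
 * Example use (illustrative only; ack_ipi, acks and signal_cpus are
 * made-up names): run a handler on an arbitrary set of CPUs and wait for
 * all of them, with preemption disabled around the call as required.
 *
 *      static atomic_t acks;
 *
 *      static void ack_ipi(void *info)
 *      {
 *              atomic_inc(info);
 *      }
 *
 *      static void signal_cpus(cpumask_t mask)
 *      {
 *              preempt_disable();
 *              smp_call_function_mask(mask, ack_ipi, &acks, 1);
 *              preempt_enable();
 *      }
 */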

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target CPUs call @func. In case of allocation
 * failure, @wait will be implicitly turned on.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *), void *info, int wait)
{
        int ret;

        preempt_disable();
        ret = smp_call_function_mask(cpu_online_map, func, info, wait);
        preempt_enable();
        return ret;
}
EXPORT_SYMBOL(smp_call_function);
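
/*
 * Example use (illustrative only; do_sync, synced and sync_all_cpus are
 * made-up names): the common "tell everyone else" pattern.  Since the
 * calling CPU is excluded, the caller runs the function locally itself.
 *
 *      static atomic_t synced;
 *
 *      static void do_sync(void *info)
 *      {
 *              atomic_inc(info);
 *      }
 *
 *      static void sync_all_cpus(void)
 *      {
 *              smp_call_function(do_sync, &synced, 1);
 *              do_sync(&synced);
 *      }
 */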

void ipi_call_lock(void)
{
        spin_lock(&call_function_lock);
}

void ipi_call_unlock(void)
{
        spin_unlock(&call_function_lock);
}

void ipi_call_lock_irq(void)
{
        spin_lock_irq(&call_function_lock);
}

void ipi_call_unlock_irq(void)
{
        spin_unlock_irq(&call_function_lock);
}
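
/*
 * The helpers above let architecture code hold call_function_lock across
 * a critical section, so that no function-call IPIs can be queued while
 * it runs.  A hypothetical sketch of the classic use, marking a CPU
 * online during bringup (bring_cpu_online is a made-up name):
 *
 *      static void bring_cpu_online(int cpu)
 *      {
 *              ipi_call_lock();
 *              cpu_set(cpu, cpu_online_map);
 *              ipi_call_unlock();
 *      }
 */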