/*
 * RCU expedited grace periods
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2016
 *
 * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

/* Wrapper functions for expedited grace periods. */
static void rcu_exp_gp_seq_start(struct rcu_state *rsp)
{
	rcu_seq_start(&rsp->expedited_sequence);
}
static void rcu_exp_gp_seq_end(struct rcu_state *rsp)
{
	rcu_seq_end(&rsp->expedited_sequence);
	smp_mb(); /* Ensure that consecutive grace periods serialize. */
}
static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
{
	unsigned long s;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	s = rcu_seq_snap(&rsp->expedited_sequence);
	trace_rcu_exp_grace_period(rsp->name, s, TPS("snap"));
	return s;
}
static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
{
	return rcu_seq_done(&rsp->expedited_sequence, s);
}

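/*
 * Note on ->expedited_sequence (a rough summary; see the rcu_seq_*()
 * helpers for the authoritative encoding): the counter's bottom bit is
 * set while an expedited grace period is in flight, so the counter
 * advances by two per grace period.  rcu_exp_gp_seq_snap() returns the
 * counter value at which a full grace period beginning after the
 * snapshot will have completed, and rcu_exp_gp_seq_done() checks
 * whether the counter has reached that value.
 */
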
/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online.  This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
{
	bool done;
	unsigned long flags;
	unsigned long mask;
	unsigned long oldmask;
	int ncpus = READ_ONCE(rsp->ncpus);
	struct rcu_node *rnp;
	struct rcu_node *rnp_up;

	/* If no new CPUs onlined since last time, nothing to do. */
	if (likely(ncpus == rsp->ncpus_snap))
		return;
	rsp->ncpus_snap = ncpus;

	/*
	 * Each pass through the following loop propagates newly onlined
	 * CPUs for the current rcu_node structure up the rcu_node tree.
	 */
	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmaskinit == rnp->expmaskinitnext) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;  /* No new CPUs, nothing to do. */
		}

		/* Update this node's mask, track old value for propagation. */
		oldmask = rnp->expmaskinit;
		rnp->expmaskinit = rnp->expmaskinitnext;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* If it was already nonzero, nothing to propagate. */
		if (oldmask)
			continue;

		/* Propagate the new CPU up the tree. */
		mask = rnp->grpmask;
		rnp_up = rnp->parent;
		done = false;
		while (rnp_up) {
			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
			if (rnp_up->expmaskinit)
				done = true;
			rnp_up->expmaskinit |= mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
			if (done)
				break;
			mask = rnp_up->grpmask;
			rnp_up = rnp_up->parent;
		}
	}
}

/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
{
	unsigned long flags;
	struct rcu_node *rnp;

	sync_exp_reset_tree_hotplug(rsp);
	rcu_for_each_node_breadth_first(rsp, rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		WARN_ON_ONCE(rnp->expmask);
		rnp->expmask = rnp->expmaskinit;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold the rcu_state's exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	return rnp->exp_tasks == NULL &&
	       READ_ONCE(rnp->expmask) == 0;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Caller must hold the rcu_state's exp_mutex and the specified rcu_node
 * structure's ->lock.
 */
static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
				 bool wake, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;

	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
			if (!rnp->expmask)
				rcu_initiate_boost(rnp, flags);
			else
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			if (wake) {
				smp_mb(); /* EGP done before wake_up(). */
				swake_up(&rsp->expedited_wq);
			}
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
		WARN_ON_ONCE(!(rnp->expmask & mask));
		rnp->expmask &= ~mask;
	}
}

/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 *
 * Caller must hold the rcu_state's exp_mutex.
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_state *rsp,
					      struct rcu_node *rnp, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	__rcu_report_exp_rnp(rsp, rnp, wake, flags);
}

/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.  Caller must hold the rcu_state's
 * exp_mutex.
 */
static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
				    unsigned long mask, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!(rnp->expmask & mask)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	rnp->expmask &= ~mask;
	__rcu_report_exp_rnp(rsp, rnp, wake, flags); /* Releases rnp->lock. */
}

/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
			       bool wake)
{
	rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake);
}

/* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
static bool sync_exp_work_done(struct rcu_state *rsp, atomic_long_t *stat,
			       unsigned long s)
{
	if (rcu_exp_gp_seq_done(rsp, s)) {
		trace_rcu_exp_grace_period(rsp->name, s, TPS("done"));
		/* Ensure test happens before caller kfree(). */
		smp_mb__before_atomic(); /* ^^^ */
		atomic_long_inc(stat);
		return true;
	}
	return false;
}

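/*
 * Note on the expedited wait queues: each rcu_node structure carries a
 * small array of wait queues (->exp_wq[]) on which tasks piggybacking on
 * an expedited grace period sleep.  The index "(s >> 1) & 0x3" used below
 * drops the bottom (grace-period-in-progress) bit of the sequence snapshot
 * and selects one of the four queues, so that waiters for distinct recent
 * grace periods do not share a queue.
 */
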
/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
{
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
	struct rcu_node *rnp = rdp->mynode;
	struct rcu_node *rnp_root = rcu_get_root(rsp);

	/* Low-contention fastpath. */
	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
	    (rnp == rnp_root ||
	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
	    mutex_trylock(&rsp->exp_mutex))
		goto fastpath;

	/*
	 * Each pass through the following loop works its way up
	 * the rcu_node tree, returning if others have done the work or
	 * otherwise falls through to acquire rsp->exp_mutex.  The mapping
	 * from CPU to rcu_node structure can be inexact, as it is just
	 * promoting locality and is not strictly needed for correctness.
	 */
	for (; rnp != NULL; rnp = rnp->parent) {
		if (sync_exp_work_done(rsp, &rdp->exp_workdone1, s))
			return true;

		/* Work not done, either wait here or go up. */
		spin_lock(&rnp->exp_lock);
		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

			/* Someone else doing GP, so wait for them. */
			spin_unlock(&rnp->exp_lock);
			trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
						  rnp->grplo, rnp->grphi,
						  TPS("wait"));
			wait_event(rnp->exp_wq[(s >> 1) & 0x3],
				   sync_exp_work_done(rsp,
						      &rdp->exp_workdone2, s));
			return true;
		}
		rnp->exp_seq_rq = s; /* Followers can wait on us. */
		spin_unlock(&rnp->exp_lock);
		trace_rcu_exp_funnel_lock(rsp->name, rnp->level, rnp->grplo,
					  rnp->grphi, TPS("nxtlvl"));
	}
	mutex_lock(&rsp->exp_mutex);
fastpath:
	if (sync_exp_work_done(rsp, &rdp->exp_workdone3, s)) {
		mutex_unlock(&rsp->exp_mutex);
		return true;
	}
	rcu_exp_gp_seq_start(rsp);
	trace_rcu_exp_grace_period(rsp->name, s, TPS("start"));
	return false;
}

/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void sync_sched_exp_handler(void *data)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct rcu_state *rsp = data;

	rdp = this_cpu_ptr(rsp->rda);
	rnp = rdp->mynode;
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    __this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
		return; /* Nothing needed from this CPU, or request already pending. */
	if (rcu_is_cpu_rrupt_from_idle()) {
		/* Interrupted the idle loop, which is itself a quiescent state. */
		rcu_report_exp_rdp(&rcu_sched_state,
				   this_cpu_ptr(&rcu_sched_data), true);
		return;
	}
	/* Ask for a quiescent-state report at the next opportunity. */
	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
	resched_cpu(smp_processor_id());
}

/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
	struct rcu_data *rdp;
	int ret;
	struct rcu_node *rnp;
	struct rcu_state *rsp = &rcu_sched_state;

	rdp = per_cpu_ptr(rsp->rda, cpu);
	rnp = rdp->mynode;
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
		return;
	ret = smp_call_function_single(cpu, sync_sched_exp_handler, rsp, 0);
	WARN_ON_ONCE(ret);
}

/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
				     smp_call_func_t func)
{
	int cpu;
	unsigned long flags;
	unsigned long mask_ofl_test;
	unsigned long mask_ofl_ipi;
	int ret;
	struct rcu_node *rnp;

	sync_exp_reset_tree(rsp);
	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);

		/* Each pass checks a CPU for identity, offline, and idle. */
		mask_ofl_test = 0;
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
			struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

			/* An idle CPU (even ->dynticks value) or this CPU needs no IPI. */
			if (raw_smp_processor_id() == cpu ||
			    !(atomic_add_return(0, &rdtp->dynticks) & 0x1))
				mask_ofl_test |= rdp->grpmask;
		}
		mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

		/*
		 * Need to wait for any blocked tasks as well.  Note that
		 * additional blocking tasks will also block the expedited
		 * GP until such time as the ->expmask bits are cleared.
		 */
		if (rcu_preempt_has_tasks(rnp))
			rnp->exp_tasks = rnp->blkd_tasks.next;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* IPI the remaining CPUs for expedited quiescent state. */
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			unsigned long mask = leaf_node_cpu_bit(rnp, cpu);

			if (!(mask_ofl_ipi & mask))
				continue;
retry_ipi:
			ret = smp_call_function_single(cpu, func, rsp, 0);
			if (!ret) {
				mask_ofl_ipi &= ~mask;
				continue;
			}
			/* Failed, raced with offline. */
			raw_spin_lock_irqsave_rcu_node(rnp, flags);
			if (cpu_online(cpu) &&
			    (rnp->expmask & mask)) {
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
				schedule_timeout_uninterruptible(1);
				if (cpu_online(cpu) &&
				    (rnp->expmask & mask))
					goto retry_ipi;
				raw_spin_lock_irqsave_rcu_node(rnp, flags);
			}
			if (!(rnp->expmask & mask))
				mask_ofl_ipi &= ~mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		}
		/* Report quiescent states for those that went offline. */
		mask_ofl_test |= mask_ofl_ipi;
		if (mask_ofl_test)
			rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false);
	}
}

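/*
 * Wait for the current expedited grace period to complete.  If it takes
 * too long, print RCU CPU stall warnings naming the holdout CPUs and
 * tasks, then keep waiting with a longer timeout.
 */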
static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
{
	int cpu;
	unsigned long jiffies_stall;
	unsigned long jiffies_start;
	unsigned long mask;
	int ndetected;
	struct rcu_node *rnp;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	int ret;

	jiffies_stall = rcu_jiffies_till_stall_check();
	jiffies_start = jiffies;

	for (;;) {
		ret = swait_event_timeout(
				rsp->expedited_wq,
				sync_rcu_preempt_exp_done(rnp_root),
				jiffies_stall);
		if (ret > 0 || sync_rcu_preempt_exp_done(rnp_root))
			return;
		WARN_ON(ret < 0);  /* workqueues should not be signaled. */
		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
		       rsp->name);
		ndetected = 0;
		rcu_for_each_leaf_node(rsp, rnp) {
			ndetected += rcu_print_task_exp_stall(rnp);
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				struct rcu_data *rdp;

				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(rnp->expmask & mask))
					continue;
				ndetected++;
				rdp = per_cpu_ptr(rsp->rda, cpu);
				pr_cont(" %d-%c%c%c", cpu,
					"O."[!!cpu_online(cpu)],
					"o."[!!(rdp->grpmask & rnp->expmaskinit)],
					"N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
			}
		}
		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
			jiffies - jiffies_start, rsp->expedited_sequence,
			rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
		if (ndetected) {
			pr_err("blocking rcu_node structures:");
			rcu_for_each_node_breadth_first(rsp, rnp) {
				if (rnp == rnp_root)
					continue; /* printed unconditionally */
				if (sync_rcu_preempt_exp_done(rnp))
					continue;
				pr_cont(" l=%u:%d-%d:%#lx/%c",
					rnp->level, rnp->grplo, rnp->grphi,
					rnp->expmask,
					".T"[!!rnp->exp_tasks]);
			}
			pr_cont("\n");
		}
		rcu_for_each_leaf_node(rsp, rnp) {
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(rnp->expmask & mask))
					continue;
				dump_cpu_task(cpu);
			}
		}
		jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
	}
}

/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
{
	struct rcu_node *rnp;

	synchronize_sched_expedited_wait(rsp);
	rcu_exp_gp_seq_end(rsp);
	trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));

	/*
	 * Switch over to wakeup mode, allowing the next GP, but -only- the
	 * next GP, to proceed.
	 */
	mutex_lock(&rsp->exp_wake_mutex);

	rcu_for_each_node_breadth_first(rsp, rnp) {
		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
			spin_lock(&rnp->exp_lock);
			/* Recheck, avoid hang in case someone just arrived. */
			if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
				rnp->exp_seq_rq = s;
			spin_unlock(&rnp->exp_lock);
		}
		wake_up_all(&rnp->exp_wq[(rsp->expedited_sequence >> 1) & 0x3]);
	}
	trace_rcu_exp_grace_period(rsp->name, s, TPS("endwake"));
	mutex_unlock(&rsp->exp_wake_mutex);
}

/* Let the workqueue handler know what it is supposed to do. */
struct rcu_exp_work {
	smp_call_func_t rew_func;
	struct rcu_state *rew_rsp;
	unsigned long rew_s;
	struct work_struct rew_work;
};

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct work_struct *wp)
{
	struct rcu_exp_work *rewp;

	/* Initialize the rcu_node tree in preparation for the wait. */
	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	sync_rcu_exp_select_cpus(rewp->rew_rsp, rewp->rew_func);

	/* Wait and clean up, including waking everyone. */
	rcu_exp_wait_wake(rewp->rew_rsp, rewp->rew_s);
}

/*
 * Given an rcu_state pointer and a smp_call_function() handler, kick
 * off the specified flavor of expedited grace period.
 */
static void _synchronize_rcu_expedited(struct rcu_state *rsp,
				       smp_call_func_t func)
{
	struct rcu_data *rdp;
	struct rcu_exp_work rew;
	struct rcu_node *rnp;
	unsigned long s;

	/* If expedited grace periods are prohibited, fall back to normal. */
	if (rcu_gp_is_normal()) {
		wait_rcu_gp(rsp->call);
		return;
	}

	/* Take a snapshot of the sequence number. */
	s = rcu_exp_gp_seq_snap(rsp);
	if (exp_funnel_lock(rsp, s))
		return;  /* Someone else did our work for us. */

	/* Marshal arguments and schedule the expedited grace period. */
	rew.rew_func = func;
	rew.rew_rsp = rsp;
	rew.rew_s = s;
	INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
	schedule_work(&rew.rew_work);

	/* Wait for expedited grace period to complete. */
	rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
	rnp = rcu_get_root(rsp);
	wait_event(rnp->exp_wq[(s >> 1) & 0x3],
		   sync_exp_work_done(rsp,
				      &rdp->exp_workdone0, s));

	/* Let the next expedited grace period start. */
	mutex_unlock(&rsp->exp_mutex);
}

/**
 * synchronize_sched_expedited - Brute-force RCU-sched grace period
 *
 * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
 * approach to force the grace period to end quickly.  This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * and is thus not recommended for any sort of common-case code.  In fact,
 * if you are using synchronize_sched_expedited() in a loop, please
 * restructure your code to batch your updates, and then use a single
 * synchronize_sched() instead.
 *
 * This implementation can be thought of as an application of sequence
 * locking to expedited grace periods, but using the sequence counter to
 * determine when someone else has already done the work instead of for
 * retrying readers.
 */
void synchronize_sched_expedited(void)
{
	struct rcu_state *rsp = &rcu_sched_state;

	/* If only one CPU, this is automatically a grace period. */
	if (rcu_blocking_is_gp())
		return;

	_synchronize_rcu_expedited(rsp, sync_sched_exp_handler);
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
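
/*
 * Illustrative sketch of the batching advice above (update_one() is a
 * hypothetical caller-side update function, not part of this file):
 * rather than
 *
 *	for (i = 0; i < n; i++) {
 *		update_one(i);
 *		synchronize_sched_expedited();
 *	}
 *
 * prefer
 *
 *	for (i = 0; i < n; i++)
 *		update_one(i);
 *	synchronize_sched();
 */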

#ifdef CONFIG_PREEMPT_RCU

/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state up the
 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
 * report the quiescent state.
 */
static void sync_rcu_exp_handler(void *info)
{
	struct rcu_data *rdp;
	struct rcu_state *rsp = info;
	struct task_struct *t = current;

	/*
	 * Within an RCU read-side critical section, request that the next
	 * rcu_read_unlock() report.  Unless this RCU read-side critical
	 * section has already blocked, in which case it is already set
	 * up for the expedited grace period to wait on it.
	 */
	if (t->rcu_read_lock_nesting > 0 &&
	    !t->rcu_read_unlock_special.b.blocked) {
		t->rcu_read_unlock_special.b.exp_need_qs = true;
		return;
	}

	/*
	 * We are either exiting an RCU read-side critical section (negative
	 * values of t->rcu_read_lock_nesting) or are not in one at all
	 * (zero value of t->rcu_read_lock_nesting).  Or we are in an RCU
	 * read-side critical section that blocked before this expedited
	 * grace period started.  Either way, we can immediately report
	 * the quiescent state.
	 */
	rdp = this_cpu_ptr(rsp->rda);
	rcu_report_exp_rdp(rsp, rdp, true);
}

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU-preempt grace period, but expedite it.  The basic
 * idea is to IPI all non-idle non-nohz online CPUs.  The IPI handler
 * checks whether the CPU is in an RCU-preempt critical section, and
 * if so, it sets a flag that causes the outermost rcu_read_unlock()
 * to report the quiescent state.  On the other hand, if the CPU is
 * not in an RCU read-side critical section, the IPI handler reports
 * the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, and
 * is thus not recommended for any sort of common-case code.  In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
 */
void synchronize_rcu_expedited(void)
{
	struct rcu_state *rsp = rcu_state_p;

	_synchronize_rcu_expedited(rsp, sync_rcu_exp_handler);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */