// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU segmented callback lists, function definitions
 *
 * Copyright IBM Corporation, 2017
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include "rcu_segcblist.h"

/* Initialize simple callback list. */
void rcu_cblist_init(struct rcu_cblist *rclp)
{
	rclp->head = NULL;
	rclp->tail = &rclp->head;
	rclp->len = 0;
}

/*
 * Enqueue an rcu_head structure onto the specified callback list.
 */
void rcu_cblist_enqueue(struct rcu_cblist *rclp, struct rcu_head *rhp)
{
	*rclp->tail = rhp;
	rclp->tail = &rhp->next;
	WRITE_ONCE(rclp->len, rclp->len + 1);
}

/*
 * Flush the second rcu_cblist structure onto the first one, obliterating
 * any contents of the first. If rhp is non-NULL, enqueue it as the sole
 * element of the second rcu_cblist structure, but ensuring that the second
 * rcu_cblist structure, if initially non-empty, always appears non-empty
 * throughout the process. If rhp is NULL, the second rcu_cblist structure
 * is instead initialized to empty.
 */
void rcu_cblist_flush_enqueue(struct rcu_cblist *drclp,
			      struct rcu_cblist *srclp,
			      struct rcu_head *rhp)
{
	drclp->head = srclp->head;
	if (drclp->head)
		drclp->tail = srclp->tail;
	else
		drclp->tail = &drclp->head;
	drclp->len = srclp->len;
	if (!rhp) {
		rcu_cblist_init(srclp);
	} else {
		rhp->next = NULL;
		srclp->head = rhp;
		srclp->tail = &rhp->next;
		WRITE_ONCE(srclp->len, 1);
	}
}
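
/*
 * Example (illustrative sketch, not code from this file): hand off
 * everything queued on one list while leaving a single replacement
 * callback behind, as done when flushing a bypass list. The names
 * "bypass", "drain", and "new_rhp" are hypothetical.
 *
 *	struct rcu_cblist bypass;	// already holds queued callbacks
 *	struct rcu_cblist drain;	// prior contents are overwritten
 *	struct rcu_head *new_rhp = ...;	// callback to leave behind
 *
 *	rcu_cblist_flush_enqueue(&drain, &bypass, new_rhp);
 *	// drain now holds bypass's old callbacks; bypass holds only
 *	// new_rhp, and never appeared empty along the way.
 */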

/*
 * Dequeue the oldest rcu_head structure from the specified callback
 * list.
 */
struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp)
{
	struct rcu_head *rhp;

	rhp = rclp->head;
	if (!rhp)
		return NULL;
	rclp->len--;
	rclp->head = rhp->next;
	if (!rclp->head)
		rclp->tail = &rclp->head;
	return rhp;
}
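
/*
 * Example (illustrative sketch, not code from this file): drain a
 * simple callback list, invoking each callback in turn. This assumes
 * each rcu_head's ->func field was set by the caller, as call_rcu()
 * does; the "mycbs" rcu_cblist is hypothetical.
 *
 *	struct rcu_head *rhp;
 *
 *	while ((rhp = rcu_cblist_dequeue(&mycbs)) != NULL)
 *		rhp->func(rhp);
 */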

/* Set the length of an rcu_segcblist structure. */
static void rcu_segcblist_set_len(struct rcu_segcblist *rsclp, long v)
{
#ifdef CONFIG_RCU_NOCB_CPU
	atomic_long_set(&rsclp->len, v);
#else
	WRITE_ONCE(rsclp->len, v);
#endif
}

/* Get the length of a segment of the rcu_segcblist structure. */
static long rcu_segcblist_get_seglen(struct rcu_segcblist *rsclp, int seg)
{
	return READ_ONCE(rsclp->seglen[seg]);
}

/* Return number of callbacks in segmented callback list by summing seglen. */
long rcu_segcblist_n_segment_cbs(struct rcu_segcblist *rsclp)
{
	long len = 0;
	int i;

	for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++)
		len += rcu_segcblist_get_seglen(rsclp, i);

	return len;
}

/* Set the length of a segment of the rcu_segcblist structure. */
static void rcu_segcblist_set_seglen(struct rcu_segcblist *rsclp, int seg, long v)
{
	WRITE_ONCE(rsclp->seglen[seg], v);
}

/* Increase the numeric length of a segment by a specified amount. */
static void rcu_segcblist_add_seglen(struct rcu_segcblist *rsclp, int seg, long v)
{
	WRITE_ONCE(rsclp->seglen[seg], rsclp->seglen[seg] + v);
}

/* Move the length of the "from" segment to the "to" segment. */
static void rcu_segcblist_move_seglen(struct rcu_segcblist *rsclp, int from, int to)
{
	long len;

	if (from == to)
		return;

	len = rcu_segcblist_get_seglen(rsclp, from);
	if (!len)
		return;

	rcu_segcblist_add_seglen(rsclp, to, len);
	rcu_segcblist_set_seglen(rsclp, from, 0);
}

/* Increment segment's length. */
static void rcu_segcblist_inc_seglen(struct rcu_segcblist *rsclp, int seg)
{
	rcu_segcblist_add_seglen(rsclp, seg, 1);
}

/*
 * Increase the numeric length of an rcu_segcblist structure by the
 * specified amount, which can be negative. This can cause the ->len
 * field to disagree with the actual number of callbacks on the structure.
 * This increase is fully ordered with respect to the caller's accesses
 * both before and after.
 *
 * So why on earth is a memory barrier required both before and after
 * the update to the ->len field???
 *
 * The reason is that rcu_barrier() locklessly samples each CPU's ->len
 * field, and if a given CPU's field is zero, avoids IPIing that CPU.
 * This can of course race with both queuing and invoking of callbacks.
 * Failing to correctly handle either of these races could result in
 * rcu_barrier() failing to IPI a CPU that actually had callbacks queued
 * which rcu_barrier() was obligated to wait on. And if rcu_barrier()
 * failed to wait on such a callback, unloading certain kernel modules
 * would result in calls to functions whose code was no longer present in
 * the kernel, for but one example.
 *
 * Therefore, ->len transitions from 1->0 and 0->1 have to be carefully
 * ordered with respect to both list modifications and the rcu_barrier().
 *
 * The queuing case is CASE 1 and the invoking case is CASE 2.
 *
 * CASE 1: Suppose that CPU 0 has no callbacks queued, but invokes
 * call_rcu() just as CPU 1 invokes rcu_barrier(). CPU 0's ->len field
 * will transition from 0->1, which is one of the transitions that must
 * be handled carefully. Without the full memory barriers after the ->len
 * update and at the beginning of rcu_barrier(), the following could happen:
 *
 * CPU 0				CPU 1
 *
 * call_rcu().
 *					rcu_barrier() sees ->len as 0.
 * set ->len = 1.
 *					rcu_barrier() does nothing.
 *					module is unloaded.
 * callback invokes unloaded function!
 *
 * With the full barriers, any case where rcu_barrier() sees ->len as 0 will
 * have unambiguously preceded the return from the racing call_rcu(), which
 * means that this call_rcu() invocation is OK to not wait on. After all,
 * you are supposed to make sure that any problematic call_rcu() invocations
 * happen before the rcu_barrier().
 *
 * CASE 2: Suppose that CPU 0 is invoking its last callback just as
 * CPU 1 invokes rcu_barrier(). CPU 0's ->len field will transition from
 * 1->0, which is one of the transitions that must be handled carefully.
 * Without the full memory barriers before the ->len update and at the
 * end of rcu_barrier(), the following could happen:
 *
 * CPU 0				CPU 1
 *
 * start invoking last callback
 * set ->len = 0 (reordered)
 *					rcu_barrier() sees ->len as 0.
 *					rcu_barrier() does nothing.
 *					module is unloaded.
 * callback executing after unloaded!
 *
 * With the full barriers, any case where rcu_barrier() sees ->len as 0
 * will be fully ordered after the completion of the callback function,
 * so that the module unloading operation is completely safe.
 */
void rcu_segcblist_add_len(struct rcu_segcblist *rsclp, long v)
{
#ifdef CONFIG_RCU_NOCB_CPU
	smp_mb__before_atomic(); // Read header comment above.
	atomic_long_add(v, &rsclp->len);
	smp_mb__after_atomic(); // Read header comment above.
#else
	smp_mb(); // Read header comment above.
	WRITE_ONCE(rsclp->len, rsclp->len + v);
	smp_mb(); // Read header comment above.
#endif
}
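
/*
 * Illustrative sketch (not code from this file) of the lockless
 * sampling pattern that the barriers above pair with. A hypothetical
 * rcu_barrier()-like primitive might skip CPUs with no callbacks;
 * per_cpu_rsclp() and send_ipi_or_entrain() are made-up names:
 *
 *	smp_mb();  // Order prior accesses before the ->len samples.
 *	for_each_possible_cpu(cpu) {
 *		struct rcu_segcblist *rsclp = per_cpu_rsclp(cpu);
 *
 *		if (!rcu_segcblist_n_cbs(rsclp))
 *			continue;  // Safe only because of the barriers
 *				   // in rcu_segcblist_add_len() and here.
 *		send_ipi_or_entrain(cpu);
 *	}
 *	smp_mb();  // Order the samples before subsequent accesses.
 */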

/*
 * Increase the numeric length of an rcu_segcblist structure by one.
 * This can cause the ->len field to disagree with the actual number of
 * callbacks on the structure. This increase is fully ordered with respect
 * to the caller's accesses both before and after.
 */
void rcu_segcblist_inc_len(struct rcu_segcblist *rsclp)
{
	rcu_segcblist_add_len(rsclp, 1);
}

/*
 * Initialize an rcu_segcblist structure.
 */
void rcu_segcblist_init(struct rcu_segcblist *rsclp)
{
	int i;

	BUILD_BUG_ON(RCU_NEXT_TAIL + 1 != ARRAY_SIZE(rsclp->gp_seq));
	BUILD_BUG_ON(ARRAY_SIZE(rsclp->tails) != ARRAY_SIZE(rsclp->gp_seq));
	rsclp->head = NULL;
	for (i = 0; i < RCU_CBLIST_NSEGS; i++) {
		rsclp->tails[i] = &rsclp->head;
		rcu_segcblist_set_seglen(rsclp, i, 0);
	}
	rcu_segcblist_set_len(rsclp, 0);
	rcu_segcblist_set_flags(rsclp, SEGCBLIST_ENABLED);
}

/*
 * Disable the specified rcu_segcblist structure, so that callbacks can
 * no longer be posted to it. This structure must be empty.
 */
void rcu_segcblist_disable(struct rcu_segcblist *rsclp)
{
	WARN_ON_ONCE(!rcu_segcblist_empty(rsclp));
	WARN_ON_ONCE(rcu_segcblist_n_cbs(rsclp));
	rcu_segcblist_clear_flags(rsclp, SEGCBLIST_ENABLED);
}

/*
 * Mark the specified rcu_segcblist structure as offloaded (or not).
 */
void rcu_segcblist_offload(struct rcu_segcblist *rsclp, bool offload)
{
	if (offload)
		rcu_segcblist_set_flags(rsclp, SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED);
	else
		rcu_segcblist_clear_flags(rsclp, SEGCBLIST_OFFLOADED);
}

/*
 * Does the specified rcu_segcblist structure contain callbacks that
 * are ready to be invoked?
 */
bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp)
{
	return rcu_segcblist_is_enabled(rsclp) &&
	       &rsclp->head != READ_ONCE(rsclp->tails[RCU_DONE_TAIL]);
}

/*
 * Does the specified rcu_segcblist structure contain callbacks that
 * are still pending, that is, not yet ready to be invoked?
 */
bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp)
{
	return rcu_segcblist_is_enabled(rsclp) &&
	       !rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL);
}

/*
 * Return a pointer to the first callback in the specified rcu_segcblist
 * structure. This is useful for diagnostics.
 */
struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp)
{
	if (rcu_segcblist_is_enabled(rsclp))
		return rsclp->head;
	return NULL;
}

/*
 * Return a pointer to the first pending callback in the specified
 * rcu_segcblist structure. This is useful just after posting a given
 * callback -- if that callback is the first pending callback, then
 * you cannot rely on someone else having already started up the required
 * grace period.
 */
struct rcu_head *rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp)
{
	if (rcu_segcblist_is_enabled(rsclp))
		return *rsclp->tails[RCU_DONE_TAIL];
	return NULL;
}

/*
 * Return false if there are no CBs awaiting grace periods, otherwise,
 * return true and store the nearest waited-upon grace period into *lp.
 */
bool rcu_segcblist_nextgp(struct rcu_segcblist *rsclp, unsigned long *lp)
{
	if (!rcu_segcblist_pend_cbs(rsclp))
		return false;
	*lp = rsclp->gp_seq[RCU_WAIT_TAIL];
	return true;
}

/*
 * Enqueue the specified callback onto the specified rcu_segcblist
 * structure, updating accounting as needed. Note that the ->len
 * field may be accessed locklessly, hence the WRITE_ONCE().
 * The ->len field is used by rcu_barrier() and friends to determine
 * if it must post a callback on this structure, and it is OK
 * for rcu_barrier() to sometimes post callbacks needlessly, but
 * absolutely not OK for it to ever miss posting a callback.
 */
void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp)
{
	rcu_segcblist_inc_len(rsclp);
	rcu_segcblist_inc_seglen(rsclp, RCU_NEXT_TAIL);
	rhp->next = NULL;
	WRITE_ONCE(*rsclp->tails[RCU_NEXT_TAIL], rhp);
	WRITE_ONCE(rsclp->tails[RCU_NEXT_TAIL], &rhp->next);
}
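
/*
 * Example (illustrative sketch, not code from this file): a
 * call_rcu()-style posting of a callback onto a per-CPU segmented
 * list. The "my_rsclp" pointer and the interrupt disabling shown are
 * hypothetical; real callers supply their own mutual exclusion.
 *
 *	rhp->func = my_callback_func;	// invoked once a grace period ends
 *	local_irq_save(flags);
 *	rcu_segcblist_enqueue(my_rsclp, rhp);
 *	local_irq_restore(flags);
 */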

/*
 * Entrain the specified callback onto the specified rcu_segcblist at
 * the end of the last non-empty segment. If the entire rcu_segcblist
 * is empty, make no change, but return false.
 *
 * This is intended for use by rcu_barrier()-like primitives, -not-
 * for normal grace-period use. IMPORTANT: The callback you enqueue
 * will wait for all prior callbacks, NOT necessarily for a grace
 * period. You have been warned.
 */
bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp)
{
	int i;

	if (rcu_segcblist_n_cbs(rsclp) == 0)
		return false;
	rcu_segcblist_inc_len(rsclp);
	smp_mb(); /* Ensure counts are updated before callback is entrained. */
	rhp->next = NULL;
	for (i = RCU_NEXT_TAIL; i > RCU_DONE_TAIL; i--)
		if (rsclp->tails[i] != rsclp->tails[i - 1])
			break;
	rcu_segcblist_inc_seglen(rsclp, i);
	WRITE_ONCE(*rsclp->tails[i], rhp);
	for (; i <= RCU_NEXT_TAIL; i++)
		WRITE_ONCE(rsclp->tails[i], &rhp->next);
	return true;
}
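
/*
 * Example (illustrative sketch, not code from this file): an
 * rcu_barrier()-style primitive posts a completion-counting callback
 * behind whatever callbacks are already queued. Names are
 * hypothetical.
 *
 *	barrier_head.func = barrier_callback_func;
 *	if (!rcu_segcblist_entrain(my_rsclp, &barrier_head))
 *		...	// List empty, so nothing to wait on here.
 *
 * Note again that barrier_callback_func() runs after all prior
 * callbacks, not necessarily after a full grace period.
 */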

/*
 * Extract only those callbacks ready to be invoked from the specified
 * rcu_segcblist structure and place them in the specified rcu_cblist
 * structure.
 */
void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp)
{
	int i;

	if (!rcu_segcblist_ready_cbs(rsclp))
		return; /* Nothing to do. */
	rclp->len = rcu_segcblist_get_seglen(rsclp, RCU_DONE_TAIL);
	*rclp->tail = rsclp->head;
	WRITE_ONCE(rsclp->head, *rsclp->tails[RCU_DONE_TAIL]);
	WRITE_ONCE(*rsclp->tails[RCU_DONE_TAIL], NULL);
	rclp->tail = rsclp->tails[RCU_DONE_TAIL];
	for (i = RCU_CBLIST_NSEGS - 1; i >= RCU_DONE_TAIL; i--)
		if (rsclp->tails[i] == rsclp->tails[RCU_DONE_TAIL])
			WRITE_ONCE(rsclp->tails[i], &rsclp->head);
	rcu_segcblist_set_seglen(rsclp, RCU_DONE_TAIL, 0);
}
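
/*
 * Example (illustrative sketch, not code from this file): the usual
 * invocation pattern pulls the done segment onto a local rcu_cblist,
 * drops the lock, then invokes each callback. The lock and "my_rsclp"
 * are hypothetical. Because this function does not adjust ->len, a
 * real caller subtracts the invoked count afterward, for example via
 * rcu_segcblist_add_len(my_rsclp, -invoked).
 *
 *	struct rcu_cblist ready;
 *	struct rcu_head *rhp;
 *
 *	rcu_cblist_init(&ready);
 *	raw_spin_lock_irqsave(&my_lock, flags);
 *	rcu_segcblist_extract_done_cbs(my_rsclp, &ready);
 *	raw_spin_unlock_irqrestore(&my_lock, flags);
 *	while ((rhp = rcu_cblist_dequeue(&ready)))
 *		rhp->func(rhp);
 */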

/*
 * Extract only those callbacks still pending (not yet ready to be
 * invoked) from the specified rcu_segcblist structure and place them in
 * the specified rcu_cblist structure. Note that this loses information
 * about any callbacks that might have been partway done waiting for
 * their grace period. Too bad! They will have to start over.
 */
void rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp)
{
	int i;

	if (!rcu_segcblist_pend_cbs(rsclp))
		return; /* Nothing to do. */
	rclp->len = 0;
	*rclp->tail = *rsclp->tails[RCU_DONE_TAIL];
	rclp->tail = rsclp->tails[RCU_NEXT_TAIL];
	WRITE_ONCE(*rsclp->tails[RCU_DONE_TAIL], NULL);
	for (i = RCU_DONE_TAIL + 1; i < RCU_CBLIST_NSEGS; i++) {
		rclp->len += rcu_segcblist_get_seglen(rsclp, i);
		WRITE_ONCE(rsclp->tails[i], rsclp->tails[RCU_DONE_TAIL]);
		rcu_segcblist_set_seglen(rsclp, i, 0);
	}
}

/*
 * Insert counts from the specified rcu_cblist structure in the
 * specified rcu_segcblist structure.
 */
void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp,
				struct rcu_cblist *rclp)
{
	rcu_segcblist_add_len(rsclp, rclp->len);
}

/*
 * Move callbacks from the specified rcu_cblist to the beginning of the
 * done-callbacks segment of the specified rcu_segcblist.
 */
void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp)
{
	int i;

	if (!rclp->head)
		return; /* No callbacks to move. */
	rcu_segcblist_add_seglen(rsclp, RCU_DONE_TAIL, rclp->len);
	*rclp->tail = rsclp->head;
	WRITE_ONCE(rsclp->head, rclp->head);
	for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++)
		if (&rsclp->head == rsclp->tails[i])
			WRITE_ONCE(rsclp->tails[i], rclp->tail);
		else
			break;
	rclp->head = NULL;
	rclp->tail = &rclp->head;
}

/*
 * Move callbacks from the specified rcu_cblist to the end of the
 * new-callbacks segment of the specified rcu_segcblist.
 */
void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp)
{
	if (!rclp->head)
		return; /* Nothing to do. */

	rcu_segcblist_add_seglen(rsclp, RCU_NEXT_TAIL, rclp->len);
	WRITE_ONCE(*rsclp->tails[RCU_NEXT_TAIL], rclp->head);
	WRITE_ONCE(rsclp->tails[RCU_NEXT_TAIL], rclp->tail);
}

/*
 * Advance the callbacks in the specified rcu_segcblist structure based
 * on the current value passed in for the grace-period counter.
 */
void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq)
{
	int i, j;

	WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp));
	if (rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL))
		return;

	/*
	 * Find all callbacks whose ->gp_seq numbers indicate that they
	 * are ready to invoke, and put them into the RCU_DONE_TAIL segment.
	 */
	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) {
		if (ULONG_CMP_LT(seq, rsclp->gp_seq[i]))
			break;
		WRITE_ONCE(rsclp->tails[RCU_DONE_TAIL], rsclp->tails[i]);
		rcu_segcblist_move_seglen(rsclp, i, RCU_DONE_TAIL);
	}

	/* If no callbacks moved, nothing more need be done. */
	if (i == RCU_WAIT_TAIL)
		return;

	/* Clean up tail pointers that might have been misordered above. */
	for (j = RCU_WAIT_TAIL; j < i; j++)
		WRITE_ONCE(rsclp->tails[j], rsclp->tails[RCU_DONE_TAIL]);

	/*
	 * Callbacks moved, so there might be an empty RCU_WAIT_TAIL
	 * and a non-empty RCU_NEXT_READY_TAIL. If so, copy the
	 * RCU_NEXT_READY_TAIL segment to fill the RCU_WAIT_TAIL gap
	 * created by the now-ready-to-invoke segments.
	 */
	for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) {
		if (rsclp->tails[j] == rsclp->tails[RCU_NEXT_TAIL])
			break; /* No more callbacks. */
		WRITE_ONCE(rsclp->tails[j], rsclp->tails[i]);
		rcu_segcblist_move_seglen(rsclp, i, j);
		rsclp->gp_seq[j] = rsclp->gp_seq[i];
	}
}
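
/*
 * Example (illustrative sketch, not code from this file): when the
 * grace-period counter advances, a caller moves newly ready callbacks
 * into the done segment before invoking them. The "my_rsclp" pointer
 * and "my_gp_seq" counter are hypothetical.
 *
 *	unsigned long seq = rcu_seq_current(&my_gp_seq);
 *
 *	rcu_segcblist_advance(my_rsclp, seq);
 *	if (rcu_segcblist_ready_cbs(my_rsclp))
 *		...	// Extract and invoke the done callbacks.
 */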

/*
 * "Accelerate" callbacks based on more-accurate grace-period information.
 * The reason for this is that RCU does not synchronize the beginnings and
 * ends of grace periods, and that callbacks are posted locally. This in
 * turn means that the callbacks must be labelled conservatively early
 * on, as getting exact information would degrade both performance and
 * scalability. When more accurate grace-period information becomes
 * available, previously posted callbacks can be "accelerated", marking
 * them to complete at the end of the earlier grace period.
 *
 * This function operates on an rcu_segcblist structure, and also the
 * grace-period sequence number seq at which new callbacks would become
 * ready to invoke. Returns true if there are callbacks that won't be
 * ready to invoke until seq, false otherwise.
 */
bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, unsigned long seq)
{
	int i, j;

	WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp));
	if (rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL))
		return false;

	/*
	 * Find the segment preceding the oldest segment of callbacks
	 * whose ->gp_seq[] completion is at or after that passed in via
	 * "seq", skipping any empty segments. This oldest segment, along
	 * with any later segments, can be merged in with any newly arrived
	 * callbacks in the RCU_NEXT_TAIL segment, and assigned "seq"
	 * as their ->gp_seq[] grace-period completion sequence number.
	 */
	for (i = RCU_NEXT_READY_TAIL; i > RCU_DONE_TAIL; i--)
		if (rsclp->tails[i] != rsclp->tails[i - 1] &&
		    ULONG_CMP_LT(rsclp->gp_seq[i], seq))
			break;

	/*
	 * If all the segments contain callbacks that correspond to
	 * earlier grace-period sequence numbers than "seq", leave.
	 * Assuming that the rcu_segcblist structure has enough
	 * segments in its arrays, this can only happen if some of
	 * the non-done segments contain callbacks that really are
	 * ready to invoke. This situation will get straightened
	 * out by the next call to rcu_segcblist_advance().
	 *
	 * Also advance to the oldest segment of callbacks whose
	 * ->gp_seq[] completion is at or after that passed in via "seq",
	 * skipping any empty segments.
	 *
	 * Note that segment "i" (and any lower-numbered segments
	 * containing older callbacks) will be unaffected, and their
	 * grace-period numbers remain unchanged. For example, if i ==
	 * WAIT_TAIL, then neither WAIT_TAIL nor DONE_TAIL will be touched.
	 * Instead, the CBs in NEXT_TAIL will be merged with those in
	 * NEXT_READY_TAIL and the grace-period number of NEXT_READY_TAIL
	 * would be updated. NEXT_TAIL would then be empty.
	 */
	if (rcu_segcblist_restempty(rsclp, i) || ++i >= RCU_NEXT_TAIL)
		return false;

	/* Accounting: everything below i is about to get merged into i. */
	for (j = i + 1; j <= RCU_NEXT_TAIL; j++)
		rcu_segcblist_move_seglen(rsclp, j, i);

	/*
	 * Merge all later callbacks, including newly arrived callbacks,
	 * into the segment located by the for-loop above. Assign "seq"
	 * as the ->gp_seq[] value in order to correctly handle the case
	 * where there were no pending callbacks in the rcu_segcblist
	 * structure other than in the RCU_NEXT_TAIL segment.
	 */
	for (; i < RCU_NEXT_TAIL; i++) {
		WRITE_ONCE(rsclp->tails[i], rsclp->tails[RCU_NEXT_TAIL]);
		rsclp->gp_seq[i] = seq;
	}
	return true;
}
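
/*
 * Example (illustrative sketch, not code from this file): advance and
 * accelerate typically run back to back whenever grace-period
 * information is refreshed, with accelerate's return value telling the
 * caller whether a new grace period still needs to be requested. The
 * "my_rsclp" pointer and "my_gp_seq" counter are hypothetical.
 *
 *	rcu_segcblist_advance(my_rsclp, rcu_seq_current(&my_gp_seq));
 *	if (rcu_segcblist_accelerate(my_rsclp, rcu_seq_snap(&my_gp_seq)))
 *		...	// Request that a grace period be started.
 */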

/*
 * Merge the source rcu_segcblist structure into the destination
 * rcu_segcblist structure, then initialize the source. Any pending
 * callbacks from the source get to start over. It is best to
 * advance and accelerate both the destination and the source
 * before merging.
 */
void rcu_segcblist_merge(struct rcu_segcblist *dst_rsclp,
			 struct rcu_segcblist *src_rsclp)
{
	struct rcu_cblist donecbs;
	struct rcu_cblist pendcbs;

	lockdep_assert_cpus_held();

	rcu_cblist_init(&donecbs);
	rcu_cblist_init(&pendcbs);

	rcu_segcblist_extract_done_cbs(src_rsclp, &donecbs);
	rcu_segcblist_extract_pend_cbs(src_rsclp, &pendcbs);

	/*
	 * There is no need for an smp_mb() before setting the length to
	 * zero, because the CPU-hotplug lock excludes rcu_barrier().
	 */
	rcu_segcblist_set_len(src_rsclp, 0);

	rcu_segcblist_insert_count(dst_rsclp, &donecbs);
	rcu_segcblist_insert_count(dst_rsclp, &pendcbs);
	rcu_segcblist_insert_done_cbs(dst_rsclp, &donecbs);
	rcu_segcblist_insert_pend_cbs(dst_rsclp, &pendcbs);

	rcu_segcblist_init(src_rsclp);
}
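
/*
 * Example (illustrative sketch, not code from this file): adopting an
 * outgoing CPU's callbacks during CPU-hotplug teardown, with the
 * hotplug lock held as lockdep_assert_cpus_held() above requires. The
 * "my_rsclp" and "dead_cpu_rsclp" names are hypothetical.
 *
 *	cpus_read_lock();	// unless called from a hotplug callback
 *				// that already holds this lock
 *	rcu_segcblist_merge(&my_rsclp, &dead_cpu_rsclp);
 *	cpus_read_unlock();
 */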