/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update definitions shared among RCU implementations.
 *
 * Copyright IBM Corporation, 2011
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#ifndef __LINUX_RCU_H
#define __LINUX_RCU_H

#include <trace/events/rcu.h>
#ifdef CONFIG_RCU_TRACE
#define RCU_TRACE(stmt) stmt
#else /* #ifdef CONFIG_RCU_TRACE */
#define RCU_TRACE(stmt)
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
#define DYNTICK_IRQ_NONIDLE	((LONG_MAX / 2) + 1)

/*
 * Grace-period counter management.
 */

#define RCU_SEQ_CTR_SHIFT	2
#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)

/*
 * Return the counter portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline unsigned long rcu_seq_ctr(unsigned long s)
{
	return s >> RCU_SEQ_CTR_SHIFT;
}

/*
 * Return the state portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline int rcu_seq_state(unsigned long s)
{
	return s & RCU_SEQ_STATE_MASK;
}
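
/*
 * For example, with RCU_SEQ_CTR_SHIFT == 2 the low two bits hold the
 * state and the remaining bits hold the counter, so for s == 0x9:
 *
 *	rcu_seq_ctr(0x9)   == 0x9 >> 2  == 2
 *	rcu_seq_state(0x9) == 0x9 & 0x3 == 1
 *
 * A zero state means that no update-side operation is in progress;
 * rcu_seq_start() below sets the state to 1.
 */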

/*
 * Set the state portion of the pointed-to sequence number.
 * The caller is responsible for preventing conflicting updates.
 */
static inline void rcu_seq_set_state(unsigned long *sp, int newstate)
{
	WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK);
	WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate);
}

/* Adjust sequence number for start of update-side operation. */
static inline void rcu_seq_start(unsigned long *sp)
{
	WRITE_ONCE(*sp, *sp + 1);
	smp_mb(); /* Ensure update-side operation after counter increment. */
	WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
}

/* Compute the end-of-grace-period value for the specified sequence number. */
static inline unsigned long rcu_seq_endval(unsigned long *sp)
{
	return (*sp | RCU_SEQ_STATE_MASK) + 1;
}

/* Adjust sequence number for end of update-side operation. */
static inline void rcu_seq_end(unsigned long *sp)
{
	smp_mb(); /* Ensure update-side operation before counter increment. */
	WARN_ON_ONCE(!rcu_seq_state(*sp));
	WRITE_ONCE(*sp, rcu_seq_endval(sp));
}
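
/*
 * Taken together, a full update-side cycle moves the counter from one
 * zero-state value to the next. For example, starting from *sp == 0x8,
 * rcu_seq_start() leaves *sp == 0x9 (state 1), and the subsequent
 * rcu_seq_end() computes (0x9 | 0x3) + 1 == 0xc, clearing the state
 * bits and advancing the counter by one.
 */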

/*
 * rcu_seq_snap - Take a snapshot of the update side's sequence number.
 *
 * This function returns the earliest value of the grace-period sequence number
 * that will indicate that a full grace period has elapsed since the current
 * time.  Once the grace-period sequence number has reached this value, it will
 * be safe to invoke all callbacks that have been registered prior to the
 * current time.  This value is the current grace-period number plus two to the
 * power of the number of low-order bits reserved for state, then rounded up to
 * the next value in which the state bits are all zero.
 */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
	unsigned long s;

	s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
	smp_mb(); /* Above access must not bleed into critical section. */
	return s;
}
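
/*
 * For example, with RCU_SEQ_CTR_SHIFT == 2 (so RCU_SEQ_STATE_MASK == 0x3):
 * if *sp == 0x8 (no grace period in progress), the snapshot is
 * (0x8 + 0x7) & ~0x3 == 0xc, the end of the next grace period.  If
 * *sp == 0x9 (a grace period already in progress), the snapshot is
 * (0x9 + 0x7) & ~0x3 == 0x10, skipping the grace period already in
 * progress, since that one is not guaranteed to wait on the caller.
 */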

/* Return the current value of the update side's sequence number, no ordering. */
static inline unsigned long rcu_seq_current(unsigned long *sp)
{
	return READ_ONCE(*sp);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not the
 * corresponding update-side operation has started.
 */
static inline bool rcu_seq_started(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_LT((s - 1) & ~RCU_SEQ_STATE_MASK, READ_ONCE(*sp));
}
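
/*
 * The "(s - 1) & ~RCU_SEQ_STATE_MASK" steps back to the grace period
 * preceding the snapshot.  Continuing the example above, a snapshot of
 * 0xc yields (0xc - 1) & ~0x3 == 0x8, so the operation is deemed started
 * once *sp has advanced past 0x8, that is, once rcu_seq_start() has
 * produced 0x9 or later.
 */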

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred.
 */
static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_GE(READ_ONCE(*sp), s);
}

/*
 * Has a grace period completed since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
}

/*
 * Has a grace period started since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
			    new);
}

/*
 * Roughly how many full grace periods have elapsed between the collection
 * of the two specified grace periods?
 */
static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)
{
	unsigned long rnd_diff;

	if (old == new)
		return 0;
	/*
	 * Compute the number of grace periods (still shifted up), plus
	 * one if either of new and old is not an exact grace period.
	 */
	rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
		   ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) +
		   ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
	if (ULONG_CMP_GE(RCU_SEQ_STATE_MASK, rnd_diff))
		return 1; /* Definitely no grace period has elapsed. */
	return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2;
}
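
/*
 * For example, with RCU_SEQ_CTR_SHIFT == 2, rcu_seq_diff(0x14, 0x9)
 * computes rnd_diff == 0x14 - 0xc + 1 == 0x9, which exceeds
 * RCU_SEQ_STATE_MASK, so the result is ((0x9 - 0x3 - 1) >> 2) + 2 == 3,
 * conservatively counting the partial grace periods at either end.
 */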

/*
 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
 * by call_rcu() and rcu callback execution, and are therefore not part
 * of the RCU API.  These are in rcupdate.h because they are used by all
 * RCU implementations.
 */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
# define STATE_RCU_HEAD_READY	0
# define STATE_RCU_HEAD_QUEUED	1

extern struct debug_obj_descr rcuhead_debug_descr;

static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	int r1;

	r1 = debug_object_activate(head, &rcuhead_debug_descr);
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_READY,
				  STATE_RCU_HEAD_QUEUED);

	return r1;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_QUEUED,
				  STATE_RCU_HEAD_READY);
	debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else	/* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	return 0;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
#endif	/* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

void kfree(const void *);

/*
 * Reclaim the specified callback, either by invoking it (non-lazy case)
 * or freeing it directly (lazy case).  Return true if lazy, false otherwise.
 */
static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
{
	rcu_callback_t f;
	unsigned long offset = (unsigned long)head->func;

	rcu_lock_acquire(&rcu_callback_map);
	if (__is_kfree_rcu_offset(offset)) {
		RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset);)
		kfree((void *)head - offset);
		rcu_lock_release(&rcu_callback_map);
		return true;
	} else {
		RCU_TRACE(trace_rcu_invoke_callback(rn, head);)
		f = head->func;
		WRITE_ONCE(head->func, (rcu_callback_t)0L);
		f(head);
		rcu_lock_release(&rcu_callback_map);
		return false;
	}
}
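
/*
 * To illustrate the lazy case: kfree_rcu() stores the offset of the
 * rcu_head within its enclosing structure in head->func rather than a
 * function pointer, so "(void *)head - offset" recovers the address of
 * the enclosing structure for kfree().  For instance, if the rcu_head
 * sits 16 bytes into the structure, offset == 16 and head - 16 is the
 * pointer originally returned by the allocator.
 */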

#ifdef CONFIG_RCU_STALL_COMMON

extern int rcu_cpu_stall_suppress;
extern int rcu_cpu_stall_timeout;
int rcu_jiffies_till_stall_check(void);

#define rcu_ftrace_dump_stall_suppress() \
do { \
	if (!rcu_cpu_stall_suppress) \
		rcu_cpu_stall_suppress = 3; \
} while (0)

#define rcu_ftrace_dump_stall_unsuppress() \
do { \
	if (rcu_cpu_stall_suppress == 3) \
		rcu_cpu_stall_suppress = 0; \
} while (0)

#else /* #ifdef CONFIG_RCU_STALL_COMMON */
#define rcu_ftrace_dump_stall_suppress()
#define rcu_ftrace_dump_stall_unsuppress()
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#define TPS(x)  tracepoint_string(x)

/*
 * Dump the ftrace buffer, but only one time per callsite per boot.
 */
#define rcu_ftrace_dump(oops_dump_mode) \
do { \
	static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
	\
	if (!atomic_read(&___rfd_beenhere) && \
	    !atomic_xchg(&___rfd_beenhere, 1)) { \
		tracing_off(); \
		rcu_ftrace_dump_stall_suppress(); \
		ftrace_dump(oops_dump_mode); \
		rcu_ftrace_dump_stall_unsuppress(); \
	} \
} while (0)

void rcu_early_boot_tests(void);
void rcu_test_sync_prims(void);

/*
 * This function really isn't for public consumption, but RCU is special in
 * that context switches can allow the state machine to make progress.
 */
extern void resched_cpu(int cpu);

#if defined(SRCU) || !defined(TINY_RCU)

#include <linux/rcu_node_tree.h>

extern int rcu_num_lvls;
extern int num_rcu_lvl[];
extern int rcu_num_nodes;
static bool rcu_fanout_exact;
static int rcu_fanout_leaf;

/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
 */
static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
{
	int i;

	if (rcu_fanout_exact) {
		levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
		for (i = rcu_num_lvls - 2; i >= 0; i--)
			levelspread[i] = RCU_FANOUT;
	} else {
		int ccur;
		int cprv;

		cprv = nr_cpu_ids;
		for (i = rcu_num_lvls - 1; i >= 0; i--) {
			ccur = levelcnt[i];
			levelspread[i] = (cprv + ccur - 1) / ccur;
			cprv = ccur;
		}
	}
}
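
/*
 * Balanced-tree example (illustrative values, not from the original
 * source): with nr_cpu_ids == 13 and a two-level tree having
 * levelcnt[] == {1, 4}, the leaf pass computes levelspread[1] ==
 * (13 + 4 - 1) / 4 == 4 CPUs per leaf, and the root pass computes
 * levelspread[0] == (4 + 1 - 1) / 1 == 4 leaves under the root; the
 * division rounds up so that every CPU is covered.
 */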

/* Returns a pointer to the first leaf rcu_node structure. */
#define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])

/* Is this rcu_node a leaf? */
#define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)

/* Is this rcu_node the last leaf? */
#define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1])

/*
 * Do a full breadth-first scan of the {s,}rcu_node structures for the
 * specified state structure (for SRCU) or the only rcu_state structure
 * (for RCU).
 */
#define srcu_for_each_node_breadth_first(sp, rnp) \
	for ((rnp) = &(sp)->node[0]; \
	     (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
#define rcu_for_each_node_breadth_first(rnp) \
	srcu_for_each_node_breadth_first(&rcu_state, rnp)

/*
 * Scan the leaves of the rcu_node hierarchy for the rcu_state structure.
 * Note that if there is a singleton rcu_node tree with but one rcu_node
 * structure, this loop -will- visit the rcu_node structure.  It is still
 * a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rnp) \
	for ((rnp) = rcu_first_leaf_node(); \
	     (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++)

/*
 * Iterate over all possible CPUs in a leaf RCU node.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
	for ((cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = cpumask_next((cpu), cpu_possible_mask))
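
/*
 * A typical use (illustrative sketch, not from the original source;
 * do_something_with() is hypothetical) scans each possible CPU in
 * every leaf:
 *
 *	rcu_for_each_leaf_node(rnp)
 *		for_each_leaf_node_possible_cpu(rnp, cpu)
 *			do_something_with(cpu);
 *
 * where ->grplo and ->grphi bound the CPU numbers assigned to one leaf.
 */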

/*
 * Iterate over all CPUs in a leaf RCU node's specified mask.
 */
#define rcu_find_next_bit(rnp, cpu, mask) \
	((rnp)->grplo + find_next_bit(&(mask), BITS_PER_LONG, (cpu)))
#define for_each_leaf_node_cpu_mask(rnp, cpu, mask) \
	for ((cpu) = rcu_find_next_bit((rnp), 0, (mask)); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = rcu_find_next_bit((rnp), (cpu) + 1 - (rnp->grplo), (mask)))

/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_nodes form a tree, the tree traversal locking will observe
 * different lock values; this in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier,
 * and, most importantly, transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock acquire functions with smp_mb__after_unlock_lock().
 *
 * Because ->lock of struct rcu_node is a __private field, one should use
 * these wrappers rather than directly calling raw_spin_{lock,unlock}* on ->lock.
 */
#define raw_spin_lock_rcu_node(p) \
do { \
	raw_spin_lock(&ACCESS_PRIVATE(p, lock)); \
	smp_mb__after_unlock_lock(); \
} while (0)
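
/*
 * Illustrative usage (a sketch, not from the original source; rnp and
 * new_seq are hypothetical):
 *
 *	raw_spin_lock_rcu_node(rnp);
 *	WRITE_ONCE(rnp->gp_seq, new_seq);
 *	raw_spin_unlock_rcu_node(rnp);
 *
 * The smp_mb__after_unlock_lock() in the acquire path pairs a prior
 * release of some other rcu_node's ->lock with this acquisition to form
 * a full memory barrier, restoring the cross-level ordering described
 * in the comment above.
 */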

#define raw_spin_unlock_rcu_node(p) raw_spin_unlock(&ACCESS_PRIVATE(p, lock))

#define raw_spin_lock_irq_rcu_node(p) \
do { \
	raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_irq_rcu_node(p) \
	raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define raw_spin_lock_irqsave_rcu_node(p, flags) \
do { \
	raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(p, flags) \
	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

#define raw_spin_trylock_rcu_node(p) \
({ \
	bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock)); \
	\
	if (___locked) \
		smp_mb__after_unlock_lock(); \
	___locked; \
})

#define raw_lockdep_assert_held_rcu_node(p) \
	lockdep_assert_held(&ACCESS_PRIVATE(p, lock))

#endif /* #if defined(SRCU) || !defined(TINY_RCU) */

#ifdef CONFIG_SRCU
void srcu_init(void);
#else /* #ifdef CONFIG_SRCU */
static inline void srcu_init(void) { }
#endif /* #else #ifdef CONFIG_SRCU */

#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void) { return true; }
static inline bool rcu_gp_is_expedited(void) { return false; }
static inline void rcu_expedite_gp(void) { }
static inline void rcu_unexpedite_gp(void) { }
static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void);     /* Internal RCU use. */
bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
void rcupdate_announce_bootup_oddness(void);
void rcu_request_urgent_qs_task(struct task_struct *t);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#define RCU_SCHEDULER_INACTIVE	0
#define RCU_SCHEDULER_INIT	1
#define RCU_SCHEDULER_RUNNING	2

enum rcutorture_type {
	RCU_FLAVOR,
	RCU_TASKS_FLAVOR,
	SRCU_FLAVOR,
	INVALID_RCU_FLAVOR
};

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gp_seq);
void rcutorture_record_progress(unsigned long vernum);
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
					  int *flags, unsigned long *gp_seq)
{
	*flags = 0;
	*gp_seq = 0;
}
static inline void rcutorture_record_progress(unsigned long vernum) { }
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif
#endif

#ifdef CONFIG_TINY_SRCU

static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
					   struct srcu_struct *sp, int *flags,
					   unsigned long *gp_seq)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*gp_seq = sp->srcu_idx;
}

#elif defined(CONFIG_TREE_SRCU)

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *sp, int *flags,
			     unsigned long *gp_seq);

#endif

#ifdef CONFIG_TINY_RCU
static inline unsigned long rcu_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline unsigned long
srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
static inline void rcu_fwd_progress_check(unsigned long j) { }
#else /* #ifdef CONFIG_TINY_RCU */
unsigned long rcu_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
void show_rcu_gp_kthreads(void);
int rcu_get_gp_kthreads_prio(void);
void rcu_fwd_progress_check(unsigned long j);
void rcu_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq;
extern struct workqueue_struct *rcu_par_gp_wq;
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_RCU_NOCB_CPU
bool rcu_is_nocb_cpu(int cpu);
void rcu_bind_current_to_nocb(void);
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
static inline void rcu_bind_current_to_nocb(void) { }
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */

#endif /* __LINUX_RCU_H */