/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/rtmutex.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/swait.h>
#include <linux/stop_machine.h>
#include <linux/rcu_node_tree.h>

#include "rcu_segcblist.h"

/*
 * Dynticks per-CPU state.
 */
struct rcu_dynticks {
	long long dynticks_nesting;	/* Track irq/process nesting level. */
					/*  Process level is worth LLONG_MAX/2. */
	int dynticks_nmi_nesting;	/* Track NMI nesting level. */
	atomic_t dynticks;		/* Even value for idle, else odd. */
	bool rcu_need_heavy_qs;		/* GP old, need heavy quiescent state. */
	unsigned long rcu_qs_ctr;	/* Light universal quiescent state ctr. */
	bool rcu_urgent_qs;		/* GP old, need light quiescent state. */
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
	long long dynticks_idle_nesting;
					/* irq/process nesting level from idle. */
	atomic_t dynticks_idle;		/* Even value for idle, else odd. */
					/*  "Idle" excludes userspace execution. */
	unsigned long dynticks_idle_jiffies;
					/* End of last non-NMI non-idle period. */
#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
#ifdef CONFIG_RCU_FAST_NO_HZ
	bool all_lazy;			/* Are all CPU's CBs lazy? */
	unsigned long nonlazy_posted;
					/* # times non-lazy CBs posted to CPU. */
	unsigned long nonlazy_posted_snap;
					/* idle-period nonlazy_posted snapshot. */
	unsigned long last_accelerate;
					/* Last jiffy CBs were accelerated. */
	unsigned long last_advance_all;
					/* Last jiffy CBs were all advanced. */
	int tick_nohz_enabled_snap;	/* Previously seen value from sysfs. */
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
};
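
/*
 * Illustrative sketch (editorial addition): per the even/odd convention
 * documented on ->dynticks above, a snapshot taken via rcu_dynticks_snap()
 * (declared later in this file) can be tested for idleness without
 * disturbing the CPU.  The authoritative predicates live in tree.c; the
 * idea is simply:
 *
 *	int snap = rcu_dynticks_snap(rdtp);
 *
 *	if (!(snap & 0x1))
 *		...CPU was idle, hence in a quiescent state...
 */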

/* RCU's kthread states for tracing. */
#define RCU_KTHREAD_STOPPED	0
#define RCU_KTHREAD_RUNNING	1
#define RCU_KTHREAD_WAITING	2
#define RCU_KTHREAD_OFFCPU	3
#define RCU_KTHREAD_YIELDING	4
#define RCU_KTHREAD_MAX		4

/*
 * Definition for node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
	raw_spinlock_t __private lock;	/* Root rcu_node's lock protects */
					/*  some rcu_state fields as well as */
					/*  the following fields. */
	unsigned long gpnum;	/* Current grace period for this node. */
				/*  This will either be equal to or one */
				/*  behind the root rcu_node's gpnum. */
	unsigned long completed; /* Last GP completed for this node. */
				/*  This will either be equal to or one */
				/*  behind the root rcu_node's completed. */
	unsigned long qsmask;	/* CPUs or groups that need to switch in */
				/*  order for current grace period to proceed.*/
				/*  In leaf rcu_node, each bit corresponds to */
				/*  an rcu_data structure, otherwise, each */
				/*  bit corresponds to a child rcu_node */
				/*  structure. */
	unsigned long qsmaskinit;
				/* Per-GP initial value for qsmask. */
				/*  Initialized from ->qsmaskinitnext at the */
				/*  beginning of each grace period. */
	unsigned long qsmaskinitnext;
				/* Online CPUs for next grace period. */
	unsigned long expmask;	/* CPUs or groups that need to check in */
				/*  to allow the current expedited GP */
				/*  to complete. */
	unsigned long expmaskinit;
				/* Per-GP initial values for expmask. */
				/*  Initialized from ->expmaskinitnext at the */
				/*  beginning of each expedited GP. */
	unsigned long expmaskinitnext;
				/* Online CPUs for next expedited GP. */
				/*  Any CPU that has ever been online will */
				/*  have its bit set. */
	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
				/*  Only one bit will be set in this mask. */
	int	grplo;		/* lowest-numbered CPU or group here. */
	int	grphi;		/* highest-numbered CPU or group here. */
	u8	grpnum;		/* CPU/group number for next level up. */
	u8	level;		/* root is at level 0. */
	bool	wait_blkd_tasks;/* Necessary to wait for blocked tasks to */
				/*  exit RCU read-side critical sections */
				/*  before propagating offline up the */
				/*  rcu_node tree? */
	struct rcu_node *parent;
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/*  section.  Tasks are placed at the head */
				/*  of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/*  current grace period, or NULL if there */
				/*  is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to the first task blocking the */
				/*  current expedited grace period, or NULL */
				/*  if there is no such task.  If there */
				/*  is no current expedited grace period, */
				/*  then there cannot be any such task. */
	struct list_head *boost_tasks;
				/* Pointer to first task that needs to be */
				/*  priority boosted, or NULL if no priority */
				/*  boosting is needed for this rcu_node */
				/*  structure.  If there are no tasks */
				/*  queued on this rcu_node structure that */
				/*  are blocking the current grace period, */
				/*  there can be no such task. */
	struct rt_mutex boost_mtx;
				/* Used only for the priority-boosting */
				/*  side effect, not as a lock. */
	unsigned long boost_time;
				/* When to start boosting (jiffies). */
	struct task_struct *boost_kthread_task;
				/* kthread that takes care of priority */
				/*  boosting for this rcu_node structure. */
	unsigned int boost_kthread_status;
				/* State of boost_kthread_task for tracing. */
	unsigned long n_tasks_boosted;
				/* Total number of tasks boosted. */
	unsigned long n_exp_boosts;
				/* Number of tasks boosted for expedited GP. */
	unsigned long n_normal_boosts;
				/* Number of tasks boosted for normal GP. */
	unsigned long n_balk_blkd_tasks;
				/* Refused to boost: no blocked tasks. */
	unsigned long n_balk_exp_gp_tasks;
				/* Refused to boost: nothing blocking GP. */
	unsigned long n_balk_boost_tasks;
				/* Refused to boost: already boosting. */
	unsigned long n_balk_notblocked;
				/* Refused to boost: RCU RS CS still running. */
	unsigned long n_balk_notyet;
				/* Refused to boost: not yet time. */
	unsigned long n_balk_nos;
				/* Refused to boost: not sure why, though. */
				/*  This can happen due to race conditions. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct swait_queue_head nocb_gp_wq[2];
				/* Place for rcu_nocb_kthread() to wait for GP. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
	int need_future_gp[2];
				/* Counts of upcoming no-CB GP requests. */
	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;

	spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
	unsigned long exp_seq_rq;
	wait_queue_head_t exp_wq[4];
} ____cacheline_internodealigned_in_smp;
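
/*
 * Illustrative sketch (editorial addition): ->grpmask and ->parent are
 * what let a quiescent-state report propagate from a leaf rcu_node
 * toward the root.  A hedged outline of the walk (the real version,
 * with locking and grace-period checks, is rcu_report_qs_rnp() in
 * tree.c):
 *
 *	for (; rnp != NULL; rnp = rnp->parent) {
 *		rnp->qsmask &= ~mask;	// clear this CPU's/group's bit
 *		if (rnp->qsmask)	// siblings still pending, so
 *			break;		//  nothing to report upward yet
 *		mask = rnp->grpmask;	// our bit in the parent's qsmask
 *	}
 */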

/*
 * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
 * are indexed relative to this interval rather than the global CPU ID space.
 * This generates the bit for a CPU in node-local masks.
 */
#define leaf_node_cpu_bit(rnp, cpu) (1UL << ((cpu) - (rnp)->grplo))
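
/*
 * Worked example (editorial addition): for a leaf rcu_node covering
 * CPUs 16-31 (->grplo == 16), leaf_node_cpu_bit(rnp, 19) evaluates to
 * 1UL << 3, i.e. bit 3 of that node's bitmasks, not bit 19.
 */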

/*
 * Union to allow "aggregate OR" operation on the need for a quiescent
 * state by the normal and expedited grace periods.
 */
union rcu_noqs {
	struct {
		u8 norm;
		u8 exp;
	} b; /* Bits. */
	u16 s; /* Set of bits, aggregate OR here. */
};
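
/*
 * Illustrative sketch (editorial addition): the point of the union is
 * that "does this CPU still owe any quiescent state?" collapses to a
 * single 16-bit test, while normal and expedited grace periods can still
 * be flagged independently:
 *
 *	rdp->cpu_no_qs.b.norm = true;	// owes a normal QS
 *	rdp->cpu_no_qs.b.exp = true;	// owes an expedited QS
 *
 *	if (rdp->cpu_no_qs.s)		// aggregate OR of both bytes
 *		...this CPU has not yet passed through all needed QSes...
 */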

/* Index values for nxttail array in struct rcu_data. */
#define RCU_DONE_TAIL		0	/* Also RCU_WAIT head. */
#define RCU_WAIT_TAIL		1	/* Also RCU_NEXT_READY head. */
#define RCU_NEXT_READY_TAIL	2	/* Also RCU_NEXT head. */
#define RCU_NEXT_TAIL		3
#define RCU_NEXT_SIZE		4

/* Per-CPU data for read-copy update. */
struct rcu_data {
	/* 1) quiescent-state and grace-period handling : */
	unsigned long	completed;	/* Track rsp->completed gp number */
					/*  in order to detect GP end. */
	unsigned long	gpnum;		/* Highest gp number that this CPU */
					/*  is aware of having started. */
	unsigned long	rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */
					/*  for rcu_all_qs() invocations. */
	union rcu_noqs	cpu_no_qs;	/* No QSes yet for this CPU. */
	bool		core_needs_qs;	/* Core waits for quiescent state. */
	bool		beenonline;	/* CPU online at least once. */
	bool		gpwrap;		/* Possible gpnum/completed wrap. */
	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
	unsigned long	grpmask;	/* Mask to apply to leaf qsmask. */
	unsigned long	ticks_this_gp;	/* The number of scheduling-clock */
					/*  ticks this CPU has handled */
					/*  during and after the last grace */
					/*  period it is aware of. */

	/* 2) batch handling */
	struct rcu_segcblist cblist;	/* Segmented callback list, with */
					/*  different callbacks waiting for */
					/*  different grace periods. */
	long		qlen_last_fqs_check;
					/* qlen at last check for QS forcing */
	unsigned long	n_cbs_invoked;	/* count of RCU cbs invoked. */
	unsigned long	n_nocbs_invoked; /* count of no-CBs RCU cbs invoked. */
	unsigned long	n_cbs_orphaned; /* RCU cbs orphaned by dying CPU */
	unsigned long	n_cbs_adopted;	/* RCU cbs adopted from dying CPU */
	unsigned long	n_force_qs_snap;
					/* did other CPU force QS recently? */
	long		blimit;		/* Upper limit on a processed batch */

	/* 3) dynticks interface. */
	struct rcu_dynticks *dynticks;	/* Shared per-CPU dynticks state. */
	int dynticks_snap;		/* Per-GP tracking for dynticks. */

	/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
	unsigned long dynticks_fqs;	/* Kicked due to dynticks idle. */
	unsigned long offline_fqs;	/* Kicked due to being offline. */
	unsigned long cond_resched_completed;
					/* Grace period that needs help */
					/*  from cond_resched(). */

	/* 5) __rcu_pending() statistics. */
	unsigned long n_rcu_pending;	/* rcu_pending() calls since boot. */
	unsigned long n_rp_core_needs_qs;
	unsigned long n_rp_report_qs;
	unsigned long n_rp_cb_ready;
	unsigned long n_rp_cpu_needs_gp;
	unsigned long n_rp_gp_completed;
	unsigned long n_rp_gp_started;
	unsigned long n_rp_nocb_defer_wakeup;
	unsigned long n_rp_need_nothing;

	/* 6) _rcu_barrier(), OOM callbacks, and expediting. */
	struct rcu_head barrier_head;
#ifdef CONFIG_RCU_FAST_NO_HZ
	struct rcu_head oom_head;
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
	atomic_long_t exp_workdone0;	/* # done by workqueue. */
	atomic_long_t exp_workdone1;	/* # done by others #1. */
	atomic_long_t exp_workdone2;	/* # done by others #2. */
	atomic_long_t exp_workdone3;	/* # done by others #3. */
	int exp_dynticks_snap;		/* Double-check need for IPI. */

	/* 7) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct rcu_head *nocb_head;	/* CBs waiting for kthread. */
	struct rcu_head **nocb_tail;
	atomic_long_t nocb_q_count;	/* # CBs waiting for nocb */
	atomic_long_t nocb_q_count_lazy; /*  invocation (all stages). */
	struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
	struct rcu_head **nocb_follower_tail;
	struct swait_queue_head nocb_wq; /* For nocb kthreads to sleep on. */
	struct task_struct *nocb_kthread;
	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */

	/* The following fields are used by the leader, hence own cacheline. */
	struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp;
					/* CBs waiting for GP. */
	struct rcu_head **nocb_gp_tail;
	bool nocb_leader_sleep;		/* Is the nocb leader thread asleep? */
	struct rcu_data *nocb_next_follower;
					/* Next follower in wakeup chain. */

	/* The following fields are used by the follower, hence new cacheline. */
	struct rcu_data *nocb_leader ____cacheline_internodealigned_in_smp;
					/* Leader CPU takes GP-end wakeups. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

	/* 8) RCU CPU stall data. */
	unsigned int softirq_snap;	/* Snapshot of softirq activity. */

	int cpu;
	struct rcu_state *rsp;
};
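
/*
 * Illustrative sketch (editorial addition): each flavor's rcu_state
 * (defined below) carries a __percpu pointer ->rda to its rcu_data
 * structures, so per-CPU state is typically reached as:
 *
 *	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);	// given CPU
 *	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);		// this CPU
 *
 * with the matching leaf rcu_node then available as rdp->mynode.
 */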

/* Values for nocb_defer_wakeup field in struct rcu_data. */
#define RCU_NOGP_WAKE_NOT	0
#define RCU_NOGP_WAKE		1
#define RCU_NOGP_WAKE_FORCE	2

#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
					/* For jiffies_till_first_fqs and */
					/*  jiffies_till_next_fqs. */

#define RCU_JIFFIES_FQS_DIV	256	/* Very large systems need more */
					/*  delay between bouts of */
					/*  quiescent-state forcing. */

#define RCU_STALL_RAT_DELAY	2	/* Allow other CPUs time to take */
					/*  at least one scheduling clock */
					/*  irq before ratting on them. */

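/*
 * Worked example (editorial addition): RCU_JIFFIES_TILL_FORCE_QS above
 * scales with HZ so that higher-tick-rate kernels do not wait far longer
 * in wall-clock terms before forcing quiescent states:
 *
 *	HZ <= 250:		1 + 0 + 0 == 1 jiffy
 *	250 < HZ <= 500:	1 + 1 + 0 == 2 jiffies
 *	HZ > 500:		1 + 1 + 1 == 3 jiffies
 */
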
#define rcu_wait(cond)							\
do {									\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
		schedule();						\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)
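
/*
 * Illustrative usage (editorial addition): rcu_wait() is an open-coded
 * wait loop for RCU's kthreads.  A hypothetical caller sleeps until its
 * condition becomes true:
 *
 *	rcu_wait(READ_ONCE(some_flag));	// some_flag is hypothetical
 *
 * The condition is re-evaluated after every wakeup, and the task is left
 * in TASK_RUNNING state on exit.
 */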

/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.
 */
struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
	struct rcu_node *level[RCU_NUM_LVLS + 1];
						/* Hierarchy levels (+1 to */
						/*  shut bogus gcc warning) */
	struct rcu_data __percpu *rda;		/* Pointer to per-CPU rcu_data. */
	call_rcu_func_t call;			/* call_rcu() flavor. */
	int ncpus;				/* # CPUs seen so far. */

	/* The following fields are guarded by the root rcu_node's lock. */

	u8	boost ____cacheline_internodealigned_in_smp;
						/* Subject to priority boost. */
	unsigned long gpnum;			/* Current gp number. */
	unsigned long completed;		/* # of last completed gp. */
	struct task_struct *gp_kthread;		/* Task for grace periods. */
	struct swait_queue_head gp_wq;		/* Where GP task waits. */
	short gp_flags;				/* Commands for GP task. */
	short gp_state;				/* GP kthread sleep state. */

	/* End of fields guarded by root rcu_node's lock. */

	raw_spinlock_t orphan_lock ____cacheline_internodealigned_in_smp;
						/* Protect following fields. */
	struct rcu_cblist orphan_pend;		/* Orphaned callbacks that */
						/*  need a grace period. */
	struct rcu_cblist orphan_done;		/* Orphaned callbacks that */
						/*  are ready to invoke. */
						/*  (Contains counts.) */
	/* End of fields guarded by orphan_lock. */

	struct mutex barrier_mutex;		/* Guards barrier fields. */
	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
	struct completion barrier_completion;	/* Wake at barrier end. */
	unsigned long barrier_sequence;		/* ++ at start and end of */
						/*  _rcu_barrier(). */
	/* End of fields guarded by barrier_mutex. */

	struct mutex exp_mutex;			/* Serialize expedited GP. */
	struct mutex exp_wake_mutex;		/* Serialize wakeup. */
	unsigned long expedited_sequence;	/* Take a ticket. */
	atomic_t expedited_need_qs;		/* # CPUs left to check in. */
	struct swait_queue_head expedited_wq;	/* Wait for check-ins. */
	int ncpus_snap;				/* # CPUs seen last time. */

	unsigned long jiffies_force_qs;		/* Time at which to invoke */
						/*  force_quiescent_state(). */
	unsigned long jiffies_kick_kthreads;	/* Time at which to kick */
						/*  kthreads, if configured. */
	unsigned long n_force_qs;		/* Number of calls to */
						/*  force_quiescent_state(). */
	unsigned long n_force_qs_lh;		/* ~Number of calls leaving */
						/*  due to lock unavailable. */
	unsigned long n_force_qs_ngp;		/* Number of calls leaving */
						/*  due to no GP active. */
	unsigned long gp_start;			/* Time at which GP started, */
						/*  in jiffies. */
	unsigned long gp_activity;		/* Time of last GP kthread */
						/*  activity in jiffies. */
	unsigned long jiffies_stall;		/* Time at which to check */
						/*  for CPU stalls. */
	unsigned long jiffies_resched;		/* Time at which to resched */
						/*  a reluctant CPU. */
	unsigned long n_force_qs_gpstart;	/* Snapshot of n_force_qs at */
						/*  GP start. */
	unsigned long gp_max;			/* Maximum GP duration in */
						/*  jiffies. */
	const char *name;			/* Name of structure. */
	char abbr;				/* Abbreviated name. */
	struct list_head flavors;		/* List of RCU flavors. */
};
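
/*
 * Illustrative sketch (editorial addition): the "heap" layout described
 * above makes ->level[] a set of cursors into the dense ->node[] array.
 * For a hypothetical two-level tree with one root fanning out to 16
 * leaves:
 *
 *	rsp->level[0] == &rsp->node[0];		// the root
 *	rsp->level[1] == &rsp->node[1];		// first of the 16 leaves
 *
 * so all leaves can be visited by scanning linearly from rsp->level[1]
 * through &rsp->node[16], with each rcu_data's ->mynode pointing into
 * that range.
 */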

/* Values for rcu_state structure's gp_flags field. */
#define RCU_GP_FLAG_INIT 0x1	/* Need grace-period initialization. */
#define RCU_GP_FLAG_FQS  0x2	/* Need grace-period quiescent-state forcing. */

/* Values for rcu_state structure's gp_state field. */
#define RCU_GP_IDLE	 0	/* Initial state and no GP in progress. */
#define RCU_GP_WAIT_GPS  1	/* Wait for grace-period start. */
#define RCU_GP_DONE_GPS  2	/* Wait done for grace-period start. */
#define RCU_GP_WAIT_FQS  3	/* Wait for force-quiescent-state time. */
#define RCU_GP_DOING_FQS 4	/* Wait done for force-quiescent-state time. */
#define RCU_GP_CLEANUP	 5	/* Grace-period cleanup started. */
#define RCU_GP_CLEANED	 6	/* Grace-period cleanup complete. */

#ifndef RCU_TREE_NONCORE
static const char * const gp_state_names[] = {
	"RCU_GP_IDLE",
	"RCU_GP_WAIT_GPS",
	"RCU_GP_DONE_GPS",
	"RCU_GP_WAIT_FQS",
	"RCU_GP_DOING_FQS",
	"RCU_GP_CLEANUP",
	"RCU_GP_CLEANED",
};
#endif /* #ifndef RCU_TREE_NONCORE */
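
/*
 * Illustrative sketch (editorial addition): the table above allows a
 * gp_state value to be rendered for diagnostics; a hedged version of the
 * lookup that tree.c performs:
 *
 *	static const char *gp_state_getname(short gs)
 *	{
 *		if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
 *			return "???";
 *		return gp_state_names[gs];
 *	}
 */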

extern struct list_head rcu_struct_flavors;

/* Sequence through rcu_state structures for each RCU flavor. */
#define for_each_rcu_flavor(rsp) \
	list_for_each_entry((rsp), &rcu_struct_flavors, flavors)
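
/*
 * Illustrative usage (editorial addition): code that must visit every
 * flavor (rcu_sched, rcu_bh, and, when configured, rcu_preempt):
 *
 *	struct rcu_state *rsp;
 *
 *	for_each_rcu_flavor(rsp)
 *		do_something_with(rsp);	// hypothetical per-flavor work
 */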

/*
 * RCU implementation internal declarations:
 */
extern struct rcu_state rcu_sched_state;

extern struct rcu_state rcu_bh_state;

#ifdef CONFIG_PREEMPT_RCU
extern struct rcu_state rcu_preempt_state;
#endif /* #ifdef CONFIG_PREEMPT_RCU */

int rcu_dynticks_snap(struct rcu_dynticks *rdtp);
bool rcu_eqs_special_set(int cpu);

#ifdef CONFIG_RCU_BOOST
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DECLARE_PER_CPU(char, rcu_cpu_has_work);
#endif /* #ifdef CONFIG_RCU_BOOST */

#ifndef RCU_TREE_NONCORE

/* Forward declarations for rcutree_plugin.h */
static void rcu_bootup_announce(void);
static void rcu_preempt_note_context_switch(void);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_print_detail_task_stall(struct rcu_state *rsp);
static int rcu_print_task_stall(struct rcu_node *rnp);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
static void rcu_preempt_check_callbacks(void);
void call_rcu(struct rcu_head *head, rcu_callback_t func);
static void __init __rcu_init_preempt(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static void invoke_rcu_callbacks_kthread(void);
static bool rcu_is_callbacks_kthread(void);
#ifdef CONFIG_RCU_BOOST
static void rcu_preempt_do_callbacks(void);
static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
				       struct rcu_node *rnp);
#endif /* #ifdef CONFIG_RCU_BOOST */
static void __init rcu_spawn_boost_kthreads(void);
static void rcu_prepare_kthreads(int cpu);
static void rcu_cleanup_after_idle(void);
static void rcu_prepare_for_idle(void);
static void rcu_idle_count_callbacks_posted(void);
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
static void print_cpu_stall_info_begin(void);
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
static void print_cpu_stall_info_end(void);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static void increment_cpu_stall_ticks(void);
static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
static void rcu_init_one_nocb(struct rcu_node *rnp);
static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
			    bool lazy, unsigned long flags);
static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
				      struct rcu_data *rdp,
				      unsigned long flags);
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_all_nocb_kthreads(int cpu);
static void __init rcu_spawn_nocb_kthreads(void);
#ifdef CONFIG_RCU_NOCB_CPU
static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
static bool init_nocb_callback_list(struct rcu_data *rdp);
static void rcu_sysidle_enter(int irq);
static void rcu_sysidle_exit(int irq);
static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
				  unsigned long *maxj);
static bool is_sysidle_rcu_state(struct rcu_state *rsp);
static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
				  unsigned long maxj);
static void rcu_bind_gp_kthread(void);
static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp);
static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
static void rcu_dynticks_task_enter(void);
static void rcu_dynticks_task_exit(void);

#ifdef CONFIG_SRCU
void srcu_online_cpu(unsigned int cpu);
void srcu_offline_cpu(unsigned int cpu);
#else /* #ifdef CONFIG_SRCU */
static inline void srcu_online_cpu(unsigned int cpu) { }
static inline void srcu_offline_cpu(unsigned int cpu) { }
#endif /* #else #ifdef CONFIG_SRCU */

#endif /* #ifndef RCU_TREE_NONCORE */

#ifdef CONFIG_RCU_TRACE
/* Read out queue lengths for tracing. */
static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
{
#ifdef CONFIG_RCU_NOCB_CPU
	*ql = atomic_long_read(&rdp->nocb_q_count);
	*qll = atomic_long_read(&rdp->nocb_q_count_lazy);
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
	*ql = 0;
	*qll = 0;
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
}
#endif /* #ifdef CONFIG_RCU_TRACE */

/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_nodes form a tree, the tree traversal locking will observe
 * different lock values; this in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier,
 * and, most importantly, transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock acquire functions with smp_mb__after_unlock_lock().
 *
 * Because ->lock of struct rcu_node is a __private field, use these
 * wrappers rather than directly calling raw_spin_{lock,unlock}* on ->lock.
 */
static inline void raw_spin_lock_rcu_node(struct rcu_node *rnp)
{
	raw_spin_lock(&ACCESS_PRIVATE(rnp, lock));
	smp_mb__after_unlock_lock();
}

static inline void raw_spin_unlock_rcu_node(struct rcu_node *rnp)
{
	raw_spin_unlock(&ACCESS_PRIVATE(rnp, lock));
}

static inline void raw_spin_lock_irq_rcu_node(struct rcu_node *rnp)
{
	raw_spin_lock_irq(&ACCESS_PRIVATE(rnp, lock));
	smp_mb__after_unlock_lock();
}

static inline void raw_spin_unlock_irq_rcu_node(struct rcu_node *rnp)
{
	raw_spin_unlock_irq(&ACCESS_PRIVATE(rnp, lock));
}

#define raw_spin_lock_irqsave_rcu_node(rnp, flags)			\
do {									\
	typecheck(unsigned long, flags);				\
	raw_spin_lock_irqsave(&ACCESS_PRIVATE(rnp, lock), flags);	\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(rnp, flags)			\
do {									\
	typecheck(unsigned long, flags);				\
	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(rnp, lock), flags);	\
} while (0)

static inline bool raw_spin_trylock_rcu_node(struct rcu_node *rnp)
{
	bool locked = raw_spin_trylock(&ACCESS_PRIVATE(rnp, lock));

	if (locked)
		smp_mb__after_unlock_lock();
	return locked;
}
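
/*
 * Illustrative usage (editorial addition): a typical guarded update of
 * rcu_node state uses the irqsave variants, so the pattern is safe in
 * any context; the acquire side includes smp_mb__after_unlock_lock() as
 * explained above:
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *	// ...read or update rnp fields under full ordering...
 *	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 */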