the specified list of CPUs whose tick will be stopped
whenever possible. The boot CPU will be forced outside
the range to maintain the timekeeping.
+ The CPUs in this range must also be included in the
+ rcu_nocbs= set.
noiotrap [SH] Disables trapped I/O port accesses.
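As a hedged illustration of the new requirement (CPU numbers below are invented, not taken from the patch), a boot command line that keeps CPUs 1-7 tickless would also have to offload their RCU callbacks:

	nohz_full=1-7 rcu_nocbs=1-7

Both parameters take the usual cpulist format; as the tick_nohz_init() hunk further down shows, any nohz_full CPU that is missing from the rcu_nocbs= set is warned about and cleared from the full-dynticks range at boot.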
#define kfree_rcu(ptr, rcu_head) \
__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
+#ifdef CONFIG_RCU_NOCB_CPU
+extern bool rcu_is_nocb_cpu(int cpu);
+#else
+static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
+#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
+
#endif /* __LINUX_RCUPDATE_H */
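A minimal sketch, not part of the patch, of what the stub above buys: because the static inline returns false when CONFIG_RCU_NOCB_CPU=n, code outside RCU (such as the tick code changed below) can test a CPU unconditionally instead of wrapping the call in #ifdefs. The helper name here is hypothetical:

	#include <linux/rcupdate.h>
	#include <linux/printk.h>

	/* hypothetical caller, for illustration only */
	static void __init report_nocb_cpu(int cpu)
	{
		/* resolves to the header stub and returns false when CONFIG_RCU_NOCB_CPU=n */
		if (rcu_is_nocb_cpu(cpu))
			pr_info("CPU %d has its RCU callbacks offloaded\n", cpu);
	}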
# endif /* !CONFIG_NO_HZ_COMMON */
#ifdef CONFIG_NO_HZ_FULL
+extern void tick_nohz_init(void);
extern int tick_nohz_full_cpu(int cpu);
#else
+static inline void tick_nohz_init(void) { }
static inline int tick_nohz_full_cpu(int cpu) { return 0; }
#endif
idr_init_cache();
perf_event_init();
rcu_init();
+ tick_nohz_init();
radix_tree_init();
/* init some links before init_ISA_irqs() */
early_irq_init();
struct rcu_node *rnp, struct rcu_data *rdp)
{
/* No-CBs CPUs do not have orphanable callbacks. */
- if (is_nocb_cpu(rdp->cpu))
+ if (rcu_is_nocb_cpu(rdp->cpu))
return;
/*
* corresponding CPU's preceding callbacks have been invoked.
*/
for_each_possible_cpu(cpu) {
- if (!cpu_online(cpu) && !is_nocb_cpu(cpu))
+ if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
continue;
rdp = per_cpu_ptr(rsp->rda, cpu);
- if (is_nocb_cpu(cpu)) {
+ if (rcu_is_nocb_cpu(cpu)) {
_rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
rsp->n_barrier_done);
atomic_inc(&rsp->barrier_cpu_count);
static void print_cpu_stall_info_end(void);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static void increment_cpu_stall_ticks(void);
-static bool is_nocb_cpu(int cpu);
static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
bool lazy);
static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
/* Is the specified CPU a no-CBs CPU? */
-static bool is_nocb_cpu(int cpu)
+bool rcu_is_nocb_cpu(int cpu)
{
if (have_rcu_nocb_mask)
return cpumask_test_cpu(cpu, rcu_nocb_mask);
bool lazy)
{
- if (!is_nocb_cpu(rdp->cpu))
+ if (!rcu_is_nocb_cpu(rdp->cpu))
return 0;
__call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy);
return 1;
long qll = rsp->qlen_lazy;
/* If this is not a no-CBs CPU, tell the caller to do it the old way. */
- if (!is_nocb_cpu(smp_processor_id()))
+ if (!rcu_is_nocb_cpu(smp_processor_id()))
return 0;
rsp->qlen = 0;
rsp->qlen_lazy = 0;
* If there are no no-CB CPUs or if this CPU is not a no-CB CPU,
* then offlining this CPU is harmless. Let it happen.
*/
- if (!have_rcu_nocb_mask || is_nocb_cpu(cpu))
+ if (!have_rcu_nocb_mask || rcu_is_nocb_cpu(cpu))
return 1;
/* If no memory, play it safe and keep the CPU around. */
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
-static bool is_nocb_cpu(int cpu)
-{
- return false;
-}
-
static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
bool lazy)
{
*/
static char __initdata nohz_full_buf[NR_CPUS + 1];
-static int __init init_tick_nohz_full(void)
+void __init tick_nohz_init(void)
{
- if (have_nohz_full_mask)
- cpu_notifier(tick_nohz_cpu_down_callback, 0);
+ int cpu;
+
+ if (!have_nohz_full_mask)
+ return;
+
+ cpu_notifier(tick_nohz_cpu_down_callback, 0);
+
+ /* Make sure full dynticks CPUs are also RCU nocbs */
+ for_each_cpu(cpu, nohz_full_mask) {
+ if (!rcu_is_nocb_cpu(cpu)) {
+ pr_warning("NO_HZ: CPU %d is not RCU nocb: "
+ "cleared from nohz_full range", cpu);
+ cpumask_clear_cpu(cpu, nohz_full_mask);
+ }
+ }
cpulist_scnprintf(nohz_full_buf, sizeof(nohz_full_buf), nohz_full_mask);
pr_info("NO_HZ: Full dynticks CPUs: %s.\n", nohz_full_buf);
-
- return 0;
}
-core_initcall(init_tick_nohz_full);
#else
#define have_nohz_full_mask (0)
#endif
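To make the validation loop in tick_nohz_init() concrete, a worked example with invented CPU numbers: booting with

	nohz_full=1-3 rcu_nocbs=1-2

leaves CPU 3 without offloaded callbacks, so the loop above clears it from nohz_full_mask and emits the "is not RCU nocb" warning for CPU 3; the closing pr_info() then reports the reduced set, roughly "NO_HZ: Full dynticks CPUs: 1-2.".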