}
/**
--- - * list_splice_init_rcu - splice an RCU-protected list into an existing list.
+++ + * __list_splice_init_rcu - join an RCU-protected list into an existing list.
* @list: the RCU-protected list to splice
--- - * @head: the place in the list to splice the first list into
+++ + * @prev: points to the last element of the existing list
+++ + * @next: points to the first element of the existing list
* @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
*
--- - * @head can be RCU-read traversed concurrently with this function.
+++ + * The list pointed to by @prev and @next can be RCU-read traversed
+++ + * concurrently with this function.
*
* Note that this function blocks.
*
--- - * Important note: the caller must take whatever action is necessary to
--- - * prevent any other updates to @head. In principle, it is possible
--- - * to modify the list as soon as sync() begins execution.
--- - * If this sort of thing becomes necessary, an alternative version
--- - * based on call_rcu() could be created. But only if -really-
--- - * needed -- there is no shortage of RCU API members.
+++ + * Important note: the caller must take whatever action is necessary to prevent
+++ + * any other updates to the existing list. In principle, it is possible to
+++ + * modify the list as soon as sync() begins execution. If this sort of thing
+++ + * becomes necessary, an alternative version based on call_rcu() could be
+++ + * created. But only if -really- needed -- there is no shortage of RCU API
+++ + * members.
*/
--- -static inline void list_splice_init_rcu(struct list_head *list,
--- - struct list_head *head,
--- - void (*sync)(void))
+++ +static inline void __list_splice_init_rcu(struct list_head *list,
+++ + struct list_head *prev,
+++ + struct list_head *next,
+++ + void (*sync)(void))
{
struct list_head *first = list->next;
struct list_head *last = list->prev;
--- - struct list_head *at = head->next;
--- -
--- - if (list_empty(list))
--- - return;
/*
 * "first" and "last" tracking list, so initialize it. RCU readers
 * have access to this list, so we must use INIT_LIST_HEAD_RCU()
 * instead of INIT_LIST_HEAD().
 */
--- - last->next = at;
--- - rcu_assign_pointer(list_next_rcu(head), first);
--- - first->prev = head;
--- - at->prev = last;
+++ + last->next = next;
+++ + rcu_assign_pointer(list_next_rcu(prev), first);
+++ + first->prev = prev;
+++ + next->prev = last;
+++ +}
+++ +
+++ +/**
+++ + * list_splice_init_rcu - splice an RCU-protected list into an existing list,
+++ + * designed for stacks.
+++ + * @list: the RCU-protected list to splice
+++ + * @head: the place in the existing list to splice the first list into
+++ + * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
+++ + */
+++ +static inline void list_splice_init_rcu(struct list_head *list,
+++ + struct list_head *head,
+++ + void (*sync)(void))
+++ +{
+++ + if (!list_empty(list))
+++ + __list_splice_init_rcu(list, head, head->next, sync);
+++ +}
+++ +
+++ +/**
+++ + * list_splice_tail_init_rcu - splice an RCU-protected list into an existing
+++ + * list, designed for queues.
+++ + * @list: the RCU-protected list to splice
+++ + * @head: the place in the existing list to splice the first list into
+++ + * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
+++ + */
+++ +static inline void list_splice_tail_init_rcu(struct list_head *list,
+++ + struct list_head *head,
+++ + void (*sync)(void))
+++ +{
+++ + if (!list_empty(list))
+++ + __list_splice_init_rcu(list, head->prev, head, sync);
}
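
For review purposes, a minimal usage sketch of the new tail-splice variant follows; struct foo, foo_queue, foo_mutex, and foo_flush are hypothetical names, not part of this patch. A sleeping lock is used because the splice blocks in sync(), and holding it across the call also satisfies the "no other updates" requirement noted above.

#include <linux/mutex.h>
#include <linux/rculist.h>

struct foo {
	struct list_head entry;
	int data;
};

static LIST_HEAD(foo_queue);	/* RCU readers traverse this queue. */
static DEFINE_MUTEX(foo_mutex);	/* Excludes other updaters during the splice. */

/* Move privately accumulated entries onto the tail of the shared queue. */
static void foo_flush(struct list_head *pending)
{
	mutex_lock(&foo_mutex);
	list_splice_tail_init_rcu(pending, &foo_queue, synchronize_rcu);
	mutex_unlock(&foo_mutex);
}

Readers would walk foo_queue under rcu_read_lock() with list_for_each_entry_rcu() as usual.
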
/**
&pos->member != (head); \
pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
++ ++/**
++ ++ * list_entry_lockless - get the struct for this entry
++ ++ * @ptr: the &struct list_head pointer.
++ ++ * @type: the type of the struct this is embedded in.
++ ++ * @member: the name of the list_head within the struct.
++ ++ *
++ ++ * This primitive may safely run concurrently with the _rcu list-mutation
++ ++ * primitives such as list_add_rcu(), but requires some implicit RCU
++ ++ * read-side guarding. One example is running within a special
++ ++ * exception-time environment where preemption is disabled and where
++ ++ * lockdep cannot be invoked (in which case updaters must use RCU-sched,
++ ++ * as in synchronize_sched(), call_rcu_sched(), and friends). Another
++ ++ * example is when items are added to the list, but never deleted.
++ ++ */
++ ++#define list_entry_lockless(ptr, type, member) \
++ ++ container_of((typeof(ptr))lockless_dereference(ptr), type, member)
++ ++
++ ++/**
++ ++ * list_for_each_entry_lockless - iterate over rcu list of given type
++ ++ * @pos: the type * to use as a loop cursor.
++ ++ * @head: the head for your list.
++ ++ * @member:	the name of the list_head within the struct.
++ ++ *
++ ++ * This primitive may safely run concurrently with the _rcu list-mutation
++ ++ * primitives such as list_add_rcu(), but requires some implicit RCU
++ ++ * read-side guarding. One example is running within a special
++ ++ * exception-time environment where preemption is disabled and where
++ ++ * lockdep cannot be invoked (in which case updaters must use RCU-sched,
++ ++ * as in synchronize_sched(), call_rcu_sched(), and friends). Another
++ ++ * example is when items are added to the list, but never deleted.
++ ++ */
++ ++#define list_for_each_entry_lockless(pos, head, member) \
++ ++ for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \
++ ++ &pos->member != (head); \
++ ++ pos = list_entry_lockless(pos->member.next, typeof(*pos), member))
++ ++
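
As a hedged illustration of the intended use (struct foo and foo_total are hypothetical names, not part of this patch), an add-only list can be walked from a context that provides implicit RCU protection, such as preemption-disabled exception code where lockdep cannot be invoked:

#include <linux/rculist.h>

struct foo {
	struct list_head entry;
	int data;
};

/* Entries are only ever added with list_add_rcu(), never removed. */
static int foo_total(struct list_head *head)
{
	struct foo *p;
	int sum = 0;

	list_for_each_entry_lockless(p, head, entry)
		sum += p->data;
	return sum;
}
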
/**
* list_for_each_entry_continue_rcu - continue iteration over list of given type
* @pos: the type * to use as a loop cursor.
/* Data structures. */
-- --static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
-- --static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
-- --static struct lock_class_key rcu_exp_class[RCU_NUM_LVLS];
-- --
/*
* In order to export the rcu_state name to the tracing tools, it
* needs to be added in the __tracepoint_string section.
*/
void rcu_sched_qs(void)
{
-- -- unsigned long flags;
-- --
-- -- if (__this_cpu_read(rcu_sched_data.cpu_no_qs.s)) {
-- -- trace_rcu_grace_period(TPS("rcu_sched"),
-- -- __this_cpu_read(rcu_sched_data.gpnum),
-- -- TPS("cpuqs"));
-- -- __this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
-- -- if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
-- -- return;
-- -- local_irq_save(flags);
-- -- if (__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)) {
-- -- __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
-- -- rcu_report_exp_rdp(&rcu_sched_state,
-- -- this_cpu_ptr(&rcu_sched_data),
-- -- true);
-- -- }
-- -- local_irq_restore(flags);
-- -- }
++ ++ if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.s))
++ ++ return;
++ ++ trace_rcu_grace_period(TPS("rcu_sched"),
++ ++ __this_cpu_read(rcu_sched_data.gpnum),
++ ++ TPS("cpuqs"));
++ ++ __this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
++ ++ if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
++ ++ return;
++ ++ __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
++ ++ rcu_report_exp_rdp(&rcu_sched_state,
++ ++ this_cpu_ptr(&rcu_sched_data), true);
}
void rcu_bh_qs(void)
* We inform the RCU core by emulating a zero-duration dyntick-idle
* period, which we in turn do by incrementing the ->dynticks counter
* by two.
++ ++ *
++ ++ * The caller must have disabled interrupts.
*/
static void rcu_momentary_dyntick_idle(void)
{
-- -- unsigned long flags;
struct rcu_data *rdp;
struct rcu_dynticks *rdtp;
int resched_mask;
struct rcu_state *rsp;
-- -- local_irq_save(flags);
-- --
/*
* Yes, we can lose flag-setting operations. This is OK, because
* the flag will be set again after some delay.
smp_mb__after_atomic(); /* Later stuff after QS. */
break;
}
-- -- local_irq_restore(flags);
}
/*
* Note a context switch. This is a quiescent state for RCU-sched,
* and requires special handling for preemptible RCU.
-- -- * The caller must have disabled preemption.
++ ++ * The caller must have disabled interrupts.
*/
void rcu_note_context_switch(void)
{
*/
void rcu_all_qs(void)
{
++ ++ unsigned long flags;
++ ++
barrier(); /* Avoid RCU read-side critical sections leaking down. */
-- -- if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
++ ++ if (unlikely(raw_cpu_read(rcu_sched_qs_mask))) {
++ ++ local_irq_save(flags);
rcu_momentary_dyntick_idle();
++ ++ local_irq_restore(flags);
++ ++ }
this_cpu_inc(rcu_qs_ctr);
barrier(); /* Avoid RCU read-side critical sections leaking up. */
}
* The caller must have disabled interrupts to prevent races with
* normal callback registry.
*/
-- --static int
++ ++static bool
cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
{
int i;
if (rcu_gp_in_progress(rsp))
-- -- return 0; /* No, a grace period is already in progress. */
++ ++ return false; /* No, a grace period is already in progress. */
if (rcu_future_needs_gp(rsp))
-- -- return 1; /* Yes, a no-CBs CPU needs one. */
++ ++ return true; /* Yes, a no-CBs CPU needs one. */
if (!rdp->nxttail[RCU_NEXT_TAIL])
-- -- return 0; /* No, this is a no-CBs (or offline) CPU. */
++ ++ return false; /* No, this is a no-CBs (or offline) CPU. */
if (*rdp->nxttail[RCU_NEXT_READY_TAIL])
-- -- return 1; /* Yes, this CPU has newly registered callbacks. */
++ ++ return true; /* Yes, CPU has newly registered callbacks. */
for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++)
if (rdp->nxttail[i - 1] != rdp->nxttail[i] &&
ULONG_CMP_LT(READ_ONCE(rsp->completed),
rdp->nxtcompleted[i]))
-- -- return 1; /* Yes, CBs for future grace period. */
-- -- return 0; /* No grace period needed. */
++ ++ return true; /* Yes, CBs for future grace period. */
++ ++ return false; /* No grace period needed. */
}
/*
*
* Exit from an interrupt handler, which might possibly result in entering
* idle mode, in other words, leaving the mode in which read-side critical
-- -- * sections can occur.
++ ++ * sections can occur. The caller must have disabled interrupts.
*
* This code assumes that the idle loop never does anything that might
* result in unbalanced calls to irq_enter() and irq_exit(). If your
*/
void rcu_irq_exit(void)
{
-- -- unsigned long flags;
long long oldval;
struct rcu_dynticks *rdtp;
-- -- local_irq_save(flags);
++ ++ RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!");
rdtp = this_cpu_ptr(&rcu_dynticks);
oldval = rdtp->dynticks_nesting;
rdtp->dynticks_nesting--;
else
rcu_eqs_enter_common(oldval, true);
rcu_sysidle_enter(1);
++ ++}
++ ++
++ ++/*
++ ++ * Wrapper for rcu_irq_exit() where interrupts are enabled.
++ ++ */
++ ++void rcu_irq_exit_irqson(void)
++ ++{
++ ++ unsigned long flags;
++ ++
++ ++ local_irq_save(flags);
++ ++ rcu_irq_exit();
local_irq_restore(flags);
}
*
* Enter an interrupt handler, which might possibly result in exiting
* idle mode, in other words, entering the mode in which read-side critical
-- -- * sections can occur.
++ ++ * sections can occur. The caller must have disabled interrupts.
*
* Note that the Linux kernel is fully capable of entering an interrupt
* handler that it never exits, for example when doing upcalls to
*/
void rcu_irq_enter(void)
{
-- -- unsigned long flags;
struct rcu_dynticks *rdtp;
long long oldval;
-- -- local_irq_save(flags);
++ ++ RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_enter() invoked with irqs enabled!!!");
rdtp = this_cpu_ptr(&rcu_dynticks);
oldval = rdtp->dynticks_nesting;
rdtp->dynticks_nesting++;
else
rcu_eqs_exit_common(oldval, true);
rcu_sysidle_exit(1);
++ ++}
++ ++
++ ++/*
++ ++ * Wrapper for rcu_irq_enter() where interrupts are enabled.
++ ++ */
++ ++void rcu_irq_enter_irqson(void)
++ ++{
++ ++ unsigned long flags;
++ ++
++ ++ local_irq_save(flags);
++ ++ rcu_irq_enter();
local_irq_restore(flags);
}
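
As a hypothetical caller sketch (example_rcu_watch_section() is not part of this patch), code that cannot guarantee interrupts are already disabled now uses the _irqson wrappers rather than calling rcu_irq_enter()/rcu_irq_exit() directly:

#include <linux/rcupdate.h>

static void example_rcu_watch_section(void)
{
	rcu_irq_enter_irqson();	/* Safe with interrupts enabled. */
	/* ... region that needs RCU to be watching ... */
	rcu_irq_exit_irqson();
}
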
rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
}
++++ /*
++++ * Convert a ->gp_state value to a character string.
++++ */
++++ static const char *gp_state_getname(short gs)
++++ {
++++ if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
++++ return "???";
++++ return gp_state_names[gs];
++++ }
++++
/*
* Complain about starvation of grace-period kthread.
*/
j = jiffies;
gpa = READ_ONCE(rsp->gp_activity);
---- if (j - gpa > 2 * HZ)
---- pr_err("%s kthread starved for %ld jiffies! g%lu c%lu f%#x s%d ->state=%#lx\n",
++++ if (j - gpa > 2 * HZ) {
++++ pr_err("%s kthread starved for %ld jiffies! g%lu c%lu f%#x %s(%d) ->state=%#lx\n",
rsp->name, j - gpa,
rsp->gpnum, rsp->completed,
---- rsp->gp_flags, rsp->gp_state,
---- rsp->gp_kthread ? rsp->gp_kthread->state : 0);
++++ rsp->gp_flags,
++++ gp_state_getname(rsp->gp_state), rsp->gp_state,
++++ rsp->gp_kthread ? rsp->gp_kthread->state : ~0);
++++ if (rsp->gp_kthread)
++++ sched_show_task(rsp->gp_kthread);
++++ }
}
/*
}
/*
-- -- * Initialize a new grace period. Return 0 if no grace period required.
++ ++ * Initialize a new grace period. Return false if no grace period required.
*/
-- --static int rcu_gp_init(struct rcu_state *rsp)
++ ++static bool rcu_gp_init(struct rcu_state *rsp)
{
unsigned long oldmask;
struct rcu_data *rdp;
if (!READ_ONCE(rsp->gp_flags)) {
/* Spurious wakeup, tell caller to go back to sleep. */
raw_spin_unlock_irq(&rnp->lock);
-- -- return 0;
++ ++ return false;
}
WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */
* Not supposed to be able to happen.
*/
raw_spin_unlock_irq(&rnp->lock);
-- -- return 0;
++ ++ return false;
}
/* Advance to a new grace period and initialize state. */
WRITE_ONCE(rsp->gp_activity, jiffies);
}
-- -- return 1;
++ ++ return true;
}
/*
{
unsigned long s;
- --- smp_mb(); /* Caller's modifications seen first by other CPUs. */
s = (READ_ONCE(*sp) + 3) & ~0x1;
smp_mb(); /* Above access must not bleed into critical section. */
return s;
}
static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
{
+ +++ smp_mb(); /* Caller's modifications seen first by other CPUs. */
return rcu_seq_snap(&rsp->expedited_sequence);
}
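
For reference while reviewing the barrier move, here is the counter arithmetic that rcu_seq_snap() implements, with illustrative values only (not part of the patch):

/*
 * The low bit of ->expedited_sequence is set while an expedited grace
 * period is in flight: rcu_seq_start() makes the counter odd and
 * rcu_seq_end() makes it even again.
 *
 *   counter idle at 8:       snap = (8 + 3) & ~0x1 = 10
 *     -> done once the counter reaches 10, i.e. after one complete
 *        start(9)/end(10) cycle that began after the snapshot.
 *
 *   counter in flight at 9:  snap = (9 + 3) & ~0x1 = 12
 *     -> the already-running grace period (ending at 10) may predate
 *        the caller's updates, so wait for the next full cycle (11/12).
 *
 * The smp_mb() moved into rcu_exp_gp_seq_snap() still orders the
 * caller's prior modifications before the counter is read, as its
 * retained comment states.
 */
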
static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
*/
static struct rcu_node *exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
{
- --- struct rcu_data *rdp;
+ +++ struct rcu_data *rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
struct rcu_node *rnp0;
struct rcu_node *rnp1 = NULL;
if (!mutex_is_locked(&rnp0->exp_funnel_mutex)) {
if (mutex_trylock(&rnp0->exp_funnel_mutex)) {
if (sync_exp_work_done(rsp, rnp0, NULL,
- --- &rsp->expedited_workdone0, s))
+ +++ &rdp->expedited_workdone0, s))
return NULL;
return rnp0;
}
* can be inexact, as it is just promoting locality and is not
* strictly needed for correctness.
*/
- --- rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
- --- if (sync_exp_work_done(rsp, NULL, NULL, &rsp->expedited_workdone1, s))
+ +++ if (sync_exp_work_done(rsp, NULL, NULL, &rdp->expedited_workdone1, s))
return NULL;
mutex_lock(&rdp->exp_funnel_mutex);
rnp0 = rdp->mynode;
for (; rnp0 != NULL; rnp0 = rnp0->parent) {
if (sync_exp_work_done(rsp, rnp1, rdp,
- --- &rsp->expedited_workdone2, s))
+ +++ &rdp->expedited_workdone2, s))
return NULL;
mutex_lock(&rnp0->exp_funnel_mutex);
if (rnp1)
rnp1 = rnp0;
}
if (sync_exp_work_done(rsp, rnp1, rdp,
- --- &rsp->expedited_workdone3, s))
+ +++ &rdp->expedited_workdone3, s))
return NULL;
return rnp1;
}
ret = smp_call_function_single(cpu, func, rsp, 0);
if (!ret) {
mask_ofl_ipi &= ~mask;
- --- } else {
- --- /* Failed, raced with offline. */
- --- raw_spin_lock_irqsave_rcu_node(rnp, flags);
- --- if (cpu_online(cpu) &&
- --- (rnp->expmask & mask)) {
- --- raw_spin_unlock_irqrestore(&rnp->lock,
- --- flags);
- --- schedule_timeout_uninterruptible(1);
- --- if (cpu_online(cpu) &&
- --- (rnp->expmask & mask))
- --- goto retry_ipi;
- --- raw_spin_lock_irqsave_rcu_node(rnp,
- --- flags);
- --- }
- --- if (!(rnp->expmask & mask))
- --- mask_ofl_ipi &= ~mask;
+ +++ continue;
+ +++ }
+ +++ /* Failed, raced with offline. */
+ +++ raw_spin_lock_irqsave_rcu_node(rnp, flags);
+ +++ if (cpu_online(cpu) &&
+ +++ (rnp->expmask & mask)) {
raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ +++ schedule_timeout_uninterruptible(1);
+ +++ if (cpu_online(cpu) &&
+ +++ (rnp->expmask & mask))
+ +++ goto retry_ipi;
+ +++ raw_spin_lock_irqsave_rcu_node(rnp, flags);
}
+ +++ if (!(rnp->expmask & mask))
+ +++ mask_ofl_ipi &= ~mask;
+ +++ raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
/* Report quiescent states for those that went offline. */
mask_ofl_test |= mask_ofl_ipi;
unsigned long jiffies_stall;
unsigned long jiffies_start;
unsigned long mask;
+ +++ int ndetected;
struct rcu_node *rnp;
struct rcu_node *rnp_root = rcu_get_root(rsp);
int ret;
rsp->expedited_wq,
sync_rcu_preempt_exp_done(rnp_root),
jiffies_stall);
- --- if (ret > 0)
+ +++ if (ret > 0 || sync_rcu_preempt_exp_done(rnp_root))
return;
if (ret < 0) {
/* Hit a signal, disable CPU stall warnings. */
}
pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
rsp->name);
+ +++ ndetected = 0;
rcu_for_each_leaf_node(rsp, rnp) {
- --- (void)rcu_print_task_exp_stall(rnp);
+ +++ ndetected += rcu_print_task_exp_stall(rnp);
mask = 1;
for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) {
struct rcu_data *rdp;
if (!(rnp->expmask & mask))
continue;
+ +++ ndetected++;
rdp = per_cpu_ptr(rsp->rda, cpu);
pr_cont(" %d-%c%c%c", cpu,
"O."[cpu_online(cpu)],
}
mask <<= 1;
}
- --- pr_cont(" } %lu jiffies s: %lu\n",
- --- jiffies - jiffies_start, rsp->expedited_sequence);
+ +++ pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
+ +++ jiffies - jiffies_start, rsp->expedited_sequence,
+ +++ rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
+ +++ if (!ndetected) {
+ +++ pr_err("blocking rcu_node structures:");
+ +++ rcu_for_each_node_breadth_first(rsp, rnp) {
+ +++ if (rnp == rnp_root)
+ +++ continue; /* printed unconditionally */
+ +++ if (sync_rcu_preempt_exp_done(rnp))
+ +++ continue;
+ +++ pr_cont(" l=%u:%d-%d:%#lx/%c",
+ +++ rnp->level, rnp->grplo, rnp->grphi,
+ +++ rnp->expmask,
+ +++ ".T"[!!rnp->exp_tasks]);
+ +++ }
+ +++ pr_cont("\n");
+ +++ }
rcu_for_each_leaf_node(rsp, rnp) {
mask = 1;
for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) {
struct rcu_node *rnp;
struct rcu_state *rsp = &rcu_sched_state;
+ +++ /* If only one CPU, this is automatically a grace period. */
+ +++ if (rcu_blocking_is_gp())
+ +++ return;
+ +++
+ +++ /* If expedited grace periods are prohibited, fall back to normal. */
+ +++ if (rcu_gp_is_normal()) {
+ +++ wait_rcu_gp(call_rcu_sched);
+ +++ return;
+ +++ }
+ +++
/* Take a snapshot of the sequence number. */
s = rcu_exp_gp_seq_snap(rsp);
sp.sched_priority = kthread_prio;
sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
}
-- -- wake_up_process(t);
raw_spin_unlock_irqrestore(&rnp->lock, flags);
++ ++ wake_up_process(t);
}
rcu_spawn_nocb_kthreads();
rcu_spawn_boost_kthreads();
/*
* Helper function for rcu_init() that initializes one rcu_state structure.
*/
-- --static void __init rcu_init_one(struct rcu_state *rsp,
-- -- struct rcu_data __percpu *rda)
++ ++static void __init rcu_init_one(struct rcu_state *rsp)
{
static const char * const buf[] = RCU_NODE_NAME_INIT;
static const char * const fqs[] = RCU_FQS_NAME_INIT;
static const char * const exp[] = RCU_EXP_NAME_INIT;
++ ++ static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
++ ++ static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
++ ++ static struct lock_class_key rcu_exp_class[RCU_NUM_LVLS];
static u8 fl_mask = 0x1;
int levelcnt[RCU_NUM_LVLS]; /* # nodes in each level. */
rcu_bootup_announce();
rcu_init_geometry();
-- -- rcu_init_one(&rcu_bh_state, &rcu_bh_data);
-- -- rcu_init_one(&rcu_sched_state, &rcu_sched_data);
++ ++ rcu_init_one(&rcu_bh_state);
++ ++ rcu_init_one(&rcu_sched_state);
if (dump_tree)
rcu_dump_rcu_node_tree(&rcu_sched_state);
__rcu_init_preempt();
/*
* Check the RCU kernel configuration parameters and print informative
-- -- * messages about anything out of the ordinary. If you like #ifdef, you
-- -- * will love this function.
++ ++ * messages about anything out of the ordinary.
*/
static void __init rcu_bootup_announce_oddness(void)
{
* the corresponding expedited grace period will also be the end of the
* normal grace period.
*/
-- --static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp,
-- -- unsigned long flags) __releases(rnp->lock)
++ ++static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
++ ++ __releases(rnp->lock) /* But leaves rrupts disabled. */
{
int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
(rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
rnp->gp_tasks = &t->rcu_node_entry;
if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
rnp->exp_tasks = &t->rcu_node_entry;
-- -- raw_spin_unlock(&rnp->lock);
++ ++ raw_spin_unlock(&rnp->lock); /* rrupts remain disabled. */
/*
* Report the quiescent state for the expedited GP. This expedited
} else {
WARN_ON_ONCE(t->rcu_read_unlock_special.b.exp_need_qs);
}
-- -- local_irq_restore(flags);
}
/*
* predating the current grace period drain, in other words, until
* rnp->gp_tasks becomes NULL.
*
-- -- * Caller must disable preemption.
++ ++ * Caller must disable interrupts.
*/
static void rcu_preempt_note_context_switch(void)
{
struct task_struct *t = current;
-- -- unsigned long flags;
struct rcu_data *rdp;
struct rcu_node *rnp;
/* Possibly blocking in an RCU read-side critical section. */
rdp = this_cpu_ptr(rcu_state_p->rda);
rnp = rdp->mynode;
-- -- raw_spin_lock_irqsave_rcu_node(rnp, flags);
++ ++ raw_spin_lock_rcu_node(rnp);
t->rcu_read_unlock_special.b.blocked = true;
t->rcu_blocked_node = rnp;
(rnp->qsmask & rdp->grpmask)
? rnp->gpnum
: rnp->gpnum + 1);
-- -- rcu_preempt_ctxt_queue(rnp, rdp, flags);
++ ++ rcu_preempt_ctxt_queue(rnp, rdp);
} else if (t->rcu_read_lock_nesting < 0 &&
t->rcu_read_unlock_special.s) {
/*
* Remove this task from the list it blocked on. The task
-- -- * now remains queued on the rcu_node corresponding to
-- -- * the CPU it first blocked on, so the first attempt to
-- -- * acquire the task's rcu_node's ->lock will succeed.
-- -- * Keep the loop and add a WARN_ON() out of sheer paranoia.
++ ++ * now remains queued on the rcu_node corresponding to the
++ ++ * CPU it first blocked on, so there is no longer any need
++ ++ * to loop. Retain a WARN_ON_ONCE() out of sheer paranoia.
*/
-- -- for (;;) {
-- -- rnp = t->rcu_blocked_node;
-- -- raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
-- -- if (rnp == t->rcu_blocked_node)
-- -- break;
-- -- WARN_ON_ONCE(1);
-- -- raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
-- -- }
++ ++ rnp = t->rcu_blocked_node;
++ ++ raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
++ ++ WARN_ON_ONCE(rnp != t->rcu_blocked_node);
empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
empty_exp = sync_rcu_preempt_exp_done(rnp);
smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
struct rcu_state *rsp = rcu_state_p;
unsigned long s;
+ +++ /* If expedited grace periods are prohibited, fall back to normal. */
+ +++ if (rcu_gp_is_normal()) {
+ +++ wait_rcu_gp(call_rcu);
+ +++ return;
+ +++ }
+ +++
s = rcu_exp_gp_seq_snap(rsp);
rnp_unlock = exp_funnel_lock(rsp, s);
*/
static void __init __rcu_init_preempt(void)
{
-- -- rcu_init_one(rcu_state_p, rcu_data_p);
++ ++ rcu_init_one(rcu_state_p);
}
/*
struct rcu_state *rsp;
int tne;
-- -- if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL))
++ ++ if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL) ||
++ ++ rcu_is_nocb_cpu(smp_processor_id()))
return;
/* Handle nohz enablement switches conservatively. */
if (!tne)
return;
-- -- /* If this is a no-CBs CPU, no callbacks, just return. */
-- -- if (rcu_is_nocb_cpu(smp_processor_id()))
-- -- return;
-- --
/*
* If a non-lazy callback arrived at a CPU having only lazy
* callbacks, invoke RCU core for the side-effect of recalculating