rcu: Per-CPU operation cleanups to rcu_*_qs() functions
The rcu_bh_qs(), rcu_preempt_qs(), and rcu_sched_qs() functions use old-style per-CPU variable access and write to ->passed_quiesce even if it is already set. This commit therefore updates these functions to use the new-style per-CPU variable access functions and avoids the spurious writes. It also eliminates the "cpu" argument to these functions because they are always invoked on the indicated CPU.

Reported-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent 1d082fd061
commit 284a8c93af

6 changed files with 41 additions and 36 deletions
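For reference, every rcu_*_qs() change in the diff below follows the same before/after shape. The sketch that follows restates that pattern in stand-alone form; it is an illustration only, and rcu_example_qs_old()/rcu_example_qs_new() and the rcu_example_data variable are hypothetical stand-ins for the real rcu_sched_data, rcu_bh_data, and rcu_preempt_data per-CPU structures (per_cpu(), __this_cpu_read(), __this_cpu_write(), TPS(), and trace_rcu_grace_period() are the existing kernel primitives assumed to be in scope):

/* Old style: the caller passes "cpu", and ->passed_quiesce is written
 * unconditionally, even when it is already set.
 */
void rcu_example_qs_old(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_example_data, cpu);

	if (rdp->passed_quiesce == 0)
		trace_rcu_grace_period(TPS("rcu_example"), rdp->gpnum, TPS("cpuqs"));
	rdp->passed_quiesce = 1;	/* spurious write if already set */
}

/* New style: no "cpu" argument (the function always runs on the CPU in
 * question), new-style per-CPU accessors, and the write is skipped when
 * the flag is already set.
 */
void rcu_example_qs_new(void)
{
	if (!__this_cpu_read(rcu_example_data.passed_quiesce)) {
		trace_rcu_grace_period(TPS("rcu_example"),
				       __this_cpu_read(rcu_example_data.gpnum),
				       TPS("cpuqs"));
		__this_cpu_write(rcu_example_data.passed_quiesce, 1);
	}
}

The __this_cpu_read()/__this_cpu_write() accessors are safe here only because, as the comments in the diff note, callers of these functions must have preemption disabled.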
include/linux/rcupdate.h
@@ -261,8 +261,8 @@ static inline int rcu_preempt_depth(void)
 
 /* Internal to kernel */
 void rcu_init(void);
-void rcu_sched_qs(int cpu);
-void rcu_bh_qs(int cpu);
+void rcu_sched_qs(void);
+void rcu_bh_qs(void);
 void rcu_check_callbacks(int cpu, int user);
 struct notifier_block;
 void rcu_idle_enter(void);
include/linux/rcutiny.h
@@ -80,7 +80,7 @@ static inline void kfree_call_rcu(struct rcu_head *head,
 
 static inline void rcu_note_context_switch(int cpu)
 {
-	rcu_sched_qs(cpu);
+	rcu_sched_qs();
 }
 
 /*
kernel/rcu/tiny.c
@@ -72,7 +72,7 @@ static void rcu_idle_enter_common(long long newval)
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
 	}
-	rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
+	rcu_sched_qs(); /* implies rcu_bh_inc() */
 	barrier();
 	rcu_dynticks_nesting = newval;
 }
@@ -217,7 +217,7 @@ static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
  * are at it, given that any rcu quiescent state is also an rcu_bh
  * quiescent state. Use "+" instead of "||" to defeat short circuiting.
  */
-void rcu_sched_qs(int cpu)
+void rcu_sched_qs(void)
 {
 	unsigned long flags;
 
@@ -231,7 +231,7 @@ void rcu_sched_qs(int cpu)
 /*
  * Record an rcu_bh quiescent state.
  */
-void rcu_bh_qs(int cpu)
+void rcu_bh_qs(void)
 {
 	unsigned long flags;
 
@@ -251,9 +251,9 @@ void rcu_check_callbacks(int cpu, int user)
 {
 	RCU_TRACE(check_cpu_stalls());
 	if (user || rcu_is_cpu_rrupt_from_idle())
-		rcu_sched_qs(cpu);
+		rcu_sched_qs();
 	else if (!in_softirq())
-		rcu_bh_qs(cpu);
+		rcu_bh_qs();
 	if (user)
 		rcu_note_voluntary_context_switch(current);
 }
kernel/rcu/tree.c
@@ -188,22 +188,24 @@ static int rcu_gp_in_progress(struct rcu_state *rsp)
  * one since the start of the grace period, this just sets a flag.
  * The caller must have disabled preemption.
  */
-void rcu_sched_qs(int cpu)
+void rcu_sched_qs(void)
 {
-	struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);
-
-	if (rdp->passed_quiesce == 0)
-		trace_rcu_grace_period(TPS("rcu_sched"), rdp->gpnum, TPS("cpuqs"));
-	rdp->passed_quiesce = 1;
+	if (!__this_cpu_read(rcu_sched_data.passed_quiesce)) {
+		trace_rcu_grace_period(TPS("rcu_sched"),
+				       __this_cpu_read(rcu_sched_data.gpnum),
+				       TPS("cpuqs"));
+		__this_cpu_write(rcu_sched_data.passed_quiesce, 1);
+	}
 }
 
-void rcu_bh_qs(int cpu)
+void rcu_bh_qs(void)
 {
-	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
-
-	if (rdp->passed_quiesce == 0)
-		trace_rcu_grace_period(TPS("rcu_bh"), rdp->gpnum, TPS("cpuqs"));
-	rdp->passed_quiesce = 1;
+	if (!__this_cpu_read(rcu_bh_data.passed_quiesce)) {
+		trace_rcu_grace_period(TPS("rcu_bh"),
+				       __this_cpu_read(rcu_bh_data.gpnum),
+				       TPS("cpuqs"));
+		__this_cpu_write(rcu_bh_data.passed_quiesce, 1);
+	}
 }
 
 static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
@@ -278,7 +280,7 @@ static void rcu_momentary_dyntick_idle(void)
 void rcu_note_context_switch(int cpu)
 {
 	trace_rcu_utilization(TPS("Start context switch"));
-	rcu_sched_qs(cpu);
+	rcu_sched_qs();
 	rcu_preempt_note_context_switch(cpu);
 	if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
 		rcu_momentary_dyntick_idle();
@@ -2395,8 +2397,8 @@ void rcu_check_callbacks(int cpu, int user)
		 * at least not while the corresponding CPU is online.
		 */
 
-		rcu_sched_qs(cpu);
-		rcu_bh_qs(cpu);
+		rcu_sched_qs();
+		rcu_bh_qs();
 
 	} else if (!in_softirq()) {
 
@@ -2407,7 +2409,7 @@ void rcu_check_callbacks(int cpu, int user)
		 * critical section, so note it.
		 */
 
-		rcu_bh_qs(cpu);
+		rcu_bh_qs();
 	}
 	rcu_preempt_check_callbacks(cpu);
 	if (rcu_pending(cpu))
kernel/rcu/tree_plugin.h
@@ -158,14 +158,16 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed);
  * As with the other rcu_*_qs() functions, callers to this function
  * must disable preemption.
  */
-static void rcu_preempt_qs(int cpu)
+static void rcu_preempt_qs(void)
 {
-	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
-
-	if (rdp->passed_quiesce == 0)
-		trace_rcu_grace_period(TPS("rcu_preempt"), rdp->gpnum, TPS("cpuqs"));
-	rdp->passed_quiesce = 1;
-	current->rcu_read_unlock_special.b.need_qs = false;
+	if (!__this_cpu_read(rcu_preempt_data.passed_quiesce)) {
+		trace_rcu_grace_period(TPS("rcu_preempt"),
+				       __this_cpu_read(rcu_preempt_data.gpnum),
+				       TPS("cpuqs"));
+		__this_cpu_write(rcu_preempt_data.passed_quiesce, 1);
+		barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */
+		current->rcu_read_unlock_special.b.need_qs = false;
+	}
 }
 
 /*
@@ -256,7 +258,7 @@ static void rcu_preempt_note_context_switch(int cpu)
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
-	rcu_preempt_qs(cpu);
+	rcu_preempt_qs();
 }
 
 /*
@@ -352,7 +354,7 @@ void rcu_read_unlock_special(struct task_struct *t)
	 */
 	special = t->rcu_read_unlock_special;
 	if (special.b.need_qs) {
-		rcu_preempt_qs(smp_processor_id());
+		rcu_preempt_qs();
 		if (!t->rcu_read_unlock_special.s) {
 			local_irq_restore(flags);
 			return;
@@ -651,11 +653,12 @@ static void rcu_preempt_check_callbacks(int cpu)
 	struct task_struct *t = current;
 
 	if (t->rcu_read_lock_nesting == 0) {
-		rcu_preempt_qs(cpu);
+		rcu_preempt_qs();
 		return;
 	}
 	if (t->rcu_read_lock_nesting > 0 &&
-	    per_cpu(rcu_preempt_data, cpu).qs_pending)
+	    per_cpu(rcu_preempt_data, cpu).qs_pending &&
+	    !per_cpu(rcu_preempt_data, cpu).passed_quiesce)
 		t->rcu_read_unlock_special.b.need_qs = true;
 }
 
kernel/softirq.c
@@ -278,7 +278,7 @@ asmlinkage __visible void __do_softirq(void)
 		pending >>= softirq_bit;
 	}
 
-	rcu_bh_qs(smp_processor_id());
+	rcu_bh_qs();
 	local_irq_disable();
 
 	pending = local_softirq_pending();