rcu: Clean up code based on review feedback from Josh Triplett, part 4
These issues were identified during an old-fashioned face-to-face code review extending over many hours. This group improves an existing abstraction and introduces two new ones. It also fixes an RCU stall-warning bug found while making the other changes. o Make RCU_INIT_FLAVOR() declare its own variables, removing the need to declare them at each call site. o Create an rcu_for_each_leaf_node() macro that scans the leaf nodes of the rcu_node tree. o Create an rcu_for_each_node_breadth_first() macro that does a breadth-first traversal of the rcu_node tree, AKA stepping through the array in index-number order. o If all CPUs corresponding to a given leaf rcu_node structure go offline, then any tasks queued on that leaf will be moved to the root rcu_node structure. Therefore, the stall-warning code must dump out tasks queued on the root rcu_node structure as well as those queued on the leaf rcu_node structures. Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: akpm@linux-foundation.org Cc: mathieu.desnoyers@polymtl.ca Cc: josh@joshtriplett.org Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: peterz@infradead.org Cc: rostedt@goodmis.org Cc: Valdis.Kletnieks@vt.edu Cc: dhowells@redhat.com LKML-Reference: <12541491934126-git-send-email-paulmck@linux.vnet.ibm.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
3d76c08290
commit
a0b6c9a78c
3 changed files with 40 additions and 29 deletions
|
@@ -462,8 +462,6 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
|
||||||
long delta;
|
long delta;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
struct rcu_node *rnp = rcu_get_root(rsp);
|
struct rcu_node *rnp = rcu_get_root(rsp);
|
||||||
struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
|
|
||||||
struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES];
|
|
||||||
|
|
||||||
/* Only let one CPU complain about others per time interval. */
|
/* Only let one CPU complain about others per time interval. */
|
||||||
|
|
||||||
|
@@ -474,18 +472,24 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
|
rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Now rat on any tasks that got kicked up to the root rcu_node
|
||||||
|
* due to CPU offlining.
|
||||||
|
*/
|
||||||
|
rcu_print_task_stall(rnp);
|
||||||
spin_unlock_irqrestore(&rnp->lock, flags);
|
spin_unlock_irqrestore(&rnp->lock, flags);
|
||||||
|
|
||||||
/* OK, time to rat on our buddy... */
|
/* OK, time to rat on our buddy... */
|
||||||
|
|
||||||
printk(KERN_ERR "INFO: RCU detected CPU stalls:");
|
printk(KERN_ERR "INFO: RCU detected CPU stalls:");
|
||||||
for (; rnp_cur < rnp_end; rnp_cur++) {
|
rcu_for_each_leaf_node(rsp, rnp) {
|
||||||
rcu_print_task_stall(rnp);
|
rcu_print_task_stall(rnp);
|
||||||
if (rnp_cur->qsmask == 0)
|
if (rnp->qsmask == 0)
|
||||||
continue;
|
continue;
|
||||||
for (cpu = 0; cpu <= rnp_cur->grphi - rnp_cur->grplo; cpu++)
|
for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
|
||||||
if (rnp_cur->qsmask & (1UL << cpu))
|
if (rnp->qsmask & (1UL << cpu))
|
||||||
printk(" %d", rnp_cur->grplo + cpu);
|
printk(" %d", rnp->grplo + cpu);
|
||||||
}
|
}
|
||||||
printk(" (detected by %d, t=%ld jiffies)\n",
|
printk(" (detected by %d, t=%ld jiffies)\n",
|
||||||
smp_processor_id(), (long)(jiffies - rsp->gp_start));
|
smp_processor_id(), (long)(jiffies - rsp->gp_start));
|
||||||
|
@@ -649,7 +653,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
|
||||||
* one corresponding to this CPU, due to the fact that we have
|
* one corresponding to this CPU, due to the fact that we have
|
||||||
* irqs disabled.
|
* irqs disabled.
|
||||||
*/
|
*/
|
||||||
for (rnp = &rsp->node[0]; rnp < &rsp->node[NUM_RCU_NODES]; rnp++) {
|
rcu_for_each_node_breadth_first(rsp, rnp) {
|
||||||
spin_lock(&rnp->lock); /* irqs already disabled. */
|
spin_lock(&rnp->lock); /* irqs already disabled. */
|
||||||
rcu_preempt_check_blocked_tasks(rnp);
|
rcu_preempt_check_blocked_tasks(rnp);
|
||||||
rnp->qsmask = rnp->qsmaskinit;
|
rnp->qsmask = rnp->qsmaskinit;
|
||||||
|
@@ -1042,33 +1046,32 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
|
||||||
int cpu;
|
int cpu;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
unsigned long mask;
|
unsigned long mask;
|
||||||
struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
|
struct rcu_node *rnp;
|
||||||
struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES];
|
|
||||||
|
|
||||||
for (; rnp_cur < rnp_end; rnp_cur++) {
|
rcu_for_each_leaf_node(rsp, rnp) {
|
||||||
mask = 0;
|
mask = 0;
|
||||||
spin_lock_irqsave(&rnp_cur->lock, flags);
|
spin_lock_irqsave(&rnp->lock, flags);
|
||||||
if (rsp->completed != lastcomp) {
|
if (rsp->completed != lastcomp) {
|
||||||
spin_unlock_irqrestore(&rnp_cur->lock, flags);
|
spin_unlock_irqrestore(&rnp->lock, flags);
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
if (rnp_cur->qsmask == 0) {
|
if (rnp->qsmask == 0) {
|
||||||
spin_unlock_irqrestore(&rnp_cur->lock, flags);
|
spin_unlock_irqrestore(&rnp->lock, flags);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
cpu = rnp_cur->grplo;
|
cpu = rnp->grplo;
|
||||||
bit = 1;
|
bit = 1;
|
||||||
for (; cpu <= rnp_cur->grphi; cpu++, bit <<= 1) {
|
for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
|
||||||
if ((rnp_cur->qsmask & bit) != 0 && f(rsp->rda[cpu]))
|
if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu]))
|
||||||
mask |= bit;
|
mask |= bit;
|
||||||
}
|
}
|
||||||
if (mask != 0 && rsp->completed == lastcomp) {
|
if (mask != 0 && rsp->completed == lastcomp) {
|
||||||
|
|
||||||
/* cpu_quiet_msk() releases rnp_cur->lock. */
|
/* cpu_quiet_msk() releases rnp->lock. */
|
||||||
cpu_quiet_msk(mask, rsp, rnp_cur, flags);
|
cpu_quiet_msk(mask, rsp, rnp, flags);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
spin_unlock_irqrestore(&rnp_cur->lock, flags);
|
spin_unlock_irqrestore(&rnp->lock, flags);
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@@ -1550,6 +1553,10 @@ static void __init rcu_init_one(struct rcu_state *rsp)
|
||||||
*/
|
*/
|
||||||
#define RCU_INIT_FLAVOR(rsp, rcu_data) \
|
#define RCU_INIT_FLAVOR(rsp, rcu_data) \
|
||||||
do { \
|
do { \
|
||||||
|
int i; \
|
||||||
|
int j; \
|
||||||
|
struct rcu_node *rnp; \
|
||||||
|
\
|
||||||
rcu_init_one(rsp); \
|
rcu_init_one(rsp); \
|
||||||
rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \
|
rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \
|
||||||
j = 0; \
|
j = 0; \
|
||||||
|
@@ -1564,10 +1571,6 @@ do { \
|
||||||
|
|
||||||
void __init __rcu_init(void)
|
void __init __rcu_init(void)
|
||||||
{
|
{
|
||||||
int i; /* All used by RCU_INIT_FLAVOR(). */
|
|
||||||
int j;
|
|
||||||
struct rcu_node *rnp;
|
|
||||||
|
|
||||||
rcu_bootup_announce();
|
rcu_bootup_announce();
|
||||||
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
|
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
|
||||||
printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
|
printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
|
||||||
|
|
|
@@ -106,6 +106,18 @@ struct rcu_node {
|
||||||
/* blocked_tasks[] array. */
|
/* blocked_tasks[] array. */
|
||||||
} ____cacheline_internodealigned_in_smp;
|
} ____cacheline_internodealigned_in_smp;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Do a full breadth-first scan of the rcu_node structures for the
|
||||||
|
* specified rcu_state structure.
|
||||||
|
*/
|
||||||
|
#define rcu_for_each_node_breadth_first(rsp, rnp) \
|
||||||
|
for ((rnp) = &(rsp)->node[0]; \
|
||||||
|
(rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
|
||||||
|
|
||||||
|
#define rcu_for_each_leaf_node(rsp, rnp) \
|
||||||
|
for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \
|
||||||
|
(rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
|
||||||
|
|
||||||
/* Index values for nxttail array in struct rcu_data. */
|
/* Index values for nxttail array in struct rcu_data. */
|
||||||
#define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */
|
#define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */
|
||||||
#define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */
|
#define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */
|
||||||
|
|
|
@@ -423,10 +423,6 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
|
||||||
*/
|
*/
|
||||||
static void __init __rcu_init_preempt(void)
|
static void __init __rcu_init_preempt(void)
|
||||||
{
|
{
|
||||||
int i; /* All used by RCU_INIT_FLAVOR(). */
|
|
||||||
int j;
|
|
||||||
struct rcu_node *rnp;
|
|
||||||
|
|
||||||
RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data);
|
RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue