Preempt-RCU: fix rcu_barrier for preemptive environment.
Fix rcu_barrier() to work properly in a preemptive kernel environment. Also, the ordering of callbacks must be preserved while moving callbacks to another CPU during CPU hotplug.

Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Signed-off-by: Dipankar Sarma <dipankar@in.ibm.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 01c1c660f4
commit e0ecfa7917

2 changed files with 11 additions and 1 deletion
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -371,9 +371,9 @@ static void __rcu_offline_cpu(struct rcu_data *this_rdp,
 	if (rcp->cur != rcp->completed)
 		cpu_quiet(rdp->cpu, rcp);
 	spin_unlock_bh(&rcp->lock);
+	rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
 	rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
 	rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
-	rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
 }
 
 static void rcu_offline_cpu(int cpu)
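Why the reordering matters: rcu_move_batch() splices each source batch onto the tail of the receiving CPU's nxtlist, so batches run in the order they are moved. Moving donelist first keeps callbacks that have already passed through a grace period ahead of the cur and nxt batches, preserving callback ordering across the hotplug migration. A minimal sketch of the splice, modeled on rcuclassic.c of this era (illustrative, not verbatim upstream source):

static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
			   struct rcu_head **tail)
{
	/* Disable irqs so the splice is atomic on this CPU. */
	local_irq_disable();
	/* Append the whole batch at the current tail of nxtlist... */
	*this_rdp->nxttail = list;
	/* ...and, if the batch is non-empty, advance the tail past it. */
	if (list)
		this_rdp->nxttail = tail;
	local_irq_enable();
}

Because every batch lands at the tail of the same destination list, the call order in __rcu_offline_cpu() directly determines the order in which the migrated callbacks are eventually invoked.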
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -115,7 +115,17 @@ void rcu_barrier(void)
 	mutex_lock(&rcu_barrier_mutex);
 	init_completion(&rcu_barrier_completion);
 	atomic_set(&rcu_barrier_cpu_count, 0);
+	/*
+	 * The queueing of callbacks in all CPUs must be atomic with
+	 * respect to RCU, otherwise one CPU may queue a callback,
+	 * wait for a grace period, decrement barrier count and call
+	 * complete(), while other CPUs have not yet queued anything.
+	 * So, we need to make sure that grace periods cannot complete
+	 * until all the callbacks are queued.
+	 */
+	rcu_read_lock();
 	on_each_cpu(rcu_barrier_func, NULL, 0, 1);
+	rcu_read_unlock();
 	wait_for_completion(&rcu_barrier_completion);
 	mutex_unlock(&rcu_barrier_mutex);
 }
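To see why the new rcu_read_lock()/rcu_read_unlock() pair fixes the barrier for a preemptive environment, recall the machinery behind rcu_barrier(): each CPU queues one callback that decrements a shared counter, and the last callback to run signals completion. A hedged sketch of that machinery, following the shape of kernel/rcupdate.c of this era (simplified for illustration; names such as rcu_barrier_head match the kernel of the time but should be checked against the tree at this commit):

static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head);
static atomic_t rcu_barrier_cpu_count;
static struct completion rcu_barrier_completion;

/* Runs after a grace period; the last CPU's callback to execute
 * completes the barrier. */
static void rcu_barrier_callback(struct rcu_head *notused)
{
	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
		complete(&rcu_barrier_completion);
}

/* Invoked on each CPU by on_each_cpu(): queue one barrier callback. */
static void rcu_barrier_func(void *notused)
{
	struct rcu_head *head = &per_cpu(rcu_barrier_head, smp_processor_id());

	atomic_inc(&rcu_barrier_cpu_count);
	call_rcu(head, rcu_barrier_callback);
}

Without a read-side critical section around on_each_cpu(), a grace period could end while some CPUs had not yet queued their callback: an early CPU's callback could then decrement the count back to zero and call complete() prematurely. Holding rcu_read_lock() across the queueing ensures no grace period, and hence no barrier callback, can complete until every CPU has contributed its callback.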