rcu: Permit rt_mutex_unlock() with irqs disabled
Create a separate lockdep class for the rt_mutex used for RCU priority
boosting and enable use of rt_mutex_lock() with irqs disabled.  This
prevents RCU priority boosting from falling prey to deadlocks when someone
begins an RCU read-side critical section in preemptible state, but releases
it with an irq-disabled lock held.

Unfortunately, the scheduler's runqueue and priority-inheritance locks still
must either completely enclose or be completely enclosed by any overlapping
RCU read-side critical section.

This version removes a redundant local_irq_restore() noted by Yong Zhang.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
This commit is contained in:
parent
06ae115a1d
commit
5342e269b2
2 changed files with 13 additions and 0 deletions
|
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1149,6 +1149,8 @@ static void rcu_initiate_boost_trace(struct rcu_node *rnp)
 
 #endif /* #else #ifdef CONFIG_RCU_TRACE */
 
+static struct lock_class_key rcu_boost_class;
+
 /*
  * Carry out RCU priority boosting on the task indicated by ->exp_tasks
  * or ->boost_tasks, advancing the pointer to the next task in the
@@ -1211,6 +1213,9 @@ static int rcu_boost(struct rcu_node *rnp)
 	 */
 	t = container_of(tb, struct task_struct, rcu_node_entry);
 	rt_mutex_init_proxy_locked(&mtx, t);
+	/* Avoid lockdep false positives.  This rt_mutex is its own thing. */
+	lockdep_set_class_and_name(&mtx.wait_lock, &rcu_boost_class,
+				   "rcu_boost_mutex");
 	t->rcu_boost_mutex = &mtx;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -579,6 +579,7 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		    struct rt_mutex_waiter *waiter)
 {
 	int ret = 0;
+	int was_disabled;
 
 	for (;;) {
 		/* Try to acquire the lock: */
@@ -601,10 +602,17 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
 
 		raw_spin_unlock(&lock->wait_lock);
 
+		was_disabled = irqs_disabled();
+		if (was_disabled)
+			local_irq_enable();
+
 		debug_rt_mutex_print_deadlock(waiter);
 
 		schedule_rt_mutex(lock);
 
+		if (was_disabled)
+			local_irq_disable();
+
 		raw_spin_lock(&lock->wait_lock);
 		set_current_state(state);
 	}
Loading…
Reference in a new issue