sparc32: destroy_context() and switch_mm() need to disable interrupts.

Load balancing can be triggered in the critical sections protected by
srmmu_context_spinlock in destroy_context() and switch_mm(), and can hang
the CPU waiting for the rq lock of another CPU that in turn has called
switch_mm() and is hanging on srmmu_context_spinlock, leading to deadlock.

So, disable interrupts while taking srmmu_context_spinlock in
destroy_context() and switch_mm() so we don't deadlock.

See also commit 77b838fa1e ("[SPARC64]: destroy_context() needs to disable
interrupts.")

Signed-off-by: Andreas Larsson <andreas@gaisler.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

@@ -460,10 +460,12 @@ static void __init sparc_context_init(int numctx)
 void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
 	       struct task_struct *tsk)
 {
+	unsigned long flags;
 	if (mm->context == NO_CONTEXT) {
-		spin_lock(&srmmu_context_spinlock);
+		spin_lock_irqsave(&srmmu_context_spinlock, flags);
 		alloc_context(old_mm, mm);
-		spin_unlock(&srmmu_context_spinlock);
+		spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
 		srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
 	}
@@ -986,14 +988,15 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 void destroy_context(struct mm_struct *mm)
 {
+	unsigned long flags;
 	if (mm->context != NO_CONTEXT) {
 		flush_cache_mm(mm);
 		srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
 		flush_tlb_mm(mm);
-		spin_lock(&srmmu_context_spinlock);
+		spin_lock_irqsave(&srmmu_context_spinlock, flags);
 		free_context(mm->context);
-		spin_unlock(&srmmu_context_spinlock);
+		spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
 		mm->context = NO_CONTEXT;
 	}
 }