s390/mm: fix incorrect ASCE after crst_table_downgrade
The switch_mm function does nothing if the prev and next mm are the
same. But a crst_table_downgrade may have changed the top-level pgd
in the meantime on a different CPU. Always store the new ASCE to be
picked up in entry.S.
[heiko.carstens@de.ibm.com]: Bug was introduced with git commit
53e857f308 ("s390/mm,tlb: race of lazy TLB flush vs. recreation of
TLB entries") and causes random crashes due to broken page tables
being used.
Reported-by: Dominik Vogt <vogt@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
commit 691d526415
parent a9ca8eb7af
1 changed file with 1 addition and 1 deletion
@@ -62,6 +62,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
 	int cpu = smp_processor_id();
 
+	S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
 	if (prev == next)
 		return;
 	if (MACHINE_HAS_TLB_LC)
@@ -73,7 +74,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	atomic_dec(&prev->context.attach_count);
 	if (MACHINE_HAS_TLB_LC)
 		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
-	S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
 }
 
 #define finish_arch_post_lock_switch finish_arch_post_lock_switch
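
For context, a condensed sketch of how the start of switch_mm() reads after
this change, reconstructed from the hunks above. The body under the first
MACHINE_HAS_TLB_LC check is inferred from the matching cpumask_clear_cpu()
in the second hunk, and the attach-count bookkeeping and detach of prev in
the middle of the function are only indicated by a comment here:

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        int cpu = smp_processor_id();

        /*
         * Always publish the ASCE of next: a concurrent crst_table_downgrade
         * on another CPU may have replaced next->pgd, so even the prev == next
         * fast path must not leave a stale ASCE in lowcore for entry.S to load.
         */
        S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
        if (prev == next)
                return;
        if (MACHINE_HAS_TLB_LC)
                cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
        /* ... attach-count updates and detach of prev elided ... */
}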