[SPARC64] mm: context switch ptlock

sparc64 is unique among architectures in taking the page_table_lock in
its context switch (well, cris does too, but erroneously, and it's not
yet SMP anyway).

This seems to be a private affair between switch_mm and activate_mm,
using page_table_lock as a per-mm lock, without any relation to its uses
elsewhere.  That's fine, but comment it as such; and unlock sooner in
switch_mm, more like in activate_mm (preemption is disabled here).
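
In outline, the serialization that remains after this patch (condensed from the mmu_context.h hunk below; only the locking is shown) is:

        spin_lock(&mm->page_table_lock);        /* per-mm lock: serialize with activate_mm */
        ctx_valid = CTX_VALID(mm->context);
        if (!ctx_valid)
                get_new_mmu_context(mm);        /* the only thing being serialized */
        spin_unlock(&mm->page_table_lock);      /* dropped before MMU state is touched */

        /* ... then load_secondary_context(), reload_tlbmiss_state() and the
         * cpu_vm_mask/TLB check run without the lock; preemption is disabled. */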

There is a block of "if (0)"ed code in smp_flush_tlb_pending which would
have liked to rely on the page_table_lock, in switch_mm and elsewhere;
but its comment explains how dup_mmap's flush_tlb_mm defeated it.  And
though that could have been changed at any time over the past few years,
now the chance vanishes as we push the page_table_lock downwards, and
perhaps split it per page table page.  Just delete that block of code.
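
With the dead branch gone, the function reduces to the straightforward form (condensed from the smp.c hunk below):

        if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
                mm->cpu_vm_mask = cpumask_of_cpu(cpu);  /* sole user: local flush is enough */
        else
                smp_cross_call_masked(&xcall_flush_tlb_pending,
                                      ctx, nr, (unsigned long) vaddrs,
                                      mm->cpu_vm_mask); /* tell the other cpus */

        __flush_tlb_pending(ctx, nr, vaddrs);           /* always flush locally */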

Which leaves the mysterious spin_unlock_wait(&oldmm->page_table_lock)
in kernel/fork.c copy_mm.  Textual analysis (supported by Nick Piggin)
suggests that the comment was written by DaveM, and that it relates to
the defeated approach in the sparc64 smp_flush_tlb_pending.  Just delete
this block too.
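
(For reference, spin_unlock_wait() never takes the lock; it only spins until the lock is observed unlocked.  The deleted call was therefore, roughly, a one-shot barrier against a concurrent holder, close in effect to:)

        /* Wait for any concurrent holder of oldmm->page_table_lock,
         * e.g. a switch_mm on another cpu, to drop it before sharing
         * the mm; spin_unlock_wait() did this without taking the lock.
         */
        spin_lock(&oldmm->page_table_lock);
        spin_unlock(&oldmm->page_table_lock);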

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Hugh Dickins authored 2005-11-07 14:09:01 -08:00; committed by David S. Miller
parent b8ae48656d
commit dedeb0029b
3 changed files with 29 additions and 55 deletions

arch/sparc64/kernel/smp.c

@@ -883,34 +883,13 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
 	u32 ctx = CTX_HWBITS(mm->context);
 	int cpu = get_cpu();
 
-	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) {
+	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
 		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
-		goto local_flush_and_out;
-	} else {
-		/* This optimization is not valid. Normally
-		 * we will be holding the page_table_lock, but
-		 * there is an exception which is copy_page_range()
-		 * when forking. The lock is held during the individual
-		 * page table updates in the parent, but not at the
-		 * top level, which is where we are invoked.
-		 */
-		if (0) {
-			cpumask_t this_cpu_mask = cpumask_of_cpu(cpu);
-
-			/* By virtue of running under the mm->page_table_lock,
-			 * and mmu_context.h:switch_mm doing the same, the
-			 * following operation is safe.
-			 */
-			if (cpus_equal(mm->cpu_vm_mask, this_cpu_mask))
-				goto local_flush_and_out;
-		}
-	}
-
-	smp_cross_call_masked(&xcall_flush_tlb_pending,
-			      ctx, nr, (unsigned long) vaddrs,
-			      mm->cpu_vm_mask);
+	else
+		smp_cross_call_masked(&xcall_flush_tlb_pending,
+				      ctx, nr, (unsigned long) vaddrs,
+				      mm->cpu_vm_mask);
 
-local_flush_and_out:
 	__flush_tlb_pending(ctx, nr, vaddrs);
 
 	put_cpu();

include/asm-sparc64/mmu_context.h

@@ -87,37 +87,35 @@ extern void __flush_tlb_mm(unsigned long, unsigned long);
 static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
 {
 	unsigned long ctx_valid;
+	int cpu;
 
+	/* Note: page_table_lock is used here to serialize switch_mm
+	 * and activate_mm, and their calls to get_new_mmu_context.
+	 * This use of page_table_lock is unrelated to its other uses.
+	 */
 	spin_lock(&mm->page_table_lock);
-	if (CTX_VALID(mm->context))
-		ctx_valid = 1;
-	else
-		ctx_valid = 0;
+	ctx_valid = CTX_VALID(mm->context);
+	if (!ctx_valid)
+		get_new_mmu_context(mm);
+	spin_unlock(&mm->page_table_lock);
 
 	if (!ctx_valid || (old_mm != mm)) {
-		if (!ctx_valid)
-			get_new_mmu_context(mm);
-
 		load_secondary_context(mm);
 		reload_tlbmiss_state(tsk, mm);
 	}
 
-	{
-		int cpu = smp_processor_id();
-
-		/* Even if (mm == old_mm) we _must_ check
-		 * the cpu_vm_mask. If we do not we could
-		 * corrupt the TLB state because of how
-		 * smp_flush_tlb_{page,range,mm} on sparc64
-		 * and lazy tlb switches work. -DaveM
-		 */
-		if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
-			cpu_set(cpu, mm->cpu_vm_mask);
-			__flush_tlb_mm(CTX_HWBITS(mm->context),
-				       SECONDARY_CONTEXT);
-		}
+	/* Even if (mm == old_mm) we _must_ check
+	 * the cpu_vm_mask. If we do not we could
+	 * corrupt the TLB state because of how
+	 * smp_flush_tlb_{page,range,mm} on sparc64
+	 * and lazy tlb switches work. -DaveM
+	 */
+	cpu = smp_processor_id();
+	if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
+		cpu_set(cpu, mm->cpu_vm_mask);
+		__flush_tlb_mm(CTX_HWBITS(mm->context),
+			       SECONDARY_CONTEXT);
 	}
-	spin_unlock(&mm->page_table_lock);
 }
 
 #define deactivate_mm(tsk,mm) do { } while (0)
@@ -127,6 +125,10 @@ static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm
 {
 	int cpu;
 
+	/* Note: page_table_lock is used here to serialize switch_mm
+	 * and activate_mm, and their calls to get_new_mmu_context.
+	 * This use of page_table_lock is unrelated to its other uses.
+	 */
 	spin_lock(&mm->page_table_lock);
 	if (!CTX_VALID(mm->context))
 		get_new_mmu_context(mm);

kernel/fork.c

@@ -470,13 +470,6 @@ static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
 
 	if (clone_flags & CLONE_VM) {
 		atomic_inc(&oldmm->mm_users);
 		mm = oldmm;
-		/*
-		 * There are cases where the PTL is held to ensure no
-		 * new threads start up in user mode using an mm, which
-		 * allows optimizing out ipis; the tlb_gather_mmu code
-		 * is an example.
-		 */
-		spin_unlock_wait(&oldmm->page_table_lock);
 		goto good_mm;
 	}