mm: reduce atomic use on use_mm fast path
When the mm being switched to matches the active mm, we don't need to increment and then drop the mm count. In a simple benchmark this happens in about 50% of the time. Making that conditional reduces contention on that cacheline on SMP systems.

Acked-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3d2d827f5c
commit f68e148050
1 changed file with 6 additions and 3 deletions
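For readability, here is the use_mm() body as it reads after this change. It is reconstructed from the hunk below; the local variable declarations come from the surrounding function, which is not shown in the diff context:

	void use_mm(struct mm_struct *mm)
	{
		struct mm_struct *active_mm;
		struct task_struct *tsk = current;

		task_lock(tsk);
		active_mm = tsk->active_mm;
		if (active_mm != mm) {
			/* slow path: take a reference on the new mm and adopt it */
			atomic_inc(&mm->mm_count);
			tsk->active_mm = mm;
		}
		tsk->mm = mm;
		switch_mm(active_mm, mm, tsk);
		task_unlock(tsk);

		/* only drop the old mm if we actually switched away from it */
		if (active_mm != mm)
			mmdrop(active_mm);
	}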
mm/mmu_context.c
@@ -26,13 +26,16 @@ void use_mm(struct mm_struct *mm)
 
 	task_lock(tsk);
 	active_mm = tsk->active_mm;
-	atomic_inc(&mm->mm_count);
+	if (active_mm != mm) {
+		atomic_inc(&mm->mm_count);
+		tsk->active_mm = mm;
+	}
 	tsk->mm = mm;
-	tsk->active_mm = mm;
 	switch_mm(active_mm, mm, tsk);
 	task_unlock(tsk);
 
-	mmdrop(active_mm);
+	if (active_mm != mm)
+		mmdrop(active_mm);
 }
 
 /*
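A minimal usage sketch, for context only: the worker function, its name, and its arguments are hypothetical, while use_mm() and unuse_mm() are the real helpers this file provides. Since unuse_mm() leaves tsk->active_mm pointing at the borrowed mm, a kernel thread that keeps servicing the same process re-enters use_mm() with active_mm == mm, which is the fast path this commit removes the atomics from:

	#include <linux/mmu_context.h>	/* use_mm()/unuse_mm() */
	#include <linux/uaccess.h>	/* copy_from_user() */
	#include <linux/errno.h>

	/*
	 * Hypothetical worker: borrows a user process's mm so copy_from_user()
	 * can resolve the user pointer. Illustrative only; not part of this commit.
	 */
	static int example_fetch_from_user(struct mm_struct *mm,
					   const void __user *uptr,
					   void *buf, size_t len)
	{
		int ret = 0;

		use_mm(mm);		/* no atomic_inc() if mm is already active_mm */
		if (copy_from_user(buf, uptr, len))
			ret = -EFAULT;
		unuse_mm(mm);		/* active_mm stays == mm for the next call */
		return ret;
	}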