ARM: 8203/1: mm: try to re-use old ASID assignments following a rollover
Rather than unconditionally allocating a fresh ASID to an mm from an older generation, attempt to re-use the old assignment where possible. This can bring performance benefits on systems where the ASID is used to tag things other than the TLB (e.g. branch prediction resources).

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 2b94fe2ac9
commit a391263cd8
1 changed file with 34 additions and 24 deletions
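For readers outside the kernel tree, the following is a minimal, self-contained user-space sketch of the generation-plus-bitmap ASID scheme and of the re-use fast path this patch adds: keep the old hardware ASID if it is still free in the current generation instead of always allocating a new one. All names here (NUM_ASIDS, GEN_STEP, HW_ASID_MASK, asid_map, rollover, new_context_sketch) are illustrative only; the real allocator is new_context() in arch/arm/mm/context.c, which additionally handles per-CPU reserved ASIDs, TLB flushing and locking.

/*
 * Minimal user-space sketch of a generation + bitmap ASID allocator with a
 * re-use fast path. Illustrative only -- not the kernel's implementation.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_ASIDS    256
#define HW_ASID_MASK ((uint64_t)(NUM_ASIDS - 1))  /* low bits: hardware ASID */
#define GEN_STEP     ((uint64_t)NUM_ASIDS)        /* high bits: generation   */

static uint64_t generation = GEN_STEP;            /* current generation      */
static bool asid_map[NUM_ASIDS];                  /* ASIDs taken this gen    */

/* Start a new generation and forget all previous assignments. */
static void rollover(void)
{
        generation += GEN_STEP;
        for (int i = 1; i < NUM_ASIDS; i++)
                asid_map[i] = false;
        /* the kernel also flushes TLBs and re-reserves running ASIDs here */
}

/* Return an up-to-date context id (generation | asid) for a task. */
static uint64_t new_context_sketch(uint64_t old_id)
{
        uint64_t asid = old_id & HW_ASID_MASK;

        /*
         * Fast path (the idea behind this patch): if the task already had an
         * ASID and that ASID is still free in the current generation, keep
         * it and only bump the generation bits.
         */
        if (old_id != 0 && !asid_map[asid]) {
                asid_map[asid] = true;
                return generation | asid;
        }

        /* Slow path: pick any free ASID, rolling over when none are left.
         * ASID #0 stays reserved, as in the kernel. */
        for (uint64_t i = 1; i < NUM_ASIDS; i++) {
                if (!asid_map[i]) {
                        asid_map[i] = true;
                        return generation | i;
                }
        }
        rollover();
        asid_map[1] = true;
        return generation | 1;
}

int main(void)
{
        uint64_t id = new_context_sketch(0);        /* first allocation          */
        printf("initial id:     %#llx\n", (unsigned long long)id);

        rollover();                                 /* simulate an ASID rollover */
        id = new_context_sketch(id);                /* same ASID, new generation */
        printf("after rollover: %#llx\n", (unsigned long long)id);
        return 0;
}

Running the sketch shows the same hardware ASID (the low bits) surviving a rollover, with only the generation bits changing, which is what lets the CPU keep ASID-tagged state such as branch-predictor entries, as the commit message notes.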
@@ -184,22 +184,31 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
         u64 asid = atomic64_read(&mm->context.id);
         u64 generation = atomic64_read(&asid_generation);
 
-        if (asid != 0 && is_reserved_asid(asid)) {
+        if (asid != 0) {
                 /*
-                 * Our current ASID was active during a rollover, we can
-                 * continue to use it and this was just a false alarm.
+                 * If our current ASID was active during a rollover, we
+                 * can continue to use it and this was just a false alarm.
                  */
-                asid = generation | (asid & ~ASID_MASK);
-        } else {
+                if (is_reserved_asid(asid))
+                        return generation | (asid & ~ASID_MASK);
+
                 /*
-                 * Allocate a free ASID. If we can't find one, take a
-                 * note of the currently active ASIDs and mark the TLBs
-                 * as requiring flushes. We always count from ASID #1,
-                 * as we reserve ASID #0 to switch via TTBR0 and to
-                 * avoid speculative page table walks from hitting in
-                 * any partial walk caches, which could be populated
-                 * from overlapping level-1 descriptors used to map both
-                 * the module area and the userspace stack.
+                 * We had a valid ASID in a previous life, so try to re-use
+                 * it if possible.
                  */
-                asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
-                if (asid == NUM_USER_ASIDS) {
+                asid &= ~ASID_MASK;
+                if (!__test_and_set_bit(asid, asid_map))
+                        goto bump_gen;
+        }
+
+        /*
+         * Allocate a free ASID. If we can't find one, take a note of the
+         * currently active ASIDs and mark the TLBs as requiring flushes.
+         * We always count from ASID #1, as we reserve ASID #0 to switch
+         * via TTBR0 and to avoid speculative page table walks from hitting
+         * in any partial walk caches, which could be populated from
+         * overlapping level-1 descriptors used to map both the module
+         * area and the userspace stack.
+         */
+        asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
+        if (asid == NUM_USER_ASIDS) {
@@ -208,12 +217,13 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
-                        flush_context(cpu);
-                        asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
-                }
-                __set_bit(asid, asid_map);
-                cur_idx = asid;
-                asid |= generation;
-                cpumask_clear(mm_cpumask(mm));
-        }
-
+                flush_context(cpu);
+                asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
+        }
+
+        __set_bit(asid, asid_map);
+        cur_idx = asid;
+
+bump_gen:
+        asid |= generation;
+        cpumask_clear(mm_cpumask(mm));
         return asid;
 }