xtensa: optimize local_flush_tlb_kernel_range
Don't flush the whole TLB if only a small kernel range is requested.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
commit 04c6b3e2b5
parent 8585b316bb
3 changed files with 34 additions and 7 deletions
@@ -36,6 +36,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma,
 		unsigned long page);
 void local_flush_tlb_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end);
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #ifdef CONFIG_SMP
 
@@ -44,12 +45,7 @@ void flush_tlb_mm(struct mm_struct *);
 void flush_tlb_page(struct vm_area_struct *, unsigned long);
 void flush_tlb_range(struct vm_area_struct *, unsigned long,
 		unsigned long);
-
-static inline void flush_tlb_kernel_range(unsigned long start,
-		unsigned long end)
-{
-	flush_tlb_all();
-}
+void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #else /* !CONFIG_SMP */
 
@@ -58,7 +54,8 @@ static inline void flush_tlb_kernel_range(unsigned long start,
 #define flush_tlb_page(vma, page)	   local_flush_tlb_page(vma, page)
 #define flush_tlb_range(vma, vmaddr, end)  local_flush_tlb_range(vma, vmaddr, \
 						 end)
-#define flush_tlb_kernel_range(start, end) local_flush_tlb_all()
+#define flush_tlb_kernel_range(start, end) local_flush_tlb_kernel_range(start, \
+						 end)
 
 #endif /* CONFIG_SMP */
 
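For context, a hedged usage sketch (not part of this commit): with the header hunks above, generic code that unmaps a kernel virtual range can call flush_tlb_kernel_range() and, on !SMP, land directly in the new local routine via the macro. The caller name below is hypothetical, purely for illustration:

/* Hypothetical caller, for illustration only: once the page-table
 * entries covering [start, end) have been cleared, the stale TLB
 * entries must be dropped.  On !SMP the macro above maps this call to
 * local_flush_tlb_kernel_range(); on SMP it is broadcast via IPI.
 */
static void unmap_kernel_pages(unsigned long start, unsigned long end)
{
	/* ... clear kernel PTEs for [start, end) ... */
	flush_tlb_kernel_range(start, end);
}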
@@ -496,6 +496,21 @@ void flush_tlb_range(struct vm_area_struct *vma,
 	on_each_cpu(ipi_flush_tlb_range, &fd, 1);
 }
 
+static void ipi_flush_tlb_kernel_range(void *arg)
+{
+	struct flush_data *fd = arg;
+	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	struct flush_data fd = {
+		.addr1 = start,
+		.addr2 = end,
+	};
+	on_each_cpu(ipi_flush_tlb_kernel_range, &fd, 1);
+}
+
 /* Cache flush functions */
 
 static void ipi_flush_cache_all(void *arg)
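For reference, a sketch of the argument block the new IPI path reuses; the layout below is assumed from how the other ipi_flush_* helpers consume addr1/addr2 and is not part of this diff:

/* Assumed layout (defined earlier in this file, reproduced here only
 * for illustration): the argument block handed through on_each_cpu()
 * to the ipi_flush_* handlers.
 */
struct flush_data {
	struct vm_area_struct *vma;	/* used by the vma-based flushes */
	unsigned long addr1;		/* range start (or single page)  */
	unsigned long addr2;		/* range end                     */
};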
@@ -149,6 +149,21 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 	local_irq_restore(flags);
 }
 
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET &&
+	    end - start < _TLB_ENTRIES << PAGE_SHIFT) {
+		start &= PAGE_MASK;
+		while (start < end) {
+			invalidate_itlb_mapping(start);
+			invalidate_dtlb_mapping(start);
+			start += PAGE_SIZE;
+		}
+	} else {
+		local_flush_tlb_all();
+	}
+}
+
 #ifdef CONFIG_DEBUG_TLB_SANITY
 
 static unsigned get_pte_for_vaddr(unsigned vaddr)
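The per-page fast path only pays off while the requested range is small relative to the TLB itself, which is what the end - start < _TLB_ENTRIES << PAGE_SHIFT check expresses. A minimal standalone sketch of that cutoff, using illustrative values (PAGE_SHIFT = 12 and TLB_ENTRIES = 64 are assumptions here, not the parameters of any particular xtensa core, and the TASK_SIZE/PAGE_OFFSET bounds checks are omitted):

#include <stdio.h>

/* Illustrative values only; the real ones come from the asm headers. */
#define PAGE_SHIFT	12
#define TLB_ENTRIES	64

/* Mirrors the cutoff in local_flush_tlb_kernel_range(): per-page
 * invalidation is chosen only while the range spans fewer pages than
 * the (assumed) TLB capacity; otherwise a full flush is cheaper.
 */
static const char *chosen_path(unsigned long start, unsigned long end)
{
	if (end > start &&
	    end - start < ((unsigned long)TLB_ENTRIES << PAGE_SHIFT))
		return "per-page invalidation";
	return "full TLB flush";
}

int main(void)
{
	printf("16 KiB range: %s\n", chosen_path(0, 16 * 1024));
	printf("1 MiB range:  %s\n", chosen_path(0, 1024 * 1024));
	return 0;
}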