b13d618b44
This and the following patches aim to optimize the code dealing with page tables and TLB operations. Each patch reduces the time it takes to gzip a 16 MB file slightly, but I expect things like fork() and mmap() will improve somewhat more.

This patch deals with the low-level TLB operations:

  * Remove the unused _TLBEHI_I define
  * Use gcc builtins instead of inline assembly
  * Remove a few unnecessary pipeline flushes and nops
  * Introduce a NR_TLB_ENTRIES define and use it instead of hardcoding it to 32 in a few places throughout the code (see the sketch below)
  * Use sysreg bitops instead of hardcoded shifts and masks
  * Make a few needlessly global functions static

Signed-off-by: Haavard Skinnemoen <haavard.skinnemoen@atmel.com>
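The NR_TLB_ENTRIES item is the easiest change to picture. The fragment below is only an illustrative sketch of that kind of cleanup, not code taken from this patch; invalidate_tlb_entry() is a hypothetical stand-in for the real AVR32 MMU register accessors.

#define NR_TLB_ENTRIES	32

static inline void invalidate_tlb_entry(unsigned int index)
{
	/* Placeholder body: the real code would program the MMU registers
	 * for the given TLB slot via the sysreg accessors. */
	(void)index;
}

static void flush_all_tlb_entries(void)
{
	unsigned int i;

	/* The loop bound is the named constant instead of a hardcoded 32. */
	for (i = 0; i < NR_TLB_ENTRIES; i++)
		invalidate_tlb_entry(i);
}

Using the named constant keeps every such loop consistent if a future chip variant ends up with a different number of TLB entries.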
32 lines
1.1 KiB
C
/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_AVR32_TLBFLUSH_H
#define __ASM_AVR32_TLBFLUSH_H

#include <asm/mmu.h>

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes' TLB entries
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 */
extern void flush_tlb(void);
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);

extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

#endif /* __ASM_AVR32_TLBFLUSH_H */
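For context, this is roughly how a generic mm-layer caller would use the interface declared above. The function below is hypothetical and only illustrates the calling convention; it is not part of this patch.

#include <linux/mm.h>
#include <asm/tlbflush.h>

/* Hypothetical caller: after the page tables covering a user range have
 * been changed, any stale translations cached in the TLB must be dropped. */
static void example_update_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
	/* ... modify the PTEs covering [start, end) here ... */

	/* Throw away cached translations for the old mappings. */
	flush_tlb_range(vma, start, end);
}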