d6bf29b44d
Fix up powerpc to the new mmu_gather stuff. PPC has an extra batching queue to RCU free the actual pagetable allocations, use the ARCH extensions for that for now. For the ppc64_tlb_batch, which tracks the vaddrs to unhash from the hardware hash-table, keep using per-cpu arrays but flush on context switch and use a TLF bit to track the lazy_mmu state. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: David Miller <davem@davemloft.net> Cc: Martin Schwidefsky <schwidefsky@de.ibm.com> Cc: Russell King <rmk@arm.linux.org.uk> Cc: Paul Mundt <lethal@linux-sh.org> Cc: Jeff Dike <jdike@addtoit.com> Cc: Richard Weinberger <richard@nod.at> Cc: Tony Luck <tony.luck@intel.com> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Hugh Dickins <hughd@google.com> Cc: Mel Gorman <mel@csn.ul.ie> Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Cc: Nick Piggin <npiggin@kernel.dk> Cc: Namhyung Kim <namhyung@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
53 lines
1.3 KiB
C
53 lines
1.3 KiB
C
#ifndef _ASM_POWERPC_PGALLOC_H
|
|
#define _ASM_POWERPC_PGALLOC_H
|
|
#ifdef __KERNEL__
|
|
|
|
#include <linux/mm.h>
|
|
|
|
#ifdef CONFIG_PPC_BOOK3E
|
|
extern void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address);
|
|
#else /* CONFIG_PPC_BOOK3E */
|
|
static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
				     unsigned long address)
{
	/*
	 * No-op on non-Book3E (hash-MMU) platforms: freeing a page-table
	 * page needs no extra walk-cache flush here.  Book3E parts have a
	 * real implementation (see the extern declaration above).
	 */
}
|
|
#endif /* !CONFIG_PPC_BOOK3E */
|
|
|
|
/*
 * Free a kernel PTE page immediately.  Kernel page tables have no
 * struct-page PTE state to tear down, so a plain free_page() suffices.
 * @mm is unused here but part of the generic pte_free_kernel() contract.
 */
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}
|
|
|
|
/*
 * Free a user PTE page.  The struct-page PTE state (page-table lock
 * etc.) must be torn down with pgtable_page_dtor() before the page is
 * returned to the allocator — keep this ordering.
 */
static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pgtable_page_dtor(ptepage);
	__free_page(ptepage);
}
|
|
|
|
#ifdef CONFIG_PPC64
|
|
#include <asm/pgalloc-64.h>
|
|
#else
|
|
#include <asm/pgalloc-32.h>
|
|
#endif
|
|
|
|
#ifdef CONFIG_SMP
|
|
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift);
|
|
extern void pte_free_finish(struct mmu_gather *tlb);
|
|
#else /* CONFIG_SMP */
|
|
/*
 * UP variant: with no other CPUs that might be concurrently walking
 * the page tables, the table page can be freed right away instead of
 * being queued for deferred (RCU-batched) freeing as on SMP.
 */
static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
{
	pgtable_free(table, shift);
}
|
|
static inline void pte_free_finish(struct mmu_gather *tlb) { }	/* UP: nothing batched, nothing to flush */
|
|
#endif /* !CONFIG_SMP */
|
|
|
|
/*
 * Hook used by the generic mmu_gather code to dispose of a PTE page.
 * Ordering matters:
 *   1. tlb_flush_pgtable()  — flush any hardware page-walk cache for the
 *                             covered range (real work on Book3E only);
 *   2. pgtable_page_dtor()  — tear down struct-page PTE state;
 *   3. pgtable_free_tlb()   — hand the page to the (SMP: deferred)
 *                             page-table freeing path; shift 0 marks it
 *                             as a PTE-level table.
 */
static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage,
				  unsigned long address)
{
	tlb_flush_pgtable(tlb, address);
	pgtable_page_dtor(ptepage);
	pgtable_free_tlb(tlb, page_address(ptepage), 0);
}
|
|
|
|
#endif /* __KERNEL__ */
|
|
#endif /* _ASM_POWERPC_PGALLOC_H */
|