9e695d2ecc
This is relatively easy since PMDs now cover exactly 4MB of memory.

Our PMD entries are 32 bits each, so we use a special encoding. The lowest bit, PMD_ISHUGE, determines the interpretation. This is possible because sparc64's page tables are purely software entities, so we can use whatever encoding scheme we want; we just have to make the TLB miss assembler page table walkers aware of the layout.

set_pmd_at() works much like set_pte_at(), but it has to operate in two regimes. In the first regime we are going from non-huge-page to huge-page, replacing a table of non-huge PTEs, so we have to queue up TLB flushes based upon what mappings are valid in the PTE table. In the second regime we are going from huge-page to non-huge-page, and in that case we need only queue up a single TLB flush to push out the huge page mapping.

We still have 5 bits remaining in the huge PMD encoding, so we can very likely support any new pieces of THP state tracking that might get added in the future.

With lots of help from Johannes Weiner.

Signed-off-by: David S. Miller <davem@davemloft.net>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
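As a concrete illustration of the encoding and the two set_pmd_at() regimes described above, here is a minimal, self-contained C sketch. Only PMD_ISHUGE and the 32-bit PMD width come from the commit message itself; pmd_is_huge(), set_pmd_model(), and the flush helpers are hypothetical stand-ins for the real kernel interfaces, shown only to make the branch structure explicit.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* sparc64 PMD entries are 32 bits each (per the commit message). */
typedef uint32_t pmd_val_t;

/* Lowest bit selects the interpretation of the entry. */
#define PMD_ISHUGE	0x00000001U

static bool pmd_is_huge(pmd_val_t pmd)
{
	return (pmd & PMD_ISHUGE) != 0;
}

/* Hypothetical stand-ins for the real TLB flush queueing. */
static void flush_pte_table(void)
{
	puts("queue a flush for each valid mapping in the old PTE table");
}

static void flush_huge_mapping(void)
{
	puts("queue one flush covering the 4MB huge mapping");
}

/* Toy model of set_pmd_at()'s two regimes. */
static void set_pmd_model(pmd_val_t old, pmd_val_t new)
{
	if (!pmd_is_huge(old) && pmd_is_huge(new))
		flush_pte_table();	/* regime 1: PTE table -> huge page */
	else if (pmd_is_huge(old) && !pmd_is_huge(new))
		flush_huge_mapping();	/* regime 2: huge page -> PTE table */
}

int main(void)
{
	set_pmd_model(0x0, PMD_ISHUGE);	/* collapse into a huge page */
	set_pmd_model(PMD_ISHUGE, 0x0);	/* split back to small pages */
	return 0;
}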
94 lines
2.3 KiB
C
#ifndef _SPARC64_PGALLOC_H
#define _SPARC64_PGALLOC_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include <asm/spitfire.h>
#include <asm/cpudata.h>
#include <asm/cacheflush.h>
#include <asm/page.h>

/* Page table allocation/freeing. */

extern struct kmem_cache *pgtable_cache;

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	kmem_cache_free(pgtable_cache, pgd);
}

#define pud_populate(MM, PUD, PMD)	pud_set(PUD, PMD)

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(pgtable_cache,
				GFP_KERNEL|__GFP_REPEAT);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	kmem_cache_free(pgtable_cache, pmd);
}

extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
				   unsigned long address);
extern pgtable_t pte_alloc_one(struct mm_struct *mm,
			       unsigned long address);
extern void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
extern void pte_free(struct mm_struct *mm, pgtable_t ptepage);

#define pmd_populate_kernel(MM, PMD, PTE)	pmd_set(MM, PMD, PTE)
#define pmd_populate(MM, PMD, PTE)		pmd_set(MM, PMD, PTE)
#define pmd_pgtable(PMD)	((pte_t *)__pmd_page(PMD))

#define check_pgt_cache()	do { } while (0)

extern void pgtable_free(void *table, bool is_page);

#ifdef CONFIG_SMP

struct mmu_gather;
extern void tlb_remove_table(struct mmu_gather *, void *);

/* Stash the is_page flag in bit 0 of the table pointer; page table
 * allocations are pointer-aligned, so that bit is otherwise clear.
 */
static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, bool is_page)
{
	unsigned long pgf = (unsigned long)table;
	if (is_page)
		pgf |= 0x1UL;
	tlb_remove_table(tlb, (void *)pgf);
}

/* Undo the encoding above: recover the flag from bit 0, then mask it
 * off to get the real table pointer back.
 */
static inline void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~0x1UL);
	bool is_page = false;

	if ((unsigned long)_table & 0x1UL)
		is_page = true;
	pgtable_free(table, is_page);
}
#else /* CONFIG_SMP */
static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, bool is_page)
{
	pgtable_free(table, is_page);
}
#endif /* !CONFIG_SMP */

static inline void __pte_free_tlb(struct mmu_gather *tlb, pte_t *pte,
				  unsigned long address)
{
	pgtable_free_tlb(tlb, pte, true);
}

#define __pmd_free_tlb(tlb, pmd, addr)	\
	pgtable_free_tlb(tlb, pmd, false)

#endif /* _SPARC64_PGALLOC_H */
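A design note on pgtable_free_tlb()/__tlb_remove_table() above: tlb_remove_table() only carries an opaque void *, so the is_page flag rides in bit 0 of the table pointer, which alignment guarantees is free. The following standalone sketch demonstrates the same pointer-tagging trick in ordinary userspace C; encode_table() and decode_table() are illustrative names, not kernel functions.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Tag bit 0 of an aligned pointer with a boolean flag. */
static void *encode_table(void *table, bool is_page)
{
	unsigned long pgf = (unsigned long)table;

	assert((pgf & 0x1UL) == 0);	/* alignment keeps bit 0 free */
	if (is_page)
		pgf |= 0x1UL;
	return (void *)pgf;
}

/* Recover the flag from bit 0 and mask it off again. */
static void decode_table(void *_table, void **table, bool *is_page)
{
	*is_page = ((unsigned long)_table & 0x1UL) != 0;
	*table = (void *)((unsigned long)_table & ~0x1UL);
}

int main(void)
{
	void *pte_page = aligned_alloc(64, 64);
	void *tagged = encode_table(pte_page, true);
	void *table;
	bool is_page;

	decode_table(tagged, &table, &is_page);
	printf("table=%p is_page=%d\n", table, (int)is_page);
	assert(table == pte_page && is_page);
	free(table);
	return 0;
}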