Merge branch 'jsgf/x86/unify' of git://git.kernel.org/pub/scm/linux/kernel/git/jeremy/xen into x86/headers
commit 790c7ebbe9
10 changed files with 280 additions and 294 deletions
@@ -5,6 +5,7 @@
#include <linux/compiler.h>
#include <asm-generic/int-ll64.h>
#include <asm/page.h>

#define build_mmio_read(name, size, type, reg, barrier) \
static inline type name(const volatile void __iomem *addr) \
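
The macro body is cut off by the hunk, but its shape is visible: it stamps out a readX-style MMIO accessor. An illustrative invocation, recalled from this era's io.h and therefore an assumption rather than part of the hunk:

	/* assumed usage; the actual invocation in io.h may differ */
	build_mmio_read(readb, "b", unsigned char, "=q", :"memory")

	/*
	 * which would expand to roughly:
	 *
	 * static inline unsigned char readb(const volatile void __iomem *addr)
	 * {
	 *	unsigned char ret;
	 *	asm volatile("movb %1,%0" : "=q" (ret)
	 *		     : "m" (*(volatile unsigned char __force *)addr)
	 *		     : "memory");
	 *	return ret;
	 * }
	 */
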
@@ -80,6 +81,95 @@ static inline void writeq(__u64 val, volatile void __iomem *addr)
#define readq readq
#define writeq writeq

/**
 * virt_to_phys - map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given. It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function
 */

static inline phys_addr_t virt_to_phys(volatile void *address)
{
	return __pa(address);
}

/**
 * phys_to_virt - map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given. It is only valid to use this function on
 * addresses that have a kernel mapping
 *
 * This function does not handle bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function
 */

static inline void *phys_to_virt(phys_addr_t address)
{
	return __va(address);
}
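
As the kernel-doc above says, these helpers are only valid on directly mapped memory such as a kmalloc() buffer. A minimal sketch of correct use (hypothetical driver code, not part of the patch):

	#include <linux/slab.h>
	#include <asm/io.h>

	static int check_direct_map(void)		/* hypothetical helper */
	{
		void *buf = kmalloc(256, GFP_KERNEL);	/* directly mapped */
		phys_addr_t phys;

		if (!buf)
			return -ENOMEM;
		phys = virt_to_phys(buf);		/* i.e. __pa(buf) */
		BUG_ON(phys_to_virt(phys) != buf);	/* round-trips on the direct map */
		kfree(buf);
		return 0;
	}
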
/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
#define isa_virt_to_bus virt_to_phys
#define isa_page_to_bus page_to_phys
#define isa_bus_to_virt phys_to_virt

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them on x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

/**
 * ioremap - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * If the area you are trying to map is a PCI BAR you should have a
 * look at pci_iomap().
 */
extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
				unsigned long prot_val);

/*
 * The default ioremap() behavior is non-cached:
 */
static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
{
	return ioremap_nocache(offset, size);
}
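
A hedged usage sketch (BAR_PHYS and REG_STATUS are made-up names, not from the patch): map an MMIO window uncached, touch it only through the mmio helpers, and unmap when done.

	#define BAR_PHYS	0xfebf0000UL	/* hypothetical device address */
	#define REG_STATUS	0x04		/* hypothetical register offset */

	static int poke_device(void)		/* hypothetical helper */
	{
		void __iomem *regs = ioremap_nocache(BAR_PHYS, 0x1000);

		if (!regs)
			return -ENOMEM;
		pr_info("status: %08x\n", readl(regs + REG_STATUS));
		iounmap(regs);
		return 0;
	}

For a PCI BAR, pci_iomap() is the preferred interface, as the kernel-doc above notes.
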
extern void iounmap(volatile void __iomem *addr);

extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);


#ifdef CONFIG_X86_32
# include "io_32.h"
#else

@@ -53,92 +53,6 @@
 */
#define xlate_dev_kmem_ptr(p)	p

/**
 * virt_to_phys - map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given. It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function
 */

static inline unsigned long virt_to_phys(volatile void *address)
{
	return __pa(address);
}

/**
 * phys_to_virt - map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given. It is only valid to use this function on
 * addresses that have a kernel mapping
 *
 * This function does not handle bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function
 */

static inline void *phys_to_virt(unsigned long address)
{
	return __va(address);
}

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

/**
 * ioremap - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * If the area you are trying to map is a PCI BAR you should have a
 * look at pci_iomap().
 */
extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
				unsigned long prot_val);

/*
 * The default ioremap() behavior is non-cached:
 */
static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
{
	return ioremap_nocache(offset, size);
}

extern void iounmap(volatile void __iomem *addr);

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
#define isa_virt_to_bus virt_to_phys
#define isa_page_to_bus page_to_phys
#define isa_bus_to_virt phys_to_virt

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them on x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

static inline void
memset_io(volatile void __iomem *addr, unsigned char val, int count)
{

@@ -142,67 +142,8 @@ __OUTS(l)

#include <linux/vmalloc.h>

#ifndef __i386__
/*
 * Change virtual addresses to physical addresses and vv.
 * These are pretty trivial
 */
static inline unsigned long virt_to_phys(volatile void *address)
{
	return __pa(address);
}

static inline void *phys_to_virt(unsigned long address)
{
	return __va(address);
}
#endif

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

#include <asm-generic/iomap.h>

/*
 * This one maps high address device memory and turns off caching for that area.
 * it's useful if some control registers are in such an area and write combining
 * or read caching is not desirable:
 */
extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
				unsigned long prot_val);

/*
 * The default ioremap() behavior is non-cached:
 */
static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
{
	return ioremap_nocache(offset, size);
}

extern void iounmap(volatile void __iomem *addr);

extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
#define isa_virt_to_bus virt_to_phys
#define isa_page_to_bus page_to_phys
#define isa_bus_to_virt phys_to_virt

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them on x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

void __memcpy_fromio(void *, unsigned long, unsigned);
void __memcpy_toio(unsigned long, const void *, unsigned);

@@ -95,6 +95,11 @@ static inline pgdval_t native_pgd_val(pgd_t pgd)
	return pgd.pgd;
}

static inline pgdval_t pgd_flags(pgd_t pgd)
{
	return native_pgd_val(pgd) & PTE_FLAGS_MASK;
}

#if PAGETABLE_LEVELS >= 3
#if PAGETABLE_LEVELS == 4
typedef struct { pudval_t pud; } pud_t;

@@ -117,6 +122,11 @@ static inline pudval_t native_pud_val(pud_t pud)
}
#endif	/* PAGETABLE_LEVELS == 4 */

static inline pudval_t pud_flags(pud_t pud)
{
	return native_pud_val(pud) & PTE_FLAGS_MASK;
}

typedef struct { pmdval_t pmd; } pmd_t;

static inline pmd_t native_make_pmd(pmdval_t val)

@@ -128,6 +138,11 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
{
	return pmd.pmd;
}

static inline pmdval_t pmd_flags(pmd_t pmd)
{
	return native_pmd_val(pmd) & PTE_FLAGS_MASK;
}
#else  /* PAGETABLE_LEVELS == 2 */
#include <asm-generic/pgtable-nopmd.h>

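The new *_flags() accessors all mask with PTE_FLAGS_MASK, which in this tree is defined as ~PTE_PFN_MASK, so the flag bits and the page-frame bits partition the raw entry. An illustrative check, not from the patch:

	static inline void check_pmd_split(pmd_t pmd)	/* hypothetical helper */
	{
		pmdval_t val   = native_pmd_val(pmd);
		pmdval_t flags = pmd_flags(pmd);	/* val & PTE_FLAGS_MASK */
		pmdval_t pfn   = val & PTE_PFN_MASK;

		BUG_ON((flags | pfn) != val);		/* the two masks cover every bit */
		BUG_ON(flags & pfn);			/* and never overlap */
	}
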
@@ -53,8 +53,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif

#define pte_none(x)	(!(x).pte_low)

/*
 * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
 * split up the 29 bits of offset into this range:

@@ -18,21 +18,6 @@
	printk("%s:%d: bad pgd %p(%016Lx).\n",		\
	       __FILE__, __LINE__, &(e), pgd_val(e))

static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == 0;
}

static inline int pud_bad(pud_t pud)
{
	return (pud_val(pud) & ~(PTE_PFN_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) & _PAGE_PRESENT;
}

/* Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte.  In places where this is

@@ -120,15 +105,6 @@ static inline void pud_clear(pud_t *pudp)
	write_cr3(pgd);
}

#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)

#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PTE_PFN_MASK))


/* Find an entry in the second-level page table.. */
#define pmd_offset(pud, address) ((pmd_t *)pud_page_vaddr(*(pud)) + \
				  pmd_index(address))

#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{

@@ -145,17 +121,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
}

static inline int pte_none(pte_t pte)
{
	return !pte.pte_low && !pte.pte_high;
}

/*
 * Bits 0, 6 and 7 are taken in the low part of the pte,
 * put the 32 bits of offset into the high part.

@@ -236,7 +236,7 @@ static inline unsigned long pte_pfn(pte_t pte)

 static inline int pmd_large(pmd_t pte)
 {
-	return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
+	return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
 		(_PAGE_PSE | _PAGE_PRESENT);
 }

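Only the accessor changes here: _PAGE_PSE and _PAGE_PRESENT both live in the flags portion of the entry, so masking pmd_flags() instead of pmd_val() is behavior-preserving. A runnable userspace model of the test, with bit values assumed from x86 (_PAGE_PRESENT is bit 0, _PAGE_PSE is bit 7):

	#include <assert.h>
	#include <stdint.h>

	#define MY_PAGE_PRESENT 0x001ULL	/* assumed bit 0 */
	#define MY_PAGE_PSE     0x080ULL	/* assumed bit 7 */

	int main(void)
	{
		/* a 2 MiB huge-page pmd: PSE and PRESENT both set */
		uint64_t huge  = 0x200000ULL | MY_PAGE_PSE | MY_PAGE_PRESENT;
		/* a pmd pointing at a pte page: PRESENT only */
		uint64_t table = 0x345000ULL | MY_PAGE_PRESENT;

		assert((huge  & (MY_PAGE_PSE | MY_PAGE_PRESENT)) ==
		       (MY_PAGE_PSE | MY_PAGE_PRESENT));
		assert((table & (MY_PAGE_PSE | MY_PAGE_PRESENT)) !=
		       (MY_PAGE_PSE | MY_PAGE_PRESENT));
		return 0;
	}
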
@@ -437,6 +437,180 @@ static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
# include "pgtable_64.h"
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>

static inline int pte_none(pte_t pte)
{
	return !pte.pte;
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}
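
Note the _PAGE_PROTNONE half of the test: a PROT_NONE mapping is hidden from the MMU by clearing the hardware present bit, but the VM must still treat the pte as present. A runnable model with assumed bit positions (_PAGE_PRESENT bit 0; _PAGE_PROTNONE reusing the global bit, bit 8):

	#include <assert.h>

	#define MY_PAGE_PRESENT  0x001UL	/* assumed bit 0 */
	#define MY_PAGE_PROTNONE 0x100UL	/* assumed bit 8 */

	int main(void)
	{
		unsigned long prot_none_pte = MY_PAGE_PROTNONE;

		assert(!(prot_none_pte & MY_PAGE_PRESENT));	/* hw sees it absent */
		assert(prot_none_pte &
		       (MY_PAGE_PRESENT | MY_PAGE_PROTNONE));	/* VM still counts it present */
		return 0;
	}
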
static inline int pmd_present(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_PRESENT;
}

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	return (unsigned long)native_pmd_val(pmd) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
}

static inline struct page *pmd_page(pmd_t pmd)
{
	return pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT);
}

/*
 * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}
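
pmd_index(), pte_index() and pages_to_mb() above are plain shift-and-mask arithmetic. A runnable model using assumed x86-64 constants (4 KiB pages, 512 entries per table), not kernel code:

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT   12
	#define PMD_SHIFT    21
	#define PTRS_PER_PTE 512
	#define PTRS_PER_PMD 512

	int main(void)
	{
		uint64_t addr = 0x00007f12345f3000ULL;

		/* bits 12..20 select the pte, bits 21..29 the pmd entry */
		printf("pte_index=%llu pmd_index=%llu\n",
		       (unsigned long long)((addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)),
		       (unsigned long long)((addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1)));

		/* pages_to_mb: 262144 4K pages >> 8 == 1024 MB (1 GiB) */
		printf("pages_to_mb(262144)=%lu\n", 262144UL >> (20 - PAGE_SHIFT));
		return 0;
	}
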
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return native_pud_val(pud) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
}

static inline struct page *pud_page(pud_t pud)
{
	return pfn_to_page(pud_val(pud) >> PAGE_SHIFT);
}

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline int pud_large(pud_t pud)
{
	return (pud_flags(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#endif	/* PAGETABLE_LEVELS > 2 */

#if PAGETABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

static inline struct page *pgd_page(pgd_t pgd)
{
	return pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT);
}

/* to find an entry in a page-table-directory. */
static inline unsigned pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	return !native_pgd_val(pgd);
}
#endif	/* PAGETABLE_LEVELS > 3 */

#endif /* __ASSEMBLY__ */
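
Together these helpers implement the textbook table walk. A hedged sketch of how they compose (assumes a 4-level configuration and the pgd_offset() macro from this header; hypothetical function, not part of the patch):

	static pte_t *lookup_pte(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd = pgd_offset(mm, addr);
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd) || pgd_bad(*pgd))
			return NULL;
		pud = pud_offset(pgd, addr);
		if (pud_none(*pud) || pud_bad(*pud))
			return NULL;
		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd) || pmd_bad(*pmd))
			return NULL;
		return pte_offset_kernel(pmd, addr);	/* level-1 entry */
	}

Real code would also check pud_large()/pmd_large() before descending, since a huge mapping terminates the walk early.
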

/*
 * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
 *

@@ -85,55 +85,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
/* The boot page tables (all created as a single array) */
extern unsigned long pg0[];

#define pte_present(x)	((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))

/* To avoid harmful races, pmd_none(x) should check only the lower when PAE */
#define pmd_none(x)	(!(unsigned long)pmd_val((x)))
#define pmd_present(x)	(pmd_val((x)) & _PAGE_PRESENT)
#define pmd_bad(x) ((pmd_val(x) & (PTE_FLAGS_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))

#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
# include <asm/pgtable-2level.h>
#endif

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))


static inline int pud_large(pud_t pud) { return 0; }

/*
 * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
#define pmd_index(address)				\
	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

/*
 * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address)					\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address)				\
	((pte_t *)pmd_page_vaddr(*(dir)) + pte_index((address)))

#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))

#define pmd_page_vaddr(pmd)					\
	((unsigned long)__va(pmd_val((pmd)) & PTE_PFN_MASK))

#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address)				\
	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) +	\

@@ -176,7 +133,4 @@ do {						\
#define kern_addr_valid(kaddr)	(0)
#endif

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#endif /* _ASM_X86_PGTABLE_32_H */

@@ -67,9 +67,6 @@ extern void paging_init(void);
	printk("%s:%d: bad pgd %p(%016lx).\n",		\
	       __FILE__, __LINE__, &(e), pgd_val(e))

#define pgd_none(x)	(!pgd_val(x))
#define pud_none(x)	(!pud_val(x))

struct mm_struct;

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);

@@ -134,8 +131,6 @@ static inline void native_pgd_clear(pgd_t *pgd)
	native_set_pgd(pgd, native_make_pgd(0));
}

#define pte_same(a, b)	((a).pte == (b).pte)

#endif /* !__ASSEMBLY__ */

#define PMD_SIZE	(_AC(1, UL) << PMD_SHIFT)

@@ -156,26 +151,6 @@ static inline void native_pgd_clear(pgd_t *pgd)

#ifndef __ASSEMBLY__

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
}

static inline int pud_bad(pud_t pud)
{
	return (pud_val(pud) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
}

#define pte_none(x)	(!pte_val((x)))
#define pte_present(x)	(pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE))

#define pages_to_mb(x)	((x) >> (20 - PAGE_SHIFT))	/* FIXME: is this right? */

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.

@@ -184,41 +159,12 @@ static inline int pmd_bad(pmd_t pmd)
/*
 * Level 4 access.
 */
#define pgd_page_vaddr(pgd)						\
	((unsigned long)__va((unsigned long)pgd_val((pgd)) & PTE_PFN_MASK))
#define pgd_page(pgd)		(pfn_to_page(pgd_val((pgd)) >> PAGE_SHIFT))
#define pgd_present(pgd)	(pgd_val(pgd) & _PAGE_PRESENT)
static inline int pgd_large(pgd_t pgd) { return 0; }
#define mk_kernel_pgd(address)	__pgd((address) | _KERNPG_TABLE)

/* PUD - Level3 access */
/* to find an entry in a page-table-directory. */
#define pud_page_vaddr(pud)						\
	((unsigned long)__va(pud_val((pud)) & PHYSICAL_PAGE_MASK))
#define pud_page(pud)	(pfn_to_page(pud_val((pud)) >> PAGE_SHIFT))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
#define pud_offset(pgd, address)					\
	((pud_t *)pgd_page_vaddr(*(pgd)) + pud_index((address)))
#define pud_present(pud) (pud_val((pud)) & _PAGE_PRESENT)

static inline int pud_large(pud_t pte)
{
	return (pud_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

/* PMD - Level 2 access */
#define pmd_page_vaddr(pmd)	((unsigned long) __va(pmd_val((pmd)) & PTE_PFN_MASK))
#define pmd_page(pmd)		(pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))

#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
#define pmd_offset(dir, address) ((pmd_t *)pud_page_vaddr(*(dir)) + \
				  pmd_index(address))
#define pmd_none(x)	(!pmd_val((x)))
#define pmd_present(x)	(pmd_val((x)) & _PAGE_PRESENT)
#define pfn_pmd(nr, prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val((prot))))
#define pmd_pfn(x)  ((pmd_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)

#define pte_to_pgoff(pte) ((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
#define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) |	\
					     _PAGE_FILE })

@@ -226,13 +172,6 @@ static inline int pud_large(pud_t pte)

/* PTE - Level 1 access. */

/* page, protection -> pte */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn((page)), (pgprot))

#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \
					 pte_index((address)))

/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
#define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address))

@@ -266,9 +205,6 @@ extern int direct_gbpages;
extern int kern_addr_valid(unsigned long addr);
extern void cleanup_highmap(void);

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

@@ -13,7 +13,6 @@
#include <asm/setup.h>
#include <xen/hvc-console.h>
#include <asm/pci-direct.h>
#include <asm/pgtable.h>
#include <asm/fixmap.h>
#include <linux/usb/ehci_def.h>