kernel-fxtec-pro1x/arch/x86/include/asm/pgtable-2level.h
Jan Beulich 1796316a8b x86: consolidate __swp_XXX() macros
Impact: cleanup, code robustization

The __swp_...() macros silently relied upon which bits are used for
_PAGE_FILE and _PAGE_PROTNONE. After having changed _PAGE_PROTNONE in
our Xen kernel to no longer overlap _PAGE_PAT, live locks and crashes
were reported that could have been avoided if these macros properly
used the symbolic constants. Since, as pointed out earlier, for Xen
Dom0 support mainline likewise will need to eliminate the conflict
between _PAGE_PAT and _PAGE_PROTNONE, this patch does all the necessary
adjustments, plus it introduces a mechanism to check consistency
between MAX_SWAPFILES_SHIFT and the actual encoding macros.

This also fixes a latent bug in that x86-64 used a 6-bit mask in
__swp_type(), and if MAX_SWAPFILES_SHIFT was increased beyond 5 in (the
seemingly unrelated) linux/swap.h, this would have resulted in a
collision with _PAGE_FILE.

Non-PAE 32-bit code gets similarly adjusted for its pte_to_pgoff() and
pgoff_to_pte() calculations.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2008-12-16 18:34:51 +01:00
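
The consistency mechanism mentioned above is the MAX_SWAPFILES_CHECK() macro defined near the end of the header below. The following is a minimal sketch of how such a build-time check trips; the wrapper function here is hypothetical and shown only to illustrate the idea, while in the kernel the macro is meant to be invoked from generic swap code:

#include <linux/swap.h>     /* MAX_SWAPFILES_SHIFT */
#include <asm/pgtable.h>    /* SWP_TYPE_BITS, MAX_SWAPFILES_CHECK() */

static inline void swap_encoding_check(void)   /* hypothetical wrapper */
{
        /*
         * Expands to BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS):
         * the build fails if linux/swap.h grows MAX_SWAPFILES_SHIFT beyond
         * the swap-type bits the pte encoding reserves, instead of silently
         * colliding with _PAGE_FILE at run time.
         */
        MAX_SWAPFILES_CHECK();
}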


#ifndef _ASM_X86_PGTABLE_2LEVEL_H
#define _ASM_X86_PGTABLE_2LEVEL_H
#define pte_ERROR(e) \
        printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
        *ptep = pte;
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        *pmdp = pmd;
}

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
        native_set_pte(ptep, pte);
}

static inline void native_set_pte_present(struct mm_struct *mm,
                                          unsigned long addr,
                                          pte_t *ptep, pte_t pte)
{
        native_set_pte(ptep, pte);
}

static inline void native_pmd_clear(pmd_t *pmdp)
{
        native_set_pmd(pmdp, __pmd(0));
}

static inline void native_pte_clear(struct mm_struct *mm,
                                    unsigned long addr, pte_t *xp)
{
        *xp = native_make_pte(0);
}
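/*
 * On SMP the low pte word is fetched and cleared with a single atomic
 * xchg() so that a concurrent hardware update of the accessed/dirty bits
 * on another CPU cannot be lost between the read and the clear.  On UP
 * the cheaper non-atomic local variant is sufficient.
 */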
#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *xp)
{
        return __pte(xchg(&xp->pte_low, 0));
}
#else
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif
#define pte_none(x) (!(x).pte_low)
/*
 * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
 * split up the 29 bits of offset into this range (a worked example
 * follows pgoff_to_pte() below):
 */
#define PTE_FILE_MAX_BITS 29
#define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1)
#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
#define PTE_FILE_SHIFT2 (_PAGE_BIT_FILE + 1)
#define PTE_FILE_SHIFT3 (_PAGE_BIT_PROTNONE + 1)
#else
#define PTE_FILE_SHIFT2 (_PAGE_BIT_PROTNONE + 1)
#define PTE_FILE_SHIFT3 (_PAGE_BIT_FILE + 1)
#endif
#define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1)
#define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1)
#define pte_to_pgoff(pte)                                               \
        ((((pte).pte_low >> PTE_FILE_SHIFT1)                            \
          & ((1U << PTE_FILE_BITS1) - 1))                               \
         + ((((pte).pte_low >> PTE_FILE_SHIFT2)                         \
             & ((1U << PTE_FILE_BITS2) - 1)) << PTE_FILE_BITS1)         \
         + (((pte).pte_low >> PTE_FILE_SHIFT3)                          \
            << (PTE_FILE_BITS1 + PTE_FILE_BITS2)))
#define pgoff_to_pte(off)                                               \
        ((pte_t) { .pte_low =                                           \
         (((off) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1)    \
         + ((((off) >> PTE_FILE_BITS1) & ((1U << PTE_FILE_BITS2) - 1))  \
            << PTE_FILE_SHIFT2)                                         \
         + (((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2))                \
            << PTE_FILE_SHIFT3)                                         \
         + _PAGE_FILE })
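/*
 * Worked example (illustrative bit assignments only; the real values come
 * from the _PAGE_BIT_* definitions in the pgtable headers): with
 * _PAGE_BIT_PRESENT = 0, _PAGE_BIT_FILE = 6 and _PAGE_BIT_PROTNONE = 8,
 * the macros above evaluate to PTE_FILE_SHIFT1/2/3 = 1/7/9 and
 * PTE_FILE_BITS1/2 = 5/1, so pgoff_to_pte() places offset bits 0..4 in
 * pte bits 1..5, offset bit 5 in pte bit 7 and offset bits 6..28 in pte
 * bits 9..31, skipping the present, file and protnone bits
 * (5 + 1 + 23 = 29 = PTE_FILE_MAX_BITS); pte_to_pgoff() reverses the split.
 */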
/* Encode and de-code a swap entry */
#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)
#else
#define SWP_TYPE_BITS (_PAGE_BIT_PROTNONE - _PAGE_BIT_PRESENT - 1)
#define SWP_OFFSET_SHIFT (_PAGE_BIT_FILE + 1)
#endif
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
#define __swp_type(x)                   (((x).val >> (_PAGE_BIT_PRESENT + 1)) \
                                         & ((1U << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)                 ((x).val >> SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset)       ((swp_entry_t) { \
                                         ((type) << (_PAGE_BIT_PRESENT + 1)) \
                                         | ((offset) << SWP_OFFSET_SHIFT) })
#define __pte_to_swp_entry(pte)         ((swp_entry_t) { (pte).pte_low })
#define __swp_entry_to_pte(x)           ((pte_t) { .pte = (x).val })
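/*
 * Round-trip sketch using the same illustrative bit values as the example
 * above (_PAGE_BIT_PRESENT = 0, _PAGE_BIT_FILE = 6, _PAGE_BIT_PROTNONE = 8):
 * SWP_TYPE_BITS is then 5 and SWP_OFFSET_SHIFT is 9, so
 * __swp_entry(type, offset) packs the swap type into pte bits 1..5 and the
 * offset into bits 9..31, leaving the present, file and protnone bits clear;
 * __swp_type() and __swp_offset() extract the two fields again, and
 * MAX_SWAPFILES_CHECK() ensures at build time that 5 type bits are enough
 * for MAX_SWAPFILES_SHIFT.
 */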
#endif /* _ASM_X86_PGTABLE_2LEVEL_H */