x86: strengthen 64-bit p?d_bad()
The x86_64 pgd_bad(), pud_bad(), pmd_bad() inlines have differed from their x86_32 counterparts in a couple of ways: they've been unnecessarily weak (e.g. letting 0 or 1 count as good), and were typed as unsigned long. Strengthen them and return int. The PAE pmd_bad was too weak before, allowing any junk in the upper half; but got strengthened by the patch correcting its ~PAGE_MASK to ~PTE_MASK. The PAE pud_bad already said ~PTE_MASK; and since it folds into pgd_bad, and we don't set the protection bits at that level, it'll do as is. Signed-off-by: Hugh Dickins <hugh@veritas.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
cbb3077cbe
commit
a8375bd81c
1 changed file with 6 additions and 6 deletions
|
@ -151,19 +151,19 @@ static inline void native_pgd_clear(pgd_t *pgd)
|
||||||
|
|
||||||
#ifndef __ASSEMBLY__
|
#ifndef __ASSEMBLY__
|
||||||
|
|
||||||
/*
 * pgd_bad() - report whether a pgd entry contains unexpected bits.
 *
 * A well-formed kernel pgd holds a physical address (within PTE_MASK),
 * exactly the _KERNPG_TABLE protection bits, and optionally _PAGE_USER.
 * Mask off the address and the user bit; whatever remains must equal
 * _KERNPG_TABLE precisely, otherwise the entry is corrupt.  This is
 * deliberately strict: values such as 0 or 1 are NOT treated as good.
 *
 * Returns nonzero (int, not unsigned long) when the entry is bad.
 */
static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) & ~(PTE_MASK | _PAGE_USER)) != _KERNPG_TABLE;
}
|
||||||
|
|
||||||
/*
 * pud_bad() - report whether a pud entry contains unexpected bits.
 *
 * Same contract as pgd_bad(): after masking the physical address
 * (PTE_MASK) and the optional _PAGE_USER bit, the remaining bits must
 * be exactly _KERNPG_TABLE.  Any other residue — including an all-zero
 * protection field — marks the entry as bad.
 *
 * Returns nonzero (int) when the entry is bad.
 */
static inline int pud_bad(pud_t pud)
{
	return (pud_val(pud) & ~(PTE_MASK | _PAGE_USER)) != _KERNPG_TABLE;
}
|
||||||
|
|
||||||
/*
 * pmd_bad() - report whether a pmd entry contains unexpected bits.
 *
 * Same contract as pgd_bad()/pud_bad(): strip the physical address
 * (PTE_MASK) and the optional _PAGE_USER bit, then require the
 * remainder to be exactly _KERNPG_TABLE.  Strict by design — junk in
 * the high bits or missing protection bits both count as bad.
 *
 * Returns nonzero (int) when the entry is bad.
 */
static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & ~(PTE_MASK | _PAGE_USER)) != _KERNPG_TABLE;
}
|
||||||
|
|
||||||
/* True when the pte is entirely clear: no mapping and no flags set. */
#define pte_none(x)	(!pte_val((x)))
|
||||||
|
|
Loading…
Reference in a new issue