uml: 64-bit tlb fixes
Some 64-bit tlb fixes:

- moved pmd_page_vaddr to pgtable.h since it's the same for both 2-level and 3-level page tables
- fixed a bogus cast on pud_page_vaddr
- made the address checking in update_*_range more careful

Signed-off-by: Jeff Dike <jdike@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1adfd6095e
commit 909e90d3c4

4 changed files with 8 additions and 9 deletions
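Note on the update_*_range change in the hunks below: each walker advances addr in fixed steps inside a do/while, so an exact-match bound (addr != end) only terminates if addr happens to land exactly on end, while addr < end stops on the first step at or beyond it. The following is a minimal user-space sketch of that difference, not kernel code; careful_walk(), the constants and the non-page-aligned end value are made-up stand-ins for illustration.

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Count the steps a page-sized walk over [addr, end) takes when it
 * stops as soon as addr reaches or passes end ("addr < end"). */
static unsigned long careful_walk(unsigned long addr, unsigned long end)
{
        unsigned long visited = 0;

        do {
                visited++;              /* stand-in for updating one pte */
                addr += PAGE_SIZE;
        } while (addr < end);           /* "addr != end" would never match a
                                         * non-page-aligned end, so the loop
                                         * would not terminate here */
        return visited;
}

int main(void)
{
        unsigned long start = 0x10000UL;
        unsigned long end = start + 3 * PAGE_SIZE + 0x100UL;    /* not page aligned */

        printf("visited %lu pages\n", careful_walk(start, end));        /* prints 4 */
        return 0;
}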
@@ -207,7 +207,7 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
                 else if (pte_newprot(*pte))
                         ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
                 *pte = pte_mkuptodate(*pte);
-        } while (pte++, addr += PAGE_SIZE, ((addr != end) && !ret));
+        } while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
         return ret;
 }
 
@@ -229,7 +229,7 @@ static inline int update_pmd_range(pud_t *pud, unsigned long addr,
                         }
                 }
                 else ret = update_pte_range(pmd, addr, next, hvc);
-        } while (pmd++, addr = next, ((addr != end) && !ret));
+        } while (pmd++, addr = next, ((addr < end) && !ret));
         return ret;
 }
 
@@ -251,7 +251,7 @@ static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
                         }
                 }
                 else ret = update_pmd_range(pud, addr, next, hvc);
-        } while (pud++, addr = next, ((addr != end) && !ret));
+        } while (pud++, addr = next, ((addr < end) && !ret));
         return ret;
 }
 
@@ -274,7 +274,7 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                         }
                 }
                 else ret = update_pud_range(pgd, addr, next, &hvc);
-        } while (pgd++, addr = next, ((addr != end_addr) && !ret));
+        } while (pgd++, addr = next, ((addr < end_addr) && !ret));
 
         if (!ret)
                 ret = do_ops(&hvc, hvc.index, 1);
@@ -41,9 +41,6 @@ static inline void pgd_mkuptodate(pgd_t pgd) { }
 #define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
 #define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))
 
-#define pmd_page_vaddr(pmd) \
-        ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-
 /*
  * Bits 0 through 4 are taken
  */
@@ -87,8 +87,7 @@ static inline void pud_clear (pud_t *pud)
 }
 
 #define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
-#define pud_page_vaddr(pud) \
-        ((struct page *) __va(pud_val(pud) & PAGE_MASK))
+#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
 
 /* Find an entry in the second-level page table.. */
 #define pmd_offset(pud, address) ((pmd_t *) pud_page_vaddr(*(pud)) + \
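The cast fixed above is about type hygiene: pud_page_vaddr() hands back the kernel virtual address of the pmd page (an unsigned long, like pmd_page_vaddr()), while pud_page() is the macro that yields a struct page pointer. Below is a user-space sketch of that distinction; struct page, __va(), phys_to_page(), PAGE_OFFSET and the sample entry value are simplified made-up stand-ins, not the kernel's definitions.

#include <stdio.h>

struct page { int dummy; };                     /* toy stand-in for struct page */

typedef struct { unsigned long pud; } pud_t;

#define PAGE_MASK       (~0xfffUL)
#define PAGE_OFFSET     0x80000000UL            /* made-up direct-map base */
#define __va(phys)      ((void *)((unsigned long)(phys) + PAGE_OFFSET))
#define pud_val(x)      ((x).pud)

static struct page fake_mem_map[1];             /* toy stand-in for mem_map */
#define phys_to_page(phys)      (&fake_mem_map[0])      /* index lookup elided */

/* As in the fixed header: one macro yields the page descriptor ... */
#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
/* ... the other yields the virtual address its entries are mapped at. */
#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))

int main(void)
{
        pud_t pud = { .pud = 0x1234000UL | 0x67 };      /* pmd-page frame | flags */

        struct page *pg = pud_page(pud);        /* descriptor of the pmd page */
        unsigned long va = pud_page_vaddr(pud); /* address to index pmd_t's at */

        printf("page descriptor %p, table mapped at %#lx\n", (void *)pg, va);
        return 0;
}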
@@ -308,6 +308,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
 #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
 
+#define pmd_page_vaddr(pmd) \
+        ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+
 /*
  * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
  *
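For reference, a user-space sketch of how a pmd_page_vaddr()-style macro is typically combined with an index macro to locate one entry in the pte page. pte_t, the geometry constants, PAGE_OFFSET/__va() and pte_index() here are simplified stand-ins for illustration, not the kernel's definitions.

#include <stdio.h>

typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pte; } pte_t;

#define PAGE_SHIFT      12
#define PAGE_MASK       (~((1UL << PAGE_SHIFT) - 1))
#define PTRS_PER_PTE    512                     /* made-up table geometry */
#define PAGE_OFFSET     0x80000000UL            /* made-up direct-map base */
#define __va(phys)      ((void *)((unsigned long)(phys) + PAGE_OFFSET))
#define pmd_val(x)      ((x).pmd)

/* The form shared by the 2-level and 3-level configurations. */
#define pmd_page_vaddr(pmd) \
        ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

/* Illustrative index macro in the usual pmd_index()/pgd_index() style. */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

int main(void)
{
        pmd_t pmd = { .pmd = 0x00345000UL | 0x67 };     /* pte-page frame | flags */
        unsigned long address = 0x0042d000UL;

        /* Base of the pte page plus the slot that covers this address. */
        pte_t *slot = (pte_t *) pmd_page_vaddr(pmd) + pte_index(address);

        printf("pte slot for %#lx sits at %p (not dereferenced)\n",
               address, (void *)slot);
        return 0;
}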