[PATCH] mm: parisc pte atomicity

There's a worrying function translation_exists in parisc cacheflush.h,
unaffected by split ptlock since flush_dcache_page is using it on some other
mm, without any relevant lock.  Oh well, make it a little more robust by
factoring the pfn check within it.  And it looked liable to confuse a
camouflaged swap or file entry with a good pte: fix that too.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
---
(commit 92dc6fcc84, parent 69b0475456; authored 2005-10-29 18:16:36 -07:00 by Hugh Dickins, committed by Linus Torvalds)

 2 files changed, 28 insertions(+), 31 deletions(-)
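
Illustration, not part of the patch: the pte model, the bit values and main()
below are invented; only the shape of the final test mirrors the new
translation_exists().  The "camouflaged swap or file entry" hazard is that an
entry with neither _PAGE_PRESENT nor _PAGE_FLUSH set can still carry arbitrary
bits where the pfn lives, so pte_pfn() on it means nothing; the patched helper
therefore reads the pte once, by value, and checks those bits before trusting
the pfn.

/*
 * Stand-alone user-space sketch, not part of the patch.  The pte encoding,
 * the bit values and main() are invented for illustration; only the shape
 * of the check mirrors the patched translation_exists(): take one snapshot
 * of the pte, reject anything that is neither present nor a PA "flush only"
 * mapping, and only then compare the pfn.
 */
#include <stdio.h>

#define _PAGE_PRESENT	0x001UL		/* invented bit value */
#define _PAGE_FLUSH	0x002UL		/* invented bit value */
#define PFN_SHIFT	12

typedef struct { unsigned long val; } pte_t;	/* toy pte */

static unsigned long pte_val(pte_t pte) { return pte.val; }
static unsigned long pte_pfn(pte_t pte) { return pte.val >> PFN_SHIFT; }

/* The patched test, applied to a pte read once, by value, with no pte lock. */
static int translation_exists(pte_t pte, unsigned long pfn)
{
	/* Filter out pte_none and camouflaged swap/file entries */
	if (!(pte_val(pte) & (_PAGE_FLUSH | _PAGE_PRESENT)))
		return 0;
	return pte_pfn(pte) == pfn;
}

int main(void)
{
	unsigned long pfn = 0x1234;
	pte_t present  = { (pfn << PFN_SHIFT) | _PAGE_PRESENT };
	pte_t swaplike = { pfn << PFN_SHIFT };	/* same high bits, not present */

	printf("present pte   -> %d\n", translation_exists(present, pfn));	/* 1 */
	printf("swap-like pte -> %d\n", translation_exists(swaplike, pfn));	/* 0 */
	return 0;
}

With the old interface such an entry was !pte_none() and so was handed back as
a pte pointer, leaving the pfn comparison to one caller and omitting it
entirely in flush_cache_page(); folding both tests into translation_exists()
closes that hole.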

--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c

@@ -270,7 +270,6 @@ void flush_dcache_page(struct page *page)
 	unsigned long offset;
 	unsigned long addr;
 	pgoff_t pgoff;
-	pte_t *pte;
 	unsigned long pfn = page_to_pfn(page);
 
 
@@ -301,21 +300,16 @@ void flush_dcache_page(struct page *page)
 		 * taking a page fault if the pte doesn't exist.
 		 * This is just for speed. If the page translation
 		 * isn't there, there's no point exciting the
-		 * nadtlb handler into a nullification frenzy */
-
-
-		if(!(pte = translation_exists(mpnt, addr)))
-			continue;
-
-		/* make sure we really have this page: the private
+		 * nadtlb handler into a nullification frenzy.
+		 *
+		 * Make sure we really have this page: the private
 		 * mappings may cover this area but have COW'd this
-		 * particular page */
-		if(pte_pfn(*pte) != pfn)
-			continue;
-
-		__flush_cache_page(mpnt, addr);
-
-		break;
+		 * particular page.
+		 */
+		if (translation_exists(mpnt, addr, pfn)) {
+			__flush_cache_page(mpnt, addr);
+			break;
+		}
 	}
 	flush_dcache_mmap_unlock(mapping);
 }

--- a/include/asm-parisc/cacheflush.h
+++ b/include/asm-parisc/cacheflush.h

@@ -100,30 +100,34 @@ static inline void flush_cache_range(struct vm_area_struct *vma,
 
 /* Simple function to work out if we have an existing address translation
  * for a user space vma. */
-static inline pte_t *__translation_exists(struct mm_struct *mm,
-					  unsigned long addr)
+static inline int translation_exists(struct vm_area_struct *vma,
+				unsigned long addr, unsigned long pfn)
 {
-	pgd_t *pgd = pgd_offset(mm, addr);
+	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
 	pmd_t *pmd;
-	pte_t *pte;
+	pte_t pte;
 
 	if(pgd_none(*pgd))
-		return NULL;
+		return 0;
 
 	pmd = pmd_offset(pgd, addr);
 	if(pmd_none(*pmd) || pmd_bad(*pmd))
-		return NULL;
+		return 0;
 
-	pte = pte_offset_map(pmd, addr);
+	/* We cannot take the pte lock here: flush_cache_page is usually
+	 * called with pte lock already held. Whereas flush_dcache_page
+	 * takes flush_dcache_mmap_lock, which is lower in the hierarchy:
+	 * the vma itself is secure, but the pte might come or go racily.
+	 */
+	pte = *pte_offset_map(pmd, addr);
+	/* But pte_unmap() does nothing on this architecture */
 
-	/* The PA flush mappings show up as pte_none, but they're
-	 * valid none the less */
-	if(pte_none(*pte) && ((pte_val(*pte) & _PAGE_FLUSH) == 0))
-		return NULL;
-	return pte;
+	/* Filter out coincidental file entries and swap entries */
+	if (!(pte_val(pte) & (_PAGE_FLUSH|_PAGE_PRESENT)))
+		return 0;
+
+	return pte_pfn(pte) == pfn;
 }
-#define translation_exists(vma, addr) __translation_exists((vma)->vm_mm, addr)
-
 
 /* Private function to flush a page from the cache of a non-current
  * process. cr25 contains the Page Directory of the current user
@@ -175,9 +179,8 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 {
 	BUG_ON(!vma->vm_mm->context);
 
-	if(likely(translation_exists(vma, vmaddr)))
+	if (likely(translation_exists(vma, vmaddr, pfn)))
 		__flush_cache_page(vma, vmaddr);
 
 }
 #endif