mm: make GUP handle pfn mapping unless FOLL_GET is requested
With DAX, pfn mappings are becoming more common. This patch adjusts the GUP code to cover pfn mappings in the cases where we don't need a struct page to proceed.

To make that possible, change the follow_page() code to return the -EEXIST error code when a proper page table entry exists but there is no corresponding struct page. __get_user_pages() ignores that error code and moves on to the next page frame.

The immediate effect of the change is that MAP_POPULATE and mlock() work on DAX mappings.

[akpm@linux-foundation.org: fix arm64 build]
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Toshi Kani <toshi.kani@hp.com>
Acked-by: Matthew Wilcox <willy@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
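To make the user-visible effect concrete, here is a minimal userspace sketch, assuming a file on a DAX-mounted filesystem at the hypothetical path /mnt/pmem/data. It only illustrates the behaviour the patch enables (prefaulting and locking a pfn-mapped range); it is not part of the patch itself.

/*
 * Minimal sketch of the effect described above.  The path /mnt/pmem/data
 * is a hypothetical file on a DAX-mounted filesystem; adjust it for a
 * real setup.  With this patch, MAP_POPULATE and mlock() succeed on the
 * pfn-mapped range even though the mapped frames have no struct page.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        size_t len = 2 * 1024 * 1024;
        int fd = open("/mnt/pmem/data", O_RDWR | O_CREAT, 0600);

        if (fd < 0 || ftruncate(fd, len)) {
                perror("open/ftruncate");
                return EXIT_FAILURE;
        }

        /* MAP_POPULATE pre-faults the whole range through GUP */
        void *addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
                          MAP_SHARED | MAP_POPULATE, fd, 0);
        if (addr == MAP_FAILED) {
                perror("mmap");
                return EXIT_FAILURE;
        }

        /* mlock() also walks the range with GUP; no struct page is required */
        if (mlock(addr, len))
                perror("mlock");

        munlock(addr, len);
        munmap(addr, len);
        close(fd);
        return EXIT_SUCCESS;
}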
parent d899844e9c
commit 1027e4436b

1 changed file with 50 additions and 10 deletions
diff --git a/mm/gup.c b/mm/gup.c
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -12,7 +12,9 @@
 #include <linux/sched.h>
 #include <linux/rwsem.h>
 #include <linux/hugetlb.h>
+
 #include <asm/pgtable.h>
+#include <asm/tlbflush.h>
 
 #include "internal.h"
 
@@ -32,6 +34,30 @@ static struct page *no_page_table(struct vm_area_struct *vma,
        return NULL;
 }
 
+static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
+               pte_t *pte, unsigned int flags)
+{
+       /* No page to get reference */
+       if (flags & FOLL_GET)
+               return -EFAULT;
+
+       if (flags & FOLL_TOUCH) {
+               pte_t entry = *pte;
+
+               if (flags & FOLL_WRITE)
+                       entry = pte_mkdirty(entry);
+               entry = pte_mkyoung(entry);
+
+               if (!pte_same(*pte, entry)) {
+                       set_pte_at(vma->vm_mm, address, pte, entry);
+                       update_mmu_cache(vma, address, pte);
+               }
+       }
+
+       /* Proper page table entry exists, but no corresponding struct page */
+       return -EEXIST;
+}
+
 static struct page *follow_page_pte(struct vm_area_struct *vma,
                unsigned long address, pmd_t *pmd, unsigned int flags)
 {
@@ -73,10 +99,21 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 
        page = vm_normal_page(vma, address, pte);
        if (unlikely(!page)) {
-               if ((flags & FOLL_DUMP) ||
-                   !is_zero_pfn(pte_pfn(pte)))
-                       goto bad_page;
-               page = pte_page(pte);
+               if (flags & FOLL_DUMP) {
+                       /* Avoid special (like zero) pages in core dumps */
+                       page = ERR_PTR(-EFAULT);
+                       goto out;
+               }
+
+               if (is_zero_pfn(pte_pfn(pte))) {
+                       page = pte_page(pte);
+               } else {
+                       int ret;
+
+                       ret = follow_pfn_pte(vma, address, ptep, flags);
+                       page = ERR_PTR(ret);
+                       goto out;
+               }
        }
 
        if (flags & FOLL_GET)
@@ -114,12 +151,9 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
                        unlock_page(page);
                }
        }
+out:
        pte_unmap_unlock(ptep, ptl);
        return page;
-bad_page:
-       pte_unmap_unlock(ptep, ptl);
-       return ERR_PTR(-EFAULT);
-
 no_page:
        pte_unmap_unlock(ptep, ptl);
        if (!pte_none(pte))
@@ -489,9 +523,15 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                goto next_page;
                        }
                        BUG();
-               }
-               if (IS_ERR(page))
+               } else if (PTR_ERR(page) == -EEXIST) {
+                       /*
+                        * Proper page table entry exists, but no corresponding
+                        * struct page.
+                        */
+                       goto next_page;
+               } else if (IS_ERR(page)) {
                        return i ? i : PTR_ERR(page);
+               }
                if (pages) {
                        pages[i] = page;
                        flush_anon_page(vma, page, start);