mm/rmap: use rmap_walk() in try_to_unmap()
Now that rmap_walk() provides an infrastructure that absorbs the differences among the rmap traversal variants, use it in try_to_unmap(). This patch changes two things:

1. Enable rmap_walk() even if !CONFIG_MIGRATION.
2. Mechanically convert try_to_unmap() to use rmap_walk().

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Hillf Danton <dhillf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0dd1c7bbce
commit 5262950642

3 changed files with 39 additions and 18 deletions
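For readers skimming the diffs below: the conversion hinges on struct rmap_walk_control, added earlier in this series, which packages per-mapping work (rmap_one), an opaque argument (arg), and an early-exit predicate (done), so that one walker serves page migration and now try_to_unmap(). The following stand-alone user-space sketch models that pattern; every type and value here is a simplified stand-in, not the kernel's.

#include <stdio.h>

/* Hypothetical user-space stand-ins for the kernel types. */
struct page { int mapcount; };
struct vm_area_struct { unsigned long start; };

#define SWAP_AGAIN 1

/* Mirrors the shape of struct rmap_walk_control. */
struct rmap_walk_control {
	void *arg;	/* opaque argument handed to each rmap_one call */
	int (*rmap_one)(struct page *, struct vm_area_struct *,
			unsigned long addr, void *arg);
	int (*done)(struct page *);	/* non-zero => stop walking early */
};

/* A toy walker: visit every "mapping" and honor the callbacks. */
static int rmap_walk(struct page *page, struct vm_area_struct *vmas,
		     int nr, struct rmap_walk_control *rwc)
{
	int i, ret = SWAP_AGAIN;

	for (i = 0; i < nr; i++) {
		ret = rwc->rmap_one(page, &vmas[i], vmas[i].start, rwc->arg);
		if (ret != SWAP_AGAIN)
			break;
		if (rwc->done && rwc->done(page))
			break;
	}
	return ret;
}

/* Caller-side behavior, analogous to try_to_unmap_one(). */
static int unmap_one(struct page *page, struct vm_area_struct *vma,
		     unsigned long addr, void *arg)
{
	unsigned long flags = (unsigned long)arg; /* flags ride in the pointer */

	printf("unmap at %#lx (flags=%#lx)\n", addr, flags);
	page->mapcount--;
	return SWAP_AGAIN;
}

static int page_not_mapped(struct page *page)
{
	return page->mapcount == 0;
}

int main(void)
{
	struct page page = { .mapcount = 2 };
	struct vm_area_struct vmas[] = { { 0x1000 }, { 0x2000 }, { 0x3000 } };
	struct rmap_walk_control rwc = {
		.arg = (void *)0x4L,	/* a pretend flag value */
		.rmap_one = unmap_one,
		.done = page_not_mapped,
	};

	rmap_walk(&page, vmas, 3, &rwc);	/* stops after two mappings */
	return 0;
}

The kernel walker additionally dispatches on page type (KSM/anon/file) and takes the right locks via anon_lock; the sketch keeps only the callback plumbing that this patch exploits.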
include/linux/rmap.h

@@ -190,7 +190,7 @@ int page_referenced_one(struct page *, struct vm_area_struct *,
 
 int try_to_unmap(struct page *, enum ttu_flags flags);
 int try_to_unmap_one(struct page *, struct vm_area_struct *,
-			unsigned long address, enum ttu_flags flags);
+			unsigned long address, void *arg);
 
 /*
  * Called from mm/filemap_xip.c to unmap empty zero page
@@ -256,9 +256,6 @@ struct rmap_walk_control {
 	bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
 };
 
-/*
- * Called by migrate.c to remove migration ptes, but might be used more later.
- */
 int rmap_walk(struct page *page, struct rmap_walk_control *rwc);
 
 #else	/* !CONFIG_MMU */
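Why the prototype changes to void *arg: rmap_one callbacks must share one signature across all walker users, so try_to_unmap()'s ttu_flags are smuggled through the opaque pointer and unpacked inside try_to_unmap_one(). A minimal, compilable illustration of that round-trip (plain C, not kernel code; the enum values are illustrative, and the intermediate (long) cast only silences pointer-to-integer width warnings):

#include <assert.h>

enum ttu_flags { TTU_UNMAP = 1, TTU_MIGRATION = 2 };	/* illustrative values */

/* Unpack flags that the caller stored directly in the pointer's bits. */
static enum ttu_flags unpack(void *arg)
{
	return (enum ttu_flags)(long)arg;
}

int main(void)
{
	void *arg = (void *)(long)TTU_MIGRATION;	/* pack: no memory needed */
	assert(unpack(arg) == TTU_MIGRATION);		/* unpack: lossless */
	return 0;
}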
mm/ksm.c (4 changed lines)

@@ -1982,7 +1982,7 @@ int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
 				continue;
 
 			ret = try_to_unmap_one(page, vma,
-					rmap_item->address, flags);
+					rmap_item->address, (void *)flags);
 			if (ret != SWAP_AGAIN || !page_mapped(page)) {
 				anon_vma_unlock_read(anon_vma);
 				goto out;
@@ -1996,7 +1996,6 @@ int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
 	return ret;
 }
 
-#ifdef CONFIG_MIGRATION
 int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
 {
 	struct stable_node *stable_node;
@@ -2054,6 +2053,7 @@ int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
 	return ret;
 }
 
+#ifdef CONFIG_MIGRATION
 void ksm_migrate_page(struct page *newpage, struct page *oldpage)
 {
 	struct stable_node *stable_node;
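The #ifdef shuffle above is point 1 of the changelog: rmap_walk() dispatches on page type, so rmap_walk_ksm() must now be built even when CONFIG_MIGRATION is disabled, while ksm_migrate_page() stays migration-only. For reference, the dispatcher added earlier in this series has roughly this shape:

int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
{
	if (unlikely(PageKsm(page)))
		return rmap_walk_ksm(page, rwc);
	else if (PageAnon(page))
		return rmap_walk_anon(page, rwc);
	else
		return rmap_walk_file(page, rwc);
}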
mm/rmap.c (48 changed lines)

@@ -1179,15 +1179,18 @@ void page_remove_rmap(struct page *page)
 /*
  * Subfunctions of try_to_unmap: try_to_unmap_one called
  * repeatedly from try_to_unmap_ksm, try_to_unmap_anon or try_to_unmap_file.
+ *
+ * @arg: enum ttu_flags will be passed to this argument
  */
 int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
-		     unsigned long address, enum ttu_flags flags)
+		     unsigned long address, void *arg)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *pte;
 	pte_t pteval;
 	spinlock_t *ptl;
 	int ret = SWAP_AGAIN;
+	enum ttu_flags flags = (enum ttu_flags)arg;
 
 	pte = page_check_address(page, mm, address, &ptl, 0);
 	if (!pte)
@@ -1513,6 +1516,11 @@ bool is_vma_temporary_stack(struct vm_area_struct *vma)
 	return false;
 }
 
+static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
+{
+	return is_vma_temporary_stack(vma);
+}
+
 /**
  * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
  * rmap method
@@ -1558,7 +1566,7 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
 			continue;
 
 		address = vma_address(page, vma);
-		ret = try_to_unmap_one(page, vma, address, flags);
+		ret = try_to_unmap_one(page, vma, address, (void *)flags);
 		if (ret != SWAP_AGAIN || !page_mapped(page))
 			break;
 	}
@@ -1592,7 +1600,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 	mutex_lock(&mapping->i_mmap_mutex);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
-		ret = try_to_unmap_one(page, vma, address, flags);
+		ret = try_to_unmap_one(page, vma, address, (void *)flags);
 		if (ret != SWAP_AGAIN || !page_mapped(page))
 			goto out;
 	}
@@ -1614,6 +1622,11 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 	return ret;
 }
 
+static int page_not_mapped(struct page *page)
+{
+	return !page_mapped(page);
+};
+
 /**
  * try_to_unmap - try to remove all page table mappings to a page
  * @page: the page to get unmapped
@@ -1631,16 +1644,29 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 int try_to_unmap(struct page *page, enum ttu_flags flags)
 {
 	int ret;
+	struct rmap_walk_control rwc = {
+		.rmap_one = try_to_unmap_one,
+		.arg = (void *)flags,
+		.done = page_not_mapped,
+		.file_nonlinear = try_to_unmap_nonlinear,
+		.anon_lock = page_lock_anon_vma_read,
+	};
 
-	BUG_ON(!PageLocked(page));
 	VM_BUG_ON(!PageHuge(page) && PageTransHuge(page));
 
-	if (unlikely(PageKsm(page)))
-		ret = try_to_unmap_ksm(page, flags);
-	else if (PageAnon(page))
-		ret = try_to_unmap_anon(page, flags);
-	else
-		ret = try_to_unmap_file(page, flags);
+	/*
+	 * During exec, a temporary VMA is setup and later moved.
+	 * The VMA is moved under the anon_vma lock but not the
+	 * page tables leading to a race where migration cannot
+	 * find the migration ptes. Rather than increasing the
+	 * locking requirements of exec(), migration skips
+	 * temporary VMAs until after exec() completes.
+	 */
+	if (flags & TTU_MIGRATION && !PageKsm(page) && PageAnon(page))
+		rwc.invalid_vma = invalid_migration_vma;
+
+	ret = rmap_walk(page, &rwc);
+
 	if (ret != SWAP_MLOCK && !page_mapped(page))
 		ret = SWAP_SUCCESS;
 	return ret;
@@ -1683,7 +1709,6 @@ void __put_anon_vma(struct anon_vma *anon_vma)
 	anon_vma_free(anon_vma);
 }
 
-#ifdef CONFIG_MIGRATION
 static struct anon_vma *rmap_walk_anon_lock(struct page *page,
 					struct rmap_walk_control *rwc)
 {
@@ -1785,7 +1810,6 @@ int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
 	else
 		return rmap_walk_file(page, rwc);
 }
-#endif /* CONFIG_MIGRATION */
 
 #ifdef CONFIG_HUGETLB_PAGE
 /*
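A note on the two hooks try_to_unmap() relies on after the conversion: done (page_not_mapped) lets the walk stop as soon as the last mapping is gone, and invalid_vma (invalid_migration_vma) reproduces the old exec-race workaround by filtering VMAs before rmap_one ever runs. A small user-space model of that filtering step (the temporary_stack field is a hypothetical stand-in for is_vma_temporary_stack()):

#include <stdbool.h>
#include <stdio.h>

struct vm_area_struct { bool temporary_stack; };	/* simplified stand-in */

/* Models invalid_migration_vma(): skip exec()'s temporary stack VMAs. */
static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
{
	(void)arg;
	return vma->temporary_stack;
}

int main(void)
{
	struct vm_area_struct vmas[] = { { false }, { true }, { false } };
	int visited = 0;

	for (int i = 0; i < 3; i++) {
		if (invalid_migration_vma(&vmas[i], NULL))
			continue;	/* the walker never calls rmap_one here */
		visited++;
	}
	printf("visited %d of 3 VMAs\n", visited);	/* prints: visited 2 of 3 VMAs */
	return 0;
}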