mm: make remove_migration_ptes() beyond mm/migration.c
Make remove_migration_ptes() available to be used in split_huge_page(). New parameter 'locked' added: as with try_to_unmap() we need a way to indicate that caller holds rmap lock. We also shouldn't try to mlock() pte-mapped huge pages: pte-mapped THP pages are never mlocked. Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Cc: Andrea Arcangeli <aarcange@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
2a52bcbcc6
commit
e388466de4
2 changed files with 11 additions and 6 deletions
|
@ -243,6 +243,8 @@ int page_mkclean(struct page *);
|
|||
*/
|
||||
int try_to_munlock(struct page *);
|
||||
|
||||
void remove_migration_ptes(struct page *old, struct page *new, bool locked);
|
||||
|
||||
/*
|
||||
* Called by memory-failure.c to kill processes.
|
||||
*/
|
||||
|
|
15
mm/migrate.c
15
mm/migrate.c
|
@ -172,7 +172,7 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
|
|||
else
|
||||
page_add_file_rmap(new);
|
||||
|
||||
if (vma->vm_flags & VM_LOCKED)
|
||||
if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
|
||||
mlock_vma_page(new);
|
||||
|
||||
/* No need to invalidate - it was non-present before */
|
||||
|
@ -187,14 +187,17 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
|
|||
* Get rid of all migration entries and replace them by
|
||||
* references to the indicated page.
|
||||
*/
|
||||
static void remove_migration_ptes(struct page *old, struct page *new)
|
||||
void remove_migration_ptes(struct page *old, struct page *new, bool locked)
|
||||
{
|
||||
struct rmap_walk_control rwc = {
|
||||
.rmap_one = remove_migration_pte,
|
||||
.arg = old,
|
||||
};
|
||||
|
||||
rmap_walk(new, &rwc);
|
||||
if (locked)
|
||||
rmap_walk_locked(new, &rwc);
|
||||
else
|
||||
rmap_walk(new, &rwc);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -702,7 +705,7 @@ static int writeout(struct address_space *mapping, struct page *page)
|
|||
* At this point we know that the migration attempt cannot
|
||||
* be successful.
|
||||
*/
|
||||
remove_migration_ptes(page, page);
|
||||
remove_migration_ptes(page, page, false);
|
||||
|
||||
rc = mapping->a_ops->writepage(page, &wbc);
|
||||
|
||||
|
@ -900,7 +903,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
|
|||
|
||||
if (page_was_mapped)
|
||||
remove_migration_ptes(page,
|
||||
rc == MIGRATEPAGE_SUCCESS ? newpage : page);
|
||||
rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
|
||||
|
||||
out_unlock_both:
|
||||
unlock_page(newpage);
|
||||
|
@ -1070,7 +1073,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
|
|||
|
||||
if (page_was_mapped)
|
||||
remove_migration_ptes(hpage,
|
||||
rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage);
|
||||
rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
|
||||
|
||||
unlock_page(new_hpage);
|
||||
|
||||
|
|
Loading…
Reference in a new issue