mm: fix swapops.h:131 bug if remap_file_pages raced migration
Add remove_linear_migration_ptes_from_nonlinear(), to fix an interesting
little include/linux/swapops.h:131 BUG_ON(!PageLocked) found by trinity:
indicating that remove_migration_ptes() failed to find one of the
migration entries that was temporarily inserted.

The problem comes from remap_file_pages()'s switch from vma_interval_tree
(good for inserting the migration entry) to i_mmap_nonlinear list (no
good for locating it again); but can only be a problem if the
remap_file_pages() range does not cover the whole of the vma (zap_pte()
clears the range).

remove_migration_ptes() needs a file_nonlinear method to go down the
i_mmap_nonlinear list, applying linear location to look for migration
entries in those vmas too, just in case there was this race.  The
file_nonlinear method does need rmap_walk_control.arg to do this; but it
never needed vma passed in - vma comes from its own iteration.

Reported-and-tested-by: Dave Jones <davej@redhat.com>
Reported-and-tested-by: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
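To picture the race concretely, here is a rough userspace outline of the kind
of interleaving the trinity fuzzer stumbled into. This is a hedged sketch, not
part of the commit and not a deterministic reproducer: it assumes a two-node
NUMA machine and libnuma for move_pages() (link with -lnuma); the file name,
thread structure, and constants are illustrative, and error handling is
omitted.

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <pthread.h>
	#include <sys/mman.h>
	#include <unistd.h>
	#include <numaif.h>

	#define PAGES 64

	static long pagesz;
	static char *map;

	/* Thread A: keep converting the first half of the vma to VM_NONLINEAR. */
	static void *remapper(void *unused)
	{
		(void)unused;
		for (;;)
			remap_file_pages(map, (PAGES / 2) * pagesz, 0, PAGES / 2, 0);
		return NULL;
	}

	int main(void)
	{
		int fd = open("testfile", O_RDWR | O_CREAT, 0600);	/* illustrative path */
		int node = 0;
		pthread_t t;

		pagesz = sysconf(_SC_PAGESIZE);
		ftruncate(fd, PAGES * pagesz);
		map = mmap(NULL, PAGES * pagesz, PROT_READ | PROT_WRITE,
			   MAP_SHARED, fd, 0);
		pthread_create(&t, NULL, remapper, NULL);

		/*
		 * Thread B: migrate a page in the still-linear second half of
		 * the same vma.  Pre-fix, remove_migration_ptes() could miss
		 * the temporary migration entry for it once the vma had moved
		 * from the vma_interval_tree to the i_mmap_nonlinear list.
		 */
		for (;;) {
			void *page = map + (PAGES - 1) * pagesz;
			int status;

			map[(PAGES - 1) * pagesz] = 1;	/* fault the page in */
			move_pages(0, 1, &page, &node, &status, MPOL_MF_MOVE);
			node ^= 1;	/* bounce the page between nodes 0 and 1 */
		}
	}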
parent 3fb725c48b
commit 7e09e738af

3 changed files with 36 additions and 4 deletions
include/linux/rmap.h

@@ -250,8 +250,7 @@ struct rmap_walk_control {
 	int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
 					unsigned long addr, void *arg);
 	int (*done)(struct page *page);
-	int (*file_nonlinear)(struct page *, struct address_space *,
-					struct vm_area_struct *vma);
+	int (*file_nonlinear)(struct page *, struct address_space *, void *arg);
 	struct anon_vma *(*anon_lock)(struct page *page);
 	bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
 };
mm/migrate.c (32 changed lines)

@@ -177,6 +177,37 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
 	return SWAP_AGAIN;
 }
 
+/*
+ * Congratulations to trinity for discovering this bug.
+ * mm/fremap.c's remap_file_pages() accepts any range within a single vma to
+ * convert that vma to VM_NONLINEAR; and generic_file_remap_pages() will then
+ * replace the specified range by file ptes throughout (maybe populated after).
+ * If page migration finds a page within that range, while it's still located
+ * by vma_interval_tree rather than lost to i_mmap_nonlinear list, no problem:
+ * zap_pte() clears the temporary migration entry before mmap_sem is dropped.
+ * But if the migrating page is in a part of the vma outside the range to be
+ * remapped, then it will not be cleared, and remove_migration_ptes() needs to
+ * deal with it.  Fortunately, this part of the vma is of course still linear,
+ * so we just need to use linear location on the nonlinear list.
+ */
+static int remove_linear_migration_ptes_from_nonlinear(struct page *page,
+		struct address_space *mapping, void *arg)
+{
+	struct vm_area_struct *vma;
+	/* hugetlbfs does not support remap_pages, so no huge pgoff worries */
+	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	unsigned long addr;
+
+	list_for_each_entry(vma,
+		&mapping->i_mmap_nonlinear, shared.nonlinear) {
+
+		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+		if (addr >= vma->vm_start && addr < vma->vm_end)
+			remove_migration_pte(page, vma, addr, arg);
+	}
+	return SWAP_AGAIN;
+}
+
 /*
  * Get rid of all migration entries and replace them by
  * references to the indicated page.
@@ -186,6 +217,7 @@ static void remove_migration_ptes(struct page *old, struct page *new)
 	struct rmap_walk_control rwc = {
 		.rmap_one = remove_migration_pte,
 		.arg = old,
+		.file_nonlinear = remove_linear_migration_ptes_from_nonlinear,
 	};
 
 	rmap_walk(new, &rwc);
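The heart of the new helper is one line of arithmetic: the part of the vma
outside the remapped range is still linear, so a fixed translation maps the
page's file offset to the only virtual address where its pte could live. The
standalone sketch below is not kernel code - the struct and field names merely
mirror the kernel's, and the values are made up - but it walks through that
computation and the bounds check that rejects vmas which don't cover the page.

	#include <stdio.h>

	#define PAGE_SHIFT 12	/* 4 KiB pages, as on x86 */

	/* Cut-down stand-in for the few vm_area_struct fields the math needs. */
	struct fake_vma {
		unsigned long vm_start, vm_end;	/* virtual range of the mapping */
		unsigned long vm_pgoff;		/* file offset of vm_start, in pages */
	};

	/* Linear location: where pgoff would be mapped if this vma is linear. */
	static unsigned long linear_addr(const struct fake_vma *vma,
					 unsigned long pgoff)
	{
		return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	}

	int main(void)
	{
		/* vma mapping file pages 0x10..0x2f at 0x700000000000 (made up). */
		struct fake_vma vma = { 0x700000000000UL, 0x700000020000UL, 0x10 };
		unsigned long pgoff = 0x18;	/* the page being migrated */
		unsigned long addr = linear_addr(&vma, pgoff);

		/*
		 * Same check as the fix: it also catches pgoff < vm_pgoff,
		 * where the unsigned subtraction wraps and addr lands far
		 * outside the vma.
		 */
		if (addr >= vma.vm_start && addr < vma.vm_end)
			printf("look for a migration entry at %#lx\n", addr);
		else
			printf("vma does not linearly map pgoff %#lx\n", pgoff);
		return 0;
	}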
mm/rmap.c

@@ -1360,8 +1360,9 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 }
 
 static int try_to_unmap_nonlinear(struct page *page,
-		struct address_space *mapping, struct vm_area_struct *vma)
+		struct address_space *mapping, void *arg)
 {
+	struct vm_area_struct *vma;
 	int ret = SWAP_AGAIN;
 	unsigned long cursor;
 	unsigned long max_nl_cursor = 0;

@@ -1663,7 +1664,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
 	if (list_empty(&mapping->i_mmap_nonlinear))
 		goto done;
 
-	ret = rwc->file_nonlinear(page, mapping, vma);
+	ret = rwc->file_nonlinear(page, mapping, rwc->arg);
 
 done:
 	mutex_unlock(&mapping->i_mmap_mutex);
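Finally, the interface change itself, reduced to a compilable userspace mock
(stub types and illustrative names - not the kernel's actual definitions):
the walk control carries one opaque arg, rmap_walk_file() forwards rwc->arg
to file_nonlinear, and any vma the callback needs comes from its own
iteration of the nonlinear list.

	#include <stdio.h>

	struct page;		/* opaque stand-ins for the kernel types */
	struct address_space;

	struct rmap_walk_control {
		void *arg;	/* one cookie shared by all callbacks */
		int (*file_nonlinear)(struct page *, struct address_space *,
				      void *arg);
	};

	/*
	 * A callback with the post-commit signature: no vma parameter; in the
	 * kernel it would iterate mapping->i_mmap_nonlinear itself.
	 */
	static int my_nonlinear(struct page *page, struct address_space *mapping,
				void *arg)
	{
		printf("file_nonlinear called with arg=%p\n", arg);
		return 0;	/* the kernel returns SWAP_AGAIN here */
	}

	/* Mock of the call site the last hunk changes: rwc->arg, not a vma. */
	static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
	{
		struct address_space *mapping = NULL;	/* from the page, in the kernel */

		if (rwc->file_nonlinear)
			rwc->file_nonlinear(page, mapping, rwc->arg);
	}

	int main(void)
	{
		int cookie = 42;
		struct rmap_walk_control rwc = {
			.arg = &cookie,
			.file_nonlinear = my_nonlinear,
		};

		rmap_walk_file(NULL, &rwc);
		return 0;
	}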