mm/thp: fix __split_huge_pmd_locked() for migration PMD
[ Upstream commit ec0abae6dcdf7ef88607c869bf35a4b63ce1b370 ]
A migrating transparent huge page has to already be unmapped. Otherwise,
the page could be modified while it is being copied to a new page and data
could be lost. The function __split_huge_pmd() checks for a PMD migration
entry before calling __split_huge_pmd_locked(), leading one to think that
__split_huge_pmd_locked() can handle splitting a migrating PMD.
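For reference, the caller-side check looks roughly like this (a paraphrase of the __split_huge_pmd() logic in kernels of this era, with locking and notifier details elided; the variable names haddr and freeze stand in for the real arguments):

	ptl = pmd_lock(vma->vm_mm, pmd);
	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
	    is_pmd_migration_entry(*pmd))
		/* a migrating PMD reaches the locked helper too */
		__split_huge_pmd_locked(vma, pmd, haddr, freeze);
	spin_unlock(ptl);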
However, the code always increments page->_mapcount and adjusts the
memory control group (memcg) accounting on the assumption that the page is mapped.
Also, if the PMD entry is a migration PMD entry, the call to
is_huge_zero_pmd(*pmd) is incorrect because it calls pmd_pfn(pmd) instead
of migration_entry_to_pfn(pmd_to_swp_entry(pmd)). Fix these problems by
checking for a PMD migration entry.
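To illustrate that last point: a pfn lookup that is correct for both a present huge PMD and a migration entry has to decode the swap entry first. A minimal sketch using the swapops.h helpers (the wrapper name huge_pmd_pfn() is invented for this example):

	static unsigned long huge_pmd_pfn(pmd_t pmd)
	{
		/* A migration PMD holds a swap entry, not a present
		 * mapping, so pmd_pfn() would misread the swap bits.
		 */
		if (is_pmd_migration_entry(pmd))
			return migration_entry_to_pfn(pmd_to_swp_entry(pmd));
		return pmd_pfn(pmd);
	}

The patch below avoids needing such a helper by calling is_huge_zero_pmd(*pmd) only when pmd_trans_huge(*pmd) confirms the entry is present.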
Fixes: 84c3fc4e9c ("mm: thp: check pmd migration entry in common path")
Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Yang Shi <shy828301@gmail.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Bharata B Rao <bharata@linux.ibm.com>
Cc: Ben Skeggs <bskeggs@redhat.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: <stable@vger.kernel.org> [4.14+]
Link: https://lkml.kernel.org/r/20200903183140.19055-1-rcampbell@nvidia.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent d44a437826
commit ec56646e3b
1 changed file with 23 additions and 17 deletions
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2145,7 +2145,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 		put_page(page);
 		add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
 		return;
-	} else if (is_huge_zero_pmd(*pmd)) {
+	} else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
 		/*
 		 * FIXME: Do we want to invalidate secondary mmu by calling
 		 * mmu_notifier_invalidate_range() see comments below inside
@@ -2233,27 +2233,33 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 		pte = pte_offset_map(&_pmd, addr);
 		BUG_ON(!pte_none(*pte));
 		set_pte_at(mm, addr, pte, entry);
-		atomic_inc(&page[i]._mapcount);
+		if (!pmd_migration)
+			atomic_inc(&page[i]._mapcount);
 		pte_unmap(pte);
 	}
 
-	/*
-	 * Set PG_double_map before dropping compound_mapcount to avoid
-	 * false-negative page_mapped().
-	 */
-	if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) {
-		for (i = 0; i < HPAGE_PMD_NR; i++)
-			atomic_inc(&page[i]._mapcount);
-	}
-
-	if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
-		/* Last compound_mapcount is gone. */
-		__dec_node_page_state(page, NR_ANON_THPS);
-		if (TestClearPageDoubleMap(page)) {
-			/* No need in mapcount reference anymore */
+	if (!pmd_migration) {
+		/*
+		 * Set PG_double_map before dropping compound_mapcount to avoid
+		 * false-negative page_mapped().
+		 */
+		if (compound_mapcount(page) > 1 &&
+		    !TestSetPageDoubleMap(page)) {
 			for (i = 0; i < HPAGE_PMD_NR; i++)
-				atomic_dec(&page[i]._mapcount);
+				atomic_inc(&page[i]._mapcount);
+		}
+
+		lock_page_memcg(page);
+		if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
+			/* Last compound_mapcount is gone. */
+			__dec_lruvec_page_state(page, NR_ANON_THPS);
+			if (TestClearPageDoubleMap(page)) {
+				/* No need in mapcount reference anymore */
+				for (i = 0; i < HPAGE_PMD_NR; i++)
+					atomic_dec(&page[i]._mapcount);
+			}
 		}
+		unlock_page_memcg(page);
 	}
 
 	smp_wmb(); /* make pte visible before pmd */
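For context, the pmd_migration flag tested by the new guards is computed near the top of __split_huge_pmd_locked(), roughly as follows (paraphrased; the exact shape varies between kernel versions):

	pmd_migration = is_pmd_migration_entry(*pmd);
	if (unlikely(pmd_migration)) {
		swp_entry_t entry = pmd_to_swp_entry(*pmd);

		/* recover the struct page from the swap entry */
		page = pfn_to_page(swp_offset(entry));
	} else
		page = pmd_page(*pmd);

With the guards in place, an already-unmapped (migrating) THP no longer gains spurious _mapcount references or memcg accounting while its PMD is split.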