mm/rmap: use rmap_walk() in page_mkclean()
Now that rmap_walk() provides the infrastructure to handle the differences between the rmap-traversal variants, just use it in page_mkclean(). This patch changes the following things:

1. Remove one of the rmap-traversal variants, page_mkclean_file().
2. Mechanically convert page_mkclean() to use rmap_walk().

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Hillf Danton <dhillf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
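For context, rmap_walk() drives the traversal through a caller-supplied control structure. The sketch below shows roughly what struct rmap_walk_control looked like around this series (reconstructed from memory of include/linux/rmap.h of that era; the exact field set has changed in later kernels, so treat it as an approximation, not the literal header):

	struct rmap_walk_control {
		void *arg;	/* opaque cookie passed to the callbacks below */
		/* called for each VMA in which the page is mapped */
		int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
				unsigned long addr, void *arg);
		/* optional early-termination check after each VMA */
		int (*done)(struct page *page);
		/* optional hook for taking the anon_vma lock more cheaply */
		struct anon_vma *(*anon_lock)(struct page *page);
		/* optional filter: return true to skip this VMA entirely */
		bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
	};

page_mkclean() now only fills in .rmap_one, .invalid_vma and .arg; rmap_walk() takes care of the locking and of iterating the mapping's interval tree, which is what page_mkclean_file() used to do by hand.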
parent 9f32624be9
commit 9853a407b9

1 changed file with 26 additions and 25 deletions

mm/rmap.c | 51
mm/rmap.c
@@ -812,12 +812,13 @@ int page_referenced(struct page *page,
 }
 
 static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
-			    unsigned long address)
+			    unsigned long address, void *arg)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *pte;
 	spinlock_t *ptl;
 	int ret = 0;
+	int *cleaned = arg;
 
 	pte = page_check_address(page, mm, address, &ptl, 1);
 	if (!pte)
@@ -836,44 +837,44 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 
 	pte_unmap_unlock(pte, ptl);
 
-	if (ret)
+	if (ret) {
 		mmu_notifier_invalidate_page(mm, address);
+		(*cleaned)++;
+	}
 out:
-	return ret;
+	return SWAP_AGAIN;
 }
 
-static int page_mkclean_file(struct address_space *mapping, struct page *page)
+static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
 {
-	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-	struct vm_area_struct *vma;
-	int ret = 0;
-
-	BUG_ON(PageAnon(page));
+	if (vma->vm_flags & VM_SHARED)
+		return 0;
 
-	mutex_lock(&mapping->i_mmap_mutex);
-	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
-		if (vma->vm_flags & VM_SHARED) {
-			unsigned long address = vma_address(page, vma);
-			ret += page_mkclean_one(page, vma, address);
-		}
-	}
-	mutex_unlock(&mapping->i_mmap_mutex);
-	return ret;
+	return 1;
 }
 
 int page_mkclean(struct page *page)
 {
-	int ret = 0;
+	int cleaned = 0;
+	struct address_space *mapping;
+	struct rmap_walk_control rwc = {
+		.arg = (void *)&cleaned,
+		.rmap_one = page_mkclean_one,
+		.invalid_vma = invalid_mkclean_vma,
+	};
 
 	BUG_ON(!PageLocked(page));
 
-	if (page_mapped(page)) {
-		struct address_space *mapping = page_mapping(page);
-		if (mapping)
-			ret = page_mkclean_file(mapping, page);
-	}
+	if (!page_mapped(page))
+		return 0;
 
-	return ret;
+	mapping = page_mapping(page);
+	if (!mapping)
+		return 0;
+
+	rmap_walk(page, &rwc);
+
+	return cleaned;
 }
 EXPORT_SYMBOL_GPL(page_mkclean);
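To see why page_mkclean_one() now returns SWAP_AGAIN and why the VM_SHARED test moved into invalid_mkclean_vma(), the following simplified sketch shows roughly how the file-backed walker in mm/rmap.c of this era invokes the callbacks (error handling and the nonlinear-mapping path omitted; this is a paraphrase for illustration, not the exact kernel source):

	static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
	{
		struct address_space *mapping = page->mapping;
		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
		struct vm_area_struct *vma;
		int ret = SWAP_AGAIN;

		if (!mapping)
			return ret;

		mutex_lock(&mapping->i_mmap_mutex);
		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
			unsigned long address = vma_address(page, vma);

			/* let the caller filter out uninteresting VMAs */
			if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
				continue;

			/* SWAP_AGAIN from rmap_one means "keep walking" */
			ret = rwc->rmap_one(page, vma, address, rwc->arg);
			if (ret != SWAP_AGAIN)
				break;
		}
		mutex_unlock(&mapping->i_mmap_mutex);

		return ret;
	}

So returning SWAP_AGAIN keeps the walk going over every mapping of the page, the per-VMA VM_SHARED check that used to live in page_mkclean_file() becomes the invalid_mkclean_vma() filter, and the count of cleaned ptes travels back through rwc->arg instead of the return value.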