mm: cleanup add_to_page_cache_locked()
Make add_to_page_cache_locked() cleaner:

 - unindent most of the function's code by inverting one condition;
 - streamline the no-error code path;
 - move the insert error path out of the normal code path;
 - call radix_tree_preload_end() earlier.

No functional changes.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Hugh Dickins <hughd@google.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Matthew Wilcox <willy@linux.intel.com>
Cc: Hillf Danton <dhillf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
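The first two bullets are the classic invert-the-condition, return-early refactoring. A minimal sketch of the pattern in isolation, under illustrative names (setup(), work(), teardown() and friends are hypothetical stand-ins, not code from this patch):

	/* Hypothetical stand-ins so the sketch compiles; not from the patch. */
	static int setup(void)			{ return 0; }
	static int work(void)			{ return 0; }
	static void teardown(void)		{ }
	static void handle_setup_failure(void)	{ }

	/* Before: the whole common case nests inside "if (error == 0)". */
	static int do_work_nested(void)
	{
		int error = setup();
		if (error == 0) {
			error = work();		/* many lines, one level deep */
			teardown();
		} else
			handle_setup_failure();
		return error;
	}

	/* After: invert the test, handle the failure and return early;
	 * the common case reads straight down at the top indent level. */
	static int do_work_flat(void)
	{
		int error = setup();
		if (error) {
			handle_setup_failure();
			return error;
		}

		error = work();
		teardown();
		return error;
	}

The payoff is visible in the hunk below: the whole radix-tree insertion sequence drops one indent level, and each failure is handled right next to the test that detects it.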
This commit is contained in:
parent 3cd14fcd3f
commit 66a0c8ee3d

1 changed file with 25 additions and 23 deletions
 mm/filemap.c | 48 +++++++++++++++++++++++++-----------------------
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -467,32 +467,34 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 	error = mem_cgroup_cache_charge(page, current->mm,
 					gfp_mask & GFP_RECLAIM_MASK);
 	if (error)
-		goto out;
+		return error;
 
 	error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
-	if (error == 0) {
-		page_cache_get(page);
-		page->mapping = mapping;
-		page->index = offset;
-
-		spin_lock_irq(&mapping->tree_lock);
-		error = radix_tree_insert(&mapping->page_tree, offset, page);
-		if (likely(!error)) {
-			mapping->nrpages++;
-			__inc_zone_page_state(page, NR_FILE_PAGES);
-			spin_unlock_irq(&mapping->tree_lock);
-			trace_mm_filemap_add_to_page_cache(page);
-		} else {
-			page->mapping = NULL;
-			/* Leave page->index set: truncation relies upon it */
-			spin_unlock_irq(&mapping->tree_lock);
-			mem_cgroup_uncharge_cache_page(page);
-			page_cache_release(page);
-		}
-		radix_tree_preload_end();
-	} else
+	if (error) {
 		mem_cgroup_uncharge_cache_page(page);
-out:
+		return error;
+	}
+
+	page_cache_get(page);
+	page->mapping = mapping;
+	page->index = offset;
+
+	spin_lock_irq(&mapping->tree_lock);
+	error = radix_tree_insert(&mapping->page_tree, offset, page);
+	radix_tree_preload_end();
+	if (unlikely(error))
+		goto err_insert;
+	mapping->nrpages++;
+	__inc_zone_page_state(page, NR_FILE_PAGES);
+	spin_unlock_irq(&mapping->tree_lock);
+	trace_mm_filemap_add_to_page_cache(page);
+	return 0;
+err_insert:
+	page->mapping = NULL;
+	/* Leave page->index set: truncation relies upon it */
+	spin_unlock_irq(&mapping->tree_lock);
+	mem_cgroup_uncharge_cache_page(page);
+	page_cache_release(page);
 	return error;
 }
 EXPORT_SYMBOL(add_to_page_cache_locked);
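For reference, here is how the hunk's region reads after the patch, stitched together from the added and context lines above. The trailing parameters (pgoff_t offset, gfp_t gfp_mask) and the declaration of error sit outside the truncated hunk header; they are taken from the mainline signature of that era and should be treated as surrounding context, not part of the diff:

	int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				     pgoff_t offset, gfp_t gfp_mask)
	{
		int error;

		/* ...sanity checks above the hunk are elided... */
		error = mem_cgroup_cache_charge(page, current->mm,
						gfp_mask & GFP_RECLAIM_MASK);
		if (error)
			return error;

		error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
		if (error) {
			mem_cgroup_uncharge_cache_page(page);
			return error;
		}

		page_cache_get(page);
		page->mapping = mapping;
		page->index = offset;

		spin_lock_irq(&mapping->tree_lock);
		error = radix_tree_insert(&mapping->page_tree, offset, page);
		radix_tree_preload_end();
		if (unlikely(error))
			goto err_insert;
		mapping->nrpages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		spin_unlock_irq(&mapping->tree_lock);
		trace_mm_filemap_add_to_page_cache(page);
		return 0;
	err_insert:
		page->mapping = NULL;
		/* Leave page->index set: truncation relies upon it */
		spin_unlock_irq(&mapping->tree_lock);
		mem_cgroup_uncharge_cache_page(page);
		page_cache_release(page);
		return error;
	}
	EXPORT_SYMBOL(add_to_page_cache_locked);

Note that radix_tree_preload_end() now runs immediately after radix_tree_insert() and before the error check: the preload only has to guarantee node allocations for the insertion itself, so it can end regardless of whether the insert succeeded. That is the commit's fourth bullet.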