mm: move highest_memmap_pfn
Move highest_memmap_pfn __read_mostly from page_alloc.c next to zero_pfn __read_mostly in memory.c: to help them share a cacheline, since they're very often tested together in vm_normal_page().

Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Rik van Riel <riel@redhat.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
62eede62da
commit
03f6462a3a
3 changed files with 3 additions and 2 deletions
|
@@ -37,6 +37,8 @@ static inline void __put_page(struct page *page)
 	atomic_dec(&page->_count);
 }

+extern unsigned long highest_memmap_pfn;
+
 /*
  * in mm/vmscan.c:
  */
@@ -46,7 +48,6 @@ extern void putback_lru_page(struct page *page);
 /*
  * in mm/page_alloc.c
  */
-extern unsigned long highest_memmap_pfn;
 extern void __free_pages_bootmem(struct page *page, unsigned int order);
 extern void prep_compound_page(struct page *page, unsigned long order);

@@ -109,6 +109,7 @@ static int __init disable_randmaps(char *s)
 __setup("norandmaps", disable_randmaps);

 unsigned long zero_pfn __read_mostly;
+unsigned long highest_memmap_pfn __read_mostly;

 /*
  * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
@@ -72,7 +72,6 @@ EXPORT_SYMBOL(node_states);

 unsigned long totalram_pages __read_mostly;
 unsigned long totalreserve_pages __read_mostly;
-unsigned long highest_memmap_pfn __read_mostly;
 int percpu_pagelist_fraction;
 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

Loading…
Reference in a new issue