mm, sparse, page_ext: drop ugly N_HIGH_MEMORY branches for allocations
Commit f52407ce2d ("memory hotplug: alloc page from other node in memory online") introduced N_HIGH_MEMORY checks so that NUMA-aware allocations are only used when the target node actually has some memory present; at the time, the node being onlined might not have had any memory yet, so a node-specific allocation could fail or even OOM.

Things have changed since then, though. Zonelists are now always initialized before we do any allocations, even for hotplug (see 959ecc48fc ("mm/memory_hotplug.c: fix building of node hotplug zonelist")). Therefore these checks are no longer needed. In fact, callers of the allocator should never care whether the node is populated, because that can change at any time.

Link: http://lkml.kernel.org/r/20170721143915.14161-10-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Shaohua Li <shaohua.li@intel.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Toshi Kani <toshi.kani@hpe.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent b93e0f329e
commit b95046b047
3 changed files with 7 additions and 19 deletions
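To illustrate the pattern the changelog describes before the hunks below, here is a minimal, hypothetical sketch (not part of the patch; the function name is made up for illustration): once zonelists are initialized for every node, a caller can simply request memory for a possibly memoryless node and let the allocator fall back via that node's zonelist, instead of open-coding a node_state(nid, N_HIGH_MEMORY) branch.

#include <linux/slab.h>

/*
 * Hypothetical example only, not taken from the patch: allocate per-node
 * data for @nid without first checking whether the node has memory.
 */
static void *alloc_node_data_example(size_t size, int nid)
{
	/*
	 * If @nid happens to be memoryless, the allocation falls back to
	 * other nodes through @nid's zonelist, so no N_HIGH_MEMORY check
	 * is needed here.
	 */
	return kzalloc_node(size, GFP_KERNEL, nid);
}

The diff below applies exactly this simplification to the three call sites that still carried the explicit node_state() branch.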
mm/page_ext.c
@@ -222,10 +222,7 @@ static void *__meminit alloc_page_ext(size_t size, int nid)
 		return addr;
 	}
 
-	if (node_state(nid, N_HIGH_MEMORY))
-		addr = vzalloc_node(size, nid);
-	else
-		addr = vzalloc(size);
+	addr = vzalloc_node(size, nid);
 
 	return addr;
 }
mm/sparse-vmemmap.c
@@ -54,14 +54,9 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
 	if (slab_is_available()) {
 		struct page *page;
 
-		if (node_state(node, N_HIGH_MEMORY))
-			page = alloc_pages_node(
-				node, GFP_KERNEL | __GFP_ZERO | __GFP_RETRY_MAYFAIL,
-				get_order(size));
-		else
-			page = alloc_pages(
-				GFP_KERNEL | __GFP_ZERO | __GFP_RETRY_MAYFAIL,
-				get_order(size));
+		page = alloc_pages_node(node,
+			GFP_KERNEL | __GFP_ZERO | __GFP_RETRY_MAYFAIL,
+			get_order(size));
 		if (page)
 			return page_address(page);
 		return NULL;
mm/sparse.c
@@ -65,14 +65,10 @@ static noinline struct mem_section __ref *sparse_index_alloc(int nid)
 	unsigned long array_size = SECTIONS_PER_ROOT *
 				   sizeof(struct mem_section);
 
-	if (slab_is_available()) {
-		if (node_state(nid, N_HIGH_MEMORY))
-			section = kzalloc_node(array_size, GFP_KERNEL, nid);
-		else
-			section = kzalloc(array_size, GFP_KERNEL);
-	} else {
+	if (slab_is_available())
+		section = kzalloc_node(array_size, GFP_KERNEL, nid);
+	else
 		section = memblock_virt_alloc_node(array_size, nid);
-	}
 
 	return section;
 }