hugetlb: abstract numa round robin selection
Need this as a separate function for a future patch.

No behaviour change.

Acked-by: Adam Litke <agl@us.ibm.com>
Acked-by: Nishanth Aravamudan <nacc@us.ibm.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 5ced66c901
parent a343787016

 mm/hugetlb.c | 37 ++++++++++++++++++++++---------------
 1 file changed, 22 insertions(+), 15 deletions(-)
@@ -565,6 +565,27 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
 	return page;
 }
 
+/*
+ * Use a helper variable to find the next node and then
+ * copy it back to hugetlb_next_nid afterwards:
+ * otherwise there's a window in which a racer might
+ * pass invalid nid MAX_NUMNODES to alloc_pages_node.
+ * But we don't need to use a spin_lock here: it really
+ * doesn't matter if occasionally a racer chooses the
+ * same nid as we do. Move nid forward in the mask even
+ * if we just successfully allocated a hugepage so that
+ * the next caller gets hugepages on the next node.
+ */
+static int hstate_next_node(struct hstate *h)
+{
+	int next_nid;
+	next_nid = next_node(h->hugetlb_next_nid, node_online_map);
+	if (next_nid == MAX_NUMNODES)
+		next_nid = first_node(node_online_map);
+	h->hugetlb_next_nid = next_nid;
+	return next_nid;
+}
+
 static int alloc_fresh_huge_page(struct hstate *h)
 {
 	struct page *page;
@@ -578,21 +599,7 @@ static int alloc_fresh_huge_page(struct hstate *h)
 		page = alloc_fresh_huge_page_node(h, h->hugetlb_next_nid);
 		if (page)
 			ret = 1;
-		/*
-		 * Use a helper variable to find the next node and then
-		 * copy it back to hugetlb_next_nid afterwards:
-		 * otherwise there's a window in which a racer might
-		 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
-		 * But we don't need to use a spin_lock here: it really
-		 * doesn't matter if occasionally a racer chooses the
-		 * same nid as we do. Move nid forward in the mask even
-		 * if we just successfully allocated a hugepage so that
-		 * the next caller gets hugepages on the next node.
-		 */
-		next_nid = next_node(h->hugetlb_next_nid, node_online_map);
-		if (next_nid == MAX_NUMNODES)
-			next_nid = first_node(node_online_map);
-		h->hugetlb_next_nid = next_nid;
+		next_nid = hstate_next_node(h);
 	} while (!page && h->hugetlb_next_nid != start_nid);
 
 	if (ret)
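For readers outside the kernel tree, the round-robin pattern factored out above can be shown in isolation. Below is a minimal, self-contained user-space sketch, not the kernel code: the names MAX_NODES, online_mask, rr_cursor and hstate_next_node_demo are illustrative stand-ins for MAX_NUMNODES, node_online_map, h->hugetlb_next_nid and hstate_next_node, and the bitmask helpers are local re-implementations of the nodemask API. It illustrates the reasoning in the moved comment: the successor is computed in a local variable and published with one store, so a racing caller may read a stale cursor (harmless) but never the out-of-range sentinel.

/*
 * Hypothetical user-space analogue of hstate_next_node().  All names
 * here (MAX_NODES, online_mask, next_node, first_node) are local
 * stand-ins for the kernel's nodemask machinery, defined below.
 */
#include <stdio.h>

#define MAX_NODES 8			/* stand-in for MAX_NUMNODES */

static unsigned online_mask = 0x2du;	/* "online" nodes: 0, 2, 3, 5 */
static int rr_cursor;			/* stand-in for h->hugetlb_next_nid */

/* Lowest set bit strictly above nid, or MAX_NODES if there is none. */
static int next_node(int nid, unsigned mask)
{
	for (int i = nid + 1; i < MAX_NODES; i++)
		if (mask & (1u << i))
			return i;
	return MAX_NODES;
}

/* Lowest set bit overall. */
static int first_node(unsigned mask)
{
	return next_node(-1, mask);
}

/*
 * Compute the successor in a local variable and publish it with one
 * store, as the kernel comment describes: a racing caller may observe
 * a stale cursor (harmless for round-robin fairness) but never the
 * out-of-range sentinel MAX_NODES.  A strictly correct user-space
 * version would use C11 atomics for rr_cursor; a plain int is kept
 * here only to mirror the kernel's "no spin_lock needed" argument.
 */
static int hstate_next_node_demo(void)
{
	int next_nid = next_node(rr_cursor, online_mask);

	if (next_nid == MAX_NODES)		/* wrap around the mask */
		next_nid = first_node(online_mask);
	rr_cursor = next_nid;
	return next_nid;
}

int main(void)
{
	/* The cursor cycles through the online nodes: 2 3 5 0 2 3 5 0 */
	for (int i = 0; i < 8; i++)
		printf("next node: %d\n", hstate_next_node_demo());
	return 0;
}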