[PATCH] mm: dequeue a huge page near to this node
This was discussed at http://marc.theaimsgroup.com/?l=linux-kernel&m=113166526217117&w=2

This patch changes the dequeueing to select a huge page near the executing node instead of always beginning the check for free nodes from node 0. This results in placement of the huge pages near the executing processor, improving performance. The existing implementation can place the huge pages far away from the executing processor, causing significant performance degradation. Starting the search from zero also means that the lower nodes quickly run out of memory; selecting a huge page near the process distributes the huge pages better.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: Adam Litke <agl@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
parent
1e8f889b10
commit
96df9333c9
1 changed file with 8 additions and 6 deletions
 mm/hugetlb.c | 14 ++++++++------

--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -40,14 +40,16 @@ static struct page *dequeue_huge_page(void)
 {
 	int nid = numa_node_id();
 	struct page *page = NULL;
+	struct zonelist *zonelist = NODE_DATA(nid)->node_zonelists;
+	struct zone **z;
 
-	if (list_empty(&hugepage_freelists[nid])) {
-		for (nid = 0; nid < MAX_NUMNODES; ++nid)
-			if (!list_empty(&hugepage_freelists[nid]))
-				break;
+	for (z = zonelist->zones; *z; z++) {
+		nid = (*z)->zone_pgdat->node_id;
+		if (!list_empty(&hugepage_freelists[nid]))
+			break;
 	}
-	if (nid >= 0 && nid < MAX_NUMNODES &&
-	    !list_empty(&hugepage_freelists[nid])) {
+
+	if (*z) {
 		page = list_entry(hugepage_freelists[nid].next,
 				  struct page, lru);
 		list_del(&page->lru);
Loading…
Reference in a new issue