mm: hugetlb: defer freeing pages when gathering surplus pages
When gathering surplus pages, the number of needed pages is recomputed
after reacquiring the hugetlb lock, to catch changes in resv_huge_pages
and free_huge_pages.  The recomputation also takes the newly allocated
pages into account.  Freeing the allocated pages can therefore be
deferred until we see whether the final request is satisfied, even if
fewer pages were allocated than needed.

Signed-off-by: Hillf Danton <dhillf@gmail.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent cc715d99e5
commit 28073b02bf

1 changed file with 17 additions and 13 deletions

mm/hugetlb.c
@@ -852,6 +852,7 @@ static int gather_surplus_pages(struct hstate *h, int delta)
 	struct page *page, *tmp;
 	int ret, i;
 	int needed, allocated;
+	bool alloc_ok = true;
 
 	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
 	if (needed <= 0) {
@@ -867,17 +868,13 @@ static int gather_surplus_pages(struct hstate *h, int delta)
 	spin_unlock(&hugetlb_lock);
 	for (i = 0; i < needed; i++) {
 		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
-		if (!page)
-			/*
-			 * We were not able to allocate enough pages to
-			 * satisfy the entire reservation so we free what
-			 * we've allocated so far.
-			 */
-			goto free;
-
+		if (!page) {
+			alloc_ok = false;
+			break;
+		}
 		list_add(&page->lru, &surplus_list);
 	}
-	allocated += needed;
+	allocated += i;
 
 	/*
 	 * After retaking hugetlb_lock, we need to recalculate 'needed'
@@ -886,9 +883,16 @@ static int gather_surplus_pages(struct hstate *h, int delta)
 	spin_lock(&hugetlb_lock);
 	needed = (h->resv_huge_pages + delta) -
 			(h->free_huge_pages + allocated);
-	if (needed > 0)
-		goto retry;
-
+	if (needed > 0) {
+		if (alloc_ok)
+			goto retry;
+		/*
+		 * We were not able to allocate enough pages to
+		 * satisfy the entire reservation so we free what
+		 * we've allocated so far.
+		 */
+		goto free;
+	}
 	/*
 	 * The surplus_list now contains _at_least_ the number of extra pages
 	 * needed to accommodate the reservation.  Add the appropriate number
@@ -914,10 +918,10 @@ static int gather_surplus_pages(struct hstate *h, int delta)
 		VM_BUG_ON(page_count(page));
 		enqueue_huge_page(h, page);
 	}
-free:
 	spin_unlock(&hugetlb_lock);
 
 	/* Free unnecessary surplus pages to the buddy allocator */
+free:
 	if (!list_empty(&surplus_list)) {
 		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
 			list_del(&page->lru);
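
Two details of the fix are worth noting: allocated += i (rather than
+= needed) counts only the pages the loop actually obtained before an
allocation failure broke out of it, and moving the free: label below
the spin_unlock() lets the failure path share the existing code that
returns unneeded surplus pages to the buddy allocator.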
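
To see why deferring the free can rescue a request, here is a minimal
user-space model of the patched retry logic.  The counters, the fake
allocator, and the simulated concurrent free are illustrative
assumptions, not kernel code; only the control flow mirrors
gather_surplus_pages().

/*
 * Standalone sketch: an allocation failure no longer frees pages
 * immediately.  Instead alloc_ok is noted, 'needed' is recomputed
 * against the (possibly changed) counters, and only then do we decide
 * between retrying and failing.
 */
#include <stdbool.h>
#include <stdio.h>

static int resv_huge_pages = 8, free_huge_pages = 4;

/* Pretend allocator: succeeds 'budget' times, then fails (assumption). */
static bool alloc_page_sim(int *budget)
{
	return (*budget)-- > 0;
}

static int gather_surplus_sim(int delta, int budget)
{
	int needed, allocated = 0, i;
	bool alloc_ok = true;

	needed = (resv_huge_pages + delta) - free_huge_pages;
	if (needed <= 0)
		return 0;
retry:
	for (i = 0; i < needed; i++) {
		if (!alloc_page_sim(&budget)) {
			alloc_ok = false;	/* defer the free decision */
			break;
		}
	}
	allocated += i;		/* count only what we really got */

	/* Simulate another CPU freeing pages while the lock was dropped. */
	free_huge_pages += 2;

	needed = (resv_huge_pages + delta) -
			(free_huge_pages + allocated);
	if (needed > 0) {
		if (alloc_ok)
			goto retry;
		return -1;	/* would free the surplus list and fail */
	}
	return 0;		/* satisfied despite the allocation failure */
}

int main(void)
{
	/* needed = (8 + 2) - 4 = 6, but only 4 allocations succeed;
	 * the concurrent free of 2 pages still lets the request pass. */
	printf("result: %d\n", gather_surplus_sim(2, 4));
	return 0;
}

Before the patch, the failed allocation at i == 4 would have freed
everything and returned failure; with the deferred free, the recomputed
'needed' drops to zero and the request succeeds.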