hugetlb: fix dynamic pool resize failure case
When gather_surplus_pages() fails to allocate enough huge pages to satisfy the requested reservation, it frees what it did allocate back to the buddy allocator. put_page() should be called instead of update_and_free_page() to ensure that pool counters are updated as appropriate and the page's refcount is decremented. Signed-off-by: Adam Litke <agl@us.ibm.com> Acked-by: Dave Hansen <haveblue@us.ibm.com> Cc: David Gibson <hermes@gibson.dropbear.id.au> Cc: William Lee Irwin III <wli@holomorphy.com> Cc: Badari Pulavarty <pbadari@us.ibm.com> Cc: Ken Chen <kenchen@google.com> Cc: Lee Schermerhorn <lee.schermerhorn@hp.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
63b4613c3f
commit
af767cbdd7
1 changed file with 11 additions and 2 deletions: mm/hugetlb.c (+11, -2)
@@ -302,8 +302,17 @@ static int gather_surplus_pages(int delta)
 		list_del(&page->lru);
 		if ((--needed) >= 0)
 			enqueue_huge_page(page);
-		else
-			update_and_free_page(page);
+		else {
+			/*
+			 * Decrement the refcount and free the page using its
+			 * destructor.  This must be done with hugetlb_lock
+			 * unlocked which is safe because free_huge_page takes
+			 * hugetlb_lock before deciding how to free the page.
+			 */
+			spin_unlock(&hugetlb_lock);
+			put_page(page);
+			spin_lock(&hugetlb_lock);
+		}
 	}
 
 	return ret;
|
Loading…
Reference in a new issue