hugetlb/cgroup: assign the page hugetlb cgroup when we move the page to active list.
A page's hugetlb cgroup assignment and its movement to the active list should both occur while hugetlb_lock is held. Otherwise, when we remove the hugetlb cgroup, we will iterate the active list and find pages with a NULL hugetlb cgroup value. Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> Reviewed-by: Michal Hocko <mhocko@suse.cz> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
79dbb2368a
commit
94ae8ba717
2 changed files with 13 additions and 14 deletions
20
mm/hugetlb.c
20
mm/hugetlb.c
|
@ -928,14 +928,8 @@ struct page *alloc_huge_page_node(struct hstate *h, int nid)
|
||||||
page = dequeue_huge_page_node(h, nid);
|
page = dequeue_huge_page_node(h, nid);
|
||||||
spin_unlock(&hugetlb_lock);
|
spin_unlock(&hugetlb_lock);
|
||||||
|
|
||||||
if (!page) {
|
if (!page)
|
||||||
page = alloc_buddy_huge_page(h, nid);
|
page = alloc_buddy_huge_page(h, nid);
|
||||||
if (page) {
|
|
||||||
spin_lock(&hugetlb_lock);
|
|
||||||
list_move(&page->lru, &h->hugepage_activelist);
|
|
||||||
spin_unlock(&hugetlb_lock);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return page;
|
return page;
|
||||||
}
|
}
|
||||||
|
@ -1150,9 +1144,13 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
|
||||||
}
|
}
|
||||||
spin_lock(&hugetlb_lock);
|
spin_lock(&hugetlb_lock);
|
||||||
page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
|
page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
|
||||||
|
if (page) {
|
||||||
|
/* update page cgroup details */
|
||||||
|
hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
|
||||||
|
h_cg, page);
|
||||||
|
spin_unlock(&hugetlb_lock);
|
||||||
|
} else {
|
||||||
spin_unlock(&hugetlb_lock);
|
spin_unlock(&hugetlb_lock);
|
||||||
|
|
||||||
if (!page) {
|
|
||||||
page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
|
page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
|
||||||
if (!page) {
|
if (!page) {
|
||||||
hugetlb_cgroup_uncharge_cgroup(idx,
|
hugetlb_cgroup_uncharge_cgroup(idx,
|
||||||
|
@ -1162,6 +1160,8 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
|
||||||
return ERR_PTR(-ENOSPC);
|
return ERR_PTR(-ENOSPC);
|
||||||
}
|
}
|
||||||
spin_lock(&hugetlb_lock);
|
spin_lock(&hugetlb_lock);
|
||||||
|
hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
|
||||||
|
h_cg, page);
|
||||||
list_move(&page->lru, &h->hugepage_activelist);
|
list_move(&page->lru, &h->hugepage_activelist);
|
||||||
spin_unlock(&hugetlb_lock);
|
spin_unlock(&hugetlb_lock);
|
||||||
}
|
}
|
||||||
|
@ -1169,8 +1169,6 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
|
||||||
set_page_private(page, (unsigned long)spool);
|
set_page_private(page, (unsigned long)spool);
|
||||||
|
|
||||||
vma_commit_reservation(h, vma, addr);
|
vma_commit_reservation(h, vma, addr);
|
||||||
/* update page cgroup details */
|
|
||||||
hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
|
|
||||||
return page;
|
return page;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -213,6 +213,7 @@ int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Should be called with hugetlb_lock held */
|
||||||
void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
|
void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
|
||||||
struct hugetlb_cgroup *h_cg,
|
struct hugetlb_cgroup *h_cg,
|
||||||
struct page *page)
|
struct page *page)
|
||||||
|
@ -220,9 +221,7 @@ void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
|
||||||
if (hugetlb_cgroup_disabled() || !h_cg)
|
if (hugetlb_cgroup_disabled() || !h_cg)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
spin_lock(&hugetlb_lock);
|
|
||||||
set_hugetlb_cgroup(page, h_cg);
|
set_hugetlb_cgroup(page, h_cg);
|
||||||
spin_unlock(&hugetlb_lock);
|
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -389,6 +388,7 @@ int __init hugetlb_cgroup_file_init(int idx)
|
||||||
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
|
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
|
||||||
{
|
{
|
||||||
struct hugetlb_cgroup *h_cg;
|
struct hugetlb_cgroup *h_cg;
|
||||||
|
struct hstate *h = page_hstate(oldhpage);
|
||||||
|
|
||||||
if (hugetlb_cgroup_disabled())
|
if (hugetlb_cgroup_disabled())
|
||||||
return;
|
return;
|
||||||
|
@ -401,6 +401,7 @@ void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
|
||||||
|
|
||||||
/* move the h_cg details to new cgroup */
|
/* move the h_cg details to new cgroup */
|
||||||
set_hugetlb_cgroup(newhpage, h_cg);
|
set_hugetlb_cgroup(newhpage, h_cg);
|
||||||
|
list_move(&newhpage->lru, &h->hugepage_activelist);
|
||||||
spin_unlock(&hugetlb_lock);
|
spin_unlock(&hugetlb_lock);
|
||||||
cgroup_release_and_wakeup_rmdir(&h_cg->css);
|
cgroup_release_and_wakeup_rmdir(&h_cg->css);
|
||||||
return;
|
return;
|
||||||
|
|
Loading…
Reference in a new issue