hugetlb/cgroup: add the cgroup pointer to page lru

Add the hugetlb cgroup pointer to the 3rd page's lru.next.  This limits
hugetlb cgroup usage to hugepages with 3 or more normal pages (i.e. a
compound order of at least 2), which should be an acceptable limitation.
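
The 3rd page is the first one with no metadata of its own here: page[0]
is the head page itself, and page[1].lru already carries the compound
destructor and compound order.  A rough map of the layout this relies on
(field placement as of this kernel version, shown for reference only):

	/*
	 * Compound-page metadata layout (as of this kernel version):
	 *
	 *   page[0]           head page: flags, refcount, mapping, ...
	 *   page[1].lru.next  compound page destructor
	 *   page[1].lru.prev  compound page order
	 *   page[2].lru.next  hugetlb cgroup pointer (added by this patch)
	 *
	 * This is why HUGETLB_CGROUP_MIN_ORDER is 2: an order-0 or
	 * order-1 compound page has no third page to borrow.
	 */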

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: David Rientjes <rientjes@google.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hillf Danton <dhillf@gmail.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
---
 include/linux/hugetlb_cgroup.h | 37 +++++++++++++++++++++++++++++++++++++
 mm/hugetlb.c                   |  4 ++++
 2 files changed, 41 insertions(+), 0 deletions(-)

diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h

@@ -18,8 +18,34 @@
 #include <linux/res_counter.h>
 
 struct hugetlb_cgroup;
+/*
+ * Minimum page order trackable by hugetlb cgroup.
+ * At least 3 pages are necessary for all the tracking information.
+ */
+#define HUGETLB_CGROUP_MIN_ORDER	2
+
 #ifdef CONFIG_CGROUP_HUGETLB
+
+static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
+{
+	VM_BUG_ON(!PageHuge(page));
+
+	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
+		return NULL;
+	return (struct hugetlb_cgroup *)page[2].lru.next;
+}
+
+static inline
+int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
+{
+	VM_BUG_ON(!PageHuge(page));
+
+	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
+		return -1;
+	page[2].lru.next = (void *)h_cg;
+	return 0;
+}
 
 static inline bool hugetlb_cgroup_disabled(void)
 {
 	if (hugetlb_subsys.disabled)
@@ -28,6 +54,17 @@ static inline bool hugetlb_cgroup_disabled(void)
 }
 
 #else
+static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
+{
+	return NULL;
+}
+
+static inline
+int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
+{
+	return 0;
+}
+
 static inline bool hugetlb_cgroup_disabled(void)
 {
 	return true;
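
This patch only ever stores NULL through the new setter; the real
charge/uncharge paths arrive later in the series.  As a rough sketch of
an eventual caller (the function name example_hugetlb_commit_charge is
illustrative, not something this patch adds), assuming hugetlb_lock
protects the field as it does in prep_new_huge_page() below:

	/*
	 * Illustrative sketch only: tag a freshly allocated hugepage
	 * with the cgroup it was charged to.
	 */
	static int example_hugetlb_commit_charge(struct hugetlb_cgroup *h_cg,
						 struct page *page)
	{
		int ret;

		spin_lock(&hugetlb_lock);
		/* -1 iff compound_order(page) < HUGETLB_CGROUP_MIN_ORDER */
		ret = set_hugetlb_cgroup(page, h_cg);
		spin_unlock(&hugetlb_lock);
		return ret;
	}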

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c

@@ -28,6 +28,7 @@
 #include <linux/io.h>
 
 #include <linux/hugetlb.h>
+#include <linux/hugetlb_cgroup.h>
 #include <linux/node.h>
 #include "internal.h"
@@ -591,6 +592,7 @@ static void update_and_free_page(struct hstate *h, struct page *page)
 				1 << PG_active | 1 << PG_reserved |
 				1 << PG_private | 1 << PG_writeback);
 	}
+	VM_BUG_ON(hugetlb_cgroup_from_page(page));
 	set_compound_page_dtor(page, NULL);
 	set_page_refcounted(page);
 	arch_release_hugepage(page);
@@ -643,6 +645,7 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
 	INIT_LIST_HEAD(&page->lru);
 	set_compound_page_dtor(page, free_huge_page);
 	spin_lock(&hugetlb_lock);
+	set_hugetlb_cgroup(page, NULL);
 	h->nr_huge_pages++;
 	h->nr_huge_pages_node[nid]++;
 	spin_unlock(&hugetlb_lock);
@@ -892,6 +895,7 @@ static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
 		INIT_LIST_HEAD(&page->lru);
 		r_nid = page_to_nid(page);
 		set_compound_page_dtor(page, free_huge_page);
+		set_hugetlb_cgroup(page, NULL);
 		/*
 		 * We incremented the global counters already
 		 */
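
Note how the two halves fit together: both allocation paths
(prep_new_huge_page() and alloc_buddy_huge_page()) start the field out
as NULL, and update_and_free_page() asserts it is NULL again at free
time, so a hugepage cannot be handed back to the buddy allocator while
it is still tagged with a cgroup.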