memcg: fix page_cgroup fatal error in FLATMEM
SLAB is now set up early enough in boot that it can be used from init routines. But replacing alloc_bootmem() with slab/page allocations in FLAT/DISCONTIGMEM's page_cgroup initialization breaks the allocation: the per-node page_cgroup table covers the whole node, so it is larger than 1 << MAX_ORDER pages and the page allocator cannot satisfy it. (The SPARSEMEM case works fine: it supports MEMORY_HOTPLUG, and its per-section page_cgroup tables stay at a reasonable size, below 1 << MAX_ORDER.)

This patch revives FLATMEM+memory cgroup by going back to alloc_bootmem for that configuration. In the future we may either stop supporting FLATMEM (if it has no users) or rewrite the flatmem code completely, but that would add messier code and more overhead.

Reported-by: Li Zefan <lizf@cn.fujitsu.com>
Tested-by: Li Zefan <lizf@cn.fujitsu.com>
Tested-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
commit ca371c0d7e
parent 8ebf975608
3 changed files with 32 additions and 20 deletions
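For scale, here is a back-of-the-envelope sketch of the allocation-size problem the message describes (standalone userspace C; PAGE_SIZE, MAX_ORDER, the struct layout, and the 4 GiB node are illustrative assumptions, not the kernel's actual definitions):

/*
 * Rough check of why the FLATMEM page_cgroup table cannot come from
 * the page allocator. Userspace sketch; values below are illustrative
 * assumptions, not the kernel's definitions.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)   /* 4 KiB pages */
#define MAX_ORDER  11                    /* buddy limit: 2^(MAX_ORDER-1) pages */

struct page_cgroup {                     /* rough 64-bit layout, ~40 bytes */
	unsigned long flags;
	void *mem_cgroup;
	void *page;
	void *lru_prev, *lru_next;
};

int main(void)
{
	unsigned long node_ram   = 1UL << 32;              /* assumed 4 GiB node */
	unsigned long nr_pages   = node_ram / PAGE_SIZE;   /* ~1M pages */
	unsigned long table_size = sizeof(struct page_cgroup) * nr_pages;
	unsigned long buddy_max  = PAGE_SIZE << (MAX_ORDER - 1);

	printf("page_cgroup table:        %lu MiB\n", table_size >> 20); /* ~40 */
	printf("largest buddy allocation: %lu MiB\n", buddy_max >> 20);  /* 4 */
	/* ~40 MiB needed vs. 4 MiB available: only bootmem can provide it. */
	return 0;
}

Under the same assumptions, a SPARSEMEM section spans a fixed, much smaller range (e.g. 128 MiB on x86-64), giving a per-section table of roughly 1.25 MiB, comfortably below the buddy limit; that is why the SPARSEMEM path can keep using kmalloc_node()/vmalloc_node().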
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -18,7 +18,19 @@ struct page_cgroup {
 };
 
 void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat);
-void __init page_cgroup_init(void);
+
+#ifdef CONFIG_SPARSEMEM
+static inline void __init page_cgroup_init_flatmem(void)
+{
+}
+extern void __init page_cgroup_init(void);
+#else
+void __init page_cgroup_init_flatmem(void);
+static inline void __init page_cgroup_init(void)
+{
+}
+#endif
+
 struct page_cgroup *lookup_page_cgroup(struct page *page);
 
 enum {
@@ -87,6 +99,10 @@ static inline void page_cgroup_init(void)
 {
 }
 
+static inline void __init page_cgroup_init_flatmem(void)
+{
+}
+
 #endif
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
--- a/init/main.c
+++ b/init/main.c
@@ -539,6 +539,11 @@ void __init __weak thread_info_cache_init(void)
  */
 static void __init mm_init(void)
 {
+	/*
+	 * page_cgroup requires contiguous pages as memmap
+	 * and it's bigger than MAX_ORDER unless SPARSEMEM.
+	 */
+	page_cgroup_init_flatmem();
 	mem_init();
 	kmem_cache_init();
 	vmalloc_init();
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -47,8 +47,6 @@ static int __init alloc_node_page_cgroup(int nid)
 	struct page_cgroup *base, *pc;
 	unsigned long table_size;
 	unsigned long start_pfn, nr_pages, index;
-	struct page *page;
-	unsigned int order;
 
 	start_pfn = NODE_DATA(nid)->node_start_pfn;
 	nr_pages = NODE_DATA(nid)->node_spanned_pages;
@@ -57,13 +55,11 @@ static int __init alloc_node_page_cgroup(int nid)
 		return 0;
 
 	table_size = sizeof(struct page_cgroup) * nr_pages;
-	order = get_order(table_size);
-	page = alloc_pages_node(nid, GFP_NOWAIT | __GFP_ZERO, order);
-	if (!page)
-		page = alloc_pages_node(-1, GFP_NOWAIT | __GFP_ZERO, order);
-	if (!page)
+
+	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
+			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+	if (!base)
 		return -ENOMEM;
-	base = page_address(page);
 	for (index = 0; index < nr_pages; index++) {
 		pc = base + index;
 		__init_page_cgroup(pc, start_pfn + index);
@@ -73,7 +69,7 @@ static int __init alloc_node_page_cgroup(int nid)
 	return 0;
 }
 
-void __init page_cgroup_init(void)
+void __init page_cgroup_init_flatmem(void)
 {
 
 	int nid, fail;
@@ -117,16 +113,11 @@ static int __init_refok init_section_page_cgroup(unsigned long pfn)
 	if (!section->page_cgroup) {
 		nid = page_to_nid(pfn_to_page(pfn));
 		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
-		if (slab_is_available()) {
-			base = kmalloc_node(table_size,
-					GFP_KERNEL | __GFP_NOWARN, nid);
-			if (!base)
-				base = vmalloc_node(table_size, nid);
-		} else {
-			base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
-				table_size,
-				PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
-		}
+		VM_BUG_ON(!slab_is_available());
+		base = kmalloc_node(table_size,
+				GFP_KERNEL | __GFP_NOWARN, nid);
+		if (!base)
+			base = vmalloc_node(table_size, nid);
 	} else {
 		/*
 		 * We don't have to allocate page_cgroup again, but