mm: bootmem: try harder to free pages in bulk
The loop that frees pages to the page allocator while bootstrapping tries to free higher-order blocks only when the starting address is aligned to that block size. Otherwise it will free all pages on that node one-by-one.

Change it to free individual pages up to the first aligned block and then try higher-order frees from there.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 799f933a82
parent 560a036b3a

1 changed file with 10 additions and 12 deletions:

 mm/bootmem.c | 22 ++++++++++------------
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -171,7 +171,6 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size)
 
 static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 {
-        int aligned;
         struct page *page;
         unsigned long start, end, pages, count = 0;
 
@@ -181,14 +180,8 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
         start = bdata->node_min_pfn;
         end = bdata->node_low_pfn;
 
-        /*
-         * If the start is aligned to the machines wordsize, we might
-         * be able to free pages in bulks of that order.
-         */
-        aligned = !(start & (BITS_PER_LONG - 1));
-
-        bdebug("nid=%td start=%lx end=%lx aligned=%d\n",
-                bdata - bootmem_node_data, start, end, aligned);
+        bdebug("nid=%td start=%lx end=%lx\n",
+                bdata - bootmem_node_data, start, end);
 
         while (start < end) {
                 unsigned long *map, idx, vec;
@@ -196,12 +189,17 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
                 map = bdata->node_bootmem_map;
                 idx = start - bdata->node_min_pfn;
                 vec = ~map[idx / BITS_PER_LONG];
-
-                if (aligned && vec == ~0UL) {
+                /*
+                 * If we have a properly aligned and fully unreserved
+                 * BITS_PER_LONG block of pages in front of us, free
+                 * it in one go.
+                 */
+                if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) {
                         int order = ilog2(BITS_PER_LONG);
 
                         __free_pages_bootmem(pfn_to_page(start), order);
                         count += BITS_PER_LONG;
+                        start += BITS_PER_LONG;
                 } else {
                         unsigned long off = 0;
 
@@ -214,8 +212,8 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
                                 vec >>= 1;
                                 off++;
                         }
+                        start = ALIGN(start + 1, BITS_PER_LONG);
                 }
-                start += BITS_PER_LONG;
         }
 
         page = virt_to_page(bdata->node_bootmem_map);
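For illustration only, here is a minimal userspace sketch of the iteration scheme the patch introduces. The free_block() and free_one() stubs, the map[] contents, the PFN range, and the explicit end-of-range guards are made up for this example; the kernel frees through __free_pages_bootmem() against the node's bootmem bitmap. The walk frees a whole BITS_PER_LONG block whenever the current PFN is word-aligned and the corresponding bitmap word is fully unreserved, and otherwise frees pages one by one before re-aligning to the next word boundary.

/* Build with: cc -std=c99 -o bulkfree bulkfree.c */
#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG    (sizeof(unsigned long) * CHAR_BIT)
#define IS_ALIGNED(x, a) (((x) & ((unsigned long)(a) - 1)) == 0)
#define ALIGN(x, a)      (((x) + (unsigned long)(a) - 1) & ~((unsigned long)(a) - 1))

/* Stand-ins for __free_pages_bootmem(); they only report what would be freed. */
static void free_block(unsigned long pfn, unsigned long npages)
{
        printf("bulk free:   pfn %lu-%lu\n", pfn, pfn + npages - 1);
}

static void free_one(unsigned long pfn)
{
        printf("single free: pfn %lu\n", pfn);
}

int main(void)
{
        /*
         * Hypothetical node: PFNs 0..199, node_min_pfn word-aligned.
         * A set bit in map[] means "page reserved"; pages 5 and 6 are
         * reserved here, everything else is free.
         */
        unsigned long node_min_pfn = 0, node_low_pfn = 200;
        unsigned long map[4] = { (1UL << 5) | (1UL << 6), 0, 0, 0 };
        unsigned long start = node_min_pfn, end = node_low_pfn, count = 0;

        while (start < end) {
                unsigned long idx = start - node_min_pfn;
                unsigned long vec = ~map[idx / BITS_PER_LONG];

                if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL &&
                    start + BITS_PER_LONG <= end) {
                        /* Aligned, fully unreserved word: free it in one go. */
                        free_block(start, BITS_PER_LONG);
                        count += BITS_PER_LONG;
                        start += BITS_PER_LONG;
                } else {
                        /* Free page by page, then resume at the next boundary. */
                        unsigned long off = 0;

                        while (vec && off < BITS_PER_LONG && start + off < end) {
                                if (vec & 1) {
                                        free_one(start + off);
                                        count++;
                                }
                                vec >>= 1;
                                off++;
                        }
                        start = ALIGN(start + 1, BITS_PER_LONG);
                }
        }
        printf("total pages freed: %lu\n", count);
        return 0;
}

Compared with the old code, which checked the alignment of the node's start PFN only once, this lets a node whose range starts at an unaligned PFN still reach bulk frees after the first partial word.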