mm: fix alloc_bootmem_core to use fast searching for all nodes

Make the nodes other than node 0 use bdata->last_success for fast
search too.

We need to use __alloc_bootmem_core() for vmemmap allocation on other
nodes when NUMA and sparsemem/vmemmap are enabled.
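
The control-flow change is easier to see outside the diff.  Below is a
minimal stand-alone sketch (not kernel code) contrasting the old and new
choice of scan start; struct node, preferred_old(), preferred_new() and
the numbers in main() are invented for the illustration, and the real
function's PFN conversion, alignment and offset handling are left out.

#include <stdio.h>

struct node {                          /* invented stand-in for bootmem_data */
        unsigned long node_boot_start; /* physical address of the node's first byte */
        unsigned long node_end;        /* one past the node's last byte             */
        unsigned long last_success;    /* node-relative offset remembered from the
                                          last successful allocation               */
};

/* Old logic: the last_success fast path is only reachable when the goal
 * lies at or above node_boot_start, i.e. only on a node whose range
 * contains the goal address. */
static unsigned long preferred_old(const struct node *n, unsigned long goal,
                                   unsigned long limit)
{
        unsigned long preferred;

        if (goal && goal >= n->node_boot_start && goal < n->node_end) {
                preferred = goal - n->node_boot_start;
                if (n->last_success >= preferred)
                        if (!limit || limit > n->last_success)
                                preferred = n->last_success;
        } else
                preferred = 0;
        return preferred;
}

/* New logic: every node consults last_success; a goal below the node
 * merely leaves preferred at 0 before that check. */
static unsigned long preferred_new(const struct node *n, unsigned long goal,
                                   unsigned long limit)
{
        unsigned long preferred = 0;

        if (goal && goal < n->node_end) {
                if (goal > n->node_boot_start)
                        preferred = goal - n->node_boot_start;
                if (n->last_success >= preferred)
                        if (!limit || limit > n->last_success)
                                preferred = n->last_success;
        }
        return preferred;
}

int main(void)
{
        /* Node 1: 512 MB starting at 1 GB, last allocation ended 64 MB in,
         * caller passes a goal of 16 MB (below the node) and no limit. */
        struct node n1 = { 1UL << 30, (1UL << 30) + (512UL << 20), 64UL << 20 };

        printf("old start: %lu, new start: %lu\n",
               preferred_old(&n1, 16UL << 20, 0),
               preferred_new(&n1, 16UL << 20, 0));
        return 0;
}

With a goal below the node's start address, the old code always fell into
the else branch and rescanned the node from its first page; the new code
still resumes at last_success.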

Also, make the fail_block path increase i by incr only when ALIGN() has
not already advanced it, to avoid an extra increase when size is larger
than align.
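
As a rough illustration of that last point (again not kernel code), the
helpers below contrast the old and new advance after a failed candidate
block; advance_old(), advance_new() and the numbers in main() are made up
for the example, and ALIGN() is written out locally so the snippet is
self-contained.

#include <stdio.h>

/* Equivalent to the kernel's ALIGN() for power-of-two alignments. */
#define ALIGN(x, a)     (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

/* Old: fail_block set i = ALIGN(j, incr) and the for-loop header then
 * added incr on top of it, so an aligned candidate block immediately
 * after the failure point could be skipped. */
static unsigned long advance_old(unsigned long j, unsigned long incr)
{
        unsigned long i = ALIGN(j, incr);
        return i + incr;                /* the loop's unconditional "i += incr" */
}

/* New: add incr only when ALIGN() did not already move i past j. */
static unsigned long advance_new(unsigned long j, unsigned long incr)
{
        unsigned long i = ALIGN(j, incr);
        if (i == j)
                i += incr;
        return i;
}

int main(void)
{
        /* incr = 4 (4-page alignment), block size larger than the alignment:
         * the candidate starting at bit 0 fails at j = 6.  The old code
         * resumed at bit 12 and never tested the block starting at bit 8;
         * the new code resumes at bit 8. */
        printf("old resume: %lu, new resume: %lu\n",
               advance_old(6, 4), advance_new(6, 4));
        return 0;
}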

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

commit ad09315cad
parent e123dd3f0e
Author:    Yinghai Lu <yhlu.kernel@gmail.com>
Date:      2008-03-10 23:23:42 -07:00
Committer: Ingo Molnar <mingo@elte.hu>

--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -238,28 +238,32 @@ __alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
          * We try to allocate bootmem pages above 'goal'
          * first, then we try to allocate lower pages.
          */
-        if (goal && goal >= bdata->node_boot_start && PFN_DOWN(goal) < end_pfn) {
-                preferred = goal - bdata->node_boot_start;
+        preferred = 0;
+        if (goal && PFN_DOWN(goal) < end_pfn) {
+                if (goal > bdata->node_boot_start)
+                        preferred = goal - bdata->node_boot_start;

                 if (bdata->last_success >= preferred)
                         if (!limit || (limit && limit > bdata->last_success))
                                 preferred = bdata->last_success;
-        } else
-                preferred = 0;
+        }

         preferred = PFN_DOWN(ALIGN(preferred, align)) + offset;
         areasize = (size + PAGE_SIZE-1) / PAGE_SIZE;
         incr = align >> PAGE_SHIFT ? : 1;

 restart_scan:
-        for (i = preferred; i < eidx; i += incr) {
+        for (i = preferred; i < eidx;) {
                 unsigned long j;
+
                 i = find_next_zero_bit(bdata->node_bootmem_map, eidx, i);
                 i = ALIGN(i, incr);
                 if (i >= eidx)
                         break;
-                if (test_bit(i, bdata->node_bootmem_map))
+                if (test_bit(i, bdata->node_bootmem_map)) {
+                        i += incr;
                         continue;
+                }
                 for (j = i + 1; j < i + areasize; ++j) {
                         if (j >= eidx)
                                 goto fail_block;
@@ -270,6 +274,8 @@ __alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
                         goto found;
 fail_block:
                 i = ALIGN(j, incr);
+                if (i == j)
+                        i += incr;
         }

         if (preferred > offset) {