mm: remove sparsemem allocation details from the bootmem allocator
alloc_bootmem_section() derives allocation area constraints from the specified sparsemem section. This is a bit specific for a generic memory allocator like bootmem, though, so move it over to sparsemem.

As __alloc_bootmem_node_nopanic() already retries failed allocations with relaxed area constraints, the fallback code in sparsemem.c can be removed and the code becomes a bit more compact overall.

[akpm@linux-foundation.org: fix build]
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: David S. Miller <davem@davemloft.net>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Gavin Shan <shangw@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e9079911e6
commit 238305bb4d

4 changed files with 12 additions and 60 deletions
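For orientation before the per-file hunks: the sparsemem-side replacement (shown in full in the mm/sparse.c hunks below, CONFIG_MEMORY_HOTREMOVE case) now derives its own allocation goal from the section that holds the pgdat and delegates any fallback to __alloc_bootmem_node_nopanic(), which already retries with relaxed constraints. A condensed sketch of that function as added by this commit; the comments are editorial and not part of the patch:

static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	pg_data_t *host_pgdat;
	unsigned long goal;

	/* Prefer memory from the same section as the pgdat itself... */
	goal = __pa(pgdat) & PAGE_SECTION_MASK;
	host_pgdat = NODE_DATA(early_pfn_to_nid(goal >> PAGE_SHIFT));

	/*
	 * ...and let the allocator relax the goal on its own if that
	 * section cannot satisfy the request, which replaces the old
	 * explicit fallback in the caller.
	 */
	return __alloc_bootmem_node_nopanic(host_pgdat, size,
					    SMP_CACHE_BYTES, goal);
}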
include/linux/bootmem.h

@@ -135,9 +135,6 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
 extern int reserve_bootmem_generic(unsigned long addr, unsigned long size,
 				   int flags);
 
-extern void *alloc_bootmem_section(unsigned long size,
-				   unsigned long section_nr);
-
 #ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP
 extern void *alloc_remap(int nid, unsigned long size);
 #else
mm/bootmem.c (22 changed lines)

@@ -803,28 +803,6 @@ void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
 }
 
-#ifdef CONFIG_SPARSEMEM
-/**
- * alloc_bootmem_section - allocate boot memory from a specific section
- * @size: size of the request in bytes
- * @section_nr: sparse map section to allocate from
- *
- * Return NULL on failure.
- */
-void * __init alloc_bootmem_section(unsigned long size,
-				    unsigned long section_nr)
-{
-	bootmem_data_t *bdata;
-	unsigned long pfn, goal;
-
-	pfn = section_nr_to_pfn(section_nr);
-	goal = pfn << PAGE_SHIFT;
-	bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];
-
-	return alloc_bootmem_bdata(bdata, size, SMP_CACHE_BYTES, goal, 0);
-}
-#endif
-
 #ifndef ARCH_LOW_ADDRESS_LIMIT
 #define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
 #endif
mm/nobootmem.c

@@ -355,28 +355,6 @@ void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
 	return __alloc_bootmem_node(pgdat, size, align, goal);
 }
 
-#ifdef CONFIG_SPARSEMEM
-/**
- * alloc_bootmem_section - allocate boot memory from a specific section
- * @size: size of the request in bytes
- * @section_nr: sparse map section to allocate from
- *
- * Return NULL on failure.
- */
-void * __init alloc_bootmem_section(unsigned long size,
-				    unsigned long section_nr)
-{
-	unsigned long pfn, goal, limit;
-
-	pfn = section_nr_to_pfn(section_nr);
-	goal = pfn << PAGE_SHIFT;
-	limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
-
-	return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
-					 SMP_CACHE_BYTES, goal, limit);
-}
-#endif
-
 #ifndef ARCH_LOW_ADDRESS_LIMIT
 #define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
 #endif
mm/sparse.c (25 changed lines)

@@ -273,10 +273,10 @@ static unsigned long *__kmalloc_section_usemap(void)
 #ifdef CONFIG_MEMORY_HOTREMOVE
 static unsigned long * __init
 sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
-					 unsigned long count)
+					 unsigned long size)
 {
-	unsigned long section_nr;
-
+	pg_data_t *host_pgdat;
+	unsigned long goal;
 	/*
 	 * A page may contain usemaps for other sections preventing the
 	 * page being freed and making a section unremovable while

@@ -287,8 +287,10 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
 	 * from the same section as the pgdat where possible to avoid
 	 * this problem.
 	 */
-	section_nr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
-	return alloc_bootmem_section(usemap_size() * count, section_nr);
+	goal = __pa(pgdat) & PAGE_SECTION_MASK;
+	host_pgdat = NODE_DATA(early_pfn_to_nid(goal >> PAGE_SHIFT));
+	return __alloc_bootmem_node_nopanic(host_pgdat, size,
+					    SMP_CACHE_BYTES, goal);
 }
 
 static void __init check_usemap_section_nr(int nid, unsigned long *usemap)

@@ -332,9 +334,9 @@ static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
 #else
 static unsigned long * __init
 sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
-					 unsigned long count)
+					 unsigned long size)
 {
-	return NULL;
+	return alloc_bootmem_node_nopanic(pgdat, size);
 }
 
 static void __init check_usemap_section_nr(int nid, unsigned long *usemap)

@@ -352,13 +354,10 @@ static void __init sparse_early_usemaps_alloc_node(unsigned long**usemap_map,
 	int size = usemap_size();
 
 	usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
-								 usemap_count);
+							  size * usemap_count);
 	if (!usemap) {
-		usemap = alloc_bootmem_node(NODE_DATA(nodeid), size * usemap_count);
-		if (!usemap) {
-			printk(KERN_WARNING "%s: allocation failed\n", __func__);
-			return;
-		}
+		printk(KERN_WARNING "%s: allocation failed\n", __func__);
+		return;
 	}
 
 	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {