mm: make defensive checks around PFN values registered for memory usage
There are a number of different views of how much memory is currently active: the arch-independent zone-sizing view, and the bootmem allocator and memory model views. Architectures register this information at different times, and it is not necessarily kept in sync, particularly with respect to some SPARSEMEM limitations.

This patch introduces mminit_validate_memmodel_limits(), which is able to validate and correct PFN ranges with respect to the memory model. Only SPARSEMEM currently validates itself.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 708614e618
commit 2dbb51c49f
4 changed files with 44 additions and 8 deletions
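The clamping that the new helper performs can be illustrated outside the kernel. Below is a minimal userspace sketch of the same start/end PFN validation; MAX_PHYSMEM_BITS and PAGE_SHIFT are assumed example values (the kernel derives them from the architecture configuration), plain stderr output stands in for mminit_dprintk()/WARN_ON_ONCE(), and a 64-bit unsigned long is assumed.

/*
 * Standalone illustration (not kernel code) of the range clamping done
 * by mminit_validate_memmodel_limits().  The constants below are
 * assumed example values, not taken from any real architecture config.
 */
#include <stdio.h>

#define MAX_PHYSMEM_BITS	46	/* assumed physical address bits */
#define PAGE_SHIFT		12	/* assumed 4 KiB pages */

static void validate_memmodel_limits(unsigned long *start_pfn,
				      unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);

	/* Start past the model's reach: collapse the range to empty. */
	if (*start_pfn > max_sparsemem_pfn) {
		fprintf(stderr, "start of range %lu -> %lu exceeds max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	}

	/* End past the model's reach: clamp it to the maximum PFN. */
	if (*end_pfn > max_sparsemem_pfn) {
		fprintf(stderr, "end of range %lu -> %lu exceeds max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		*end_pfn = max_sparsemem_pfn;
	}
}

int main(void)
{
	/* A deliberately oversized registration, e.g. a bogus end PFN. */
	unsigned long start = 0, end = 1UL << 40;

	validate_memmodel_limits(&start, &end);
	printf("validated range: %lu -> %lu\n", start, end);
	return 0;
}

Clamping rather than rejecting the registration keeps boot going with a truncated but valid range, while the WARN_ON_ONCE() in the real helper makes the bad registration visible.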
mm/bootmem.c

@@ -91,6 +91,7 @@ static unsigned long __init init_bootmem_core(pg_data_t *pgdat,
 	bootmem_data_t *bdata = pgdat->bdata;
 	unsigned long mapsize;
 
+	mminit_validate_memmodel_limits(&start, &end);
 	bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
 	bdata->node_boot_start = PFN_PHYS(start);
 	bdata->node_low_pfn = end;
mm/internal.h

@@ -98,4 +98,16 @@ static inline void mminit_verify_page_links(struct page *page,
 {
 }
 #endif /* CONFIG_DEBUG_MEMORY_INIT */
+
+/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
+#if defined(CONFIG_SPARSEMEM)
+extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
+				unsigned long *end_pfn);
+#else
+static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
+				unsigned long *end_pfn)
+{
+}
+#endif /* CONFIG_SPARSEMEM */
+
 #endif
mm/page_alloc.c

@@ -3536,6 +3536,8 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
 			nid, start_pfn, end_pfn,
 			nr_nodemap_entries, MAX_ACTIVE_REGIONS);
 
+	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
+
 	/* Merge with existing active regions if possible */
 	for (i = 0; i < nr_nodemap_entries; i++) {
 		if (early_node_map[i].nid != nid)
mm/sparse.c
@@ -12,6 +12,7 @@
 #include <asm/dma.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
+#include "internal.h"
 
 /*
  * Permanent SPARSEMEM data:
@@ -147,22 +148,41 @@ static inline int sparse_early_nid(struct mem_section *section)
 	return (section->section_mem_map >> SECTION_NID_SHIFT);
 }
 
-/* Record a memory area against a node. */
-void __init memory_present(int nid, unsigned long start, unsigned long end)
+/* Validate the physical addressing limitations of the model */
+void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
+						unsigned long *end_pfn)
 {
-	unsigned long max_arch_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);
-	unsigned long pfn;
+	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);
 
 	/*
 	 * Sanity checks - do not allow an architecture to pass
 	 * in larger pfns than the maximum scope of sparsemem:
 	 */
-	if (start >= max_arch_pfn)
-		return;
-	if (end >= max_arch_pfn)
-		end = max_arch_pfn;
+	if (*start_pfn > max_sparsemem_pfn) {
+		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
+			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
+			*start_pfn, *end_pfn, max_sparsemem_pfn);
+		WARN_ON_ONCE(1);
+		*start_pfn = max_sparsemem_pfn;
+		*end_pfn = max_sparsemem_pfn;
+	}
+
+	if (*end_pfn > max_sparsemem_pfn) {
+		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
+			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
+			*start_pfn, *end_pfn, max_sparsemem_pfn);
+		WARN_ON_ONCE(1);
+		*end_pfn = max_sparsemem_pfn;
+	}
+}
+
+/* Record a memory area against a node. */
+void __init memory_present(int nid, unsigned long start, unsigned long end)
+{
+	unsigned long pfn;
 
 	start &= PAGE_SECTION_MASK;
+	mminit_validate_memmodel_limits(&start, &end);
 	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
 		unsigned long section = pfn_to_section_nr(pfn);
 		struct mem_section *ms;
@@ -187,6 +207,7 @@ unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
 	unsigned long pfn;
 	unsigned long nr_pages = 0;
 
+	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
 	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
 		if (nid != early_pfn_to_nid(pfn))
 			continue;