[PATCH] sparsemem extreme implementation
With cleanups from Dave Hansen <haveblue@us.ibm.com>

SPARSEMEM_EXTREME makes mem_section a one-dimensional array of pointers to mem_sections. This two-level layout scheme achieves smaller memory requirements for SPARSEMEM, with the tradeoff of an additional shift and load when fetching a memory section. The current SPARSEMEM implementation, a one-dimensional array of mem_sections, remains the default SPARSEMEM configuration. The patch isolates the implementation details of the physical layout of the sparsemem section array.

SPARSEMEM_EXTREME requires bootmem to be functioning at the time of the memory_present() calls. This is not always feasible, so architectures which do not need it may allocate everything statically by using SPARSEMEM_STATIC.

Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Bob Picco <bob.picco@hp.com>
Signed-off-by: Dave Hansen <haveblue@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 802f192e4a
commit 3e347261a8
4 changed files with 50 additions and 38 deletions
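As a rough, standalone illustration of the two layouts being switched between here (all constants and names below are made up for the example; the real definitions are in the include/linux/mmzone.h hunk further down):

/* Standalone sketch of the flat vs. two-level section lookup.
 * All sizes here are illustrative, not the kernel's actual values. */
#include <stdio.h>
#include <stdlib.h>

struct mem_section { unsigned long section_mem_map; };

#define EX_NR_MEM_SECTIONS   (1UL << 16)   /* hypothetical section count */
#define EX_PAGE_SIZE         4096UL        /* hypothetical page size */
#define EX_SECTIONS_PER_ROOT (EX_PAGE_SIZE / sizeof(struct mem_section))
#define EX_NR_SECTION_ROOTS  (EX_NR_MEM_SECTIONS / EX_SECTIONS_PER_ROOT)
#define EX_SECTION_ROOT_MASK (EX_SECTIONS_PER_ROOT - 1)

/* SPARSEMEM_STATIC style: one flat array, sized for every possible section. */
static struct mem_section flat[EX_NR_MEM_SECTIONS];

static struct mem_section *flat_lookup(unsigned long nr)
{
        return &flat[nr];                        /* single indexed load */
}

/* SPARSEMEM_EXTREME style: a small table of root pointers; each root is a
 * page-sized block of sections allocated only when that range is populated. */
static struct mem_section *roots[EX_NR_SECTION_ROOTS];

static struct mem_section *extreme_lookup(unsigned long nr)
{
        struct mem_section *root = roots[nr / EX_SECTIONS_PER_ROOT];

        if (!root)
                return NULL;                     /* hole: root never allocated */
        return &root[nr & EX_SECTION_ROOT_MASK]; /* extra shift + dependent load */
}

int main(void)
{
        unsigned long nr = 1234;

        /* Populate one root on demand (the kernel uses bootmem for this). */
        roots[nr / EX_SECTIONS_PER_ROOT] = calloc(EX_SECTIONS_PER_ROOT,
                                                  sizeof(struct mem_section));

        printf("flat:    %p\n", (void *)flat_lookup(nr));
        printf("extreme: %p\n", (void *)extreme_lookup(nr));
        return 0;
}

The extra division (a shift, since the divisor is a power of two) and the dependent load through the root pointer are the cost the log refers to.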
arch/i386/Kconfig

@@ -754,6 +754,7 @@ config NUMA
 	depends on SMP && HIGHMEM64G && (X86_NUMAQ || X86_GENERICARCH || (X86_SUMMIT && ACPI))
 	default n if X86_PC
 	default y if (X86_NUMAQ || X86_SUMMIT)
+	select SPARSEMEM_STATIC
 
 # Need comments to help the hapless user trying to turn on NUMA support
 comment "NUMA (NUMA-Q) requires SMP, 64GB highmem support"
include/linux/mmzone.h

@@ -487,39 +487,29 @@ struct mem_section {
 	unsigned long section_mem_map;
 };
 
-#ifdef CONFIG_ARCH_SPARSEMEM_EXTREME
-/*
- * Should we ever require GCC 4 or later then the flat array scheme
- * can be eliminated and a uniform solution for EXTREME and !EXTREME can
- * be arrived at.
- */
-#define SECTION_ROOT_SHIFT	(PAGE_SHIFT-3)
-#define SECTION_ROOT_MASK	((1UL<<SECTION_ROOT_SHIFT) - 1)
-#define SECTION_TO_ROOT(_sec)	((_sec) >> SECTION_ROOT_SHIFT)
-#define NR_SECTION_ROOTS	(NR_MEM_SECTIONS >> SECTION_ROOT_SHIFT)
-
-extern struct mem_section *mem_section[NR_SECTION_ROOTS];
-
-static inline struct mem_section *__nr_to_section(unsigned long nr)
-{
-	if (!mem_section[SECTION_TO_ROOT(nr)])
-		return NULL;
-	return &mem_section[SECTION_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
-}
-
+#ifdef CONFIG_SPARSEMEM_EXTREME
+#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
 #else
-
-extern struct mem_section mem_section[NR_MEM_SECTIONS];
-
+#define SECTIONS_PER_ROOT	1
+#endif
+
+#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
+#define NR_SECTION_ROOTS	(NR_MEM_SECTIONS / SECTIONS_PER_ROOT)
+#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)
+
+#ifdef CONFIG_SPARSEMEM_EXTREME
+extern struct mem_section *mem_section[NR_SECTION_ROOTS];
+#else
+extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
+#endif
+
 static inline struct mem_section *__nr_to_section(unsigned long nr)
 {
-	return &mem_section[nr];
+	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
+		return NULL;
+	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
 }
 
-#define sparse_index_init(_sec, _nid)	do {} while (0)
-
-#endif
-
 /*
  * We use the lower bits of the mem_map pointer to store
  * a little bit of information.  There should be at least
mm/Kconfig

@@ -90,11 +90,24 @@ config HAVE_MEMORY_PRESENT
 	def_bool y
 	depends on ARCH_HAVE_MEMORY_PRESENT || SPARSEMEM
 
+#
+# SPARSEMEM_EXTREME (which is the default) does some bootmem
+# allocations when memory_present() is called.  If this can not
+# be done on your architecture, select this option.  However,
+# statically allocating the mem_section[] array can potentially
+# consume vast quantities of .bss, so be careful.
+#
+# This option will also potentially produce smaller runtime code
+# with gcc 3.4 and later.
+#
+config SPARSEMEM_STATIC
+	def_bool n
+
 #
 # Architectecture platforms which require a two level mem_section in SPARSEMEM
 # must select this option. This is usually for architecture platforms with
 # an extremely sparse physical address space.
 #
-config ARCH_SPARSEMEM_EXTREME
-	def_bool n
-	depends on SPARSEMEM && 64BIT
+config SPARSEMEM_EXTREME
+	def_bool y
+	depends on SPARSEMEM && !SPARSEMEM_STATIC
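To put rough numbers on the .bss warning in the comment above, here is the arithmetic under assumed values (a 4 KB page, 2^20 possible sections, and an 8-byte struct mem_section on a 64-bit build; none of these values are mandated by the patch):

/* Worked example only: all constants are assumed, not taken from the patch. */
#include <stdio.h>

struct mem_section { unsigned long section_mem_map; };  /* 8 bytes on 64-bit */

#define EX_NR_MEM_SECTIONS   (1UL << 20)  /* hypothetical: 2^20 sections */
#define EX_PAGE_SIZE         4096UL       /* hypothetical: 4 KB pages */

int main(void)
{
        unsigned long per_root = EX_PAGE_SIZE / sizeof(struct mem_section); /* 512 */
        unsigned long nr_roots = EX_NR_MEM_SECTIONS / per_root;             /* 2048 */

        /* SPARSEMEM_STATIC: the whole section array lives in .bss. */
        printf("static:  %lu bytes\n",
               (unsigned long)(EX_NR_MEM_SECTIONS * sizeof(struct mem_section))); /* 8 MB */

        /* SPARSEMEM_EXTREME: only the root pointer table is static; each
         * populated root costs one page, allocated from bootmem at runtime. */
        printf("extreme: %lu bytes of pointers + %lu bytes per populated root\n",
               (unsigned long)(nr_roots * sizeof(struct mem_section *)),          /* 16 KB */
               EX_PAGE_SIZE);
        return 0;
}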
mm/sparse.c

@@ -13,28 +13,36 @@
  *
  * 1) mem_section	- memory sections, mem_map's for valid memory
  */
-#ifdef CONFIG_ARCH_SPARSEMEM_EXTREME
+#ifdef CONFIG_SPARSEMEM_EXTREME
 struct mem_section *mem_section[NR_SECTION_ROOTS]
 	____cacheline_maxaligned_in_smp;
+#else
+struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
+	____cacheline_maxaligned_in_smp;
+#endif
+EXPORT_SYMBOL(mem_section);
 
+static void sparse_alloc_root(unsigned long root, int nid)
+{
+#ifdef CONFIG_SPARSEMEM_EXTREME
+	mem_section[root] = alloc_bootmem_node(NODE_DATA(nid), PAGE_SIZE);
+#endif
+}
+
 static void sparse_index_init(unsigned long section, int nid)
 {
-	unsigned long root = SECTION_TO_ROOT(section);
+	unsigned long root = SECTION_NR_TO_ROOT(section);
 
 	if (mem_section[root])
 		return;
-	mem_section[root] = alloc_bootmem_node(NODE_DATA(nid), PAGE_SIZE);
+	sparse_alloc_root(root, nid);
+
 	if (mem_section[root])
 		memset(mem_section[root], 0, PAGE_SIZE);
 	else
 		panic("memory_present: NO MEMORY\n");
 }
-#else
-struct mem_section mem_section[NR_MEM_SECTIONS]
-	____cacheline_maxaligned_in_smp;
-#endif
-EXPORT_SYMBOL(mem_section);
 
 /* Record a memory area against a node. */
 void memory_present(int nid, unsigned long start, unsigned long end)
 {
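Finally, a self-contained mock of the ordering constraint called out in the log: with SPARSEMEM_EXTREME, the first memory_present() call that touches a new root must be able to allocate that root's page of mem_sections, so the boot-time allocator has to be up first. Everything prefixed mock_ below is an illustrative stand-in, not a kernel interface:

/* Mock of why memory_present() needs bootmem with SPARSEMEM_EXTREME.
 * Everything prefixed mock_ is an illustrative stand-in, not kernel API. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct mem_section { unsigned long section_mem_map; };

#define MOCK_PAGE_SIZE         4096UL
#define MOCK_SECTIONS_PER_ROOT (MOCK_PAGE_SIZE / sizeof(struct mem_section))
#define MOCK_NR_ROOTS          64UL

static struct mem_section *mem_section_roots[MOCK_NR_ROOTS];
static int mock_bootmem_up;     /* set once the boot allocator is initialised */

static void *mock_alloc_bootmem(unsigned long size)
{
        if (!mock_bootmem_up)
                return NULL;    /* too early: nothing to hand out yet */
        return malloc(size);
}

/* Mirrors the patched sparse_index_init(): populate a root on first use. */
static void mock_sparse_index_init(unsigned long section)
{
        unsigned long root = section / MOCK_SECTIONS_PER_ROOT;

        if (mem_section_roots[root])
                return;
        mem_section_roots[root] = mock_alloc_bootmem(MOCK_PAGE_SIZE);

        if (mem_section_roots[root])
                memset(mem_section_roots[root], 0, MOCK_PAGE_SIZE);
        else
                fprintf(stderr, "memory_present before bootmem: no memory\n");
}

int main(void)
{
        mock_sparse_index_init(100);   /* too early: takes the failure path */

        mock_bootmem_up = 1;           /* "bootmem" is now functioning */
        mock_sparse_index_init(100);   /* now the root page can be allocated */

        printf("root populated: %s\n",
               mem_section_roots[100 / MOCK_SECTIONS_PER_ROOT] ? "yes" : "no");
        return 0;
}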