c132937556
Impact: cleaner and consistent bootmem wrapping By setting CONFIG_HAVE_ARCH_BOOTMEM_NODE, archs can define arch-specific wrappers for bootmem allocation. However, this is done a bit strangely in that only the high level convenience macros can be changed while lower level, but still exported, interface functions can't be wrapped. This not only is messy but also leads to strange situation where alloc_bootmem() does what the arch wants it to do but the equivalent __alloc_bootmem() call doesn't although they should be able to be used interchangeably. This patch updates bootmem such that archs can override / wrap the backend function - alloc_bootmem_core() instead of the highlevel interface functions to allow simpler and consistent wrapping. Also, HAVE_ARCH_BOOTMEM_NODE is renamed to HAVE_ARCH_BOOTMEM. Signed-off-by: Tejun Heo <tj@kernel.org> Cc: Johannes Weiner <hannes@saeurebad.de>
105 lines
2.3 KiB
C
105 lines
2.3 KiB
C
/*
 * Written by Pat Gaughen (gone@us.ibm.com) Mar 2002
 *
 */
|
|
|
|
#ifndef _ASM_X86_MMZONE_32_H
|
|
#define _ASM_X86_MMZONE_32_H
|
|
|
|
#include <asm/smp.h>
|
|
|
|
#ifdef CONFIG_NUMA
|
|
extern struct pglist_data *node_data[];
|
|
#define NODE_DATA(nid) (node_data[nid])
|
|
|
|
#include <asm/numaq.h>
|
|
/* summit or generic arch */
|
|
#include <asm/srat.h>
|
|
|
|
extern int get_memcfg_numa_flat(void);
|
|
/*
 * This allows any one NUMA architecture to be compiled
 * for, and still fall back to the flat function if it
 * fails.
 */
|
/*
 * Probe the platform-specific NUMA memory-config sources in order
 * (NUMAQ first, then SRAT); when neither reports success, fall back
 * to the flat (single-node) layout.
 */
static inline void get_memcfg_numa(void)
{
	if (!get_memcfg_numaq() && !get_memcfg_from_srat())
		get_memcfg_numa_flat();
}
|
|
|
|
extern int early_pfn_to_nid(unsigned long pfn);
|
|
|
|
extern void resume_map_numa_kva(pgd_t *pgd);
|
|
|
|
#else /* !CONFIG_NUMA */
|
|
|
|
#define get_memcfg_numa get_memcfg_numa_flat
|
|
|
|
static inline void resume_map_numa_kva(pgd_t *pgd) {}
|
|
|
|
#endif /* CONFIG_NUMA */
|
|
|
|
#ifdef CONFIG_DISCONTIGMEM
|
|
|
|
/*
 * generic node memory support, the following assumptions apply:
 *
 * 1) memory comes in 64Mb contiguous chunks which are either present or not
 * 2) we will not have more than 64Gb in total
 *
 * for now assume that 64Gb is max amount of RAM for whole system
 * 64Gb / 4096bytes/page = 16777216 pages
 */
|
|
#define MAX_NR_PAGES 16777216
|
|
#define MAX_ELEMENTS 1024
|
|
#define PAGES_PER_ELEMENT (MAX_NR_PAGES/MAX_ELEMENTS)
|
|
|
|
extern s8 physnode_map[];
|
|
|
|
/*
 * Map a page frame number to the node that owns it.  Each entry of
 * physnode_map covers one PAGES_PER_ELEMENT-sized (64MB) chunk of the
 * physical address space.  Without CONFIG_NUMA everything is node 0.
 */
static inline int pfn_to_nid(unsigned long pfn)
{
#ifdef CONFIG_NUMA
	return (int)physnode_map[pfn / PAGES_PER_ELEMENT];
#else
	return 0;
#endif
}
|
|
|
|
/*
 * Following are macros that each numa implementation must define.
 */
|
|
|
|
/* First page frame number spanned by node @nid. */
#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)

/*
 * One past the last page frame number spanned by node @nid.
 * Uses a statement expression so NODE_DATA(nid) is evaluated only once.
 */
#define node_end_pfn(nid)						\
({									\
	pg_data_t *__pgdat = NODE_DATA(nid);				\
	__pgdat->node_start_pfn + __pgdat->node_spanned_pages;		\
})
|
|
|
|
/*
 * Return non-zero when @pfn falls inside the span of the node that
 * physnode_map assigns it to, zero otherwise.
 * NOTE(review): pfn is signed here while pfn_to_nid() takes
 * unsigned long — presumably fine for 32-bit DISCONTIGMEM ranges,
 * but worth confirming against callers.
 */
static inline int pfn_valid(int pfn)
{
	int nid = pfn_to_nid(pfn);

	if (nid < 0)
		return 0;

	return pfn < node_end_pfn(nid);
}
|
|
|
|
#endif /* CONFIG_DISCONTIGMEM */
|
|
|
|
#ifdef CONFIG_NEED_MULTIPLE_NODES
|
|
/* always use node 0 for bootmem on this numa platform */
/*
 * Arch override of the bootmem backend: redirect every low-level
 * bootmem allocation to node 0's bootmem_data regardless of the
 * bdata the caller passed in.  The dummy assignment preserves type
 * checking of the __bdata argument while discarding its value;
 * __maybe_unused silences the unused-variable warning.
 */
#define alloc_bootmem_core(__bdata, size, align, goal, limit)		\
({									\
	bootmem_data_t __maybe_unused * __abm_bdata_dummy = (__bdata);	\
	__alloc_bootmem_core(NODE_DATA(0)->bdata,			\
			     (size), (align), (goal), (limit));		\
})
|
|
#endif /* CONFIG_NEED_MULTIPLE_NODES */
|
|
|
|
#endif /* _ASM_X86_MMZONE_32_H */
|