a387e95a49
NUMA boot code assumes that physical node ids start at 0, but the DIMMs that the apic id represents may not be reachable. If this is the case, node 0 is never online and cpus never end up getting appropriately assigned to a node. This causes the cpumask of all online nodes to be empty and machines crash with kernel code assuming online nodes have valid cpus. The fix is to appropriately map all the address ranges for physical nodes and ensure the cpu to node mapping function checks all possible nodes (up to MAX_NUMNODES) instead of simply checking nodes 0-N, where N is the number of physical nodes, for valid address ranges. This requires no longer "compressing" the address ranges of nodes in the physical node map from 0-N, but rather leaving indices in physnodes[] to represent the actual node id of the physical node. Accordingly, the topology exported by both amd_get_nodes() and acpi_get_nodes() no longer needs to return the number of nodes to iterate through; all such iterations will now be to MAX_NUMNODES. This change also passes the end address of system RAM (which may be different from normal operation if mem= is specified on the command line) before the physnodes[] array is populated. ACPI parsed nodes are truncated to fit within the address range that respects the mem= boundaries, and even some physical nodes may become unreachable in such cases. When NUMA emulation does succeed, any apicid-to-node mappings that exist for unreachable nodes are given default values so that proximity domains can still be assigned. This is important for node_distance() to function as desired. Signed-off-by: David Rientjes <rientjes@google.com> LKML-Reference: <alpine.DEB.2.00.1012221702090.3701@chino.kir.corp.google.com> Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
60 lines
1.2 KiB
C
60 lines
1.2 KiB
C
#ifndef _ASM_X86_AMD_NB_H
|
|
#define _ASM_X86_AMD_NB_H
|
|
|
|
#include <linux/pci.h>
|
|
|
|
extern struct pci_device_id amd_nb_misc_ids[];
|
|
struct bootnode;
|
|
|
|
extern int early_is_amd_nb(u32 value);
|
|
extern int amd_cache_northbridges(void);
|
|
extern void amd_flush_garts(void);
|
|
extern int amd_numa_init(unsigned long start_pfn, unsigned long end_pfn);
|
|
extern int amd_scan_nodes(void);
|
|
|
|
#ifdef CONFIG_NUMA_EMU
|
|
extern void amd_fake_nodes(const struct bootnode *nodes, int nr_nodes);
|
|
extern void amd_get_nodes(struct bootnode *nodes);
|
|
#endif
|
|
|
|
/*
 * Per-northbridge state. Currently only caches the "misc" PCI device
 * for one AMD northbridge (presumably the miscellaneous-control PCI
 * function matched by amd_nb_misc_ids[] — confirm against the .c side).
 */
struct amd_northbridge {
	struct pci_dev *misc;
};
|
|
|
|
/*
 * Global bookkeeping for all cached AMD northbridges; the single
 * instance is the extern 'amd_northbridges' below.
 */
struct amd_northbridge_info {
	u16 num;			/* number of entries in nb[]; upper bound used by node_to_amd_nb() */
	u64 flags;			/* bitmask of AMD_NB_* feature flags, tested by amd_nb_has_feature() */
	struct amd_northbridge *nb;	/* array of num northbridge descriptors, indexed by node id */
};
|
|
extern struct amd_northbridge_info amd_northbridges;
|
|
|
|
#define AMD_NB_GART 0x1
|
|
#define AMD_NB_L3_INDEX_DISABLE 0x2
|
|
|
|
#ifdef CONFIG_AMD_NB
|
|
|
|
static inline int amd_nb_num(void)
|
|
{
|
|
return amd_northbridges.num;
|
|
}
|
|
|
|
static inline int amd_nb_has_feature(int feature)
|
|
{
|
|
return ((amd_northbridges.flags & feature) == feature);
|
|
}
|
|
|
|
static inline struct amd_northbridge *node_to_amd_nb(int node)
|
|
{
|
|
return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
|
|
}
|
|
|
|
#else
|
|
|
|
#define amd_nb_num(x) 0
|
|
#define amd_nb_has_feature(x) false
|
|
#define node_to_amd_nb(x) NULL
|
|
|
|
#endif
|
|
|
|
|
|
#endif /* _ASM_X86_AMD_NB_H */
|