ia64: switch to NO_BOOTMEM
Since ia64 already uses memblock to register available physical memory, all that is required is to move the calls to register_active_ranges() (which wrap memblock_add_node()) earlier, and to replace the bootmem memory reservations with memblock_reserve(). All of the code that finds a place for the bootmem bitmap is removed.

Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
commit f62800992e
parent fb63fbee42
4 changed files with 22 additions and 135 deletions
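The pattern behind all of the hunks below is small, so a sketch may help. The following is illustrative only and is not code from this commit: the function name and the reserved-region values are invented, and it assumes the memblock API of this kernel generation, where memblock_add_node() takes a physical base, a size, and a NUMA node id, and memblock_reserve() takes a base and a size.

#include <linux/memblock.h>	/* memblock_add_node(), memblock_reserve() */
#include <linux/pfn.h>		/* PFN_PHYS() */

/* Illustrative sketch of the bootmem -> memblock conversion. */
static void __init example_register_and_reserve(void)
{
	phys_addr_t res_base = 0x04000000;	/* hypothetical reserved region */
	phys_addr_t res_size = 0x00200000;

	/*
	 * Make all RAM (node 0 here) known to memblock.  This replaces the
	 * old init_bootmem_node()/free_bootmem() bookkeeping; on ia64 it is
	 * what register_active_ranges() ends up doing via memblock_add_node().
	 */
	memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);

	/*
	 * Mark a region as in use so the early allocator never hands it out.
	 * This replaces reserve_bootmem()/reserve_bootmem_node().
	 */
	memblock_reserve(res_base, res_size);
}

With NO_BOOTMEM selected, generic code provides the bootmem-compatible allocation entry points on top of memblock, which is why the arch-specific bootmap plumbing below can simply be deleted rather than rewritten.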
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
@@ -31,6 +31,7 @@ config IA64
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_MEMBLOCK
 	select HAVE_MEMBLOCK_NODE_MAP
+	select NO_BOOTMEM
 	select HAVE_VIRT_CPU_ACCOUNTING
 	select ARCH_HAS_DMA_MARK_CLEAN
 	select ARCH_HAS_SG_CHAIN
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
@@ -32,6 +32,7 @@
 #include <linux/delay.h>
 #include <linux/cpu.h>
 #include <linux/kernel.h>
+#include <linux/memblock.h>
 #include <linux/reboot.h>
 #include <linux/sched/mm.h>
 #include <linux/sched/clock.h>
@@ -383,8 +384,16 @@ reserve_memory (void)
 
 	sort_regions(rsvd_region, num_rsvd_regions);
 	num_rsvd_regions = merge_regions(rsvd_region, num_rsvd_regions);
-}
 
+	/* reserve all regions except the end of memory marker with memblock */
+	for (n = 0; n < num_rsvd_regions - 1; n++) {
+		struct rsvd_region *region = &rsvd_region[n];
+		phys_addr_t addr = __pa(region->start);
+		phys_addr_t size = region->end - region->start;
+
+		memblock_reserve(addr, size);
+	}
+}
 
 /**
  * find_initrd - get initrd parameters from the boot parameter structure
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
@@ -34,53 +34,6 @@ static unsigned long max_gap;
 /* physical address where the bootmem map is located */
 unsigned long bootmap_start;
 
-/**
- * find_bootmap_location - callback to find a memory area for the bootmap
- * @start: start of region
- * @end: end of region
- * @arg: unused callback data
- *
- * Find a place to put the bootmap and return its starting address in
- * bootmap_start.  This address must be page-aligned.
- */
-static int __init
-find_bootmap_location (u64 start, u64 end, void *arg)
-{
-	u64 needed = *(unsigned long *)arg;
-	u64 range_start, range_end, free_start;
-	int i;
-
-#if IGNORE_PFN0
-	if (start == PAGE_OFFSET) {
-		start += PAGE_SIZE;
-		if (start >= end)
-			return 0;
-	}
-#endif
-
-	free_start = PAGE_OFFSET;
-
-	for (i = 0; i < num_rsvd_regions; i++) {
-		range_start = max(start, free_start);
-		range_end = min(end, rsvd_region[i].start & PAGE_MASK);
-
-		free_start = PAGE_ALIGN(rsvd_region[i].end);
-
-		if (range_end <= range_start)
-			continue;	/* skip over empty range */
-
-		if (range_end - range_start >= needed) {
-			bootmap_start = __pa(range_start);
-			return -1;	/* done */
-		}
-
-		/* nothing more available in this segment */
-		if (range_end == end)
-			return 0;
-	}
-	return 0;
-}
-
 #ifdef CONFIG_SMP
 static void *cpu_data;
 /**
@@ -196,8 +149,6 @@ setup_per_cpu_areas(void)
 void __init
 find_memory (void)
 {
-	unsigned long bootmap_size;
-
 	reserve_memory();
 
 	/* first find highest page frame number */
@@ -205,21 +156,12 @@ find_memory (void)
 	max_low_pfn = 0;
 	efi_memmap_walk(find_max_min_low_pfn, NULL);
 	max_pfn = max_low_pfn;
-	/* how many bytes to cover all the pages */
-	bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;
-
-	/* look for a location to hold the bootmap */
-	bootmap_start = ~0UL;
-	efi_memmap_walk(find_bootmap_location, &bootmap_size);
-	if (bootmap_start == ~0UL)
-		panic("Cannot find %ld bytes for bootmap\n", bootmap_size);
-
-	bootmap_size = init_bootmem_node(NODE_DATA(0),
-			(bootmap_start >> PAGE_SHIFT), 0, max_pfn);
-
-	/* Free all available memory, then mark bootmem-map as being in use. */
-	efi_memmap_walk(filter_rsvd_memory, free_bootmem);
-	reserve_bootmem(bootmap_start, bootmap_size, BOOTMEM_DEFAULT);
+
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+	efi_memmap_walk(filter_memory, register_active_ranges);
+#else
+	memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
+#endif
 
 	find_initrd();
 
@@ -244,7 +186,6 @@ paging_init (void)
 	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-	efi_memmap_walk(filter_memory, register_active_ranges);
 	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
 	if (max_gap < LARGE_GAP) {
 		vmem_map = (struct page *) 0;
@@ -268,8 +209,6 @@ paging_init (void)
 
 		printk("Virtual mem_map starts at 0x%p\n", mem_map);
 	}
-#else /* !CONFIG_VIRTUAL_MEM_MAP */
-	memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
 #endif /* !CONFIG_VIRTUAL_MEM_MAP */
 	free_area_init_nodes(max_zone_pfns);
 	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
@@ -20,6 +20,7 @@
 #include <linux/nmi.h>
 #include <linux/swap.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/acpi.h>
 #include <linux/efi.h>
 #include <linux/nodemask.h>
@@ -264,7 +265,6 @@ static void __init fill_pernode(int node, unsigned long pernode,
 {
 	void *cpu_data;
 	int cpus = early_nr_cpus_node(node);
-	struct bootmem_data *bdp = &bootmem_node_data[node];
 
 	mem_data[node].pernode_addr = pernode;
 	mem_data[node].pernode_size = pernodesize;
@@ -279,8 +279,6 @@ static void __init fill_pernode(int node, unsigned long pernode,
 
 	mem_data[node].node_data = __va(pernode);
 	pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
-
-	pgdat_list[node]->bdata = bdp;
 	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
 
 	cpu_data = per_cpu_node_setup(cpu_data, node);
@@ -320,14 +318,11 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
 				     int node)
 {
 	unsigned long spfn, epfn;
-	unsigned long pernodesize = 0, pernode, pages, mapsize;
+	unsigned long pernodesize = 0, pernode;
 
 	spfn = start >> PAGE_SHIFT;
 	epfn = (start + len) >> PAGE_SHIFT;
 
-	pages = mem_data[node].max_pfn - mem_data[node].min_pfn;
-	mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
-
 	/*
 	 * Make sure this memory falls within this node's usable memory
 	 * since we may have thrown some away in build_maps().
@@ -347,31 +342,12 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
 	pernode = NODEDATA_ALIGN(start, node);
 
 	/* Is this range big enough for what we want to store here? */
-	if (start + len > (pernode + pernodesize + mapsize))
+	if (start + len > (pernode + pernodesize))
 		fill_pernode(node, pernode, pernodesize);
 
 	return 0;
 }
 
-/**
- * free_node_bootmem - free bootmem allocator memory for use
- * @start: physical start of range
- * @len: length of range
- * @node: node where this range resides
- *
- * Simply calls the bootmem allocator to free the specified ranged from
- * the given pg_data_t's bdata struct.  After this function has been called
- * for all the entries in the EFI memory map, the bootmem allocator will
- * be ready to service allocation requests.
- */
-static int __init free_node_bootmem(unsigned long start, unsigned long len,
-				    int node)
-{
-	free_bootmem_node(pgdat_list[node], start, len);
-
-	return 0;
-}
-
 /**
  * reserve_pernode_space - reserve memory for per-node space
  *
@@ -381,28 +357,17 @@ static int __init free_node_bootmem(unsigned long start, unsigned long len,
  */
 static void __init reserve_pernode_space(void)
 {
-	unsigned long base, size, pages;
-	struct bootmem_data *bdp;
+	unsigned long base, size;
 	int node;
 
 	for_each_online_node(node) {
-		pg_data_t *pdp = pgdat_list[node];
-
 		if (node_isset(node, memory_less_mask))
 			continue;
 
-		bdp = pdp->bdata;
-
-		/* First the bootmem_map itself */
-		pages = mem_data[node].max_pfn - mem_data[node].min_pfn;
-		size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
-		base = __pa(bdp->node_bootmem_map);
-		reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
-
 		/* Now the per-node space */
 		size = mem_data[node].pernode_size;
 		base = __pa(mem_data[node].pernode_addr);
-		reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
+		memblock_reserve(base, size);
 	}
 }
 
@@ -522,6 +487,7 @@ void __init find_memory(void)
 	int node;
 
 	reserve_memory();
+	efi_memmap_walk(filter_memory, register_active_ranges);
 
 	if (num_online_nodes() == 0) {
 		printk(KERN_ERR "node info missing!\n");
@@ -541,34 +507,6 @@ void __init find_memory(void)
 		if (mem_data[node].min_pfn)
 			node_clear(node, memory_less_mask);
 
-	efi_memmap_walk(filter_memory, register_active_ranges);
-
-	/*
-	 * Initialize the boot memory maps in reverse order since that's
-	 * what the bootmem allocator expects
-	 */
-	for (node = MAX_NUMNODES - 1; node >= 0; node--) {
-		unsigned long pernode, pernodesize, map;
-		struct bootmem_data *bdp;
-
-		if (!node_online(node))
-			continue;
-		else if (node_isset(node, memory_less_mask))
-			continue;
-
-		bdp = &bootmem_node_data[node];
-		pernode = mem_data[node].pernode_addr;
-		pernodesize = mem_data[node].pernode_size;
-		map = pernode + pernodesize;
-
-		init_bootmem_node(pgdat_list[node],
-				  map>>PAGE_SHIFT,
-				  mem_data[node].min_pfn,
-				  mem_data[node].max_pfn);
-	}
-
-	efi_memmap_walk(filter_rsvd_memory, free_node_bootmem);
-
 	reserve_pernode_space();
 	memory_less_nodes();
 	initialize_pernode_data();