x86, cpa: remove cpa pool code

Interrupt context no longer splits large pages in cpa(), so we can do away
with the cpa memory pool code.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: arjan@linux.intel.com
Cc: venkatesh.pallipadi@intel.com
Cc: jeremy@goop.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 8311eb84bf (parent 55121b4369)
Author: Suresh Siddha, 2008-09-23 14:00:41 -07:00
Committed by: Ingo Molnar
4 changed files with 5 additions and 157 deletions
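In short: split_large_page() now allocates its helper page directly with
alloc_pages(GFP_KERNEL, 0) instead of drawing from the pre-filled pool. A
minimal sketch of the resulting allocation path, condensed from the
pageattr.c hunks below (the split logic in between is unchanged and base is
set to NULL once the new page table is installed):

	/* direct allocation, no pool; GFP_KERNEL is fine since cpa() is
	 * no longer entered from interrupt context */
	struct page *base = alloc_pages(GFP_KERNEL, 0);
	if (!base)
		return -ENOMEM;

	spin_lock_irqsave(&pgd_lock, flags);
	/* ... check for races, build and install the split page table ... */
	/* if the page was not consumed (another CPU split the mapping
	 * first), free it instead of returning it to the pool */
	if (base)
		__free_page(base);
	spin_unlock_irqrestore(&pgd_lock, flags);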


@@ -1051,7 +1051,6 @@ void __init mem_init(void)
 	if (boot_cpu_data.wp_works_ok < 0)
 		test_wp_bit();
 
-	cpa_init();
 	save_pg_dir();
 	zap_low_mappings();
 }


@@ -889,8 +889,6 @@ void __init mem_init(void)
 		reservedpages << (PAGE_SHIFT-10),
 		datasize >> 10,
 		initsize >> 10);
-
-	cpa_init();
 }
 
 void free_init_pages(char *what, unsigned long begin, unsigned long end)


@@ -447,114 +447,17 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	return do_split;
 }
 
-static LIST_HEAD(page_pool);
-static unsigned long pool_size, pool_pages, pool_low;
-static unsigned long pool_used, pool_failed;
-
-static void cpa_fill_pool(struct page **ret)
-{
-	gfp_t gfp = GFP_KERNEL;
-	unsigned long flags;
-	struct page *p;
-
-	/*
-	 * Avoid recursion (on debug-pagealloc) and also signal
-	 * our priority to get to these pagetables:
-	 */
-	if (current->flags & PF_MEMALLOC)
-		return;
-	current->flags |= PF_MEMALLOC;
-
-	/*
-	 * Allocate atomically from atomic contexts:
-	 */
-	if (in_atomic() || irqs_disabled() || debug_pagealloc)
-		gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
-
-	while (pool_pages < pool_size || (ret && !*ret)) {
-		p = alloc_pages(gfp, 0);
-		if (!p) {
-			pool_failed++;
-			break;
-		}
-		/*
-		 * If the call site needs a page right now, provide it:
-		 */
-		if (ret && !*ret) {
-			*ret = p;
-			continue;
-		}
-		spin_lock_irqsave(&pgd_lock, flags);
-		list_add(&p->lru, &page_pool);
-		pool_pages++;
-		spin_unlock_irqrestore(&pgd_lock, flags);
-	}
-
-	current->flags &= ~PF_MEMALLOC;
-}
-
-#define SHIFT_MB		(20 - PAGE_SHIFT)
-#define ROUND_MB_GB		((1 << 10) - 1)
-#define SHIFT_MB_GB		10
-#define POOL_PAGES_PER_GB	16
-
-void __init cpa_init(void)
-{
-	struct sysinfo si;
-	unsigned long gb;
-
-	si_meminfo(&si);
-	/*
-	 * Calculate the number of pool pages:
-	 *
-	 * Convert totalram (nr of pages) to MiB and round to the next
-	 * GiB. Shift MiB to Gib and multiply the result by
-	 * POOL_PAGES_PER_GB:
-	 */
-	if (debug_pagealloc) {
-		gb = ((si.totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB;
-		pool_size = POOL_PAGES_PER_GB * gb;
-	} else {
-		pool_size = 1;
-	}
-	pool_low = pool_size;
-
-	cpa_fill_pool(NULL);
-	printk(KERN_DEBUG
-	       "CPA: page pool initialized %lu of %lu pages preallocated\n",
-	       pool_pages, pool_size);
-}
-
 static int split_large_page(pte_t *kpte, unsigned long address)
 {
 	unsigned long flags, pfn, pfninc = 1;
 	unsigned int i, level;
 	pte_t *pbase, *tmp;
 	pgprot_t ref_prot;
-	struct page *base;
+	struct page *base = alloc_pages(GFP_KERNEL, 0);
+	if (!base)
+		return -ENOMEM;
 
-	/*
-	 * Get a page from the pool. The pool list is protected by the
-	 * pgd_lock, which we have to take anyway for the split
-	 * operation:
-	 */
 	spin_lock_irqsave(&pgd_lock, flags);
-	if (list_empty(&page_pool)) {
-		spin_unlock_irqrestore(&pgd_lock, flags);
-		base = NULL;
-		cpa_fill_pool(&base);
-		if (!base)
-			return -ENOMEM;
-		spin_lock_irqsave(&pgd_lock, flags);
-	} else {
-		base = list_first_entry(&page_pool, struct page, lru);
-		list_del(&base->lru);
-		pool_pages--;
-
-		if (pool_pages < pool_low)
-			pool_low = pool_pages;
-	}
-
 	/*
 	 * Check for races, another CPU might have split this page
 	 * up for us already:
@@ -611,11 +514,8 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 	 * If we dropped out via the lookup_address check under
 	 * pgd_lock then stick the page back into the pool:
 	 */
-	if (base) {
-		list_add(&base->lru, &page_pool);
-		pool_pages++;
-	} else
-		pool_used++;
+	if (base)
+		__free_page(base);
 	spin_unlock_irqrestore(&pgd_lock, flags);
 
 	return 0;
@@ -899,8 +799,6 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 		cpa_flush_all(cache);
 
 out:
-	cpa_fill_pool(NULL);
-
 	return ret;
 }
@@ -1178,53 +1076,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 	 * but that can deadlock->flush only current cpu:
 	 */
 	__flush_tlb_all();
-
-	/*
-	 * Try to refill the page pool here. We can do this only after
-	 * the tlb flush.
-	 */
-	cpa_fill_pool(NULL);
 }
 
-#ifdef CONFIG_DEBUG_FS
-static int dpa_show(struct seq_file *m, void *v)
-{
-	seq_puts(m, "DEBUG_PAGEALLOC\n");
-	seq_printf(m, "pool_size : %lu\n", pool_size);
-	seq_printf(m, "pool_pages : %lu\n", pool_pages);
-	seq_printf(m, "pool_low : %lu\n", pool_low);
-	seq_printf(m, "pool_used : %lu\n", pool_used);
-	seq_printf(m, "pool_failed : %lu\n", pool_failed);
-
-	return 0;
-}
-
-static int dpa_open(struct inode *inode, struct file *filp)
-{
-	return single_open(filp, dpa_show, NULL);
-}
-
-static const struct file_operations dpa_fops = {
-	.open = dpa_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-static int __init debug_pagealloc_proc_init(void)
-{
-	struct dentry *de;
-
-	de = debugfs_create_file("debug_pagealloc", 0600, NULL, NULL,
-				 &dpa_fops);
-	if (!de)
-		return -ENOMEM;
-
-	return 0;
-}
-__initcall(debug_pagealloc_proc_init);
-#endif
-
 #ifdef CONFIG_HIBERNATION
 
 bool kernel_page_present(struct page *page)


@@ -99,8 +99,6 @@ int set_pages_rw(struct page *page, int numpages);
 
 void clflush_cache_range(void *addr, unsigned int size);
 
-void cpa_init(void);
-
 #ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void);
 extern const int rodata_test_data;