Allocate and free vmalloc areas
Allocate/release a chunk of vmalloc address space: alloc_vm_area reserves a chunk of address space, and makes sure all the pagetables are constructed for that address range - but no pages. free_vm_area releases the address space range. Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com> Signed-off-by: Ian Pratt <ian.pratt@xensource.com> Signed-off-by: Christian Limpach <Christian.Limpach@cl.cam.ac.uk> Signed-off-by: Chris Wright <chrisw@sous-sol.org> Cc: "Jan Beulich" <JBeulich@novell.com> Cc: "Andi Kleen" <ak@muc.de>
This commit is contained in:
parent
bdef40a6af
commit
5f4352fbff
2 changed files with 57 additions and 0 deletions
|
@ -70,6 +70,10 @@ extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
|
|||
struct page ***pages);
|
||||
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
|
||||
|
||||
/* Allocate/destroy a 'vmalloc' VM area. */
|
||||
extern struct vm_struct *alloc_vm_area(size_t size);
|
||||
extern void free_vm_area(struct vm_struct *area);
|
||||
|
||||
/*
|
||||
* Internals. Don't use.
|
||||
*/
|
||||
|
|
53
mm/vmalloc.c
53
mm/vmalloc.c
|
@ -767,3 +767,56 @@ EXPORT_SYMBOL(remap_vmalloc_range);
|
|||
/*
 * Weak default implementation of vmalloc_sync_all(): a deliberate no-op.
 * Architectures where the kernel portion of the address space is not
 * shared between processes override this (the weak attribute allows a
 * strong definition elsewhere to take precedence) to propagate newly
 * built kernel pagetables into every process's pagetables.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}
|
||||
|
||||
|
||||
/*
 * Per-pte callback for apply_to_page_range() in alloc_vm_area().
 *
 * It intentionally does nothing: merely walking to each pte forces
 * apply_to_page_range() to allocate all intermediate pagetable levels,
 * which is the only side effect alloc_vm_area() needs.  Returning 0
 * signals success so the walk continues over the whole range.
 */
static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
{
	/* apply_to_page_range() does all the hard work. */
	return 0;
}
|
||||
|
||||
/**
 * alloc_vm_area - allocate a range of kernel address space
 * @size: size of the area
 *
 * Returns: NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space, and
 * allocates pagetables to map that range.  No actual mappings
 * are created.  If the kernel address space is not shared
 * between processes, it syncs the pagetable across all
 * processes.
 */
struct vm_struct *alloc_vm_area(size_t size)
{
	struct vm_struct *area;

	/* Reserve the virtual address range; no pages are mapped. */
	area = get_vm_area(size, VM_IOREMAP);
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 * The callback f() is a no-op; the walk itself allocates the
	 * pagetable levels.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				area->size, f, NULL)) {
		/* Pagetable construction failed: release the reservation. */
		free_vm_area(area);
		return NULL;
	}

	/* Make sure the pagetables are constructed in process kernel
	   mappings */
	vmalloc_sync_all();

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);
|
||||
|
||||
void free_vm_area(struct vm_struct *area)
|
||||
{
|
||||
struct vm_struct *ret;
|
||||
ret = remove_vm_area(area->addr);
|
||||
BUG_ON(ret != area);
|
||||
kfree(area);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(free_vm_area);
|
||||
|
|
Loading…
Reference in a new issue