[PATCH] vmalloc_node

This patch adds

vmalloc_node(size, node)	-> allocate memory on the specified NUMA node

and

get_vm_area_node(size, flags, node)

together with the internal helper functions they depend on.
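
For illustration, a minimal usage sketch of the new interface (not part of
this patch; the alloc_scratch()/free_scratch() helpers and their parameters
are hypothetical):

	#include <linux/string.h>
	#include <linux/vmalloc.h>

	/*
	 * Hypothetical helper: allocate a zeroed scratch buffer whose
	 * backing pages are placed on the given NUMA node.  Passing
	 * node == -1 keeps the old behaviour (no node preference);
	 * __vmalloc() itself now forwards to __vmalloc_node(..., -1).
	 */
	static void *alloc_scratch(unsigned long size, int node)
	{
		void *buf = vmalloc_node(size, node);

		if (!buf)
			return NULL;	/* caller must handle failure */
		memset(buf, 0, size);
		return buf;
	}

	/* Freeing is unchanged: vfree() handles node-local allocations too. */
	static void free_scratch(void *buf)
	{
		vfree(buf);
	}

Passing -1 as the node is how the existing vmalloc()/__vmalloc() entry
points remain source- and behaviour-compatible.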

Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Author:     Christoph Lameter
AuthorDate: 2005-10-29 18:15:41 -07:00
Committer:  Linus Torvalds
Commit:     930fc45a49 (parent: be15cd72d2)

 2 changed files, 64 insertions(+), 17 deletions(-)

--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h

@@ -32,10 +32,14 @@ struct vm_struct {
  *	Highlevel APIs for driver use
  */
 extern void *vmalloc(unsigned long size);
+extern void *vmalloc_node(unsigned long size, int node);
 extern void *vmalloc_exec(unsigned long size);
 extern void *vmalloc_32(unsigned long size);
 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
-extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot);
+extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask,
+				pgprot_t prot);
+extern void *__vmalloc_node(unsigned long size, gfp_t gfp_mask,
+				pgprot_t prot, int node);
 extern void vfree(void *addr);
 
 extern void *vmap(struct page **pages, unsigned int count,
@@ -48,6 +52,8 @@ extern void vunmap(void *addr);
 extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
 extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 					unsigned long start, unsigned long end);
+extern struct vm_struct *get_vm_area_node(unsigned long size,
+					unsigned long flags, int node);
 extern struct vm_struct *remove_vm_area(void *addr);
 extern struct vm_struct *__remove_vm_area(void *addr);
 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,

--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c

@@ -5,6 +5,7 @@
  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
  *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
+ *  Numa awareness, Christoph Lameter, SGI, June 2005
  */
 
 #include <linux/mm.h>
@@ -158,8 +159,8 @@ int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
 	return err;
 }
 
-struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
-				unsigned long start, unsigned long end)
+struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
+				unsigned long start, unsigned long end, int node)
 {
 	struct vm_struct **p, *tmp, *area;
 	unsigned long align = 1;
@@ -178,7 +179,7 @@ struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 	addr = ALIGN(start, align);
 	size = PAGE_ALIGN(size);
 
-	area = kmalloc(sizeof(*area), GFP_KERNEL);
+	area = kmalloc_node(sizeof(*area), GFP_KERNEL, node);
 	if (unlikely(!area))
 		return NULL;
 
@@ -231,6 +232,12 @@ struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 	return NULL;
 }
 
+struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
+				unsigned long start, unsigned long end)
+{
+	return __get_vm_area_node(size, flags, start, end, -1);
+}
+
 /**
  *	get_vm_area  -  reserve a contingous kernel virtual area
  *
@@ -246,6 +253,11 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
 }
 
+struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, int node)
+{
+	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node);
+}
+
 /* Caller must hold vmlist_lock */
 struct vm_struct *__remove_vm_area(void *addr)
 {
@@ -342,7 +354,6 @@ void vfree(void *addr)
 	BUG_ON(in_interrupt());
 	__vunmap(addr, 1);
 }
-
 EXPORT_SYMBOL(vfree);
 
 /**
@@ -360,7 +371,6 @@ void vunmap(void *addr)
 	BUG_ON(in_interrupt());
 	__vunmap(addr, 0);
 }
-
 EXPORT_SYMBOL(vunmap);
 
 /**
@@ -392,10 +402,10 @@ void *vmap(struct page **pages, unsigned int count,
 	return area->addr;
 }
-
 EXPORT_SYMBOL(vmap);
 
-void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
+void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
+				pgprot_t prot, int node)
 {
 	struct page **pages;
 	unsigned int nr_pages, array_size, i;
 
@@ -406,9 +416,9 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
 	area->nr_pages = nr_pages;
 	/* Please note that the recursion is strictly bounded. */
 	if (array_size > PAGE_SIZE)
-		pages = __vmalloc(array_size, gfp_mask, PAGE_KERNEL);
+		pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
 	else
-		pages = kmalloc(array_size, (gfp_mask & ~__GFP_HIGHMEM));
+		pages = kmalloc_node(array_size, (gfp_mask & ~__GFP_HIGHMEM), node);
 	area->pages = pages;
 	if (!area->pages) {
 		remove_vm_area(area->addr);
@@ -418,7 +428,10 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
 	memset(area->pages, 0, array_size);
 
 	for (i = 0; i < area->nr_pages; i++) {
-		area->pages[i] = alloc_page(gfp_mask);
+		if (node < 0)
+			area->pages[i] = alloc_page(gfp_mask);
+		else
+			area->pages[i] = alloc_pages_node(node, gfp_mask, 0);
 		if (unlikely(!area->pages[i])) {
 			/* Successfully allocated i pages, free them in __vunmap() */
 			area->nr_pages = i;
@@ -435,18 +448,25 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
 	return NULL;
 }
 
+void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
+{
+	return __vmalloc_area_node(area, gfp_mask, prot, -1);
+}
+
 /**
- *	__vmalloc  -  allocate virtually contiguous memory
+ *	__vmalloc_node  -  allocate virtually contiguous memory
  *
 *	@size:		allocation size
  *	@gfp_mask:	flags for the page level allocator
  *	@prot:		protection mask for the allocated pages
+ *	@node:		node to use for allocation or -1
  *
 *	Allocate enough pages to cover @size from the page level
  *	allocator with @gfp_mask flags.  Map them into contiguous
  *	kernel virtual space, using a pagetable protection of @prot.
  */
-void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
+void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
+			int node)
 {
 	struct vm_struct *area;
 
@@ -454,13 +474,18 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 	if (!size || (size >> PAGE_SHIFT) > num_physpages)
 		return NULL;
 
-	area = get_vm_area(size, VM_ALLOC);
+	area = get_vm_area_node(size, VM_ALLOC, node);
 	if (!area)
 		return NULL;
 
-	return __vmalloc_area(area, gfp_mask, prot);
+	return __vmalloc_area_node(area, gfp_mask, prot, node);
+}
+EXPORT_SYMBOL(__vmalloc_node);
+
+void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
+{
+	return __vmalloc_node(size, gfp_mask, prot, -1);
 }
-
 EXPORT_SYMBOL(__vmalloc);
 
 /**
@@ -478,9 +503,26 @@ void *vmalloc(unsigned long size)
 {
 	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
 }
-
 EXPORT_SYMBOL(vmalloc);
 
+/**
+ *	vmalloc_node  -  allocate memory on a specific node
+ *
+ *	@size:		allocation size
+ *	@node:		numa node
+ *
+ *	Allocate enough pages to cover @size from the page level
+ *	allocator and map them into contiguous kernel virtual space.
+ *
+ *	For tight control over page level allocator and protection flags
+ *	use __vmalloc() instead.
+ */
+void *vmalloc_node(unsigned long size, int node)
+{
+	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
+}
+EXPORT_SYMBOL(vmalloc_node);
+
 #ifndef PAGE_KERNEL_EXEC
 # define PAGE_KERNEL_EXEC PAGE_KERNEL
 #endif
@@ -515,7 +557,6 @@ void *vmalloc_32(unsigned long size)
 {
 	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
 }
-
 EXPORT_SYMBOL(vmalloc_32);
 
 long vread(char *buf, char *addr, unsigned long count)