[PATCH] vmalloc: optimization, cleanup, bugfixes

- reorder 'struct vm_struct' to speed up lookups on CPUs with small cache
  lines.  The fields 'next,addr,size' should now be in the same cache line,
  to speed up lookups.

- One minor cleanup in __get_vm_area_node()

- Bugfixes in vmalloc_user() and vmalloc_32_user(): NULL returns from
  __vmalloc() and __find_vm_area() were not tested.

[akpm@osdl.org: remove redundant BUG_ONs]
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
Eric Dumazet 2006-11-10 12:27:48 -08:00 committed by Linus Torvalds
parent 088406bcf6
commit 2b4ac44e7c
2 changed files with 15 additions and 14 deletions

View file

@ -23,13 +23,14 @@ struct vm_area_struct;
#endif #endif
struct vm_struct { struct vm_struct {
/* keep next,addr,size together to speedup lookups */
struct vm_struct *next;
void *addr; void *addr;
unsigned long size; unsigned long size;
unsigned long flags; unsigned long flags;
struct page **pages; struct page **pages;
unsigned int nr_pages; unsigned int nr_pages;
unsigned long phys_addr; unsigned long phys_addr;
struct vm_struct *next;
}; };
/* /*

View file

@ -186,10 +186,8 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long fl
if (unlikely(!area)) if (unlikely(!area))
return NULL; return NULL;
if (unlikely(!size)) { if (unlikely(!size))
kfree (area);
return NULL; return NULL;
}
/* /*
* We always allocate a guard page. * We always allocate a guard page.
@ -532,11 +530,12 @@ void *vmalloc_user(unsigned long size)
void *ret; void *ret;
ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL); ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
write_lock(&vmlist_lock); if (ret) {
area = __find_vm_area(ret); write_lock(&vmlist_lock);
area->flags |= VM_USERMAP; area = __find_vm_area(ret);
write_unlock(&vmlist_lock); area->flags |= VM_USERMAP;
write_unlock(&vmlist_lock);
}
return ret; return ret;
} }
EXPORT_SYMBOL(vmalloc_user); EXPORT_SYMBOL(vmalloc_user);
@ -605,11 +604,12 @@ void *vmalloc_32_user(unsigned long size)
void *ret; void *ret;
ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL); ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
write_lock(&vmlist_lock); if (ret) {
area = __find_vm_area(ret); write_lock(&vmlist_lock);
area->flags |= VM_USERMAP; area = __find_vm_area(ret);
write_unlock(&vmlist_lock); area->flags |= VM_USERMAP;
write_unlock(&vmlist_lock);
}
return ret; return ret;
} }
EXPORT_SYMBOL(vmalloc_32_user); EXPORT_SYMBOL(vmalloc_32_user);