slub: kmalloc page allocator pass-through cleanup

This adds a proper function for kmalloc page allocator pass-through. While
its main benefit is simplifying code that does slab tracing, I think it's a
worthwhile cleanup in itself. A short userspace sketch of the pass-through
threshold follows the change summary below.

Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Author:    Pekka Enberg <penberg@cs.helsinki.fi>
Date:      2008-02-11 22:47:46 +02:00
Committer: Christoph Lameter
Parent:    e51bfd0ad1
Commit:    eada35efcb

2 changed files with 12 additions and 10 deletions
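
A minimal userspace sketch (not part of the patch) of the pass-through
decision that kmalloc_large() now encapsulates: any request larger than
half a page bypasses the slab caches entirely and is rounded up to a whole
number of pages. The PAGE_SHIFT value and the simplified get_order() below
are illustrative assumptions; in the kernel, __get_free_pages() performs
the actual allocation.

#include <stdio.h>

#define PAGE_SHIFT 12                   /* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Mirrors the kernel's get_order(): smallest order such that
   (PAGE_SIZE << order) >= size. */
static int get_order(unsigned long size)
{
        int order = 0;

        size = (size - 1) >> PAGE_SHIFT;
        while (size) {
                order++;
                size >>= 1;
        }
        return order;
}

int main(void)
{
        unsigned long sizes[] = { 128, 2048, 2049, 4096, 8192 };
        unsigned int i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                if (sizes[i] > PAGE_SIZE / 2)   /* the pass-through test */
                        printf("%5lu bytes -> page allocator, order %d\n",
                               sizes[i], get_order(sizes[i]));
                else
                        printf("%5lu bytes -> slab cache\n", sizes[i]);
        }
        return 0;
}

Compiled with e.g. "cc -Wall sketch.c", this prints which allocator each
size would reach; the sizes above PAGE_SIZE / 2 (2049, 4096 and 8192) take
the page allocator path.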

include/linux/slub_def.h

@@ -188,12 +188,16 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
+static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+{
+	return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size));
+}
+
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
 		if (size > PAGE_SIZE / 2)
-			return (void *)__get_free_pages(flags | __GFP_COMP,
-							get_order(size));
+			return kmalloc_large(size, flags);
 
 		if (!(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);

mm/slub.c

@@ -2671,8 +2671,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	struct kmem_cache *s;
 
 	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(flags | __GFP_COMP,
-						get_order(size));
+		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
@@ -2689,8 +2688,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	struct kmem_cache *s;
 
 	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(flags | __GFP_COMP,
-						get_order(size));
+		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
@@ -3219,8 +3217,8 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 	struct kmem_cache *s;
 
 	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(gfpflags | __GFP_COMP,
-						get_order(size));
+		return kmalloc_large(size, gfpflags);
+
 	s = get_slab(size, gfpflags);
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
@@ -3235,8 +3233,8 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	struct kmem_cache *s;
 
 	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(gfpflags | __GFP_COMP,
-						get_order(size));
+		return kmalloc_large(size, gfpflags);
+
 	s = get_slab(size, gfpflags);
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
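
The changelog's point about slab tracing can be made concrete. Below is a
hypothetical variant of kmalloc_large() (not part of this commit) showing
that, with the pass-through factored into one helper, a later tracing
patch only has to instrument this single function rather than the five
call sites changed above; trace_kmalloc_large() is an invented name used
purely for illustration.

/* Hypothetical only: trace_kmalloc_large() is an invented hook, not an
 * API added by this commit. */
static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	void *ret = (void *)__get_free_pages(flags | __GFP_COMP,
					     get_order(size));

	trace_kmalloc_large(ret, size, flags);	/* one hook, all callers */
	return ret;
}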