kmemtrace: SLOB hooks.
This adds hooks for the SLOB allocator, to allow tracing with kmemtrace.

We also convert some inline functions to __always_inline to make sure
_RET_IP_, which expands to __builtin_return_address(0), always works as
expected.

Acked-by: Matt Mackall <mpm@selenic.com>
Signed-off-by: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
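Why __always_inline matters here: the out-of-line allocators (__kmalloc_node(), kmem_cache_alloc_node()) record _RET_IP_ as the allocation call site. If the small wrappers such as kmalloc() or kmem_cache_alloc() were emitted as separate functions, __builtin_return_address(0) inside the traced allocator would point into the wrapper rather than into the real caller. Below is a minimal user-space sketch of the effect, not kernel code; the names traced_alloc_node() and simple_alloc() are hypothetical stand-ins for __kmalloc_node() and kmalloc().

#include <stdio.h>
#include <stdlib.h>

/* Same expansion the kernel uses for _RET_IP_. */
#define _RET_IP_ ((unsigned long)__builtin_return_address(0))

static void trace_alloc(unsigned long call_site, size_t size)
{
        printf("allocated %zu bytes, call site %#lx\n", size, call_site);
}

/* Stands in for __kmalloc_node(): the traced, out-of-line allocator. */
static __attribute__((noinline)) void *traced_alloc_node(size_t size)
{
        void *p = malloc(size);

        /* Records the return address of whoever called traced_alloc_node(). */
        trace_alloc(_RET_IP_, size);
        return p;
}

/*
 * Stands in for kmalloc(): forced inline, so the wrapper vanishes and
 * traced_alloc_node() is called directly from the user's function, making
 * _RET_IP_ point at the real allocation site.  A plain, non-inlined wrapper
 * would make every allocation appear to come from the wrapper itself.
 */
static inline __attribute__((always_inline)) void *simple_alloc(size_t size)
{
        return traced_alloc_node(size);
}

int main(void)
{
        free(simple_alloc(32));
        return 0;
}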
commit 3eae2cb24a
parent 36555751c6
2 changed files with 37 additions and 11 deletions
include/linux/slob_def.h
@@ -3,14 +3,15 @@
 
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
-static inline void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
+					       gfp_t flags)
 {
 	return kmem_cache_alloc_node(cachep, flags, -1);
 }
 
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
 
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	return __kmalloc_node(size, flags, node);
 }
@@ -23,12 +24,12 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
  * kmalloc is the normal method of allocating memory
  * in the kernel.
  */
-static inline void *kmalloc(size_t size, gfp_t flags)
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
 	return __kmalloc_node(size, flags, -1);
 }
 
-static inline void *__kmalloc(size_t size, gfp_t flags)
+static __always_inline void *__kmalloc(size_t size, gfp_t flags)
 {
 	return kmalloc(size, flags);
 }
mm/slob.c
@@ -65,6 +65,7 @@
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <linux/list.h>
+#include <linux/kmemtrace.h>
 #include <asm/atomic.h>
 
 /*
@@ -463,27 +464,38 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
 	unsigned int *m;
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	void *ret;
 
 	if (size < PAGE_SIZE - align) {
 		if (!size)
 			return ZERO_SIZE_PTR;
 
 		m = slob_alloc(size + align, gfp, align, node);
+
 		if (!m)
 			return NULL;
 		*m = size;
-		return (void *)m + align;
+		ret = (void *)m + align;
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, size + align, gfp, node);
 	} else {
-		void *ret;
+		unsigned int order = get_order(size);
 
-		ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
+		ret = slob_new_page(gfp | __GFP_COMP, order, node);
 		if (ret) {
 			struct page *page;
 			page = virt_to_page(ret);
 			page->private = size;
 		}
-		return ret;
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, PAGE_SIZE << order, gfp, node);
 	}
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 
@@ -501,6 +513,8 @@ void kfree(const void *block)
 		slob_free(m, *m + align);
 	} else
 		put_page(&sp->page);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, block);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -569,10 +583,19 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
 	void *b;
 
-	if (c->size < PAGE_SIZE)
+	if (c->size < PAGE_SIZE) {
 		b = slob_alloc(c->size, flags, c->align, node);
-	else
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+					  _RET_IP_, b, c->size,
+					  SLOB_UNITS(c->size) * SLOB_UNIT,
+					  flags, node);
+	} else {
 		b = slob_new_page(flags, get_order(c->size), node);
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+					  _RET_IP_, b, c->size,
+					  PAGE_SIZE << get_order(c->size),
+					  flags, node);
+	}
 
 	if (c->ctor)
 		c->ctor(b);
@@ -608,6 +631,8 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
 	} else {
 		__kmem_cache_free(b, c->size);
 	}
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, b);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
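For reference, the shapes of the two hooks as implied by the call sites in this patch are sketched below. The authoritative declarations live in the kmemtrace core headers (linux/kmemtrace.h); treat the parameter names and comments here as inferred from the calls above, not as a copy of that header.

/* Sketch of the hook prototypes as used by this patch. */
void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id, /* KMEMTRACE_TYPE_KMALLOC or KMEMTRACE_TYPE_CACHE */
			       unsigned long call_site,         /* _RET_IP_ in the allocator */
			       const void *ptr,                 /* object handed back to the caller */
			       size_t bytes_req,                /* size the caller asked for */
			       size_t bytes_alloc,              /* size actually allocated */
			       gfp_t gfp_flags,
			       int node);

void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
			 unsigned long call_site,
			 const void *ptr);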