SLUB: Replace __builtin_return_address(0) with _RET_IP_.

This patch replaces __builtin_return_address(0) with _RET_IP_. A
previous patch moved _RET_IP_ and _THIS_IP_ to include/linux/kernel.h,
so they are now widely available. This makes the code shorter and
easier to read.
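
For reference, _RET_IP_ and _THIS_IP_ are defined in
include/linux/kernel.h as follows (quoted for context; not part of this
diff):

#define _RET_IP_        (unsigned long)__builtin_return_address(0)
#define _THIS_IP_       ({ __label__ __here; __here: (unsigned long)&&__here; })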

[penberg@cs.helsinki.fi: remove _RET_IP_ casts to void pointer]
Signed-off-by: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Author: Eduard - Gabriel Munteanu, 2008-08-19 20:43:25 +03:00
Committed by: Pekka Enberg
commit 35995a4d81 (parent ed313489ba)
3 changed files with 32 additions and 32 deletions

--- a/include/linux/slab.h
+++ b/include/linux/slab.h

@@ -253,9 +253,9 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
  * request comes from.
  */
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
-extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
 #define kmalloc_track_caller(size, flags) \
-        __kmalloc_track_caller(size, flags, __builtin_return_address(0))
+        __kmalloc_track_caller(size, flags, _RET_IP_)
 #else
 #define kmalloc_track_caller(size, flags) \
         __kmalloc(size, flags)
@@ -271,10 +271,10 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
  * allocation request comes from.
  */
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
 #define kmalloc_node_track_caller(size, flags, node) \
         __kmalloc_node_track_caller(size, flags, node, \
-                        __builtin_return_address(0))
+                        _RET_IP_)
 #else
 #define kmalloc_node_track_caller(size, flags, node) \
         __kmalloc_node(size, flags, node)
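
As a usage example, kstrdup() in mm/util.c allocates through
kmalloc_track_caller(); a lightly annotated sketch of it (the comment
is ours, not kernel source):

char *kstrdup(const char *s, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strlen(s) + 1;
        /* Expands to __kmalloc_track_caller(len, gfp, _RET_IP_), so the
         * allocation is attributed to kstrdup()'s caller, not to
         * kstrdup() itself. */
        buf = kmalloc_track_caller(len, gfp);
        if (buf)
                memcpy(buf, s, len);
        return buf;
}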

--- a/mm/slab.c
+++ b/mm/slab.c

@@ -3686,9 +3686,9 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 EXPORT_SYMBOL(__kmalloc_node);

 void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
-                int node, void *caller)
+                int node, unsigned long caller)
 {
-        return __do_kmalloc_node(size, flags, node, caller);
+        return __do_kmalloc_node(size, flags, node, (void *)caller);
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
 #else
@@ -3730,9 +3730,9 @@ void *__kmalloc(size_t size, gfp_t flags)
 }
 EXPORT_SYMBOL(__kmalloc);

-void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
+void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
 {
-        return __do_kmalloc(size, flags, caller);
+        return __do_kmalloc(size, flags, (void *)caller);
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
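
The (void *) casts above exist because mm/slab.c's internal helpers
kept their pointer-typed caller parameter; only the exported entry
points changed type. For context, the internal signature at the time
(quoted from mm/slab.c):

static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
                                          void *caller)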

--- a/mm/slub.c
+++ b/mm/slub.c

@@ -178,7 +178,7 @@ static LIST_HEAD(slab_caches);
  * Tracking user of a slab.
  */
 struct track {
-        void *addr;             /* Called from address */
+        unsigned long addr;     /* Called from address */
         int cpu;                /* Was running on cpu */
         int pid;                /* Pid context */
         unsigned long when;     /* When did the operation occur */
@@ -367,7 +367,7 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 }

 static void set_track(struct kmem_cache *s, void *object,
-                        enum track_item alloc, void *addr)
+                        enum track_item alloc, unsigned long addr)
 {
         struct track *p;

@@ -391,8 +391,8 @@ static void init_tracking(struct kmem_cache *s, void *object)
         if (!(s->flags & SLAB_STORE_USER))
                 return;

-        set_track(s, object, TRACK_FREE, NULL);
-        set_track(s, object, TRACK_ALLOC, NULL);
+        set_track(s, object, TRACK_FREE, 0UL);
+        set_track(s, object, TRACK_ALLOC, 0UL);
 }

 static void print_track(const char *s, struct track *t)
@@ -401,7 +401,7 @@ static void print_track(const char *s, struct track *t)
                 return;

         printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
-                s, t->addr, jiffies - t->when, t->cpu, t->pid);
+                s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
 }

 static void print_tracking(struct kmem_cache *s, void *object)
@@ -866,7 +866,7 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 }

 static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
-                                        void *object, void *addr)
+                                        void *object, unsigned long addr)
 {
         if (!check_slab(s, page))
                 goto bad;
@@ -906,7 +906,7 @@ static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
 }

 static int free_debug_processing(struct kmem_cache *s, struct page *page,
-                                        void *object, void *addr)
+                                        void *object, unsigned long addr)
 {
         if (!check_slab(s, page))
                 goto fail;
@@ -1029,10 +1029,10 @@ static inline void setup_object_debug(struct kmem_cache *s,
                         struct page *page, void *object) {}

 static inline int alloc_debug_processing(struct kmem_cache *s,
-        struct page *page, void *object, void *addr) { return 0; }
+        struct page *page, void *object, unsigned long addr) { return 0; }

 static inline int free_debug_processing(struct kmem_cache *s,
-        struct page *page, void *object, void *addr) { return 0; }
+        struct page *page, void *object, unsigned long addr) { return 0; }

 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
                         { return 1; }
@@ -1499,8 +1499,8 @@ static inline int node_match(struct kmem_cache_cpu *c, int node)
  * we need to allocate a new slab. This is the slowest path since it involves
  * a call to the page allocator and the setup of a new slab.
  */
-static void *__slab_alloc(struct kmem_cache *s,
-                gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c)
+static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+                          unsigned long addr, struct kmem_cache_cpu *c)
 {
         void **object;
         struct page *new;
@@ -1584,7 +1584,7 @@ static void *__slab_alloc(struct kmem_cache *s,
  * Otherwise we can simply pick the next object from the lockless free list.
  */
 static __always_inline void *slab_alloc(struct kmem_cache *s,
-                gfp_t gfpflags, int node, void *addr)
+                gfp_t gfpflags, int node, unsigned long addr)
 {
         void **object;
         struct kmem_cache_cpu *c;
@@ -1613,14 +1613,14 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,

 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-        return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
+        return slab_alloc(s, gfpflags, -1, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_alloc);

 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-        return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
+        return slab_alloc(s, gfpflags, node, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
@@ -1634,7 +1634,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
  * handling required then we can return immediately.
  */
 static void __slab_free(struct kmem_cache *s, struct page *page,
-                        void *x, void *addr, unsigned int offset)
+                        void *x, unsigned long addr, unsigned int offset)
 {
         void *prior;
         void **object = (void *)x;
@@ -1704,7 +1704,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
  * with all sorts of special processing.
  */
 static __always_inline void slab_free(struct kmem_cache *s,
-                        struct page *page, void *x, void *addr)
+                        struct page *page, void *x, unsigned long addr)
 {
         void **object = (void *)x;
         struct kmem_cache_cpu *c;
@@ -1731,7 +1731,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)

         page = virt_to_head_page(x);

-        slab_free(s, page, x, __builtin_return_address(0));
+        slab_free(s, page, x, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_free);
@@ -2659,7 +2659,7 @@ void *__kmalloc(size_t size, gfp_t flags)
         if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;

-        return slab_alloc(s, flags, -1, __builtin_return_address(0));
+        return slab_alloc(s, flags, -1, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc);
@@ -2687,7 +2687,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
         if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;

-        return slab_alloc(s, flags, node, __builtin_return_address(0));
+        return slab_alloc(s, flags, node, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2744,7 +2744,7 @@ void kfree(const void *x)
                 put_page(page);
                 return;
         }
-        slab_free(page->slab, page, object, __builtin_return_address(0));
+        slab_free(page->slab, page, object, _RET_IP_);
 }
 EXPORT_SYMBOL(kfree);
@@ -3200,7 +3200,7 @@ static struct notifier_block __cpuinitdata slab_notifier = {

 #endif

-void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
+void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
         struct kmem_cache *s;
@@ -3216,7 +3216,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 }

 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
-                                        int node, void *caller)
+                                        int node, unsigned long caller)
 {
         struct kmem_cache *s;
@@ -3427,7 +3427,7 @@ static void resiliency_test(void) {};

 struct location {
         unsigned long count;
-        void *addr;
+        unsigned long addr;
         long long sum_time;
         long min_time;
         long max_time;
@@ -3475,7 +3475,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 {
         long start, end, pos;
         struct location *l;
-        void *caddr;
+        unsigned long caddr;
         unsigned long age = jiffies - track->when;

         start = -1;
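
The conversion boils down to one pattern: carry the caller address as
an unsigned long and cast back to a pointer only where one is required
(e.g. printk's %pS). A standalone sketch of that pattern, with
hypothetical names (RET_IP, track_caller, traced_api), buildable with
GCC or Clang outside the kernel:

#include <stdio.h>

/* Same idea as _RET_IP_: the caller's return address as an integer. */
#define RET_IP ((unsigned long)__builtin_return_address(0))

static void track_caller(unsigned long addr)
{
        /* The integer form stores and compares without casts; convert
         * back to (void *) only at the printing boundary. */
        printf("called from %p\n", (void *)addr);
}

/* noinline keeps a real call frame so the recorded address really is
 * the caller's return address, even under optimization. */
__attribute__((noinline)) void traced_api(void)
{
        track_caller(RET_IP);
}

int main(void)
{
        traced_api();
        return 0;
}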