mm/sl[aou]b: Move kmem_cache allocations into common code

Shift the allocations to common code. That way the allocation and freeing
of the kmem_cache structures is handled by common code.

Reviewed-by: Glauber Costa <glommer@parallels.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
commit 278b1bb131
parent 96d17b7be0
5 changed files with 52 additions and 70 deletions
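For illustration only (not part of the commit): a minimal userspace C sketch of the calling convention this patch moves the allocators to. The names common_kmem_cache_create and allocator_init_cache are hypothetical stand-ins for kmem_cache_create() in mm/slab_common.c and the per-allocator __kmem_cache_create(); the point is that common code now allocates and frees the struct kmem_cache, while the allocator-specific hook only initializes it and reports failure with an error code instead of returning a pointer.

/*
 * Sketch of the pattern, with hypothetical names; not kernel code.
 * Common code owns allocation/freeing of the cache descriptor, the
 * per-allocator init hook only fills it in and returns 0 or -errno.
 */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct kmem_cache {
	const char *name;
	size_t object_size;
	int refcount;
};

/* Stand-in for the per-allocator __kmem_cache_create(): init only, no allocation. */
static int allocator_init_cache(struct kmem_cache *s, const char *name, size_t size)
{
	if (size == 0)
		return -EINVAL;	/* failure is an error code, not a NULL pointer */
	s->name = name;
	s->object_size = size;
	s->refcount = 1;
	return 0;
}

/* Stand-in for the common kmem_cache_create(): allocates, initializes, frees on error. */
static struct kmem_cache *common_kmem_cache_create(const char *name, size_t size)
{
	struct kmem_cache *s = calloc(1, sizeof(*s));	/* plays the role of kmem_cache_zalloc() */

	if (!s)
		return NULL;
	if (allocator_init_cache(s, name, size)) {
		free(s);	/* common code also handles the error path */
		return NULL;
	}
	return s;
}

int main(void)
{
	struct kmem_cache *s = common_kmem_cache_create("demo-cache", 64);

	if (s) {
		printf("created %s (object size %zu)\n", s->name, s->object_size);
		free(s);
	}
	return 0;
}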
mm/slab.c (34 changed lines)
@@ -1676,7 +1676,8 @@ void __init kmem_cache_init(void)
 	 * bug.
 	 */

-	sizes[INDEX_AC].cs_cachep = __kmem_cache_create(names[INDEX_AC].name,
+	sizes[INDEX_AC].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+	__kmem_cache_create(sizes[INDEX_AC].cs_cachep, names[INDEX_AC].name,
 					sizes[INDEX_AC].cs_size,
 					ARCH_KMALLOC_MINALIGN,
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
@@ -1684,8 +1685,8 @@ void __init kmem_cache_init(void)

 	list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches);
 	if (INDEX_AC != INDEX_L3) {
-		sizes[INDEX_L3].cs_cachep =
-			__kmem_cache_create(names[INDEX_L3].name,
+		sizes[INDEX_L3].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+		__kmem_cache_create(sizes[INDEX_L3].cs_cachep, names[INDEX_L3].name,
 				sizes[INDEX_L3].cs_size,
 				ARCH_KMALLOC_MINALIGN,
 				ARCH_KMALLOC_FLAGS|SLAB_PANIC,
@@ -1704,7 +1705,8 @@ void __init kmem_cache_init(void)
 		 * allow tighter packing of the smaller caches.
 		 */
 		if (!sizes->cs_cachep) {
-			sizes->cs_cachep = __kmem_cache_create(names->name,
+			sizes->cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+			__kmem_cache_create(sizes->cs_cachep, names->name,
 					sizes->cs_size,
 					ARCH_KMALLOC_MINALIGN,
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
@@ -1712,7 +1714,8 @@ void __init kmem_cache_init(void)
 			list_add(&sizes->cs_cachep->list, &slab_caches);
 		}
 #ifdef CONFIG_ZONE_DMA
-		sizes->cs_dmacachep = __kmem_cache_create(
+		sizes->cs_dmacachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+		__kmem_cache_create(sizes->cs_dmacachep,
 					names->name_dma,
 					sizes->cs_size,
 					ARCH_KMALLOC_MINALIGN,
@@ -2356,13 +2359,13 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
  * cacheline. This can be beneficial if you're counting cycles as closely
  * as davem.
  */
-struct kmem_cache *
-__kmem_cache_create (const char *name, size_t size, size_t align,
+int
+__kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, size_t align,
 	unsigned long flags, void (*ctor)(void *))
 {
 	size_t left_over, slab_size, ralign;
-	struct kmem_cache *cachep = NULL;
 	gfp_t gfp;
+	int err;

 #if DEBUG
 #if FORCED_DEBUG
@@ -2450,11 +2453,6 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	else
 		gfp = GFP_NOWAIT;

-	/* Get cache's description obj. */
-	cachep = kmem_cache_zalloc(kmem_cache, gfp);
-	if (!cachep)
-		return NULL;
-
 	cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
 	cachep->object_size = size;
 	cachep->align = align;
@@ -2509,8 +2507,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	if (!cachep->num) {
 		printk(KERN_ERR
 		       "kmem_cache_create: couldn't create cache %s.\n", name);
-		kmem_cache_free(kmem_cache, cachep);
-		return NULL;
+		return -E2BIG;
 	}
 	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
 			  + sizeof(struct slab), align);
@@ -2567,9 +2564,10 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	cachep->name = name;
 	cachep->refcount = 1;

-	if (setup_cpu_cache(cachep, gfp)) {
+	err = setup_cpu_cache(cachep, gfp);
+	if (err) {
 		__kmem_cache_shutdown(cachep);
-		return NULL;
+		return err;
 	}

 	if (flags & SLAB_DEBUG_OBJECTS) {
@@ -2582,7 +2580,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 		slab_set_debugobj_lock_classes(cachep);
 	}

-	return cachep;
+	return 0;
 }

 #if DEBUG
mm/slab.h

@@ -33,8 +33,8 @@ extern struct list_head slab_caches;
 extern struct kmem_cache *kmem_cache;

 /* Functions provided by the slab allocators */
-struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
-	size_t align, unsigned long flags, void (*ctor)(void *));
+extern int __kmem_cache_create(struct kmem_cache *, const char *name,
+	size_t size, size_t align, unsigned long flags, void (*ctor)(void *));

 #ifdef CONFIG_SLUB
 struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
mm/slab_common.c

@@ -119,19 +119,21 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 	if (s)
 		goto out_locked;

-	s = __kmem_cache_create(n, size, align, flags, ctor);
-
+	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
 	if (s) {
-		/*
-		 * Check if the slab has actually been created and if it was a
-		 * real instatiation. Aliases do not belong on the list
-		 */
-		if (s->refcount == 1)
+		err = __kmem_cache_create(s, n, size, align, flags, ctor);
+		if (!err)
+
 			list_add(&s->list, &slab_caches);
+
+		else {
+			kfree(n);
+			kmem_cache_free(kmem_cache, s);
+		}

 	} else {
 		kfree(n);
-		err = -ENOSYS; /* Until __kmem_cache_create returns code */
+		err = -ENOMEM;
 	}

 out_locked:
mm/slob.c (42 changed lines)
@@ -508,34 +508,26 @@ size_t ksize(const void *block)
 }
 EXPORT_SYMBOL(ksize);

-struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
+int __kmem_cache_create(struct kmem_cache *c, const char *name, size_t size,
 	size_t align, unsigned long flags, void (*ctor)(void *))
 {
-	struct kmem_cache *c;
-
-	c = slob_alloc(sizeof(struct kmem_cache),
-		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
-
-	if (c) {
-		c->name = name;
-		c->size = size;
-		if (flags & SLAB_DESTROY_BY_RCU) {
-			/* leave room for rcu footer at the end of object */
-			c->size += sizeof(struct slob_rcu);
-		}
-		c->flags = flags;
-		c->ctor = ctor;
-		/* ignore alignment unless it's forced */
-		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
-		if (c->align < ARCH_SLAB_MINALIGN)
-			c->align = ARCH_SLAB_MINALIGN;
-		if (c->align < align)
-			c->align = align;
-
-		kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
-		c->refcount = 1;
+	c->name = name;
+	c->size = size;
+	if (flags & SLAB_DESTROY_BY_RCU) {
+		/* leave room for rcu footer at the end of object */
+		c->size += sizeof(struct slob_rcu);
 	}
-	return c;
+	c->flags = flags;
+	c->ctor = ctor;
+	/* ignore alignment unless it's forced */
+	c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
+	if (c->align < ARCH_SLAB_MINALIGN)
+		c->align = ARCH_SLAB_MINALIGN;
+	if (c->align < align)
+		c->align = align;
+
+	c->refcount = 1;
+	return 0;
 }

 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
mm/slub.c (24 changed lines)
@@ -3034,7 +3034,6 @@ static int kmem_cache_open(struct kmem_cache *s,
 		size_t align, unsigned long flags,
 		void (*ctor)(void *))
 {
-	memset(s, 0, kmem_size);
 	s->name = name;
 	s->ctor = ctor;
 	s->object_size = size;
@@ -3109,7 +3108,7 @@ static int kmem_cache_open(struct kmem_cache *s,
 		goto error;

 	if (alloc_kmem_cache_cpus(s))
-		return 1;
+		return 0;

 	free_kmem_cache_nodes(s);
 error:
@@ -3118,7 +3117,7 @@ static int kmem_cache_open(struct kmem_cache *s,
 			"order=%u offset=%u flags=%lx\n",
 			s->name, (unsigned long)size, s->size, oo_order(s->oo),
 			s->offset, flags);
-	return 0;
+	return -EINVAL;
 }

 /*
@@ -3260,13 +3259,13 @@ static struct kmem_cache *__init create_kmalloc_cache(const char *name,
 {
 	struct kmem_cache *s;

-	s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
+	s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

 	/*
 	 * This function is called with IRQs disabled during early-boot on
 	 * single CPU so there's no need to take slab_mutex here.
 	 */
-	if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
+	if (kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
 								flags, NULL))
 		goto panic;

@@ -3944,20 +3943,11 @@ struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
 	return s;
 }

-struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
+int __kmem_cache_create(struct kmem_cache *s,
+		const char *name, size_t size,
 		size_t align, unsigned long flags, void (*ctor)(void *))
 {
-	struct kmem_cache *s;
-
-	s = kmem_cache_alloc(kmem_cache, GFP_KERNEL);
-	if (s) {
-		if (kmem_cache_open(s, name,
-				size, align, flags, ctor)) {
-			return s;
-		}
-		kmem_cache_free(kmem_cache, s);
-	}
-	return NULL;
+	return kmem_cache_open(s, name, size, align, flags, ctor);
 }

 #ifdef CONFIG_SMP