slab: Common definition for kmem_cache_node

Put the definitions for the kmem_cache_node structures together so that
we have one structure. That will allow us to create more common fields in
the future which could yield more opportunities to share code.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent ce8eb6c424
commit ca34956b80

3 changed files with 32 additions and 28 deletions
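The payoff of the unified definition is easiest to see in a standalone
sketch. What follows is a minimal userspace illustration, not code from
the patch: the kernel types are replaced by stand-ins, CONFIG_SLUB is
hard-coded for the demo, and node_touch_common() is a hypothetical helper
invented for this example. It shows how code that touches only the common
field (list_lock here) compiles unchanged whichever allocator is selected.

	/*
	 * Illustrative sketch only -- not part of the patch. Kernel types
	 * are replaced by userspace stand-ins and node_touch_common() is a
	 * hypothetical helper invented for this example.
	 */
	#include <stdio.h>

	typedef int spinlock_t;			/* stand-in for the kernel type */
	struct list_head { struct list_head *next, *prev; };

	#define CONFIG_SLUB 1			/* pick one allocator for the demo */

	struct kmem_cache_node {
		spinlock_t list_lock;		/* common to SLAB and SLUB */
	#ifdef CONFIG_SLAB
		struct list_head slabs_partial;
		unsigned long free_objects;	/* SLAB-only bookkeeping */
	#endif
	#ifdef CONFIG_SLUB
		unsigned long nr_partial;	/* SLUB-only bookkeeping */
		struct list_head partial;
	#endif
	};

	/*
	 * Allocator-independent code: touches only the common field, so it
	 * compiles and behaves the same under CONFIG_SLAB or CONFIG_SLUB.
	 */
	static void node_touch_common(struct kmem_cache_node *n)
	{
		n->list_lock++;			/* pretend to take the lock */
	}

	int main(void)
	{
		struct kmem_cache_node n = { 0 };

		node_touch_common(&n);
		printf("list_lock stand-in value: %d\n", n.list_lock);
		return 0;
	}

With one shared definition, helpers of this shape can live in mm/slab.h or
common slab code instead of being duplicated per allocator, which is the
kind of sharing the commit message anticipates.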
11  include/linux/slub_def.h

@@ -53,17 +53,6 @@ struct kmem_cache_cpu {
 #endif
 };
 
-struct kmem_cache_node {
-	spinlock_t list_lock;	/* Protect partial list and nr_partial */
-	unsigned long nr_partial;
-	struct list_head partial;
-#ifdef CONFIG_SLUB_DEBUG
-	atomic_long_t nr_slabs;
-	atomic_long_t total_objects;
-	struct list_head full;
-#endif
-};
-
 /*
  * Word size structure that can be atomically updated or read and that
  * contains both the order and the number of objects that a slab of the
17  mm/slab.c

@@ -285,23 +285,6 @@ struct arraycache_init {
 	void *entries[BOOT_CPUCACHE_ENTRIES];
 };
 
-/*
- * The slab lists for all objects.
- */
-struct kmem_cache_node {
-	struct list_head slabs_partial;	/* partial list first, better asm code */
-	struct list_head slabs_full;
-	struct list_head slabs_free;
-	unsigned long free_objects;
-	unsigned int free_limit;
-	unsigned int colour_next;	/* Per-node cache coloring */
-	spinlock_t list_lock;
-	struct array_cache *shared;	/* shared per node */
-	struct array_cache **alien;	/* on other nodes */
-	unsigned long next_reap;	/* updated without locking */
-	int free_touched;	/* updated without locking */
-};
-
 /*
  * Need this for bootstrapping a per node allocator.
  */
32  mm/slab.h

@@ -239,3 +239,35 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 	return s;
 }
 #endif
+
+
+/*
+ * The slab lists for all objects.
+ */
+struct kmem_cache_node {
+	spinlock_t list_lock;
+
+#ifdef CONFIG_SLAB
+	struct list_head slabs_partial;	/* partial list first, better asm code */
+	struct list_head slabs_full;
+	struct list_head slabs_free;
+	unsigned long free_objects;
+	unsigned int free_limit;
+	unsigned int colour_next;	/* Per-node cache coloring */
+	struct array_cache *shared;	/* shared per node */
+	struct array_cache **alien;	/* on other nodes */
+	unsigned long next_reap;	/* updated without locking */
+	int free_touched;	/* updated without locking */
+#endif
+
+#ifdef CONFIG_SLUB
+	unsigned long nr_partial;
+	struct list_head partial;
+#ifdef CONFIG_SLUB_DEBUG
+	atomic_long_t nr_slabs;
+	atomic_long_t total_objects;
+	struct list_head full;
+#endif
+#endif
+
+};