[PATCH] slab: Remove SLAB_NO_REAP option
SLAB_NO_REAP is documented as an option that will cause this slab not to be reaped under memory pressure. However, that is not what happens. The only thing that SLAB_NO_REAP controls at the moment is the reclaim of the unused slab elements that were allocated in batch in cache_reap(). cache_reap() runs every few seconds, independently of memory pressure.

Could we remove the whole thing? It is only used by three slabs anyway, and I cannot find a reason for having this option.

There is an additional problem with SLAB_NO_REAP. If it is set, the recovery of objects from alien caches is switched off. Objects not freed on the same node where they were initially allocated will only be reused if a certain amount of objects accumulates from one alien node (not very likely) or if the cache is explicitly shrunk. (Strangely, __cache_shrink() does not check for SLAB_NO_REAP.)

Getting rid of SLAB_NO_REAP fixes the problems with alien cache freeing.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Manfred Spraul <manfred@colorfullife.com>
Cc: Mark Fasheh <mark.fasheh@oracle.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit ac2b898ca6
parent 911851e6ee
4 changed files with 4 additions and 14 deletions
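As background on the alien-cache point in the message above, here is a minimal user-space sketch of the stranding behavior. It is an illustration, not the kernel code: the struct layout, function names, and the ALIEN_LIMIT threshold are assumptions. An object freed on a remote node is parked in a per-node array and only returned to its home node once a full batch accumulates; the periodic drain that would otherwise rescue a partial batch is exactly what SLAB_NO_REAP suppressed.

#include <stdio.h>

#define ALIEN_LIMIT 12			/* illustrative batch threshold */

struct alien_cache {
	int avail;
	void *objs[ALIEN_LIMIT];
};

static void drain_alien(struct alien_cache *ac)
{
	printf("returning %d object(s) to their home node\n", ac->avail);
	ac->avail = 0;
}

/* Model of freeing an object that was allocated on another node. */
static void free_alien(struct alien_cache *ac, void *obj)
{
	if (ac->avail == ALIEN_LIMIT)	/* only a full batch forces a drain */
		drain_alien(ac);
	ac->objs[ac->avail++] = obj;
}

int main(void)
{
	struct alien_cache ac = { 0 };
	char objs[5];
	int i;

	/* Five remote frees never reach ALIEN_LIMIT, so without a periodic
	 * reaper they sit in the alien cache indefinitely. */
	for (i = 0; i < 5; i++)
		free_alien(&ac, &objs[i]);
	printf("%d object(s) stranded in the alien cache\n", ac.avail);
	return 0;
}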
@@ -3639,7 +3639,7 @@ iscsi_tcp_init(void)
 	taskcache = kmem_cache_create("iscsi_taskcache",
 				      sizeof(struct iscsi_data_task), 0,
-				      SLAB_HWCACHE_ALIGN | SLAB_NO_REAP, NULL, NULL);
+				      SLAB_HWCACHE_ALIGN, NULL, NULL);
 	if (!taskcache)
 		return -ENOMEM;
@@ -959,7 +959,7 @@ static int ocfs2_initialize_mem_caches(void)
 	ocfs2_lock_cache = kmem_cache_create("ocfs2_lock",
 					     sizeof(struct ocfs2_journal_lock),
 					     0,
-					     SLAB_NO_REAP|SLAB_HWCACHE_ALIGN,
+					     SLAB_HWCACHE_ALIGN,
 					     NULL, NULL);
 	if (!ocfs2_lock_cache)
 		return -ENOMEM;
@@ -38,7 +38,6 @@ typedef struct kmem_cache kmem_cache_t;
 #define	SLAB_DEBUG_INITIAL	0x00000200UL	/* Call constructor (as verifier) */
 #define	SLAB_RED_ZONE		0x00000400UL	/* Red zone objs in a cache */
 #define	SLAB_POISON		0x00000800UL	/* Poison objects */
-#define	SLAB_NO_REAP		0x00001000UL	/* never reap from the cache */
 #define	SLAB_HWCACHE_ALIGN	0x00002000UL	/* align objs on a h/w cache lines */
 #define SLAB_CACHE_DMA		0x00004000UL	/* use GFP_DMA memory */
 #define SLAB_MUST_HWCACHE_ALIGN	0x00008000UL	/* force alignment */
mm/slab.c (13 lines changed)
@@ -170,12 +170,12 @@
 #if DEBUG
 # define CREATE_MASK	(SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
 			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
-			 SLAB_NO_REAP | SLAB_CACHE_DMA | \
+			 SLAB_CACHE_DMA | \
 			 SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU)
 #else
-# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | SLAB_NO_REAP | \
+# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
 			 SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU)
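One consequence of trimming CREATE_MASK is worth noting: kmem_cache_create() validates caller-supplied flags against this mask, so any remaining user still passing SLAB_NO_REAP would now be caught at cache-creation time. Below is a compilable sketch of that mask test; it is simplified, the flag values are copied from the slab.h hunk above, and the assert stands in for the kernel's BUG-style check.

#include <assert.h>

#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* from slab.h above */
#define SLAB_NO_REAP		0x00001000UL	/* removed by this patch */
#define CREATE_MASK		(SLAB_HWCACHE_ALIGN /* | ...other valid flags */)

/* Stand-in for the flag validation done early in kmem_cache_create(). */
static void check_create_flags(unsigned long flags)
{
	assert((flags & ~CREATE_MASK) == 0);
}

int main(void)
{
	check_create_flags(SLAB_HWCACHE_ALIGN);	/* accepted */
	/* check_create_flags(SLAB_HWCACHE_ALIGN | SLAB_NO_REAP);
	 * would now trip the check, since SLAB_NO_REAP left the mask. */
	return 0;
}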
@@ -662,7 +662,6 @@ static struct kmem_cache cache_cache = {
 	.limit = BOOT_CPUCACHE_ENTRIES,
 	.shared = 1,
 	.buffer_size = sizeof(struct kmem_cache),
-	.flags = SLAB_NO_REAP,
 	.name = "kmem_cache",
 #if DEBUG
 	.obj_size = sizeof(struct kmem_cache),
@@ -1848,9 +1847,6 @@ static void setup_cpu_cache(struct kmem_cache *cachep)
  * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
  * for buffer overruns.
  *
- * %SLAB_NO_REAP - Don't automatically reap this cache when we're under
- * memory pressure.
- *
  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
  * cacheline.  This can be beneficial if you're counting cycles as closely
  * as davem.
@@ -3584,10 +3580,6 @@ static void cache_reap(void *unused)
 		struct slab *slabp;
 
 		searchp = list_entry(walk, struct kmem_cache, next);
 
-		if (searchp->flags & SLAB_NO_REAP)
-			goto next;
-
 		check_irq_on();
 
 		l3 = searchp->nodelists[numa_node_id()];
@@ -3635,7 +3627,6 @@ static void cache_reap(void *unused)
 		} while (--tofree > 0);
 next_unlock:
 		spin_unlock_irq(&l3->list_lock);
-next:
 		cond_resched();
 	}
 	check_irq_on();