mm: fault-inject take over bootstrap kmem_cache check

Remove the SLAB-specific function slab_should_failslab() by moving its
fault-injection exemption for the bootstrap slab into the shared function
should_failslab() (used by both SLAB and SLUB).

This is a step towards sharing the alloc hooks between SLUB and SLAB.

The bootstrap slab "kmem_cache" is the cache used to allocate struct
kmem_cache objects for the allocator itself.
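
For reference, the shared function as it reads after this patch, assembled
from the mm/failslab.c hunk below; the bootstrap exemption is now the first
check:

    bool should_failslab(struct kmem_cache *s, gfp_t gfpflags)
    {
            /* No fault-injection for bootstrap cache */
            if (unlikely(s == kmem_cache))
                    return false;

            if (gfpflags & __GFP_NOFAIL)
                    return false;

            if (failslab.ignore_gfp_reclaim && (gfpflags & __GFP_RECLAIM))
                    return false;

            if (failslab.cache_filter && !(s->flags & SLAB_FAILSLAB))
                    return false;

            return should_fail(&failslab.attr, s->object_size);
    }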

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Vladimir Davydov <vdavydov@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit fab9963a69 (parent 11c7aec2a9)
Author: Jesper Dangaard Brouer <brouer@redhat.com> 2016-03-15 14:53:38 -07:00
Committed by: Linus Torvalds

4 changed files with 14 additions and 17 deletions

--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h

@@ -62,10 +62,9 @@ static inline struct dentry *fault_create_debugfs_attr(const char *name,
 #endif /* CONFIG_FAULT_INJECTION */
 
 #ifdef CONFIG_FAILSLAB
-extern bool should_failslab(size_t size, gfp_t gfpflags, unsigned long flags);
+extern bool should_failslab(struct kmem_cache *s, gfp_t gfpflags);
 #else
-static inline bool should_failslab(size_t size, gfp_t gfpflags,
-                                unsigned long flags)
+static inline bool should_failslab(struct kmem_cache *s, gfp_t gfpflags)
 {
         return false;
 }

--- a/mm/failslab.c
+++ b/mm/failslab.c

@@ -1,5 +1,7 @@
 #include <linux/fault-inject.h>
 #include <linux/slab.h>
+#include <linux/mm.h>
+#include "slab.h"
 
 static struct {
         struct fault_attr attr;
@@ -11,18 +13,22 @@ static struct {
         .cache_filter = false,
 };
 
-bool should_failslab(size_t size, gfp_t gfpflags, unsigned long cache_flags)
+bool should_failslab(struct kmem_cache *s, gfp_t gfpflags)
 {
+        /* No fault-injection for bootstrap cache */
+        if (unlikely(s == kmem_cache))
+                return false;
+
         if (gfpflags & __GFP_NOFAIL)
                 return false;
 
         if (failslab.ignore_gfp_reclaim && (gfpflags & __GFP_RECLAIM))
                 return false;
 
-        if (failslab.cache_filter && !(cache_flags & SLAB_FAILSLAB))
+        if (failslab.cache_filter && !(s->flags & SLAB_FAILSLAB))
                 return false;
 
-        return should_fail(&failslab.attr, size);
+        return should_fail(&failslab.attr, s->object_size);
 }
 
 static int __init setup_failslab(char *str)
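
For context, should_fail() is the generic predicate provided by
<linux/fault-inject.h>; a minimal sketch of how an injection point is wired
up on the same fault_attr machinery (the fail_example attribute and
should_fail_example() helper are hypothetical names, for illustration only):

    #include <linux/fault-inject.h>

    /* Hypothetical injection point built on the shared fault_attr code. */
    static DECLARE_FAULT_ATTR(fail_example);

    static bool should_fail_example(size_t size)
    {
            /* True per the configured probability/interval/times knobs. */
            return should_fail(&fail_example, size);
    }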

--- a/mm/slab.c
+++ b/mm/slab.c

@@ -2926,14 +2926,6 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
 #endif
 
-static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
-{
-        if (unlikely(cachep == kmem_cache))
-                return false;
-
-        return should_failslab(cachep->object_size, flags, cachep->flags);
-}
-
 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
         void *objp;
@@ -3155,7 +3147,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 
         lockdep_trace_alloc(flags);
 
-        if (slab_should_failslab(cachep, flags))
+        if (should_failslab(cachep, flags))
                 return NULL;
 
         cachep = memcg_kmem_get_cache(cachep, flags);
@@ -3243,7 +3235,7 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
 
         lockdep_trace_alloc(flags);
 
-        if (slab_should_failslab(cachep, flags))
+        if (should_failslab(cachep, flags))
                 return NULL;
 
         cachep = memcg_kmem_get_cache(cachep, flags);

--- a/mm/slub.c
+++ b/mm/slub.c

@@ -360,7 +360,7 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
         lockdep_trace_alloc(flags);
         might_sleep_if(gfpflags_allow_blocking(flags));
 
-        if (should_failslab(s->object_size, flags, s->flags))
+        if (should_failslab(s, flags))
                 return NULL;
 
         return memcg_kmem_get_cache(s, flags);
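
A usage note on the cache_filter path kept above: with cache_filter enabled,
only caches created with the SLAB_FAILSLAB flag are eligible for injection.
A sketch of opting a cache in (the example_obj type and cache name are made
up for illustration):

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/slab.h>

    struct example_obj {
            int id;
    };

    static struct kmem_cache *example_cachep;

    static int __init example_init(void)
    {
            /* SLAB_FAILSLAB opts this cache into failslab's cache_filter. */
            example_cachep = kmem_cache_create("example_obj",
                                               sizeof(struct example_obj),
                                               0, SLAB_FAILSLAB, NULL);
            return example_cachep ? 0 : -ENOMEM;
    }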