slub: Drop fallback to page allocator method
There is now a generic method of falling back to a slab page of minimal order. No need anymore for the fallback to kmalloc_large().

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
This commit is contained in:
parent 65c3376aac
commit 319d1e2406
1 changed file with 2 additions and 41 deletions
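For context, the "generic method" referred to in the commit message is the minimal-order fallback introduced by the parent commit 65c3376aac: when the cache's preferred (possibly higher-order) slab page cannot be obtained, the page allocator is retried at the smallest order that still holds at least one object, so the emergency detour through kmalloc_large() becomes unnecessary. Below is a simplified sketch of that mechanism, not the verbatim kernel code: the oo/min fields and the oo_order() helper are taken from the parent commit, the node handling is abridged, and the function name is made up for illustration.

/*
 * Simplified sketch of SLUB's generic minimal-order fallback
 * (cf. parent commit 65c3376aac).  Not the verbatim kernel code.
 */
static struct page *allocate_slab_sketch(struct kmem_cache *s,
                                         gfp_t flags, int node)
{
        struct page *page;

        /*
         * First attempt: the cache's preferred order.  __GFP_NORETRY and
         * __GFP_NOWARN because failure here is expected under memory
         * fragmentation and is handled below.
         */
        page = alloc_pages_node(node, flags | s->allocflags |
                                __GFP_NOWARN | __GFP_NORETRY,
                                oo_order(s->oo));
        if (unlikely(!page))
                /*
                 * Fallback: retry at the minimum order that still fits at
                 * least one object.  This replaces the old emergency path
                 * through kmalloc_large() that this commit removes.
                 */
                page = alloc_pages_node(node, flags | s->allocflags,
                                        oo_order(s->min));
        return page;
}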
mm/slub.c: 43 lines changed

--- a/mm/slub.c
+++ b/mm/slub.c
@@ -204,8 +204,6 @@ static inline void ClearSlabDebug(struct page *page)
 /* Internal SLUB flags */
 #define __OBJECT_POISON 0x80000000 /* Poison object */
 #define __SYSFS_ADD_DEFERRED 0x40000000 /* Not yet visible via sysfs */
-#define __KMALLOC_CACHE 0x20000000 /* objects freed using kfree */
-#define __PAGE_ALLOC_FALLBACK 0x10000000 /* Allow fallback to page alloc */
 
 /* Not all arches define cache_line_size */
 #ifndef cache_line_size
@@ -1623,27 +1621,6 @@ static void *__slab_alloc(struct kmem_cache *s,
                 c->page = new;
                 goto load_freelist;
         }
-
-        /*
-         * No memory available.
-         *
-         * If the slab uses higher order allocs but the object is
-         * smaller than a page size then we can fallback in emergencies
-         * to the page allocator via kmalloc_large. The page allocator may
-         * have failed to obtain a higher order page and we can try to
-         * allocate a single page if the object fits into a single page.
-         * That is only possible if certain conditions are met that are being
-         * checked when a slab is created.
-         */
-        if (!(gfpflags & __GFP_NORETRY) &&
-                        (s->flags & __PAGE_ALLOC_FALLBACK)) {
-                if (gfpflags & __GFP_WAIT)
-                        local_irq_enable();
-                object = kmalloc_large(s->objsize, gfpflags);
-                if (gfpflags & __GFP_WAIT)
-                        local_irq_disable();
-                return object;
-        }
         return NULL;
 debug:
         if (!alloc_debug_processing(s, c->page, object, addr))
@@ -2330,20 +2307,7 @@ static int calculate_sizes(struct kmem_cache *s)
          */
         size = ALIGN(size, align);
         s->size = size;
-
-        if ((flags & __KMALLOC_CACHE) &&
-                        PAGE_SIZE / size < slub_min_objects) {
-                /*
-                 * Kmalloc cache that would not have enough objects in
-                 * an order 0 page. Kmalloc slabs can fallback to
-                 * page allocator order 0 allocs so take a reasonably large
-                 * order that will allows us a good number of objects.
-                 */
-                order = max(slub_max_order, PAGE_ALLOC_COSTLY_ORDER);
-                s->flags |= __PAGE_ALLOC_FALLBACK;
-                s->allocflags |= __GFP_NOWARN;
-        } else
-                order = calculate_order(size);
+        order = calculate_order(size);
 
         if (order < 0)
                 return 0;
@@ -2589,7 +2553,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
 
         down_write(&slub_lock);
         if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
-                        flags | __KMALLOC_CACHE, NULL))
+                        flags, NULL))
                 goto panic;
 
         list_add(&s->list, &slab_caches);
@@ -3105,9 +3069,6 @@ static int slab_unmergeable(struct kmem_cache *s)
         if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
                 return 1;
 
-        if ((s->flags & __PAGE_ALLOC_FALLBACK))
-                return 1;
-
         if (s->ctor)
                 return 1;
 
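Why the removed flags existed at all: an object handed out by the old kmalloc_large() fallback sat on a plain compound page rather than a slab page, so it could only be returned through kfree(), which tells the two cases apart via PageSlab(). That is plausibly also why __PAGE_ALLOC_FALLBACK caches were treated as unmergeable above: users of a merged cache free with kmem_cache_free(), which assumes a real slab page. The following is a rough, from-memory sketch of that kfree() dispatch in this era, not the verbatim code of this tree, shown only to motivate the removed __KMALLOC_CACHE ("objects freed using kfree") and __PAGE_ALLOC_FALLBACK flags.

/*
 * Rough sketch of the kfree() dispatch that made the old fallback work:
 * non-slab (page allocator) memory is released with put_page(), ordinary
 * slab objects go through the normal slab free path.  Not verbatim code.
 */
void kfree_sketch(const void *x)
{
        struct page *page;

        if (unlikely(ZERO_OR_NULL_PTR(x)))
                return;

        page = virt_to_head_page(x);
        if (unlikely(!PageSlab(page))) {
                /* Object came straight from the page allocator. */
                put_page(page);
                return;
        }
        /* Ordinary slab object. */
        slab_free(page->slab, page, (void *)x, __builtin_return_address(0));
}

With the fallback gone, every object of a kmalloc cache is a real slab object again, so both the __KMALLOC_CACHE marker and the merge restriction lose their purpose.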