[PATCH] slab: allocate larger cache_cache if order 0 fails
kmem_cache_init() incorrectly assumes that the cache_cache object will fit in an order 0 allocation. On very large systems, this is not true. Change the code to try larger order allocations if order 0 fails. Signed-off-by: Jack Steiner <steiner@sgi.com> Cc: Manfred Spraul <manfred@colorfullife.com> Cc: Pekka Enberg <penberg@cs.helsinki.fi> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
parent
731805b494
commit
07ed76b2a0
1 changed file with 8 additions and 3 deletions
11
mm/slab.c
11
mm/slab.c
|
@@ -1124,6 +1124,7 @@ void __init kmem_cache_init(void)
|
|||
struct cache_sizes *sizes;
|
||||
struct cache_names *names;
|
||||
int i;
|
||||
int order;
|
||||
|
||||
for (i = 0; i < NUM_INIT_LISTS; i++) {
|
||||
kmem_list3_init(&initkmem_list3[i]);
|
||||
|
@@ -1167,11 +1168,15 @@ void __init kmem_cache_init(void)
|
|||
|
||||
cache_cache.buffer_size = ALIGN(cache_cache.buffer_size, cache_line_size());
|
||||
|
||||
cache_estimate(0, cache_cache.buffer_size, cache_line_size(), 0,
|
||||
&left_over, &cache_cache.num);
|
||||
for (order = 0; order < MAX_ORDER; order++) {
|
||||
cache_estimate(order, cache_cache.buffer_size,
|
||||
cache_line_size(), 0, &left_over, &cache_cache.num);
|
||||
if (cache_cache.num)
|
||||
break;
|
||||
}
|
||||
if (!cache_cache.num)
|
||||
BUG();
|
||||
|
||||
cache_cache.gfporder = order;
|
||||
cache_cache.colour = left_over / cache_cache.colour_off;
|
||||
cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
|
||||
sizeof(struct slab), cache_line_size());
|
||||
|
|
Loading…
Reference in a new issue