slub: fast release on full slab
Make deactivation occur implicitly while checking out the current freelist.

This avoids one cmpxchg operation on a slab that is now fully in use.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent e36a2652d7
commit 03e404af26
2 changed files with 20 additions and 2 deletions
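Before the diff: the change folds slab deactivation into the freelist checkout itself. Previously, once __slab_alloc() had taken the last objects off a cpu slab, the slab still had to be unfrozen later with a separate cmpxchg; after this patch the same atomic update that empties the freelist also clears the frozen bit when nothing is left. The userspace sketch below only models that idea; the struct, function names, and single-threaded "atomic" step are made up for illustration and are not kernel code.

/* Minimal single-threaded model of the new checkout logic; all names and
 * the lock-free details are simplified away, so this is an illustration
 * of the idea, not the kernel implementation. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_slab {
	void *freelist;		/* first free object, NULL when fully used */
	unsigned int inuse;	/* objects currently handed out */
	unsigned int objects;	/* total objects the slab can hold */
	bool frozen;		/* true while a cpu owns the slab */
};

/*
 * Take the whole freelist in one step.  The same update that grabs the
 * objects also decides the frozen state: if nothing is left, frozen is
 * cleared and the slab is implicitly deactivated, so the caller never
 * needs a second atomic operation to release a now-full slab.
 * (In mm/slub.c this whole struct update is one cmpxchg_double_slab().)
 */
static void *checkout_freelist(struct toy_slab *slab)
{
	void *object = slab->freelist;

	slab->freelist = NULL;
	slab->inuse = slab->objects;
	slab->frozen = (object != NULL);	/* implicit deactivation */

	return object;
}

int main(void)
{
	int storage[2];
	struct toy_slab slab = {
		.freelist = &storage[0], .inuse = 1, .objects = 2, .frozen = true,
	};

	void *batch = checkout_freelist(&slab);
	printf("got batch=%p, frozen=%d\n", batch, slab.frozen);

	/* A second checkout finds an empty freelist: frozen drops to 0 and
	 * the slab is released without any extra step. */
	batch = checkout_freelist(&slab);
	printf("got batch=%p, frozen=%d\n", batch, slab.frozen);
	return 0;
}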
include/linux/slub_def.h
@@ -32,6 +32,7 @@ enum stat_item {
 	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
 	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
 	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
+	DEACTIVATE_BYPASS,	/* Implicit deactivation */
 	ORDER_FALLBACK,		/* Number of times fallback was necessary */
 	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
 	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
mm/slub.c (21 changed lines)
@@ -1977,9 +1977,21 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		object = page->freelist;
 		counters = page->counters;
 		new.counters = counters;
-		new.inuse = page->objects;
 		VM_BUG_ON(!new.frozen);
 
+		/*
+		 * If there is no object left then we use this loop to
+		 * deactivate the slab which is simple since no objects
+		 * are left in the slab and therefore we do not need to
+		 * put the page back onto the partial list.
+		 *
+		 * If there are objects left then we retrieve them
+		 * and use them to refill the per cpu queue.
+		 */
+
+		new.inuse = page->objects;
+		new.frozen = object != NULL;
+
 	} while (!cmpxchg_double_slab(s, page,
 			object, counters,
 			NULL, new.counters,
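For context on the loop above: cmpxchg_double_slab() compares the freelist pointer and the packed counters word together, and the loop retries until both still match, so a concurrent remote free simply forces another pass. Below is a hedged userspace analogue that packs a fake freelist head and a counters word into one 64-bit atomic so a plain C11 compare-exchange can stand in for the double-word cmpxchg; the packing, names, and bit layout are illustrative only.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define PACK(head, counters)	(((uint64_t)(counters) << 32) | (uint32_t)(head))
#define HEAD(word)		((uint32_t)(word))

static _Atomic uint64_t slab_word;

/* Take the whole freelist; keep the slab frozen only if objects remained. */
static uint32_t checkout(void)
{
	uint64_t old, new;
	uint32_t head;

	old = atomic_load(&slab_word);
	do {
		head = HEAD(old);
		/* new counters: frozen bit set only when the freelist was non-empty */
		uint32_t counters = (head != 0) ? 0x80000000u : 0;
		new = PACK(0, counters);
		/* retry if a concurrent free changed freelist or counters */
	} while (!atomic_compare_exchange_weak(&slab_word, &old, new));

	return head;
}

int main(void)
{
	atomic_store(&slab_word, PACK(3 /* head index */, 0x80000005u));
	printf("checked out head=%u\n", (unsigned)checkout());
	printf("second checkout (now empty) head=%u\n", (unsigned)checkout());
	return 0;
}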
@@ -1988,8 +2000,11 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 load_freelist:
 	VM_BUG_ON(!page->frozen);
 
-	if (unlikely(!object))
+	if (unlikely(!object)) {
+		c->page = NULL;
+		stat(s, DEACTIVATE_BYPASS);
 		goto new_slab;
+	}
 
 	stat(s, ALLOC_REFILL);
 
@@ -4680,6 +4695,7 @@ STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
+STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
 STAT_ATTR(ORDER_FALLBACK, order_fallback);
 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
@@ -4740,6 +4756,7 @@ static struct attribute *slab_attrs[] = {
 	&deactivate_to_head_attr.attr,
 	&deactivate_to_tail_attr.attr,
 	&deactivate_remote_frees_attr.attr,
+	&deactivate_bypass_attr.attr,
 	&order_fallback_attr.attr,
 	&cmpxchg_double_fail_attr.attr,
 	&cmpxchg_double_cpu_fail_attr.attr,
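Usage note: the STAT_ATTR() entries above export the counter through the slab sysfs directory when the kernel is built with CONFIG_SLUB_STATS. A small hedged example of reading it from userspace follows; the cache name "kmalloc-64" and the exact path are assumptions for illustration.

/* Read the new deactivate_bypass counter for one cache via sysfs.
 * Assumes CONFIG_SLUB_STATS=y; the cache name is only an example. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/slab/kmalloc-64/deactivate_bypass";
	char buf[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("deactivate_bypass: %s", buf);
	fclose(f);
	return 0;
}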