blkcg: let blkio_group point to blkio_cgroup directly

Currently, blkg points to the associated blkcg via its css_id.  This
unnecessarily complicates dereferencing blkcg.  Let blkg hold a
reference to the associated blkcg, point directly to it, and disable
css_id on blkio_subsys.

This change requires splitting blkiocg_destroy() into
blkiocg_pre_destroy() and blkiocg_destroy() so that all blkg's can be
destroyed and all the blkcg references held by them dropped during
cgroup removal.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author: Tejun Heo <tj@kernel.org>, 2012-03-05 13:15:11 -08:00, committed by Jens Axboe
commit 7ee9c56205, parent 92616b5b3a
4 changed files with 32 additions and 20 deletions
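For orientation before the diff: a minimal sketch of the dereference pattern being replaced versus the one being introduced. The struct and function names (`blkio_group_old`, `blkg_to_blkcg_old`, etc.) are illustrative simplifications, not identifiers from the tree. The old scheme had to translate a css_id back into a css on every use; the new scheme pins the blkcg with a css reference for the blkg's lifetime.

	/* Old scheme: blkg stores only the css_id of its blkcg. */
	struct blkio_group_old {
		unsigned short blkcg_id;
	};

	/*
	 * Every dereference is an id -> css lookup that may fail.  The
	 * caller must hold rcu_read_lock() and must not use the result
	 * after dropping it.
	 */
	static struct blkio_cgroup *blkg_to_blkcg_old(struct blkio_group_old *blkg)
	{
		struct cgroup_subsys_state *css;

		css = css_lookup(&blkio_subsys, blkg->blkcg_id);
		if (!css)
			return NULL;	/* blkcg already gone */
		return container_of(css, struct blkio_cgroup, css);
	}

	/* New scheme: blkg holds a css reference and a direct pointer. */
	struct blkio_group_new {
		struct blkio_cgroup *blkcg;	/* pinned via css_tryget() */
	};

	/*
	 * The reference is taken at blkg creation (css_tryget) and dropped
	 * when the blkg is released (css_put), so the pointer is always
	 * valid while the blkg lives and dereferencing is a plain load.
	 */
	static struct blkio_cgroup *blkg_to_blkcg_new(struct blkio_group_new *blkg)
	{
		return blkg->blkcg;
	}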

block/blk-cgroup.c

@@ -37,6 +37,7 @@ static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
                               struct cgroup_taskset *);
 static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
                            struct cgroup_taskset *);
+static int blkiocg_pre_destroy(struct cgroup_subsys *, struct cgroup *);
 static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
 static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
@@ -51,10 +52,10 @@ struct cgroup_subsys blkio_subsys = {
         .create = blkiocg_create,
         .can_attach = blkiocg_can_attach,
         .attach = blkiocg_attach,
+        .pre_destroy = blkiocg_pre_destroy,
         .destroy = blkiocg_destroy,
         .populate = blkiocg_populate,
         .subsys_id = blkio_subsys_id,
-        .use_id = 1,
         .module = THIS_MODULE,
 };
 EXPORT_SYMBOL_GPL(blkio_subsys);
@@ -442,6 +443,7 @@ struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
         if (blkg)
                 return blkg;

+        /* blkg holds a reference to blkcg */
         if (!css_tryget(&blkcg->css))
                 return ERR_PTR(-EINVAL);
@@ -463,15 +465,16 @@ struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
                 spin_lock_init(&new_blkg->stats_lock);
                 rcu_assign_pointer(new_blkg->q, q);
-                new_blkg->blkcg_id = css_id(&blkcg->css);
+                new_blkg->blkcg = blkcg;
                 new_blkg->plid = plid;
                 cgroup_path(blkcg->css.cgroup, new_blkg->path,
                             sizeof(new_blkg->path));
+        } else {
+                css_put(&blkcg->css);
         }

         rcu_read_lock();
         spin_lock_irq(q->queue_lock);
-        css_put(&blkcg->css);

         /* did bypass get turned on inbetween? */
         if (unlikely(blk_queue_bypass(q)) && !for_root) {
@@ -500,6 +503,7 @@ struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
         if (new_blkg) {
                 free_percpu(new_blkg->stats_cpu);
                 kfree(new_blkg);
+                css_put(&blkcg->css);
         }
         return blkg;
 }
@@ -508,7 +512,6 @@ EXPORT_SYMBOL_GPL(blkg_lookup_create);
 static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
 {
         hlist_del_init_rcu(&blkg->blkcg_node);
-        blkg->blkcg_id = 0;
 }

 /*
@@ -517,24 +520,17 @@ static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
  */
 int blkiocg_del_blkio_group(struct blkio_group *blkg)
 {
-        struct blkio_cgroup *blkcg;
+        struct blkio_cgroup *blkcg = blkg->blkcg;
         unsigned long flags;
-        struct cgroup_subsys_state *css;
         int ret = 1;

-        rcu_read_lock();
-        css = css_lookup(&blkio_subsys, blkg->blkcg_id);
-        if (css) {
-                blkcg = container_of(css, struct blkio_cgroup, css);
-                spin_lock_irqsave(&blkcg->lock, flags);
-                if (!hlist_unhashed(&blkg->blkcg_node)) {
-                        __blkiocg_del_blkio_group(blkg);
-                        ret = 0;
-                }
-                spin_unlock_irqrestore(&blkcg->lock, flags);
+        spin_lock_irqsave(&blkcg->lock, flags);
+        if (!hlist_unhashed(&blkg->blkcg_node)) {
+                __blkiocg_del_blkio_group(blkg);
+                ret = 0;
         }
+        spin_unlock_irqrestore(&blkcg->lock, flags);

-        rcu_read_unlock();
         return ret;
 }
 EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
@@ -1387,7 +1383,8 @@ static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
                                 ARRAY_SIZE(blkio_files));
 }

-static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
+static int blkiocg_pre_destroy(struct cgroup_subsys *subsys,
+                               struct cgroup *cgroup)
 {
         struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
         unsigned long flags;
@@ -1396,6 +1393,7 @@ static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
         struct blkio_policy_type *blkiop;

         rcu_read_lock();
+
         do {
                 spin_lock_irqsave(&blkcg->lock, flags);
@@ -1425,8 +1423,15 @@ static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
                 spin_unlock(&blkio_list_lock);
         } while (1);

-        free_css_id(&blkio_subsys, &blkcg->css);
         rcu_read_unlock();
+
+        return 0;
+}
+
+static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
+{
+        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
+
         if (blkcg != &blkio_root_cgroup)
                 kfree(blkcg);
 }
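The split matters because each blkg now keeps the blkcg's css refcount elevated: the blkgs must be shot down while the cgroup is still being removed, before the blkcg itself can be freed. A rough sketch of the ordering cgroup core imposed at the time of this commit (heavily condensed and hypothetical in its details; locking, retries, and error paths omitted):

	/*
	 * Condensed sketch of rmdir-time teardown, not actual cgroup core
	 * code: pre_destroy() runs first, while the cgroup still exists,
	 * and may veto removal; destroy() runs last and frees state.
	 */
	static int cgroup_rmdir_sketch(struct cgroup *cgrp)
	{
		struct cgroup_subsys *ss = &blkio_subsys;
		int ret;

		/*
		 * Phase 1: blkiocg_pre_destroy() destroys every blkg on
		 * the blkcg; each dying blkg does a css_put(), dropping
		 * the reference it took at creation.
		 */
		ret = ss->pre_destroy(ss, cgrp);
		if (ret)
			return ret;		/* removal aborted */

		/* ... cgroup core waits for the css refcount to drain ... */

		/* Phase 2: blkiocg_destroy() frees the blkcg itself. */
		ss->destroy(ss, cgrp);
		return 0;
	}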

block/blk-cgroup.h

@@ -163,7 +163,7 @@ struct blkio_group {
         /* Pointer to the associated request_queue, RCU protected */
         struct request_queue __rcu *q;
         struct hlist_node blkcg_node;
-        unsigned short blkcg_id;
+        struct blkio_cgroup *blkcg;
         /* Store cgroup path */
         char path[128];
         /* policy which owns this blk group */

block/blk-throttle.c

@@ -169,6 +169,9 @@ static void throtl_put_tg(struct throtl_grp *tg)
         if (!atomic_dec_and_test(&tg->ref))
                 return;

+        /* release the extra blkcg reference this blkg has been holding */
+        css_put(&tg->blkg.blkcg->css);
+
         /*
          * A group is freed in rcu manner. But having an rcu lock does not
          * mean that one can access all the fields of blkg and assume these

block/cfq-iosched.c

@@ -1133,6 +1133,10 @@ static void cfq_put_cfqg(struct cfq_group *cfqg)
         cfqg->ref--;
         if (cfqg->ref)
                 return;
+
+        /* release the extra blkcg reference this blkg has been holding */
+        css_put(&cfqg->blkg.blkcg->css);
+
         for_each_cfqg_st(cfqg, i, j, st)
                 BUG_ON(!RB_EMPTY_ROOT(&st->rb));
         free_percpu(cfqg->blkg.stats_cpu);