block cfq: don't use atomic_t for cfq_group
cfq_group->ref is used with queue_lock held; the only exception is cfq_set_request, which looks like a bug to me. So the ref doesn't need to be an atomic_t, and atomic operations are slower.

Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
parent 30d7b9448f
commit 329a67815b

1 changed file with 12 additions and 11 deletions
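The pattern the patch relies on can be illustrated with a small standalone sketch (not kernel code; struct io_group, group_lock, group_get and group_put below are made-up names for illustration): as long as every increment and decrement of the reference count happens while the same lock is held, a plain int is sufficient and avoids the cost of atomic read-modify-write instructions. It also suggests why the cfq_set_request hunk below moves the unlock: taking the reference after the lock has been dropped would reintroduce a race.

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical object whose refcount is a plain int: this is safe only
 * because every get/put happens with group_lock held, mirroring how
 * cfq_group->ref is only touched under queue_lock after this patch. */
struct io_group {
	int ref;
	/* ... payload ... */
};

static pthread_mutex_t group_lock = PTHREAD_MUTEX_INITIALIZER;

struct io_group *group_get(struct io_group *g)
{
	pthread_mutex_lock(&group_lock);
	g->ref++;			/* replaces atomic_inc() */
	pthread_mutex_unlock(&group_lock);
	return g;
}

void group_put(struct io_group *g)
{
	int last;

	pthread_mutex_lock(&group_lock);
	g->ref--;			/* replaces atomic_dec_and_test() */
	last = (g->ref == 0);
	pthread_mutex_unlock(&group_lock);

	if (last)
		free(g);		/* last reference dropped */
}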
@@ -207,7 +207,7 @@ struct cfq_group {
 	struct blkio_group blkg;
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
 	struct hlist_node cfqd_node;
-	atomic_t ref;
+	int ref;
 #endif
 	/* number of requests that are on the dispatch list or inside driver */
 	int dispatched;
@@ -1014,7 +1014,7 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
 	 * elevator which will be dropped by either elevator exit
 	 * or cgroup deletion path depending on who is exiting first.
 	 */
-	atomic_set(&cfqg->ref, 1);
+	cfqg->ref = 1;
 
 	/*
 	 * Add group onto cgroup list. It might happen that bdi->dev is
@@ -1059,7 +1059,7 @@ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
 
 static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
 {
-	atomic_inc(&cfqg->ref);
+	cfqg->ref++;
 	return cfqg;
 }
 
@@ -1071,7 +1071,7 @@ static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
 
 	cfqq->cfqg = cfqg;
 	/* cfqq reference on cfqg */
-	atomic_inc(&cfqq->cfqg->ref);
+	cfqq->cfqg->ref++;
 }
 
 static void cfq_put_cfqg(struct cfq_group *cfqg)
@@ -1079,8 +1079,9 @@ static void cfq_put_cfqg(struct cfq_group *cfqg)
 	struct cfq_rb_root *st;
 	int i, j;
 
-	BUG_ON(atomic_read(&cfqg->ref) <= 0);
-	if (!atomic_dec_and_test(&cfqg->ref))
+	BUG_ON(cfqg->ref <= 0);
+	cfqg->ref--;
+	if (cfqg->ref)
 		return;
 	for_each_cfqg_st(cfqg, i, j, st)
 		BUG_ON(!RB_EMPTY_ROOT(&st->rb));
@@ -1188,7 +1189,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		cfq_group_service_tree_del(cfqd, cfqq->cfqg);
 		cfqq->orig_cfqg = cfqq->cfqg;
 		cfqq->cfqg = &cfqd->root_group;
-		atomic_inc(&cfqd->root_group.ref);
+		cfqd->root_group.ref++;
 		group_changed = 1;
 	} else if (!cfqd->cfq_group_isolation
 		   && cfqq_type(cfqq) == SYNC_WORKLOAD && cfqq->orig_cfqg) {
@@ -3681,12 +3682,12 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 
 	cfqq->allocated[rw]++;
 	cfqq->ref++;
-
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
 	rq->elevator_private = cic;
 	rq->elevator_private2 = cfqq;
 	rq->elevator_private3 = cfq_ref_get_cfqg(cfqq->cfqg);
+
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
 	return 0;
 
 queue_fail:
@@ -3884,7 +3885,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	 * Take a reference to root group which we never drop. This is just
 	 * to make sure that cfq_put_cfqg() does not try to kfree root group
 	 */
-	atomic_set(&cfqg->ref, 1);
+	cfqg->ref = 1;
 	rcu_read_lock();
 	cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
 					(void *)cfqd, 0);