block: Update to io-controller stats

Changelog from v1:
o Call blkiocg_update_idle_time_stats() at cfq_rq_enqueued() instead of at
  dispatch time.

Changelog from original patchset: (in response to Vivek Goyal's comments)
o group blkiocg_update_blkio_group_dequeue_stats() with other DEBUG functions
o rename blkiocg_update_set_active_queue_stats() to
  blkiocg_update_avg_queue_size_stats()
o s/request/io/ in blkiocg_update_request_add_stats() and
  blkiocg_update_request_remove_stats()
o Call cfq_del_timer() at request dispatch() instead of
  blkiocg_update_idle_time_stats()

Signed-off-by: Divyesh Shah <dpshah@google.com>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
This commit is contained in:
Divyesh Shah 2010-04-13 19:59:17 +02:00 committed by Jens Axboe
parent da69da184c
commit a11cdaa7af
3 changed files with 28 additions and 32 deletions

View file

@ -202,7 +202,7 @@ void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
} }
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats); EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
void blkiocg_update_set_active_queue_stats(struct blkio_group *blkg) void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{ {
unsigned long flags; unsigned long flags;
struct blkio_group_stats *stats; struct blkio_group_stats *stats;
@ -216,14 +216,21 @@ void blkiocg_update_set_active_queue_stats(struct blkio_group *blkg)
blkio_update_group_wait_time(stats); blkio_update_group_wait_time(stats);
spin_unlock_irqrestore(&blkg->stats_lock, flags); spin_unlock_irqrestore(&blkg->stats_lock, flags);
} }
EXPORT_SYMBOL_GPL(blkiocg_update_set_active_queue_stats); EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
unsigned long dequeue)
{
blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else #else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg, static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
struct blkio_group *curr_blkg) {} struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {} static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif #endif
void blkiocg_update_request_add_stats(struct blkio_group *blkg, void blkiocg_update_io_add_stats(struct blkio_group *blkg,
struct blkio_group *curr_blkg, bool direction, struct blkio_group *curr_blkg, bool direction,
bool sync) bool sync)
{ {
@ -236,9 +243,9 @@ void blkiocg_update_request_add_stats(struct blkio_group *blkg,
blkio_set_start_group_wait_time(blkg, curr_blkg); blkio_set_start_group_wait_time(blkg, curr_blkg);
spin_unlock_irqrestore(&blkg->stats_lock, flags); spin_unlock_irqrestore(&blkg->stats_lock, flags);
} }
EXPORT_SYMBOL_GPL(blkiocg_update_request_add_stats); EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
void blkiocg_update_request_remove_stats(struct blkio_group *blkg, void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
bool direction, bool sync) bool direction, bool sync)
{ {
unsigned long flags; unsigned long flags;
@ -248,7 +255,7 @@ void blkiocg_update_request_remove_stats(struct blkio_group *blkg,
direction, sync); direction, sync);
spin_unlock_irqrestore(&blkg->stats_lock, flags); spin_unlock_irqrestore(&blkg->stats_lock, flags);
} }
EXPORT_SYMBOL_GPL(blkiocg_update_request_remove_stats); EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time) void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
{ {
@ -636,15 +643,6 @@ SHOW_FUNCTION_PER_GROUP(empty_time, BLKIO_STAT_EMPTY_TIME, 0);
#endif #endif
#undef SHOW_FUNCTION_PER_GROUP #undef SHOW_FUNCTION_PER_GROUP
#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
unsigned long dequeue)
{
blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#endif
static int blkio_check_dev_num(dev_t dev) static int blkio_check_dev_num(dev_t dev)
{ {
int part = 0; int part = 0;

View file

@ -169,7 +169,7 @@ static inline char *blkg_path(struct blkio_group *blkg)
{ {
return blkg->path; return blkg->path;
} }
void blkiocg_update_set_active_queue_stats(struct blkio_group *blkg); void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg);
void blkiocg_update_dequeue_stats(struct blkio_group *blkg, void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
unsigned long dequeue); unsigned long dequeue);
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg); void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg);
@ -198,7 +198,7 @@ BLKG_FLAG_FNS(empty)
#undef BLKG_FLAG_FNS #undef BLKG_FLAG_FNS
#else #else
static inline char *blkg_path(struct blkio_group *blkg) { return NULL; } static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
static inline void blkiocg_update_set_active_queue_stats( static inline void blkiocg_update_avg_queue_size_stats(
struct blkio_group *blkg) {} struct blkio_group *blkg) {}
static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg, static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
unsigned long dequeue) {} unsigned long dequeue) {}
@ -226,9 +226,9 @@ void blkiocg_update_completion_stats(struct blkio_group *blkg,
uint64_t start_time, uint64_t io_start_time, bool direction, bool sync); uint64_t start_time, uint64_t io_start_time, bool direction, bool sync);
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction, void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
bool sync); bool sync);
void blkiocg_update_request_add_stats(struct blkio_group *blkg, void blkiocg_update_io_add_stats(struct blkio_group *blkg,
struct blkio_group *curr_blkg, bool direction, bool sync); struct blkio_group *curr_blkg, bool direction, bool sync);
void blkiocg_update_request_remove_stats(struct blkio_group *blkg, void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
bool direction, bool sync); bool direction, bool sync);
#else #else
struct cgroup; struct cgroup;
@ -253,9 +253,9 @@ static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
bool sync) {} bool sync) {}
static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg, static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
bool direction, bool sync) {} bool direction, bool sync) {}
static inline void blkiocg_update_request_add_stats(struct blkio_group *blkg, static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg,
struct blkio_group *curr_blkg, bool direction, bool sync) {} struct blkio_group *curr_blkg, bool direction, bool sync) {}
static inline void blkiocg_update_request_remove_stats(struct blkio_group *blkg, static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
bool direction, bool sync) {} bool direction, bool sync) {}
#endif #endif
#endif /* _BLK_CGROUP_H */ #endif /* _BLK_CGROUP_H */

View file

@ -1381,10 +1381,10 @@ static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{ {
elv_rb_del(&cfqq->sort_list, rq); elv_rb_del(&cfqq->sort_list, rq);
cfqq->queued[rq_is_sync(rq)]--; cfqq->queued[rq_is_sync(rq)]--;
blkiocg_update_request_remove_stats(&cfqq->cfqg->blkg, rq_data_dir(rq), blkiocg_update_io_remove_stats(&cfqq->cfqg->blkg, rq_data_dir(rq),
rq_is_sync(rq)); rq_is_sync(rq));
cfq_add_rq_rb(rq); cfq_add_rq_rb(rq);
blkiocg_update_request_add_stats( blkiocg_update_io_add_stats(
&cfqq->cfqg->blkg, &cfqq->cfqd->serving_group->blkg, &cfqq->cfqg->blkg, &cfqq->cfqd->serving_group->blkg,
rq_data_dir(rq), rq_is_sync(rq)); rq_data_dir(rq), rq_is_sync(rq));
} }
@ -1442,7 +1442,7 @@ static void cfq_remove_request(struct request *rq)
cfq_del_rq_rb(rq); cfq_del_rq_rb(rq);
cfqq->cfqd->rq_queued--; cfqq->cfqd->rq_queued--;
blkiocg_update_request_remove_stats(&cfqq->cfqg->blkg, rq_data_dir(rq), blkiocg_update_io_remove_stats(&cfqq->cfqg->blkg, rq_data_dir(rq),
rq_is_sync(rq)); rq_is_sync(rq));
if (rq_is_meta(rq)) { if (rq_is_meta(rq)) {
WARN_ON(!cfqq->meta_pending); WARN_ON(!cfqq->meta_pending);
@ -1541,7 +1541,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
if (cfqq) { if (cfqq) {
cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d", cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
cfqd->serving_prio, cfqd->serving_type); cfqd->serving_prio, cfqd->serving_type);
blkiocg_update_set_active_queue_stats(&cfqq->cfqg->blkg); blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
cfqq->slice_start = 0; cfqq->slice_start = 0;
cfqq->dispatch_start = jiffies; cfqq->dispatch_start = jiffies;
cfqq->allocated_slice = 0; cfqq->allocated_slice = 0;
@ -2395,11 +2395,6 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
} }
cfq_log_cfqq(cfqd, cfqq, "dispatched a request"); cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
/*
* This is needed since we don't exactly match the mod_timer() and
* del_timer() calls in CFQ.
*/
blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
return 1; return 1;
} }
@ -3208,8 +3203,11 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfq_del_timer(cfqd, cfqq); cfq_del_timer(cfqd, cfqq);
cfq_clear_cfqq_wait_request(cfqq); cfq_clear_cfqq_wait_request(cfqq);
__blk_run_queue(cfqd->queue); __blk_run_queue(cfqd->queue);
} else } else {
blkiocg_update_idle_time_stats(
&cfqq->cfqg->blkg);
cfq_mark_cfqq_must_dispatch(cfqq); cfq_mark_cfqq_must_dispatch(cfqq);
}
} }
} else if (cfq_should_preempt(cfqd, cfqq, rq)) { } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
/* /*
@ -3235,7 +3233,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
list_add_tail(&rq->queuelist, &cfqq->fifo); list_add_tail(&rq->queuelist, &cfqq->fifo);
cfq_add_rq_rb(rq); cfq_add_rq_rb(rq);
blkiocg_update_request_add_stats(&cfqq->cfqg->blkg, blkiocg_update_io_add_stats(&cfqq->cfqg->blkg,
&cfqd->serving_group->blkg, rq_data_dir(rq), &cfqd->serving_group->blkg, rq_data_dir(rq),
rq_is_sync(rq)); rq_is_sync(rq));
cfq_rq_enqueued(cfqd, cfqq, rq); cfq_rq_enqueued(cfqd, cfqq, rq);