blkcg: simplify stat reset

blkiocg_reset_stats() implements stat reset for the blkio.reset_stats
cgroupfs file.  This feature is very unconventional and something
which shouldn't have been merged.  It's only useful when there's only
one user or tool looking at the stats.  As soon as multiple users
and/or tools are involved, it becomes useless as resetting disrupts
other usages.  There are very good reasons why all other stats expect
readers to read values at the start and end of a period and subtract
to determine delta over the period.
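
As an aside, that reader-side model can be sketched in user space
roughly as follows (illustrative only - the file path and single-value
format are assumptions; real blkio stat files are keyed per device, so
an actual tool would parse per-device lines and sum or track them):

	/* sample a counter at the start and end of a window and subtract */
	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	static uint64_t read_counter(const char *path)
	{
		FILE *f = fopen(path, "r");
		uint64_t v = 0;

		if (f) {
			if (fscanf(f, "%" SCNu64, &v) != 1)
				v = 0;
			fclose(f);
		}
		return v;
	}

	int main(void)
	{
		/* assumed path and format, purely for illustration */
		const char *path = "/sys/fs/cgroup/blkio/blkio.time";
		uint64_t start, end;

		start = read_counter(path);
		sleep(10);			/* measurement window */
		end = read_counter(path);

		printf("delta over window: %" PRIu64 "\n", end - start);
		return 0;
	}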

The implementation is rather complex - because some fields shouldn't
be cleared, it saves those fields, resets the whole stats struct, and
then restores the saved values.  Reset of the percpu stats is also
racy.  The comment points to 64bit store atomicity as the reason, but
even leaving that aside, the zeroing stores can simply race with other
CPUs doing read-modify-write updates and get clobbered.
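
For illustration, the clobbering can be reproduced with a user-space
sketch (pthreads stand in for CPUs; this is not kernel code and not
part of the patch):

	/*
	 * The updater does a non-atomic read-modify-write on a 64bit
	 * counter while the resetter stores 0.  Whenever the reset lands
	 * between the updater's load and store, the zero is immediately
	 * overwritten with a value based on the pre-reset load, i.e. the
	 * reset is lost.
	 */
	#include <pthread.h>
	#include <stdint.h>
	#include <stdio.h>

	static volatile uint64_t counter;

	static void *updater(void *arg)
	{
		for (long i = 0; i < 50000000; i++)
			counter = counter + 1;	/* load, add, store - not atomic */
		return NULL;
	}

	static void *resetter(void *arg)
	{
		for (long i = 0; i < 1000; i++)
			counter = 0;		/* plain store, may be clobbered */
		return NULL;
	}

	int main(void)
	{
		pthread_t u, r;

		pthread_create(&u, NULL, updater, NULL);
		pthread_create(&r, NULL, resetter, NULL);
		pthread_join(u, NULL);
		pthread_join(r, NULL);

		printf("final counter = %llu\n", (unsigned long long)counter);
		return 0;
	}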

Simplify reset by

* Clearing selectively instead of resetting and restoring.

* Grouping debug stat fields to be reset and using memset() over them
  (see the sketch after this list).

* Not caring about stats_lock.

* Using memset() to reset percpu stats.
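
The grouped-clear pattern from the second bullet, sketched in isolation
(the struct and field names below are made up for illustration; the
patch applies the same idea to struct blkio_group_stats via
BLKG_STATS_DEBUG_CLEAR_START/SIZE):

	#include <inttypes.h>
	#include <stddef.h>
	#include <stdio.h>
	#include <string.h>

	struct demo_stats {
		uint64_t queued;	/* must survive a reset */
		/* contiguous fields to be cleared start here */
		uint64_t dequeue;
		uint64_t idle_time;
		uint64_t empty_time;
		/* fields after this shouldn't be cleared on stat reset */
		uint64_t start_time;
		uint16_t flags;
	};

	#define DEMO_CLEAR_START	offsetof(struct demo_stats, dequeue)
	#define DEMO_CLEAR_SIZE		\
		(offsetof(struct demo_stats, start_time) - DEMO_CLEAR_START)

	static void demo_reset(struct demo_stats *s)
	{
		/* the kernel code does the same with void * arithmetic */
		memset((char *)s + DEMO_CLEAR_START, 0, DEMO_CLEAR_SIZE);
	}

	int main(void)
	{
		struct demo_stats s = {
			.queued = 3, .dequeue = 7, .idle_time = 9,
			.empty_time = 11, .start_time = 13, .flags = 1,
		};

		demo_reset(&s);
		printf("queued=%" PRIu64 " dequeue=%" PRIu64
		       " start_time=%" PRIu64 " flags=%u\n",
		       s.queued, s.dequeue, s.start_time, (unsigned)s.flags);
		return 0;
	}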

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 997a026c80 (parent 5fe224d2d5)
Author: Tejun Heo, 2012-03-08 10:53:58 -08:00; committed by Jens Axboe
2 changed files with 36 additions and 56 deletions

@@ -779,83 +779,53 @@ EXPORT_SYMBOL_GPL(__blkg_release);
 static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
 {
 	struct blkg_policy_data *pd = blkg->pd[plid];
-	struct blkio_group_stats_cpu *stats_cpu;
-	int i, j, k;
+	int cpu;
 
 	if (pd->stats_cpu == NULL)
 		return;
-	/*
-	 * Note: On 64 bit arch this should not be an issue. This has the
-	 * possibility of returning some inconsistent value on 32bit arch
-	 * as 64bit update on 32bit is non atomic. Taking care of this
-	 * corner case makes code very complicated, like sending IPIs to
-	 * cpus, taking care of stats of offline cpus etc.
-	 *
-	 * reset stats is anyway more of a debug feature and this sounds a
-	 * corner case. So I am not complicating the code yet until and
-	 * unless this becomes a real issue.
-	 */
-	for_each_possible_cpu(i) {
-		stats_cpu = per_cpu_ptr(pd->stats_cpu, i);
-		stats_cpu->sectors = 0;
-		for(j = 0; j < BLKIO_STAT_CPU_NR; j++)
-			for (k = 0; k < BLKIO_STAT_TOTAL; k++)
-				stats_cpu->stat_arr_cpu[j][k] = 0;
+
+	for_each_possible_cpu(cpu) {
+		struct blkio_group_stats_cpu *sc =
+			per_cpu_ptr(pd->stats_cpu, cpu);
+
+		sc->sectors = 0;
+		memset(sc->stat_arr_cpu, 0, sizeof(sc->stat_arr_cpu));
 	}
 }
 
 static int
 blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
 {
-	struct blkio_cgroup *blkcg;
+	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
 	struct blkio_group *blkg;
-	struct blkio_group_stats *stats;
 	struct hlist_node *n;
-	uint64_t queued[BLKIO_STAT_TOTAL];
 	int i;
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-	bool idling, waiting, empty;
-	unsigned long long now = sched_clock();
-#endif
 
-	blkcg = cgroup_to_blkio_cgroup(cgroup);
 	spin_lock(&blkio_list_lock);
 	spin_lock_irq(&blkcg->lock);
+
+	/*
+	 * Note that stat reset is racy - it doesn't synchronize against
+	 * stat updates.  This is a debug feature which shouldn't exist
+	 * anyway.  If you get hit by a race, retry.
+	 */
 	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
 		struct blkio_policy_type *pol;
 
 		list_for_each_entry(pol, &blkio_list, list) {
 			struct blkg_policy_data *pd = blkg->pd[pol->plid];
+			struct blkio_group_stats *stats = &pd->stats;
 
-			spin_lock(&blkg->stats_lock);
-			stats = &pd->stats;
+			/* queued stats shouldn't be cleared */
+			for (i = 0; i < ARRAY_SIZE(stats->stat_arr); i++)
+				if (i != BLKIO_STAT_QUEUED)
+					memset(stats->stat_arr[i], 0,
+					       sizeof(stats->stat_arr[i]));
+			stats->time = 0;
 #ifdef CONFIG_DEBUG_BLK_CGROUP
-			idling = blkio_blkg_idling(stats);
-			waiting = blkio_blkg_waiting(stats);
-			empty = blkio_blkg_empty(stats);
+			memset((void *)stats + BLKG_STATS_DEBUG_CLEAR_START, 0,
+			       BLKG_STATS_DEBUG_CLEAR_SIZE);
 #endif
-			for (i = 0; i < BLKIO_STAT_TOTAL; i++)
-				queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
-			memset(stats, 0, sizeof(struct blkio_group_stats));
-			for (i = 0; i < BLKIO_STAT_TOTAL; i++)
-				stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-			if (idling) {
-				blkio_mark_blkg_idling(stats);
-				stats->start_idle_time = now;
-			}
-			if (waiting) {
-				blkio_mark_blkg_waiting(stats);
-				stats->start_group_wait_time = now;
-			}
-			if (empty) {
-				blkio_mark_blkg_empty(stats);
-				stats->start_empty_time = now;
-			}
-#endif
-			spin_unlock(&blkg->stats_lock);
-
-			/* Reset Per cpu stats which don't take blkg->stats_lock */
 			blkio_reset_stats_cpu(blkg, pol->plid);
 		}
 	}

@@ -131,21 +131,31 @@ struct blkio_group_stats {
 
 	/* Total time spent waiting for it to be assigned a timeslice. */
 	uint64_t group_wait_time;
-	uint64_t start_group_wait_time;
 
 	/* Time spent idling for this blkio_group */
 	uint64_t idle_time;
-	uint64_t start_idle_time;
 	/*
 	 * Total time when we have requests queued and do not contain the
 	 * current active queue.
 	 */
 	uint64_t empty_time;
+
+	/* fields after this shouldn't be cleared on stat reset */
+	uint64_t start_group_wait_time;
+	uint64_t start_idle_time;
 	uint64_t start_empty_time;
 	uint16_t flags;
 #endif
 };
 
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+#define BLKG_STATS_DEBUG_CLEAR_START	\
+	offsetof(struct blkio_group_stats, unaccounted_time)
+#define BLKG_STATS_DEBUG_CLEAR_SIZE	\
+	(offsetof(struct blkio_group_stats, start_group_wait_time) -	\
+	 BLKG_STATS_DEBUG_CLEAR_START)
+#endif
+
 /* Per cpu blkio group stats */
 struct blkio_group_stats_cpu {
 	uint64_t sectors;