ANDROID: uid_sys_stats: defer io stats calculation for dead tasks

Store the summed io stats of dead tasks in their uid_entry and defer
the uid io calculation until the next uid proc stat change or dumpsys.

Bug: 37754877
Change-Id: I970f010a4c841c5ca26d0efc7e027414c3c952e0
Signed-off-by: Jin Qian <jinqian@google.com>
Jin Qian authored on 2017-05-22 12:08:06 -07:00, committed by Amit Pundir
parent 825dfe6228
commit 87ff693534
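In short: instead of recomputing a uid's io totals on every task exit, the exiting task's counters are banked into a new UID_STATE_DEAD_TASKS slot, and the fold into the visible buckets happens lazily at the next procstat state change or dumpsys read. Below is a minimal userspace sketch of that bookkeeping; the types, slot names, and fold() helper are simplified stand-ins, and only the arithmetic mirrors the patch's compute_uid_io_bucket_stats().

#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for the driver's scratch slots. */
enum { CURR, LAST, DEAD_TASKS, NSLOTS };

struct io_stats { long long read_bytes, write_bytes; };

/* Mirrors compute_uid_io_bucket_stats(): charge the bucket with the
 * current totals plus stats banked from dead tasks, minus what was
 * already charged at the previous update. */
static void fold(struct io_stats *bucket, struct io_stats io[NSLOTS])
{
        bucket->read_bytes += io[CURR].read_bytes +
                io[DEAD_TASKS].read_bytes - io[LAST].read_bytes;
        bucket->write_bytes += io[CURR].write_bytes +
                io[DEAD_TASKS].write_bytes - io[LAST].write_bytes;
        io[LAST] = io[CURR];            /* snapshot for the next delta */
        memset(&io[DEAD_TASKS], 0, sizeof io[DEAD_TASKS]); /* consumed */
}

int main(void)
{
        struct io_stats bucket = {0}, io[NSLOTS] = {{0}};

        io[CURR] = (struct io_stats){ 100, 40 }; /* live tasks' totals */
        fold(&bucket, io);               /* first read: 100/40 charged */

        /* A task holding 30 read bytes dies: bank its counters, O(1),
         * no task-list walk. Its contribution also disappears from the
         * next CURR scan, which we emulate by subtracting here. */
        io[DEAD_TASKS].read_bytes += 30;
        io[CURR].read_bytes -= 30;

        fold(&bucket, io);               /* next read: nothing lost */
        printf("read=%lld write=%lld\n", bucket.read_bytes,
               bucket.write_bytes);      /* prints read=100 write=40 */
        return 0;
}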

@@ -50,7 +50,8 @@ struct io_stats {

 #define UID_STATE_TOTAL_CURR	2
 #define UID_STATE_TOTAL_LAST	3
-#define UID_STATE_SIZE		4
+#define UID_STATE_DEAD_TASKS	4
+#define UID_STATE_SIZE		5

 struct uid_entry {
 	uid_t uid;
@@ -212,35 +213,44 @@ static u64 compute_write_bytes(struct task_struct *task)
 	return task->ioac.write_bytes - task->ioac.cancelled_write_bytes;
 }

-static void add_uid_io_curr_stats(struct uid_entry *uid_entry,
-			struct task_struct *task)
+static void add_uid_io_stats(struct uid_entry *uid_entry,
+			struct task_struct *task, int slot)
 {
-	struct io_stats *io_curr = &uid_entry->io[UID_STATE_TOTAL_CURR];
+	struct io_stats *io_slot = &uid_entry->io[slot];

-	io_curr->read_bytes += task->ioac.read_bytes;
-	io_curr->write_bytes += compute_write_bytes(task);
-	io_curr->rchar += task->ioac.rchar;
-	io_curr->wchar += task->ioac.wchar;
-	io_curr->fsync += task->ioac.syscfs;
+	io_slot->read_bytes += task->ioac.read_bytes;
+	io_slot->write_bytes += compute_write_bytes(task);
+	io_slot->rchar += task->ioac.rchar;
+	io_slot->wchar += task->ioac.wchar;
+	io_slot->fsync += task->ioac.syscfs;
 }

-static void clean_uid_io_last_stats(struct uid_entry *uid_entry,
-			struct task_struct *task)
+static void compute_uid_io_bucket_stats(struct io_stats *io_bucket,
+					struct io_stats *io_curr,
+					struct io_stats *io_last,
+					struct io_stats *io_dead)
 {
-	struct io_stats *io_last = &uid_entry->io[UID_STATE_TOTAL_LAST];
+	io_bucket->read_bytes += io_curr->read_bytes + io_dead->read_bytes -
+		io_last->read_bytes;
+	io_bucket->write_bytes += io_curr->write_bytes + io_dead->write_bytes -
+		io_last->write_bytes;
+	io_bucket->rchar += io_curr->rchar + io_dead->rchar - io_last->rchar;
+	io_bucket->wchar += io_curr->wchar + io_dead->wchar - io_last->wchar;
+	io_bucket->fsync += io_curr->fsync + io_dead->fsync - io_last->fsync;

-	io_last->read_bytes -= task->ioac.read_bytes;
-	io_last->write_bytes -= compute_write_bytes(task);
-	io_last->rchar -= task->ioac.rchar;
-	io_last->wchar -= task->ioac.wchar;
-	io_last->fsync -= task->ioac.syscfs;
+	io_last->read_bytes = io_curr->read_bytes;
+	io_last->write_bytes = io_curr->write_bytes;
+	io_last->rchar = io_curr->rchar;
+	io_last->wchar = io_curr->wchar;
+	io_last->fsync = io_curr->fsync;
+
+	memset(io_dead, 0, sizeof(struct io_stats));
 }

 static void update_io_stats_all_locked(void)
 {
 	struct uid_entry *uid_entry;
 	struct task_struct *task, *temp;
-	struct io_stats *io_bucket, *io_curr, *io_last;
 	struct user_namespace *user_ns = current_user_ns();
 	unsigned long bkt;
 	uid_t uid;
@@ -255,70 +265,38 @@ static void update_io_stats_all_locked(void)
 		uid_entry = find_or_register_uid(uid);
 		if (!uid_entry)
 			continue;
-		add_uid_io_curr_stats(uid_entry, task);
+		add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
 	} while_each_thread(temp, task);
 	rcu_read_unlock();

 	hash_for_each(hash_table, bkt, uid_entry, hash) {
-		io_bucket = &uid_entry->io[uid_entry->state];
-		io_curr = &uid_entry->io[UID_STATE_TOTAL_CURR];
-		io_last = &uid_entry->io[UID_STATE_TOTAL_LAST];
-
-		io_bucket->read_bytes +=
-			io_curr->read_bytes - io_last->read_bytes;
-		io_bucket->write_bytes +=
-			io_curr->write_bytes - io_last->write_bytes;
-		io_bucket->rchar += io_curr->rchar - io_last->rchar;
-		io_bucket->wchar += io_curr->wchar - io_last->wchar;
-		io_bucket->fsync += io_curr->fsync - io_last->fsync;
-
-		io_last->read_bytes = io_curr->read_bytes;
-		io_last->write_bytes = io_curr->write_bytes;
-		io_last->rchar = io_curr->rchar;
-		io_last->wchar = io_curr->wchar;
-		io_last->fsync = io_curr->fsync;
+		compute_uid_io_bucket_stats(&uid_entry->io[uid_entry->state],
+			&uid_entry->io[UID_STATE_TOTAL_CURR],
+			&uid_entry->io[UID_STATE_TOTAL_LAST],
+			&uid_entry->io[UID_STATE_DEAD_TASKS]);
 	}
 }

-static void update_io_stats_uid_locked(uid_t target_uid)
+static void update_io_stats_uid_locked(struct uid_entry *uid_entry)
 {
-	struct uid_entry *uid_entry;
 	struct task_struct *task, *temp;
-	struct io_stats *io_bucket, *io_curr, *io_last;
 	struct user_namespace *user_ns = current_user_ns();

-	uid_entry = find_or_register_uid(target_uid);
-	if (!uid_entry)
-		return;
-
 	memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
 		sizeof(struct io_stats));

 	rcu_read_lock();
 	do_each_thread(temp, task) {
-		if (from_kuid_munged(user_ns, task_uid(task)) != target_uid)
+		if (from_kuid_munged(user_ns, task_uid(task)) != uid_entry->uid)
 			continue;
-		add_uid_io_curr_stats(uid_entry, task);
+		add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
 	} while_each_thread(temp, task);
 	rcu_read_unlock();

-	io_bucket = &uid_entry->io[uid_entry->state];
-	io_curr = &uid_entry->io[UID_STATE_TOTAL_CURR];
-	io_last = &uid_entry->io[UID_STATE_TOTAL_LAST];
-
-	io_bucket->read_bytes +=
-		io_curr->read_bytes - io_last->read_bytes;
-	io_bucket->write_bytes +=
-		io_curr->write_bytes - io_last->write_bytes;
-	io_bucket->rchar += io_curr->rchar - io_last->rchar;
-	io_bucket->wchar += io_curr->wchar - io_last->wchar;
-	io_bucket->fsync += io_curr->fsync - io_last->fsync;
-
-	io_last->read_bytes = io_curr->read_bytes;
-	io_last->write_bytes = io_curr->write_bytes;
-	io_last->rchar = io_curr->rchar;
-	io_last->wchar = io_curr->wchar;
-	io_last->fsync = io_curr->fsync;
+	compute_uid_io_bucket_stats(&uid_entry->io[uid_entry->state],
+		&uid_entry->io[UID_STATE_TOTAL_CURR],
+		&uid_entry->io[UID_STATE_TOTAL_LAST],
+		&uid_entry->io[UID_STATE_DEAD_TASKS]);
 }

 static int uid_io_show(struct seq_file *m, void *v)
@@ -403,7 +381,7 @@ static ssize_t uid_procstat_write(struct file *file,
 		return count;
 	}

-	update_io_stats_uid_locked(uid);
+	update_io_stats_uid_locked(uid_entry);

 	uid_entry->state = state;
@@ -441,8 +419,7 @@ static int process_notifier(struct notifier_block *self,
 	uid_entry->utime += utime;
 	uid_entry->stime += stime;

-	update_io_stats_uid_locked(uid);
-	clean_uid_io_last_stats(uid_entry, task);
+	add_uid_io_stats(uid_entry, task, UID_STATE_DEAD_TASKS);

 exit:
 	rt_mutex_unlock(&uid_lock);
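A note on why the deferral is safe across per-state accounting: as the uid_procstat_write() hunk above shows, the pending deltas (now including the dead-task slot) are still folded into uid_entry->io[uid_entry->state] before the state flips, so io stays charged to the bucket that was active when it happened. Below is a runnable userspace model of that sequencing; the FOREGROUND/BACKGROUND slot names are assumptions standing in for the driver's bucket indices 0 and 1, and fold() repeats the patch's arithmetic.

#include <stdio.h>
#include <string.h>

/* Assumed bucket names (slots 0/1); CURR/LAST/DEAD_TASKS match the diff. */
enum { FOREGROUND, BACKGROUND, CURR, LAST, DEAD_TASKS, NSLOTS };

struct io_stats { long long rchar, wchar; };
struct uid_entry { int state; struct io_stats io[NSLOTS]; };

/* Same arithmetic as compute_uid_io_bucket_stats() in the patch. */
static void fold(struct uid_entry *e)
{
        struct io_stats *b = &e->io[e->state];

        b->rchar += e->io[CURR].rchar + e->io[DEAD_TASKS].rchar -
                e->io[LAST].rchar;
        b->wchar += e->io[CURR].wchar + e->io[DEAD_TASKS].wchar -
                e->io[LAST].wchar;
        e->io[LAST] = e->io[CURR];
        memset(&e->io[DEAD_TASKS], 0, sizeof e->io[DEAD_TASKS]);
}

/* Mirrors the uid_procstat_write() ordering: charge io done so far to
 * the old bucket, then switch buckets. */
static void set_state(struct uid_entry *e, int state)
{
        fold(e);
        e->state = state;
}

int main(void)
{
        struct uid_entry e = { .state = FOREGROUND };

        e.io[CURR].rchar = 500;          /* io while foreground */
        set_state(&e, BACKGROUND);
        e.io[CURR].rchar += 200;         /* more io while background */
        fold(&e);                        /* e.g. a dumpsys read */

        printf("fg=%lld bg=%lld\n", e.io[FOREGROUND].rchar,
               e.io[BACKGROUND].rchar);  /* prints fg=500 bg=200 */
        return 0;
}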