ANDROID: uid_cputime: add per-uid IO usage accounting
I/O usage is accounted in foreground and background buckets. For each uid, I/O usage is calculated in two steps: delta = current total of all uid tasks - previous total; current bucket += delta. The bucket is determined by the uid's current state. Userspace writes "<uid> <state>" to /proc/uid_procstat/set when a uid's state is updated. /proc/uid_io/stats shows I/O usage in this format: <uid> <foreground IO> <background IO>. Signed-off-by: Jin Qian <jinqian@google.com> Bug: 34198239 Change-Id: Ib8bebda53e7a56f45ea3eb0ec9a3153d44188102
This commit is contained in:
parent
35174db93b
commit
9b7e95b25f
1 changed file with 220 additions and 16 deletions
|
@ -30,7 +30,24 @@
|
|||
DECLARE_HASHTABLE(hash_table, UID_HASH_BITS);
|
||||
|
||||
/* Serializes the uid hash table and every uid_entry's state and io[] slots. */
static DEFINE_MUTEX(uid_lock);

/* /proc directory parents created once at init time. */
static struct proc_dir_entry *cpu_parent;	/* /proc/uid_cputime */
static struct proc_dir_entry *io_parent;	/* /proc/uid_io */
static struct proc_dir_entry *proc_parent;	/* /proc/uid_procstat */
||||
/*
 * Cumulative I/O counters for one uid, mirroring the per-task fields in
 * task->ioac that get folded into these totals.
 */
struct io_stats {
	u64 read_bytes;		/* bytes read from storage */
	u64 write_bytes;	/* bytes written, less cancelled writeback */
	u64 rchar;		/* bytes requested via read() and friends */
	u64 wchar;		/* bytes requested via write() and friends */
};
|
||||
|
||||
/* User-visible buckets that accumulate I/O deltas, indexed by uid state. */
#define UID_STATE_FOREGROUND 0
#define UID_STATE_BACKGROUND 1
#define UID_STATE_BUCKET_SIZE 2

/* Scratch slots used by update_io_stats_locked() to compute deltas. */
#define UID_STATE_TOTAL_CURR 2	/* sum over live tasks, this pass */
#define UID_STATE_TOTAL_LAST 3	/* sum recorded on the previous pass */
#define UID_STATE_SIZE 4	/* total number of slots in uid_entry->io[] */
|
||||
|
||||
struct uid_entry {
|
||||
uid_t uid;
|
||||
|
@ -38,6 +55,8 @@ struct uid_entry {
|
|||
u64 stime;
|
||||
u64 active_utime;
|
||||
u64 active_stime;
|
||||
int state;
|
||||
struct io_stats io[UID_STATE_SIZE];
|
||||
struct hlist_node hash;
|
||||
};
|
||||
|
||||
|
@ -70,7 +89,7 @@ static struct uid_entry *find_or_register_uid(uid_t uid)
|
|||
return uid_entry;
|
||||
}
|
||||
|
||||
static int uid_stat_show(struct seq_file *m, void *v)
|
||||
static int uid_cputime_show(struct seq_file *m, void *v)
|
||||
{
|
||||
struct uid_entry *uid_entry;
|
||||
struct task_struct *task, *temp;
|
||||
|
@ -116,13 +135,13 @@ static int uid_stat_show(struct seq_file *m, void *v)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int uid_stat_open(struct inode *inode, struct file *file)
|
||||
static int uid_cputime_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, uid_stat_show, PDE_DATA(inode));
|
||||
return single_open(file, uid_cputime_show, PDE_DATA(inode));
|
||||
}
|
||||
|
||||
static const struct file_operations uid_stat_fops = {
|
||||
.open = uid_stat_open,
|
||||
static const struct file_operations uid_cputime_fops = {
|
||||
.open = uid_cputime_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = single_release,
|
||||
|
@ -181,6 +200,162 @@ static const struct file_operations uid_remove_fops = {
|
|||
.write = uid_remove_write,
|
||||
};
|
||||
|
||||
static void add_uid_io_curr_stats(struct uid_entry *uid_entry,
|
||||
struct task_struct *task)
|
||||
{
|
||||
struct io_stats *io_curr = &uid_entry->io[UID_STATE_TOTAL_CURR];
|
||||
|
||||
io_curr->read_bytes += task->ioac.read_bytes;
|
||||
io_curr->write_bytes +=
|
||||
task->ioac.write_bytes - task->ioac.cancelled_write_bytes;
|
||||
io_curr->rchar += task->ioac.rchar;
|
||||
io_curr->wchar += task->ioac.wchar;
|
||||
}
|
||||
|
||||
static void clean_uid_io_last_stats(struct uid_entry *uid_entry,
|
||||
struct task_struct *task)
|
||||
{
|
||||
struct io_stats *io_last = &uid_entry->io[UID_STATE_TOTAL_LAST];
|
||||
|
||||
io_last->read_bytes -= task->ioac.read_bytes;
|
||||
io_last->write_bytes -=
|
||||
task->ioac.write_bytes - task->ioac.cancelled_write_bytes;
|
||||
io_last->rchar -= task->ioac.rchar;
|
||||
io_last->wchar -= task->ioac.wchar;
|
||||
}
|
||||
|
||||
/*
 * Refresh the foreground/background buckets for every known uid.
 *
 * Three ordered passes:
 *   1. zero each uid's TOTAL_CURR scratch slot;
 *   2. walk all live threads under tasklist_lock and sum their I/O
 *      counters into the owning uid's TOTAL_CURR;
 *   3. add the delta (TOTAL_CURR - TOTAL_LAST) into the bucket selected
 *      by uid_entry->state, then record TOTAL_CURR as the new TOTAL_LAST.
 *
 * Tasks that exited since the previous pass are compensated for by
 * clean_uid_io_last_stats() (called from the exit notifier), which
 * removes their contribution from TOTAL_LAST so the delta stays correct.
 *
 * Caller must hold uid_lock.
 */
static void update_io_stats_locked(void)
{
	struct uid_entry *uid_entry;
	struct task_struct *task, *temp;
	struct io_stats *io_bucket, *io_curr, *io_last;
	unsigned long bkt;

	BUG_ON(!mutex_is_locked(&uid_lock));

	/* Pass 1: reset the per-uid scratch accumulators. */
	hash_for_each(hash_table, bkt, uid_entry, hash)
		memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
			sizeof(struct io_stats));

	/* Pass 2: sum live tasks' counters per uid. */
	read_lock(&tasklist_lock);
	do_each_thread(temp, task) {
		/* NOTE(review): find_or_register_uid() may allocate while
		 * tasklist_lock is read-held — confirm it uses GFP_ATOMIC. */
		uid_entry = find_or_register_uid(from_kuid_munged(
			current_user_ns(), task_uid(task)));
		if (!uid_entry)
			continue;
		add_uid_io_curr_stats(uid_entry, task);
	} while_each_thread(temp, task);
	read_unlock(&tasklist_lock);

	/* Pass 3: fold the delta into the active bucket and roll LAST. */
	hash_for_each(hash_table, bkt, uid_entry, hash) {
		io_bucket = &uid_entry->io[uid_entry->state];
		io_curr = &uid_entry->io[UID_STATE_TOTAL_CURR];
		io_last = &uid_entry->io[UID_STATE_TOTAL_LAST];

		io_bucket->read_bytes +=
			io_curr->read_bytes - io_last->read_bytes;
		io_bucket->write_bytes +=
			io_curr->write_bytes - io_last->write_bytes;
		io_bucket->rchar += io_curr->rchar - io_last->rchar;
		io_bucket->wchar += io_curr->wchar - io_last->wchar;

		io_last->read_bytes = io_curr->read_bytes;
		io_last->write_bytes = io_curr->write_bytes;
		io_last->rchar = io_curr->rchar;
		io_last->wchar = io_curr->wchar;
	}
}
|
||||
|
||||
static int uid_io_show(struct seq_file *m, void *v)
|
||||
{
|
||||
struct uid_entry *uid_entry;
|
||||
unsigned long bkt;
|
||||
|
||||
mutex_lock(&uid_lock);
|
||||
|
||||
update_io_stats_locked();
|
||||
|
||||
hash_for_each(hash_table, bkt, uid_entry, hash) {
|
||||
seq_printf(m, "%d %llu %llu %llu %llu %llu %llu %llu %llu\n",
|
||||
uid_entry->uid,
|
||||
uid_entry->io[UID_STATE_FOREGROUND].rchar,
|
||||
uid_entry->io[UID_STATE_FOREGROUND].wchar,
|
||||
uid_entry->io[UID_STATE_FOREGROUND].read_bytes,
|
||||
uid_entry->io[UID_STATE_FOREGROUND].write_bytes,
|
||||
uid_entry->io[UID_STATE_BACKGROUND].rchar,
|
||||
uid_entry->io[UID_STATE_BACKGROUND].wchar,
|
||||
uid_entry->io[UID_STATE_BACKGROUND].read_bytes,
|
||||
uid_entry->io[UID_STATE_BACKGROUND].write_bytes);
|
||||
}
|
||||
|
||||
mutex_unlock(&uid_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int uid_io_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, uid_io_show, PDE_DATA(inode));
|
||||
}
|
||||
|
||||
/* Read-only seq_file interface for /proc/uid_io/stats. */
static const struct file_operations uid_io_fops = {
	.open = uid_io_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
|
||||
|
||||
/*
 * /proc/uid_procstat/set is write-only: open with a NULL show function
 * purely so single_release() can be reused on close.
 */
static int uid_procstat_open(struct inode *inode, struct file *file)
{
	return single_open(file, NULL, NULL);
}
|
||||
|
||||
static ssize_t uid_procstat_write(struct file *file,
|
||||
const char __user *buffer, size_t count, loff_t *ppos)
|
||||
{
|
||||
struct uid_entry *uid_entry;
|
||||
uid_t uid;
|
||||
int argc, state;
|
||||
char input[128];
|
||||
|
||||
if (count >= sizeof(input))
|
||||
return -EINVAL;
|
||||
|
||||
if (copy_from_user(input, buffer, count))
|
||||
return -EFAULT;
|
||||
|
||||
input[count] = '\0';
|
||||
|
||||
argc = sscanf(input, "%u %d", &uid, &state);
|
||||
if (argc != 2)
|
||||
return -EINVAL;
|
||||
|
||||
if (state != UID_STATE_BACKGROUND && state != UID_STATE_FOREGROUND)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&uid_lock);
|
||||
|
||||
uid_entry = find_or_register_uid(uid);
|
||||
if (!uid_entry || uid_entry->state == state) {
|
||||
mutex_unlock(&uid_lock);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
update_io_stats_locked();
|
||||
|
||||
uid_entry->state = state;
|
||||
|
||||
mutex_unlock(&uid_lock);
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
/* Write-only interface for /proc/uid_procstat/set; no .read on purpose. */
static const struct file_operations uid_procstat_fops = {
	.open = uid_procstat_open,
	.release = single_release,
	.write = uid_procstat_write,
};
|
||||
|
||||
static int process_notifier(struct notifier_block *self,
|
||||
unsigned long cmd, void *v)
|
||||
{
|
||||
|
@ -204,6 +379,9 @@ static int process_notifier(struct notifier_block *self,
|
|||
uid_entry->utime += utime;
|
||||
uid_entry->stime += stime;
|
||||
|
||||
update_io_stats_locked();
|
||||
clean_uid_io_last_stats(uid_entry, task);
|
||||
|
||||
exit:
|
||||
mutex_unlock(&uid_lock);
|
||||
return NOTIFY_OK;
|
||||
|
@ -213,25 +391,51 @@ static struct notifier_block process_notifier_block = {
|
|||
.notifier_call = process_notifier,
|
||||
};
|
||||
|
||||
static int __init proc_uid_cputime_init(void)
|
||||
static int __init proc_uid_sys_stats_init(void)
|
||||
{
|
||||
hash_init(hash_table);
|
||||
|
||||
parent = proc_mkdir("uid_cputime", NULL);
|
||||
if (!parent) {
|
||||
pr_err("%s: failed to create proc entry\n", __func__);
|
||||
return -ENOMEM;
|
||||
cpu_parent = proc_mkdir("uid_cputime", NULL);
|
||||
if (!cpu_parent) {
|
||||
pr_err("%s: failed to create uid_cputime proc entry\n",
|
||||
__func__);
|
||||
goto err;
|
||||
}
|
||||
|
||||
proc_create_data("remove_uid_range", S_IWUGO, parent, &uid_remove_fops,
|
||||
NULL);
|
||||
proc_create_data("remove_uid_range", 0222, cpu_parent,
|
||||
&uid_remove_fops, NULL);
|
||||
proc_create_data("show_uid_stat", 0444, cpu_parent,
|
||||
&uid_cputime_fops, NULL);
|
||||
|
||||
proc_create_data("show_uid_stat", S_IRUGO, parent, &uid_stat_fops,
|
||||
NULL);
|
||||
io_parent = proc_mkdir("uid_io", NULL);
|
||||
if (!io_parent) {
|
||||
pr_err("%s: failed to create uid_io proc entry\n",
|
||||
__func__);
|
||||
goto err;
|
||||
}
|
||||
|
||||
proc_create_data("stats", 0444, io_parent,
|
||||
&uid_io_fops, NULL);
|
||||
|
||||
proc_parent = proc_mkdir("uid_procstat", NULL);
|
||||
if (!proc_parent) {
|
||||
pr_err("%s: failed to create uid_procstat proc entry\n",
|
||||
__func__);
|
||||
goto err;
|
||||
}
|
||||
|
||||
proc_create_data("set", 0222, proc_parent,
|
||||
&uid_procstat_fops, NULL);
|
||||
|
||||
profile_event_register(PROFILE_TASK_EXIT, &process_notifier_block);
|
||||
|
||||
return 0;
|
||||
|
||||
err:
|
||||
remove_proc_subtree("uid_cputime", NULL);
|
||||
remove_proc_subtree("uid_io", NULL);
|
||||
remove_proc_subtree("uid_procstat", NULL);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
early_initcall(proc_uid_cputime_init);
|
||||
early_initcall(proc_uid_sys_stats_init);
|
||||
|
|
Loading…
Add table
Reference in a new issue