ANDROID: cpufreq: times: track per-uid time in state

Add /proc/uid_time_in_state showing per uid/frequency/cluster
times. Allow uid removal through /proc/uid_cputime/remove_uid_range.

Signed-off-by: Connor O'Brien <connoro@google.com>
Bug: 72339335
Test: Read /proc/uid_time_in_state
Change-Id: I20ba3546a27c25b7e7991e2a86986e158aafa58c
Connor O'Brien authored on 2018-02-06 13:30:27 -08:00; committed by Amit Pundir
parent 07c734ef5c
commit 24236cbad2
3 changed files with 216 additions and 0 deletions
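
For context, the new file prints a header line listing each cluster's valid frequencies (in kHz, per the cpufreq frequency tables), then one line per uid with cumulative times reported through nsec_to_clock_t() (USER_HZ ticks, typically 10 ms each). A sketch of a read, with hypothetical frequencies, uids and values:

  $ cat /proc/uid_time_in_state
  uid: 300000 576000 1363200 1785600
  0: 3150 482 1065 7590
  1000: 241 96 0 14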

drivers/cpufreq/cpufreq_times.c

@@ -15,14 +15,30 @@
 
 #include <linux/cpufreq.h>
 #include <linux/cpufreq_times.h>
+#include <linux/hashtable.h>
+#include <linux/init.h>
 #include <linux/jiffies.h>
+#include <linux/proc_fs.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/threads.h>
 
+#define UID_HASH_BITS 10
+
+static DECLARE_HASHTABLE(uid_hash_table, UID_HASH_BITS);
+
 static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */
+static DEFINE_SPINLOCK(uid_lock); /* uid_hash_table */
+
+struct uid_entry {
+	uid_t uid;
+	unsigned int max_state;
+	struct hlist_node hash;
+	struct rcu_head rcu;
+	u64 time_in_state[0];
+};
 
 /**
  * struct cpu_freqs - per-cpu frequency information
@@ -42,6 +58,137 @@ static struct cpu_freqs *all_freqs[NR_CPUS];
 
 static unsigned int next_offset;
 
+/* Caller must hold uid lock */
+static struct uid_entry *find_uid_entry_locked(uid_t uid)
+{
+	struct uid_entry *uid_entry;
+
+	hash_for_each_possible(uid_hash_table, uid_entry, hash, uid) {
+		if (uid_entry->uid == uid)
+			return uid_entry;
+	}
+	return NULL;
+}
+
+/* Caller must hold uid lock */
+static struct uid_entry *find_or_register_uid_locked(uid_t uid)
+{
+	struct uid_entry *uid_entry, *temp;
+	unsigned int max_state = READ_ONCE(next_offset);
+	size_t alloc_size = sizeof(*uid_entry) + max_state *
+		sizeof(uid_entry->time_in_state[0]);
+
+	uid_entry = find_uid_entry_locked(uid);
+	if (uid_entry) {
+		if (uid_entry->max_state == max_state)
+			return uid_entry;
+		/* uid_entry->time_in_state is too small to track all freqs, so
+		 * expand it.
+		 */
+		temp = __krealloc(uid_entry, alloc_size, GFP_ATOMIC);
+		if (!temp)
+			return uid_entry;
+		temp->max_state = max_state;
+		memset(temp->time_in_state + uid_entry->max_state, 0,
+		       (max_state - uid_entry->max_state) *
+		       sizeof(uid_entry->time_in_state[0]));
+		if (temp != uid_entry) {
+			hlist_replace_rcu(&uid_entry->hash, &temp->hash);
+			kfree_rcu(uid_entry, rcu);
+		}
+		return temp;
+	}
+
+	uid_entry = kzalloc(alloc_size, GFP_ATOMIC);
+	if (!uid_entry)
+		return NULL;
+
+	uid_entry->uid = uid;
+	uid_entry->max_state = max_state;
+
+	hash_add_rcu(uid_hash_table, &uid_entry->hash, uid);
+
+	return uid_entry;
+}
+
+static bool freq_index_invalid(unsigned int index)
+{
+	unsigned int cpu;
+	struct cpu_freqs *freqs;
+
+	for_each_possible_cpu(cpu) {
+		freqs = all_freqs[cpu];
+		if (!freqs || index < freqs->offset ||
+		    freqs->offset + freqs->max_state <= index)
+			continue;
+		return freqs->freq_table[index - freqs->offset] ==
+			CPUFREQ_ENTRY_INVALID;
+	}
+	return true;
+}
+
+static void *uid_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	if (*pos >= HASH_SIZE(uid_hash_table))
+		return NULL;
+
+	return &uid_hash_table[*pos];
+}
+
+static void *uid_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	(*pos)++;
+
+	if (*pos >= HASH_SIZE(uid_hash_table))
+		return NULL;
+
+	return &uid_hash_table[*pos];
+}
+
+static void uid_seq_stop(struct seq_file *seq, void *v) { }
+
+static int uid_time_in_state_seq_show(struct seq_file *m, void *v)
+{
+	struct uid_entry *uid_entry;
+	struct cpu_freqs *freqs, *last_freqs = NULL;
+	int i, cpu;
+
+	if (v == uid_hash_table) {
+		seq_puts(m, "uid:");
+		for_each_possible_cpu(cpu) {
+			freqs = all_freqs[cpu];
+			if (!freqs || freqs == last_freqs)
+				continue;
+			last_freqs = freqs;
+			for (i = 0; i < freqs->max_state; i++) {
+				if (freqs->freq_table[i] ==
+				    CPUFREQ_ENTRY_INVALID)
+					continue;
+				seq_printf(m, " %d", freqs->freq_table[i]);
+			}
+		}
+		seq_putc(m, '\n');
+	}
+
+	rcu_read_lock();
+
+	hlist_for_each_entry_rcu(uid_entry, (struct hlist_head *)v, hash) {
+		if (uid_entry->max_state)
+			seq_printf(m, "%d:", uid_entry->uid);
+		for (i = 0; i < uid_entry->max_state; ++i) {
+			if (freq_index_invalid(i))
+				continue;
+			seq_printf(m, " %lu", (unsigned long)nsec_to_clock_t(
+					uid_entry->time_in_state[i]));
+		}
+		if (uid_entry->max_state)
+			seq_putc(m, '\n');
+	}
+
+	rcu_read_unlock();
+	return 0;
+}
+
 void cpufreq_task_times_init(struct task_struct *p)
 {
 	void *temp;
@@ -87,6 +234,9 @@ void cpufreq_task_times_exit(struct task_struct *p)
 	unsigned long flags;
 	void *temp;
 
+	if (!p->time_in_state)
+		return;
+
 	spin_lock_irqsave(&task_time_in_state_lock, flags);
 	temp = p->time_in_state;
 	p->time_in_state = NULL;
@@ -130,7 +280,9 @@ void cpufreq_acct_update_power(struct task_struct *p, u64 cputime)
 {
 	unsigned long flags;
 	unsigned int state;
+	struct uid_entry *uid_entry;
 	struct cpu_freqs *freqs = all_freqs[task_cpu(p)];
+	uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
 
 	if (!freqs || p->flags & PF_EXITING)
 		return;
@@ -142,6 +294,12 @@ void cpufreq_acct_update_power(struct task_struct *p, u64 cputime)
 	    p->time_in_state)
 		p->time_in_state[state] += cputime;
 	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+
+	spin_lock_irqsave(&uid_lock, flags);
+	uid_entry = find_or_register_uid_locked(uid);
+	if (uid_entry && state < uid_entry->max_state)
+		uid_entry->time_in_state[state] += cputime;
+	spin_unlock_irqrestore(&uid_lock, flags);
 }
 
 void cpufreq_times_create_policy(struct cpufreq_policy *policy)
@@ -183,6 +341,27 @@ void cpufreq_times_create_policy(struct cpufreq_policy *policy)
 	all_freqs[cpu] = freqs;
 }
 
+void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end)
+{
+	struct uid_entry *uid_entry;
+	struct hlist_node *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&uid_lock, flags);
+
+	for (; uid_start <= uid_end; uid_start++) {
+		hash_for_each_possible_safe(uid_hash_table, uid_entry, tmp,
+			hash, uid_start) {
+			if (uid_start == uid_entry->uid) {
+				hash_del_rcu(&uid_entry->hash);
+				kfree_rcu(uid_entry, rcu);
+			}
+		}
+	}
+
+	spin_unlock_irqrestore(&uid_lock, flags);
+}
+
 void cpufreq_times_record_transition(struct cpufreq_freqs *freq)
 {
 	int index;
@@ -202,3 +381,32 @@ void cpufreq_times_record_transition(struct cpufreq_freqs *freq)
 	cpufreq_cpu_put(policy);
 }
 
+static const struct seq_operations uid_time_in_state_seq_ops = {
+	.start = uid_seq_start,
+	.next = uid_seq_next,
+	.stop = uid_seq_stop,
+	.show = uid_time_in_state_seq_show,
+};
+
+static int uid_time_in_state_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &uid_time_in_state_seq_ops);
+}
+
+static const struct file_operations uid_time_in_state_fops = {
+	.open		= uid_time_in_state_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static int __init cpufreq_times_init(void)
+{
+	proc_create_data("uid_time_in_state", 0444, NULL,
+			 &uid_time_in_state_fops, NULL);
+
+	return 0;
+}
+
+early_initcall(cpufreq_times_init);

drivers/misc/uid_sys_stats.c

@@ -14,6 +14,7 @@
  */
 
 #include <linux/atomic.h>
+#include <linux/cpufreq_times.h>
 #include <linux/err.h>
 #include <linux/hashtable.h>
 #include <linux/init.h>
@@ -419,6 +420,10 @@ static ssize_t uid_remove_write(struct file *file,
 	    kstrtol(end_uid, 10, &uid_end) != 0) {
 		return -EINVAL;
 	}
+
+	/* Also remove uids from /proc/uid_time_in_state */
+	cpufreq_task_times_remove_uids(uid_start, uid_end);
+
 	rt_mutex_lock(&uid_lock);
 
 	for (; uid_start <= uid_end; uid_start++) {

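Because uid_remove_write() now calls cpufreq_task_times_remove_uids() before purging its own hash table, a single write to remove_uid_range clears a uid range from both interfaces. Assuming the driver's existing start-end input format, a hypothetical invocation:

  $ echo 10000-10100 > /proc/uid_cputime/remove_uid_range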
include/linux/cpufreq_times.h

@@ -27,9 +27,12 @@ int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns,
 void cpufreq_acct_update_power(struct task_struct *p, u64 cputime);
 void cpufreq_times_create_policy(struct cpufreq_policy *policy);
 void cpufreq_times_record_transition(struct cpufreq_freqs *freq);
+void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end);
 #else
 static inline void cpufreq_times_create_policy(struct cpufreq_policy *policy) {}
 static inline void cpufreq_times_record_transition(
 		struct cpufreq_freqs *freq) {}
+static inline void cpufreq_task_times_remove_uids(uid_t uid_start,
+		uid_t uid_end) {}
 #endif /* CONFIG_CPU_FREQ_TIMES */
 #endif /* _LINUX_CPUFREQ_TIMES_H */