Merge "sched/tune: Fix improper accounting of tasks"

qctecmdr 2020-10-19 15:55:57 -07:00 committed by Gerrit - the friendly Code Review server
commit 33a5e16376
3 changed files with 79 additions and 89 deletions
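
Summary of the change as read from the diff below: a new per-task field, stune_idx, caches the index of the task's boost group (added to struct task_struct and initialized to 0 in init_task). schedtune_enqueue_task() and schedtune_dequeue_task() now account against p->stune_idx directly, dropping the task_schedtune() lookup under rcu_read_lock(). The runnable-task accounting previously done in schedtune_can_attach() moves into schedtune_attach(), which updates task->stune_idx under the task's rq lock and the per-CPU bg->lock before adjusting the per-group task counts, so enqueue and dequeue always see the same index. A toy model of this scheme is sketched after the diff.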

include/linux/sched.h

@@ -885,6 +885,9 @@ struct task_struct {
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
#endif
#ifdef CONFIG_SCHED_TUNE
int stune_idx;
#endif
struct sched_dl_entity dl;

init/init_task.c

@@ -93,6 +93,9 @@ struct task_struct init_task
#endif
#ifdef CONFIG_CGROUP_SCHED
.sched_task_group = &root_task_group,
#endif
#ifdef CONFIG_SCHED_TUNE
.stune_idx = 0,
#endif
.ptraced = LIST_HEAD_INIT(init_task.ptraced),
.ptrace_entry = LIST_HEAD_INIT(init_task.ptrace_entry),

kernel/sched/tune.c

@@ -420,7 +420,6 @@ void schedtune_enqueue_task(struct task_struct *p, int cpu)
{
struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu);
unsigned long irq_flags;
struct schedtune *st;
int idx;
if (unlikely(!schedtune_initialized))
@@ -432,90 +431,16 @@ void schedtune_enqueue_task(struct task_struct *p, int cpu)
* do_exit()::cgroup_exit() and task migration.
*/
raw_spin_lock_irqsave(&bg->lock, irq_flags);
rcu_read_lock();
st = task_schedtune(p);
idx = st->idx;
idx = p->stune_idx;
schedtune_tasks_update(p, cpu, idx, ENQUEUE_TASK);
rcu_read_unlock();
raw_spin_unlock_irqrestore(&bg->lock, irq_flags);
}
int schedtune_can_attach(struct cgroup_taskset *tset)
{
struct task_struct *task;
struct cgroup_subsys_state *css;
struct boost_groups *bg;
struct rq_flags rq_flags;
unsigned int cpu;
struct rq *rq;
int src_bg; /* Source boost group index */
int dst_bg; /* Destination boost group index */
int tasks;
u64 now;
if (unlikely(!schedtune_initialized))
return 0;
cgroup_taskset_for_each(task, css, tset) {
/*
* Lock the CPU's RQ the task is enqueued on to avoid race
* conditions with migration code while the task is being
* accounted
*/
rq = task_rq_lock(task, &rq_flags);
if (!task->on_rq) {
task_rq_unlock(rq, task, &rq_flags);
continue;
}
/*
* Boost group accounting is protected by a per-cpu lock and requires
* interrupts to be disabled to avoid race conditions on...
*/
cpu = cpu_of(rq);
bg = &per_cpu(cpu_boost_groups, cpu);
raw_spin_lock(&bg->lock);
dst_bg = css_st(css)->idx;
src_bg = task_schedtune(task)->idx;
/*
* Current task is not changing boostgroup, which can
* happen when the new hierarchy is in use.
*/
if (unlikely(dst_bg == src_bg)) {
raw_spin_unlock(&bg->lock);
task_rq_unlock(rq, task, &rq_flags);
continue;
}
/*
* This is the case of a RUNNABLE task which is switching its
* current boost group.
*/
/* Move task from src to dst boost group */
tasks = bg->group[src_bg].tasks - 1;
bg->group[src_bg].tasks = max(0, tasks);
bg->group[dst_bg].tasks += 1;
/* Update boost hold start for this group */
now = sched_clock_cpu(cpu);
bg->group[dst_bg].ts = now;
/* Force boost group re-evaluation at next boost check */
bg->boost_ts = now - SCHEDTUNE_BOOST_HOLD_NS;
raw_spin_unlock(&bg->lock);
task_rq_unlock(rq, task, &rq_flags);
}
return 0;
}
@@ -580,7 +505,6 @@ void schedtune_dequeue_task(struct task_struct *p, int cpu)
{
struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu);
unsigned long irq_flags;
struct schedtune *st;
int idx;
if (unlikely(!schedtune_initialized))
@@ -591,14 +515,11 @@ void schedtune_dequeue_task(struct task_struct *p, int cpu)
* interrupts to be disabled to avoid race conditions on...
*/
raw_spin_lock_irqsave(&bg->lock, irq_flags);
rcu_read_lock();
st = task_schedtune(p);
idx = st->idx;
idx = p->stune_idx;
schedtune_tasks_update(p, cpu, idx, DEQUEUE_TASK);
rcu_read_unlock();
raw_spin_unlock_irqrestore(&bg->lock, irq_flags);
}
@@ -678,11 +599,19 @@ boost_read(struct cgroup_subsys_state *css, struct cftype *cft)
return st->boost;
}
#ifdef CONFIG_SCHED_WALT
static void schedtune_attach(struct cgroup_taskset *tset)
{
struct task_struct *task;
struct cgroup_subsys_state *css;
struct boost_groups *bg;
struct rq_flags rq_flags;
unsigned int cpu;
struct rq *rq;
int src_idx; /* Source boost group index */
int dst_idx; /* Destination boost group index */
int tasks;
u64 now;
#ifdef CONFIG_SCHED_WALT
struct schedtune *st;
bool colocate;
@@ -693,13 +622,68 @@ static void schedtune_attach(struct cgroup_taskset *tset)
cgroup_taskset_for_each(task, css, tset)
sync_cgroup_colocation(task, colocate);
}
#else
static void schedtune_attach(struct cgroup_taskset *tset)
{
}
#endif
cgroup_taskset_for_each(task, css, tset) {
/*
* Lock the CPU's RQ the task is enqueued on to avoid race
* conditions with migration code while the task is being
* accounted
*/
rq = task_rq_lock(task, &rq_flags);
/*
* Boost group accounting is protected by a per-cpu lock and
* requires interrupts to be disabled to avoid race conditions
* on...
*/
cpu = cpu_of(rq);
bg = &per_cpu(cpu_boost_groups, cpu);
raw_spin_lock(&bg->lock);
dst_idx = task_schedtune(task)->idx;
src_idx = task->stune_idx;
/*
* Current task is not changing boostgroup, which can
* happen when the new hierarchy is in use.
*/
if (unlikely(dst_idx == src_idx)) {
raw_spin_unlock(&bg->lock);
task_rq_unlock(rq, task, &rq_flags);
continue;
}
task->stune_idx = dst_idx;
if (!task_on_rq_queued(task)) {
raw_spin_unlock(&bg->lock);
task_rq_unlock(rq, task, &rq_flags);
continue;
}
/*
* This is the case of a RUNNABLE task which is switching its
* current boost group.
*/
/* Move task from src to dst boost group */
tasks = bg->group[src_idx].tasks - 1;
bg->group[src_idx].tasks = max(0, tasks);
bg->group[dst_idx].tasks += 1;
/* Update boost hold start for this group */
now = sched_clock_cpu(cpu);
bg->group[dst_idx].ts = now;
/* Force boost group re-evaluation at next boost check */
bg->boost_ts = now - SCHEDTUNE_BOOST_HOLD_NS;
raw_spin_unlock(&bg->lock);
task_rq_unlock(rq, task, &rq_flags);
}
}
static int
boost_write(struct cgroup_subsys_state *css, struct cftype *cft,
s64 boost)
@@ -832,8 +816,8 @@ struct cgroup_subsys schedtune_cgrp_subsys = {
.css_alloc = schedtune_css_alloc,
.css_free = schedtune_css_free,
.attach = schedtune_attach,
.can_attach = schedtune_can_attach,
.cancel_attach = schedtune_cancel_attach,
.legacy_cftypes = files,
.early_init = 1,
};
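
For reference, below is a minimal, self-contained toy model of the accounting scheme the patch establishes. This is an illustrative sketch, not kernel code: the toy_* names are made up, and the rq and per-CPU bg->lock locking that the real functions hold is omitted. It shows the property the patch relies on: because a cgroup move updates the task's cached index before touching any counter, a dequeue always decrements the same group that the matching enqueue incremented, so the per-group tasks counts cannot drift, which appears to be the improper accounting the title refers to.

#include <stdio.h>

#define NR_GROUPS 4

/* Per-group runnable-task counter, like boost_groups->group[idx].tasks. */
struct toy_group { int tasks; };

/* Toy stand-in for task_struct: only the fields this sketch needs. */
struct toy_task {
	int stune_idx;	/* cached boost-group index, like p->stune_idx */
	int on_rq;	/* nonzero while the task is enqueued */
};

static struct toy_group groups[NR_GROUPS];

/* Enqueue/dequeue account against the cached index only. */
static void toy_enqueue(struct toy_task *t)
{
	groups[t->stune_idx].tasks++;
	t->on_rq = 1;
}

static void toy_dequeue(struct toy_task *t)
{
	if (groups[t->stune_idx].tasks > 0)
		groups[t->stune_idx].tasks--;
	t->on_rq = 0;
}

/*
 * Cgroup move: update the cached index unconditionally, then repair the
 * counters only if the task is currently queued, mirroring the order
 * used by schedtune_attach() in the diff above.
 */
static void toy_attach(struct toy_task *t, int dst_idx)
{
	int src_idx = t->stune_idx;

	if (src_idx == dst_idx)
		return;
	t->stune_idx = dst_idx;		/* later dequeue will use dst_idx */
	if (!t->on_rq)
		return;			/* sleeping task: no counters to move */
	if (groups[src_idx].tasks > 0)
		groups[src_idx].tasks--;
	groups[dst_idx].tasks++;
}

int main(void)
{
	struct toy_task t = { .stune_idx = 0, .on_rq = 0 };

	toy_enqueue(&t);	/* group 0 now has 1 task */
	toy_attach(&t, 2);	/* count moves: group 0 -> 0, group 2 -> 1 */
	toy_dequeue(&t);	/* group 2 back to 0; never goes negative */
	printf("g0=%d g2=%d\n", groups[0].tasks, groups[2].tasks);
	return 0;
}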