Merge "sched: Improve the scheduler"

This commit is contained in:
qctecmdr 2020-07-09 23:56:49 -07:00 committed by Gerrit - the friendly Code Review server
commit b13542950e
5 changed files with 76 additions and 5 deletions

View file

@ -1654,6 +1654,58 @@ static const struct file_operations proc_pid_sched_group_id_operations = {
.release = single_release,
};
/*
 * Show the task's low_latency flag for /proc/<pid>/sched_low_latency.
 *
 * Prints the flag as 0 or 1 followed by a newline.
 * Returns 0 on success, -ESRCH if the task no longer exists.
 */
static int sched_low_latency_show(struct seq_file *m, void *v)
{
	struct inode *inode = m->private;
	struct task_struct *task;
	bool latency_flag;

	task = get_proc_task(inode);
	if (!task)
		return -ESRCH;

	/* Copy the flag, then drop the task reference before printing. */
	latency_flag = task->low_latency;
	put_task_struct(task);

	seq_printf(m, "%d\n", latency_flag);
	return 0;
}
/*
 * Set the task's low_latency flag from a boolean string written to
 * /proc/<pid>/sched_low_latency.
 *
 * Accepts anything kstrtobool_from_user() understands ("0"/"1",
 * "y"/"n", "on"/"off", ...).  Returns @count on success, -ESRCH if the
 * task no longer exists, or the parse error for malformed input.
 */
static ssize_t
sched_low_latency_write(struct file *file, const char __user *buf,
		size_t count, loff_t *offset)
{
	struct task_struct *task = get_proc_task(file_inode(file));
	bool latency_flag;
	int ret;

	if (!task)
		return -ESRCH;

	ret = kstrtobool_from_user(buf, count, &latency_flag);
	if (!ret)
		task->low_latency = latency_flag;

	put_task_struct(task);
	return ret < 0 ? ret : count;
}
/*
 * seq_file open hook: bind sched_low_latency_show() to this inode's
 * task as a single-record view.
 */
static int sched_low_latency_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_low_latency_show, inode);
}
/*
 * File operations for /proc/<pid>/sched_low_latency: standard seq_file
 * single_open() plumbing for reads plus a write hook to toggle the flag.
 */
static const struct file_operations proc_pid_sched_low_latency_operations = {
	.open		= sched_low_latency_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.write		= sched_low_latency_write,
	.release	= single_release,
};
#endif /* CONFIG_SCHED_WALT */
#ifdef CONFIG_SCHED_AUTOGROUP
@ -3326,6 +3378,7 @@ static const struct pid_entry tgid_base_stuff[] = {
REG("sched_group_id", 00666, proc_pid_sched_group_id_operations),
REG("sched_boost", 0666, proc_task_boost_enabled_operations),
REG("sched_boost_period_ms", 0666, proc_task_boost_period_operations),
REG("sched_low_latency", 00666, proc_pid_sched_low_latency_operations),
#endif
#ifdef CONFIG_SCHED_DEBUG
REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),

View file

@ -877,6 +877,7 @@ struct task_struct {
u64 cpu_cycles;
bool misfit;
u32 unfilter;
bool low_latency;
#endif
#ifdef CONFIG_CGROUP_SCHED

View file

@ -1243,6 +1243,7 @@ TRACE_EVENT(sched_task_util,
__field(int, start_cpu)
__field(u32, unfilter)
__field(unsigned long, cpus_allowed)
__field(bool, low_latency)
),
TP_fast_assign(
@ -1263,19 +1264,21 @@ TRACE_EVENT(sched_task_util,
__entry->start_cpu = start_cpu;
#ifdef CONFIG_SCHED_WALT
__entry->unfilter = p->unfilter;
__entry->low_latency = p->low_latency;
#else
__entry->unfilter = 0;
__entry->low_latency = 0;
#endif
__entry->cpus_allowed = cpumask_bits(&p->cpus_allowed)[0];
),
TP_printk("pid=%d comm=%s util=%lu prev_cpu=%d candidates=%#lx best_energy_cpu=%d sync=%d need_idle=%d fastpath=%d placement_boost=%d latency=%llu stune_boosted=%d is_rtg=%d rtg_skip_min=%d start_cpu=%d unfilter=%u affine=%#lx",
TP_printk("pid=%d comm=%s util=%lu prev_cpu=%d candidates=%#lx best_energy_cpu=%d sync=%d need_idle=%d fastpath=%d placement_boost=%d latency=%llu stune_boosted=%d is_rtg=%d rtg_skip_min=%d start_cpu=%d unfilter=%u affine=%#lx low_latency=%d",
__entry->pid, __entry->comm, __entry->util, __entry->prev_cpu,
__entry->candidates, __entry->best_energy_cpu, __entry->sync,
__entry->need_idle, __entry->fastpath, __entry->placement_boost,
__entry->latency, __entry->stune_boosted,
__entry->is_rtg, __entry->rtg_skip_min, __entry->start_cpu,
__entry->unfilter, __entry->cpus_allowed)
__entry->unfilter, __entry->cpus_allowed, __entry->low_latency)
);
/*

View file

@ -2857,6 +2857,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
p->boost = 0;
p->boost_expires = 0;
p->boost_period = 0;
#ifdef CONFIG_SCHED_WALT
p->low_latency = 0;
#endif
INIT_LIST_HEAD(&p->se.group_node);
#ifdef CONFIG_FAIR_GROUP_SCHED

View file

@ -4125,9 +4125,20 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
thresh >>= 1;
vruntime -= thresh;
if (entity_is_task(se) && per_task_boost(task_of(se)) ==
if (entity_is_task(se)) {
if (per_task_boost(task_of(se)) ==
TASK_BOOST_STRICT_MAX)
vruntime -= sysctl_sched_latency;
#ifdef CONFIG_SCHED_WALT
else if (unlikely(task_of(se)->low_latency)) {
vruntime -= sysctl_sched_latency;
vruntime -= thresh;
se->vruntime = min_vruntime(vruntime,
se->vruntime);
return;
}
#endif
}
}
/* ensure we never gain time by being placed backwards. */