Merge "sched: Improve the scheduler"
commit b13542950e
5 changed files with 76 additions and 5 deletions

@@ -1654,6 +1654,58 @@ static const struct file_operations proc_pid_sched_group_id_operations = {
 	.release = single_release,
 };
 
+static int sched_low_latency_show(struct seq_file *m, void *v)
+{
+	struct inode *inode = m->private;
+	struct task_struct *p;
+	bool low_latency;
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+
+	low_latency = p->low_latency;
+	seq_printf(m, "%d\n", low_latency);
+
+	put_task_struct(p);
+
+	return 0;
+}
+
+static ssize_t
+sched_low_latency_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *offset)
+{
+	struct task_struct *p = get_proc_task(file_inode(file));
+	bool low_latency;
+	int err;
+
+	if (!p)
+		return -ESRCH;
+
+	err = kstrtobool_from_user(buf, count, &low_latency);
+	if (err)
+		goto out;
+
+	p->low_latency = low_latency;
+out:
+	put_task_struct(p);
+	return err < 0 ? err : count;
+}
+
+static int sched_low_latency_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, sched_low_latency_show, inode);
+}
+
+static const struct file_operations proc_pid_sched_low_latency_operations = {
+	.open = sched_low_latency_open,
+	.read = seq_read,
+	.write = sched_low_latency_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
 #endif /* CONFIG_SCHED_WALT */
 
 #ifdef CONFIG_SCHED_AUTOGROUP
@@ -3326,6 +3378,7 @@ static const struct pid_entry tgid_base_stuff[] = {
 	REG("sched_group_id", 00666, proc_pid_sched_group_id_operations),
 	REG("sched_boost", 0666, proc_task_boost_enabled_operations),
 	REG("sched_boost_period_ms", 0666, proc_task_boost_period_operations),
+	REG("sched_low_latency", 00666, proc_pid_sched_low_latency_operations),
 #endif
 #ifdef CONFIG_SCHED_DEBUG
 	REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
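
A minimal userspace sketch of how the new knob could be exercised (hypothetical example, not part of this change), assuming the entry registered above appears as /proc/<pid>/sched_low_latency, that the write handler parses boolean strings such as "0"/"1" via kstrtobool_from_user(), and that the read side prints the flag as a decimal:

/* Hypothetical usage example: toggle and read back the low-latency
 * flag of the calling process via the proc file added above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[8] = { 0 };
	int fd = open("/proc/self/sched_low_latency", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1", 1) < 0)		/* mark this task low latency */
		perror("write");
	if (lseek(fd, 0, SEEK_SET) == 0 &&
	    read(fd, buf, sizeof(buf) - 1) > 0)	/* expect "1\n" */
		printf("sched_low_latency = %s", buf);
	close(fd);
	return 0;
}
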
@@ -877,6 +877,7 @@ struct task_struct {
 	u64 cpu_cycles;
 	bool misfit;
 	u32 unfilter;
+	bool low_latency;
 #endif
 
 #ifdef CONFIG_CGROUP_SCHED
@@ -1243,6 +1243,7 @@ TRACE_EVENT(sched_task_util,
 		__field(int, start_cpu)
 		__field(u32, unfilter)
 		__field(unsigned long, cpus_allowed)
+		__field(bool, low_latency)
 	),
 
 	TP_fast_assign(
@@ -1263,19 +1264,21 @@ TRACE_EVENT(sched_task_util,
 		__entry->start_cpu = start_cpu;
 #ifdef CONFIG_SCHED_WALT
 		__entry->unfilter = p->unfilter;
+		__entry->low_latency = p->low_latency;
 #else
 		__entry->unfilter = 0;
+		__entry->low_latency = 0;
 #endif
 		__entry->cpus_allowed = cpumask_bits(&p->cpus_allowed)[0];
 	),
 
-	TP_printk("pid=%d comm=%s util=%lu prev_cpu=%d candidates=%#lx best_energy_cpu=%d sync=%d need_idle=%d fastpath=%d placement_boost=%d latency=%llu stune_boosted=%d is_rtg=%d rtg_skip_min=%d start_cpu=%d unfilter=%u affine=%#lx",
+	TP_printk("pid=%d comm=%s util=%lu prev_cpu=%d candidates=%#lx best_energy_cpu=%d sync=%d need_idle=%d fastpath=%d placement_boost=%d latency=%llu stune_boosted=%d is_rtg=%d rtg_skip_min=%d start_cpu=%d unfilter=%u affine=%#lx low_latency=%d",
 		__entry->pid, __entry->comm, __entry->util, __entry->prev_cpu,
 		__entry->candidates, __entry->best_energy_cpu, __entry->sync,
 		__entry->need_idle, __entry->fastpath, __entry->placement_boost,
 		__entry->latency, __entry->stune_boosted,
 		__entry->is_rtg, __entry->rtg_skip_min, __entry->start_cpu,
-		__entry->unfilter, __entry->cpus_allowed)
+		__entry->unfilter, __entry->cpus_allowed, __entry->low_latency)
 );
 
 /*
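
The extra field is only visible once the tracepoint is enabled. Assuming the event is exported under the sched system in tracefs (the path below is an assumption, and some setups mount tracing at /sys/kernel/debug/tracing instead), a hedged sketch of turning it on from C:

/* Hypothetical helper: enable the sched_task_util event so the
 * low_latency=%d field shows up in the trace output (e.g. trace_pipe).
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path =
		"/sys/kernel/tracing/events/sched/sched_task_util/enable";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");	/* path and permissions are setup dependent */
		return 1;
	}
	if (write(fd, "1", 1) < 0)
		perror("write");
	close(fd);
	return 0;
}
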
@@ -2857,6 +2857,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 	p->boost = 0;
 	p->boost_expires = 0;
 	p->boost_period = 0;
+#ifdef CONFIG_SCHED_WALT
+	p->low_latency = 0;
+#endif
 	INIT_LIST_HEAD(&p->se.group_node);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -4125,9 +4125,20 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 			thresh >>= 1;
 
 		vruntime -= thresh;
-		if (entity_is_task(se) && per_task_boost(task_of(se)) ==
-				TASK_BOOST_STRICT_MAX)
-			vruntime -= sysctl_sched_latency;
+		if (entity_is_task(se)) {
+			if (per_task_boost(task_of(se)) ==
+					TASK_BOOST_STRICT_MAX)
+				vruntime -= sysctl_sched_latency;
+#ifdef CONFIG_SCHED_WALT
+			else if (unlikely(task_of(se)->low_latency)) {
+				vruntime -= sysctl_sched_latency;
+				vruntime -= thresh;
+				se->vruntime = min_vruntime(vruntime,
+							se->vruntime);
+				return;
+			}
+#endif
+		}
 	}
 
 	/* ensure we never gain time by being placed backwards. */
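
To make the effect of the new branch concrete, an illustrative calculation (not kernel code), assuming the common default of sysctl_sched_latency = 6 ms with GENTLE_FAIR_SLEEPERS enabled so that thresh is halved: the baseline sleeper path subtracts thresh from the placement vruntime, while the low-latency path additionally subtracts sysctl_sched_latency and another thresh, keeps the smaller of the result and the task's previous vruntime, and returns before the "never gain time" clamp noted in the comment above.

/* Illustrative arithmetic only; the 6 ms default is an assumption. */
#include <stdio.h>

int main(void)
{
	unsigned long long sched_latency_ns = 6000000ULL;	/* assumed default */
	unsigned long long thresh = sched_latency_ns / 2;	/* GENTLE_FAIR_SLEEPERS */

	unsigned long long normal_credit = thresh;
	unsigned long long low_latency_credit = thresh + sched_latency_ns + thresh;

	printf("normal sleeper credit:      %llu ns\n", normal_credit);
	printf("low-latency sleeper credit: %llu ns\n", low_latency_credit);
	return 0;
}

Under these assumed defaults the wakeup credit grows from roughly 3 ms to roughly 12 ms of vruntime, and the early return means a low-latency task can keep a vruntime lower than the value it slept with.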