softirq: defer softirq processing to ksoftirqd if CPU is busy with RT

Defer softirq processing to ksoftirqd if an RT task is running
or queued on the current CPU. This complements the RT task placement
algorithm, which tries to find a CPU that is not currently busy with
softirqs.

Currently only the NET_TX, NET_RX, BLOCK and TASKLET softirqs are
deferred, as they can potentially run for a long time.
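The deferral keys off a mask of these long-running vectors. LONG_SOFTIRQ_MASK itself is defined outside this diff; a minimal sketch of what it is assumed to look like, covering only the vectors named above:

/* Hedged sketch, not part of this patch: LONG_SOFTIRQ_MASK is assumed to
 * collect the softirq vectors that can run for a long time.
 */
#define LONG_SOFTIRQ_MASK	((1 << NET_TX_SOFTIRQ) | \
				 (1 << NET_RX_SOFTIRQ) | \
				 (1 << BLOCK_SOFTIRQ)  | \
				 (1 << TASKLET_SOFTIRQ))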

Change-Id: Id7665244af6bbd5a96d9e591cf26154e9eaa860c
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
[satyap@codeaurora.org: trivial merge conflict resolution.]
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>

@@ -1659,6 +1659,7 @@ extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_
 #ifdef CONFIG_SMP
 extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
 extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
+extern bool cpupri_check_rt(void);
 #else
 static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 {
@@ -1669,6 +1670,10 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpuma
 		return -EINVAL;
 	return 0;
 }
+static inline bool cpupri_check_rt(void)
+{
+	return false;
+}
 #endif

 #ifndef cpu_relax_yield


@@ -272,3 +272,14 @@ void cpupri_cleanup(struct cpupri *cp)
 	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
 		free_cpumask_var(cp->pri_to_cpu[i].mask);
 }
+
+/*
+ * cpupri_check_rt - check if CPU has a RT task
+ * should be called from rcu-sched read section.
+ */
+bool cpupri_check_rt(void)
+{
+	int cpu = raw_smp_processor_id();
+
+	return cpu_rq(cpu)->rd->cpupri.cpu_to_pri[cpu] > CPUPRI_NORMAL;
+}
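cpupri_check_rt() reads the root domain's cpupri table for the local CPU, which is why the comment asks for an rcu-sched read section. The call sites added in softirq.c below run with preemption disabled, which already satisfies this. A hedged usage sketch (the wrapper name is hypothetical, not part of this patch) for a caller that does not already run with preemption or interrupts disabled:

/* Hypothetical wrapper, for illustration only: take an explicit
 * rcu-sched read section around the cpupri lookup.
 */
static bool this_cpu_busy_with_rt(void)
{
	bool busy;

	rcu_read_lock_sched();
	busy = cpupri_check_rt();
	rcu_read_unlock_sched();

	return busy;
}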


@@ -253,6 +253,8 @@ static inline bool lockdep_softirq_start(void) { return false; }
 static inline void lockdep_softirq_end(bool in_hardirq) { }
 #endif

+#define long_softirq_pending() (local_softirq_pending() & LONG_SOFTIRQ_MASK)
+#define defer_for_rt() (long_softirq_pending() && cpupri_check_rt())
 asmlinkage __visible void __softirq_entry __do_softirq(void)
 {
 	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
@@ -316,6 +318,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
 	pending = local_softirq_pending();
 	if (pending) {
 		if (time_before(jiffies, end) && !need_resched() &&
+		    !defer_for_rt() &&
 		    --max_restart)
 			goto restart;
@@ -371,7 +374,7 @@ static inline void invoke_softirq(void)
 	if (ksoftirqd_running(local_softirq_pending()))
 		return;

-	if (!force_irqthreads) {
+	if (!force_irqthreads && !defer_for_rt()) {
 #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
 		/*
 		 * We can safely execute softirq on the current stack if
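
For context, when defer_for_rt() is true invoke_softirq() no longer runs the pending softirqs inline: in the unchanged code just below this hunk it falls through to the branch that wakes the per-CPU ksoftirqd thread. A condensed, hedged sketch of that flow (irq-stack details omitted), based on the mainline structure of invoke_softirq():

/* Condensed sketch, not part of this diff: with the patch applied,
 * deferral ends up waking ksoftirqd instead of running softirqs on
 * the current stack.
 */
static inline void invoke_softirq(void)
{
	if (ksoftirqd_running(local_softirq_pending()))
		return;

	if (!force_irqthreads && !defer_for_rt()) {
		__do_softirq();		/* run pending softirqs inline */
	} else {
		wakeup_softirqd();	/* defer to the ksoftirqd thread */
	}
}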