From dc34fdcc49bd64e18e967918b86a0f12bde5cd00 Mon Sep 17 00:00:00 2001
From: Pavankumar Kondeti
Date: Wed, 28 Jun 2017 12:00:31 +0530
Subject: [PATCH] softirq: defer softirq processing to ksoftirqd if CPU is busy with RT

Defer softirq processing to ksoftirqd if an RT task is running or
queued on the current CPU. This complements the RT task placement
algorithm, which tries to find a CPU that is not currently busy with
softirqs.

Currently only the NET_TX, NET_RX, BLOCK and TASKLET softirqs are
deferred, as they can potentially run for a long time.

Change-Id: Id7665244af6bbd5a96d9e591cf26154e9eaa860c
Signed-off-by: Pavankumar Kondeti
[satyap@codeaurora.org: trivial merge conflict resolution.]
Signed-off-by: Satya Durga Srinivasu Prabhala
---
 include/linux/sched.h |  5 +++++
 kernel/sched/cpupri.c | 11 +++++++++++
 kernel/softirq.c      |  5 ++++-
 3 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index a29a50521c77..fee79824c175 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1659,6 +1659,7 @@ extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_
 #ifdef CONFIG_SMP
 extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
 extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
+extern bool cpupri_check_rt(void);
 #else
 static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 {
@@ -1669,6 +1670,10 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpuma
 		return -EINVAL;
 	return 0;
 }
+static inline bool cpupri_check_rt(void)
+{
+	return false;
+}
 #endif
 
 #ifndef cpu_relax_yield
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 4857566d28b5..8ad7003de0c9 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -272,3 +272,14 @@ void cpupri_cleanup(struct cpupri *cp)
 	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
 		free_cpumask_var(cp->pri_to_cpu[i].mask);
 }
+
+/*
+ * cpupri_check_rt - check if the CPU has an RT task
+ * Should be called from an rcu-sched read-side section.
+ */
+bool cpupri_check_rt(void)
+{
+	int cpu = raw_smp_processor_id();
+
+	return cpu_rq(cpu)->rd->cpupri.cpu_to_pri[cpu] > CPUPRI_NORMAL;
+}
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 534164738ad9..6cea74fa24f3 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -253,6 +253,8 @@ static inline bool lockdep_softirq_start(void) { return false; }
 static inline void lockdep_softirq_end(bool in_hardirq) { }
 #endif
 
+#define long_softirq_pending()	(local_softirq_pending() & LONG_SOFTIRQ_MASK)
+#define defer_for_rt()		(long_softirq_pending() && cpupri_check_rt())
 asmlinkage __visible void __softirq_entry __do_softirq(void)
 {
 	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
@@ -316,6 +318,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
 	pending = local_softirq_pending();
 	if (pending) {
 		if (time_before(jiffies, end) && !need_resched() &&
+		    !defer_for_rt() &&
 		    --max_restart)
 			goto restart;
 
@@ -371,7 +374,7 @@ static inline void invoke_softirq(void)
 	if (ksoftirqd_running(local_softirq_pending()))
 		return;
 
-	if (!force_irqthreads) {
+	if (!force_irqthreads && !defer_for_rt()) {
 #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
 		/*
 		 * We can safely execute softirq on the current stack if
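
Note: the long_softirq_pending() macro added above relies on
LONG_SOFTIRQ_MASK, which is not defined in this patch; it comes from a
companion change. A plausible definition, matching the four softirqs
named in the commit message (a sketch only, not necessarily the exact
macro from the companion patch), would be:

	/*
	 * Softirqs that can run long enough to delay an RT task:
	 * network TX/RX, block completion and tasklets.
	 */
	#define LONG_SOFTIRQ_MASK ((1 << NET_TX_SOFTIRQ)  | \
				   (1 << NET_RX_SOFTIRQ)  | \
				   (1 << BLOCK_SOFTIRQ)   | \
				   (1 << TASKLET_SOFTIRQ))

With such a mask, defer_for_rt() hands work off to ksoftirqd only when
one of these long-running softirq classes is pending while an RT task
is running or queued on the current CPU; other softirqs (e.g. TIMER,
SCHED) still run inline to keep their latency low.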