author	Pavankumar Kondeti <pkondeti@codeaurora.org>	2017-06-28 12:00:31 +0530
committer	Pavankumar Kondeti <pkondeti@codeaurora.org>	2017-08-07 16:08:23 +0530
commit	a66b3eb5aff1536bc32e5e5eb60393610a21dd06 (patch)
tree	863007cd12e7f421e6854355d0ffd8b244deadf5
parent	ca652b3d74c674ee4847ce56199889fd247106e6 (diff)
softirq: defer softirq processing to ksoftirqd if CPU is busy with RT
Defer softirq processing to ksoftirqd if an RT task is running or queued on the current CPU. This complements the RT task placement algorithm, which tries to find a CPU that is not currently busy with softirqs. Only the NET_TX, NET_RX, BLOCK and TASKLET softirqs are deferred for now, as they can potentially run for a long time.

Change-Id: Id7665244af6bbd5a96d9e591cf26154e9eaa860c
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
-rw-r--r--	include/linux/sched.h	5
-rw-r--r--	kernel/sched/cpupri.c	11
-rw-r--r--	kernel/softirq.c	5
3 files changed, 20 insertions(+), 1 deletion(-)
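The softirq.c hunks below rely on a LONG_SOFTIRQ_MASK that is defined elsewhere in this tree, not in this patch. A plausible definition, consistent with the softirqs named in the commit message, might look roughly like this (a sketch only; the real definition may include additional entries):

	/*
	 * Hypothetical sketch of LONG_SOFTIRQ_MASK: the softirqs the commit
	 * message calls out as potentially long-running. Not part of this diff.
	 */
	#define LONG_SOFTIRQ_MASK	((1 << NET_TX_SOFTIRQ)  | \
					 (1 << NET_RX_SOFTIRQ)  | \
					 (1 << BLOCK_SOFTIRQ)   | \
					 (1 << TASKLET_SOFTIRQ))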
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 57042d91ae9c..baf107821d9a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2481,6 +2481,7 @@ extern void do_set_cpus_allowed(struct task_struct *p,
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
+extern bool cpupri_check_rt(void);
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask)
@@ -2493,6 +2494,10 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
return -EINVAL;
return 0;
}
+static inline bool cpupri_check_rt(void)
+{
+ return false;
+}
#endif
struct sched_load {
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 1d00cf8c00fa..14225d5d8617 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -279,3 +279,14 @@ void cpupri_cleanup(struct cpupri *cp)
for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
free_cpumask_var(cp->pri_to_cpu[i].mask);
}
+
+/*
+ * cpupri_check_rt - check if the current CPU has an RT task running or queued.
+ * Should be called from an rcu-sched read-side section.
+ */
+bool cpupri_check_rt(void)
+{
+ int cpu = raw_smp_processor_id();
+
+ return cpu_rq(cpu)->rd->cpupri.cpu_to_pri[cpu] > CPUPRI_NORMAL;
+}
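For reference, the levels that the check above compares against come from kernel/sched/cpupri.h (not touched by this patch). In kernels of this vintage they are as shown below, so a per-CPU priority above CPUPRI_NORMAL means an RT task is running or queued on that CPU:

	/* From kernel/sched/cpupri.h, shown for reference only. */
	#define CPUPRI_INVALID	-1	/* CPU not yet tracked by cpupri */
	#define CPUPRI_IDLE	 0	/* CPU is idle */
	#define CPUPRI_NORMAL	 1	/* CPU runs only non-RT (CFS) tasks */
	/* values 2-101 map to RT priorities 0-99 */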
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 39ffd41594ce..9029227e5f57 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -234,6 +234,8 @@ static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif
+#define long_softirq_pending() (local_softirq_pending() & LONG_SOFTIRQ_MASK)
+#define defer_for_rt() (long_softirq_pending() && cpupri_check_rt())
asmlinkage __visible void __do_softirq(void)
{
unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
@@ -297,6 +299,7 @@ restart:
pending = local_softirq_pending();
if (pending) {
if (time_before(jiffies, end) && !need_resched() &&
+ !defer_for_rt() &&
--max_restart)
goto restart;
@@ -349,7 +352,7 @@ void irq_enter(void)
static inline void invoke_softirq(void)
{
- if (!force_irqthreads) {
+ if (!force_irqthreads && !defer_for_rt()) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
/*
* We can safely execute softirq on the current stack if
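For context (not part of this diff): when defer_for_rt() is true, invoke_softirq() skips the inline __do_softirq() call and falls through to waking the per-CPU ksoftirqd thread, the same fallback mainline uses when force_irqthreads is set. A simplified sketch of the resulting behaviour, assuming the upstream wakeup_softirqd() helper:

	/* Simplified sketch, not the literal patched function. */
	static inline void invoke_softirq(void)
	{
		if (!force_irqthreads && !defer_for_rt()) {
			/* No RT task to stay out of the way of: run softirqs inline. */
			__do_softirq();
		} else {
			/* Defer: ksoftirqd runs the pending softirqs at normal priority. */
			wakeup_softirqd();
		}
	}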