path: root/kernel/rcu/tree.c
author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2015-08-06 16:50:39 -0700
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2015-10-07 16:01:12 -0700
commit     6587a23b6b9bdb47205ec96c703e5bf8a2d39701 (patch)
tree       c5ac61d861ea8b1b6e3c4708c1db2f1f551f9b1a /kernel/rcu/tree.c
parent     5b74c458906fc4a62f932ee8bb801d29baf15fec (diff)
rcu: Switch synchronize_sched_expedited() to IPI
This commit switches synchronize_sched_expedited() from stop_one_cpu_nowait() to smp_call_function_single(), thus moving from an IPI and a pair of context switches to an IPI and a single pass through the scheduler. Of course, if the scheduler actually does decide to switch to a different task, there will still be a pair of context switches, but there would likely have been a pair of context switches anyway, just a bit later.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
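For orientation, the two kernel APIs differ in both signature and execution context: stop_one_cpu_nowait() queues a cpu_stop_fn_t (returning int) to run in the target CPU's stopper kthread, which costs a context switch to that kthread and back, while smp_call_function_single() runs a smp_call_func_t (returning void) directly in IPI (hardirq) context on the target CPU. Below is a minimal sketch contrasting the two call patterns; the handler bodies and the example() wrapper are hypothetical, for illustration only, and are not part of this patch:

#include <linux/smp.h>
#include <linux/stop_machine.h>

/*
 * Hypothetical cpu_stop_fn_t: runs in the migration/N stopper kthread
 * on the target CPU, so the target must context-switch to the stopper
 * and back again.
 */
static int example_stop_handler(void *data)
{
	return 0;
}

/*
 * Hypothetical smp_call_func_t: runs straight from the IPI handler in
 * hardirq context on the target CPU, with no context switch at all.
 */
static void example_ipi_handler(void *data)
{
}

static void example(int cpu, struct cpu_stop_work *work)
{
	/* Old approach: hand the work to the target CPU's stopper. */
	stop_one_cpu_nowait(cpu, example_stop_handler, NULL, work);

	/*
	 * New approach: send an IPI; the final 0 means do not wait
	 * for the handler to finish on the target CPU.
	 */
	smp_call_function_single(cpu, example_ipi_handler, NULL, 0);
}

Note that smp_call_function_single() returns nonzero when the target CPU is offline, which is why the hunk below clears a CPU's bit in mask_ofl_ipi only when the call succeeds, leaving offline CPUs to be reported separately.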
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--  kernel/rcu/tree.c  32
1 file changed, 20 insertions(+), 12 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 3e2875b38eae..a42168911941 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -161,6 +161,8 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
+static void rcu_report_exp_rdp(struct rcu_state *rsp,
+ struct rcu_data *rdp, bool wake);

/* rcuc/rcub kthread realtime priority */
#ifdef CONFIG_RCU_KTHREAD_PRIO
@@ -250,6 +252,12 @@ void rcu_sched_qs(void)
__this_cpu_read(rcu_sched_data.gpnum),
TPS("cpuqs"));
__this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
+ if (__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)) {
+ __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
+ rcu_report_exp_rdp(&rcu_sched_state,
+ this_cpu_ptr(&rcu_sched_data),
+ true);
+ }
}
}
@@ -3555,8 +3563,8 @@ static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
* Report expedited quiescent state for specified rcu_data (CPU).
* Caller must hold the root rcu_node's exp_funnel_mutex.
*/
-static void __maybe_unused rcu_report_exp_rdp(struct rcu_state *rsp,
- struct rcu_data *rdp, bool wake)
+static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
+ bool wake)
{
rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake);
}
@@ -3637,14 +3645,10 @@ static struct rcu_node *exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
}

/* Invoked on each online non-idle CPU for expedited quiescent state. */
-static int synchronize_sched_expedited_cpu_stop(void *data)
+static void synchronize_sched_expedited_cpu_stop(void *data)
{
- struct rcu_data *rdp = data;
- struct rcu_state *rsp = rdp->rsp;
-
- /* Report the quiescent state. */
- rcu_report_exp_rdp(rsp, rdp, true);
- return 0;
+ __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
+ resched_cpu(smp_processor_id());
}

/*
@@ -3659,6 +3663,7 @@ static void sync_sched_exp_select_cpus(struct rcu_state *rsp)
unsigned long mask_ofl_test;
unsigned long mask_ofl_ipi;
struct rcu_data *rdp;
+ int ret;
struct rcu_node *rnp;

sync_exp_reset_tree(rsp);
@@ -3694,9 +3699,9 @@ static void sync_sched_exp_select_cpus(struct rcu_state *rsp)
if (!(mask_ofl_ipi & mask))
continue;
rdp = per_cpu_ptr(rsp->rda, cpu);
- stop_one_cpu_nowait(cpu, synchronize_sched_expedited_cpu_stop,
- rdp, &rdp->exp_stop_work);
- mask_ofl_ipi &= ~mask;
+ ret = smp_call_function_single(cpu, synchronize_sched_expedited_cpu_stop, NULL, 0);
+ if (!ret)
+ mask_ofl_ipi &= ~mask;
}
/* Report quiescent states for those that went offline. */
mask_ofl_test |= mask_ofl_ipi;
@@ -4201,6 +4206,9 @@ int rcu_cpu_notify(struct notifier_block *self,
rcu_cleanup_dying_cpu(rsp);
break;
case CPU_DYING_IDLE:
+ /* QS for any half-done expedited RCU-sched GP. */
+ rcu_sched_qs();
+
for_each_rcu_flavor(rsp) {
rcu_cleanup_dying_idle_cpu(cpu, rsp);
}