author     Vikram Mulukutla <markivx@codeaurora.org>   2016-11-08 15:21:41 -0800
committer  Vikram Mulukutla <markivx@codeaurora.org>   2016-11-09 15:57:24 -0800
commit     4142e30898e344a7b86782821ca200ca7d97ff76 (patch)
tree       a1f03bf36b8df714c633a437e92c46fa68662a3a /kernel
parent     85d7e134cc5d95dfd3a1a5ee5a1d1435633288cd (diff)
timer: Don't wait for running timers when migrating during isolation
A CPU that is isolated needs to have its timers migrated off to another CPU. If a timer is running while this migration takes place, acquiring the timer base lock after marking the CPU as isolated ensures that:

1) No more timers can be queued on the isolated CPU, and
2) A running timer will finish execution on the to-be-isolated CPU, as will any just-expired timers, since they are all taken off the CPU's tvec1 in one go while the base lock is held.

There is therefore no apparent reason to wait for the expired timers to finish execution; isolation can proceed to migrate the non-expired timers even while the expired ones run concurrently.

While we're here, also add a delay to the wait-loop inside migrate_hrtimer_list() to allow for store-exclusive fairness when run_hrtimer is attempting to grab the hrtimer base lock.

Change-Id: Ib697476c93c60e3d213aaa8fff0a2bcc2985bfce
Signed-off-by: Vikram Mulukutla <markivx@codeaurora.org>
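[Editor's illustration] To make point 1) concrete, here is a minimal sketch of the enqueue side that the argument relies on. This is illustrative pseudo-kernel code, not part of this patch: cpu_isolated() and the exact base-lookup and add helpers are assumptions standing in for whatever the isolation series actually uses.

/*
 * Illustrative sketch only -- not the actual kernel code.
 * The key property: a timer is only ever queued while holding
 * base->lock. Once __migrate_timers() holds old_base->lock with the
 * CPU already marked isolated, no new timer can land on old_base.
 */
static void add_timer_on_sketch(struct timer_list *timer, int cpu)
{
        struct tvec_base *base;
        unsigned long flags;

        for (;;) {
                base = per_cpu_ptr(&tvec_bases, cpu);   /* assumed lookup */
                spin_lock_irqsave(&base->lock, flags);
                /*
                 * Check under the lock. If the CPU is not marked
                 * isolated here, migration has not yet taken this
                 * lock, so a timer queued now will still be picked
                 * up by __migrate_timers() if isolation follows.
                 */
                if (!cpu_isolated(cpu))                 /* assumed helper */
                        break;
                spin_unlock_irqrestore(&base->lock, flags);
                cpu = cpumask_any(cpu_active_mask);     /* pick another CPU */
        }
        internal_add_timer(base, timer);
        spin_unlock_irqrestore(&base->lock, flags);
}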
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/time/hrtimer.c |  7
-rw-r--r--   kernel/time/timer.c   | 24
2 files changed, 17 insertions(+), 14 deletions(-)
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 18d2fe271cf7..c3914e8f87b0 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -49,6 +49,7 @@
 #include <linux/sched/deadline.h>
 #include <linux/timer.h>
 #include <linux/freezer.h>
+#include <linux/delay.h>
 
 #include <asm/uaccess.h>
@@ -1648,6 +1649,12 @@ static void migrate_hrtimer_list(struct hrtimer_cpu_base *old_base,
                         raw_spin_unlock(&old_base->lock);
                         raw_spin_unlock(&new_base->lock);
                         cpu_relax();
+                        /*
+                         * cpu_relax may just be a barrier. Grant the
+                         * run_hrtimer_list code some time to obtain the
+                         * spinlock.
+                         */
+                        udelay(2);
                         raw_spin_lock(&new_base->lock);
                         raw_spin_lock_nested(&old_base->lock,
                                              SINGLE_DEPTH_NESTING);
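[Editor's illustration] The new comment notes that cpu_relax() may be no more than a compiler barrier. On CPUs whose spinlocks are built from load-linked/store-conditional ("store-exclusive") sequences, a core that releases a lock and immediately re-takes it tends to keep winning exclusive ownership of the cache line, starving another core spinning on the same lock. The userspace program below is only an analogy for that effect (POSIX spinlocks, not kernel code); toggling the commented-out usleep() plays the role of the udelay(2) added here.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_spinlock_t lock;
static volatile int done;
static long acquisitions;

/* Models the migration path: drop and immediately re-take the lock.
 * Without a backoff, the re-take can win the cache line before the
 * spinning thread observes the lock as free. */
static void *hog(void *arg)
{
        (void)arg;
        while (!done) {
                pthread_spin_lock(&lock);
                pthread_spin_unlock(&lock);
                /* usleep(2); */        /* the analogue of udelay(2) */
        }
        return NULL;
}

/* Models run_hrtimer: count how often it gets the lock at all. */
static void *contender(void *arg)
{
        (void)arg;
        while (!done) {
                pthread_spin_lock(&lock);
                acquisitions++;
                pthread_spin_unlock(&lock);
        }
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
        pthread_create(&a, NULL, hog, NULL);
        pthread_create(&b, NULL, contender, NULL);
        sleep(1);
        done = 1;
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("contender acquisitions in 1s: %ld\n", acquisitions);
        return 0;
}

Whether starvation actually manifests depends on the hardware's lock implementation; the comparison of the contender's throughput with and without the backoff is the point of the model.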
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 748eefb72a91..5ebefc7cfa4f 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1640,7 +1640,7 @@ static void migrate_timer_list(struct tvec_base *new_base,
         }
 }
 
-static void __migrate_timers(int cpu, bool wait, bool remove_pinned)
+static void __migrate_timers(int cpu, bool remove_pinned)
 {
         struct tvec_base *old_base;
         struct tvec_base *new_base;
@@ -1656,18 +1656,14 @@ static void __migrate_timers(int cpu, bool wait, bool remove_pinned)
         spin_lock_irqsave(&new_base->lock, flags);
         spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
-        if (wait) {
-                /* Ensure timers are done running before continuing */
-                while (old_base->running_timer) {
-                        spin_unlock(&old_base->lock);
-                        spin_unlock_irqrestore(&new_base->lock, flags);
-                        cpu_relax();
-                        spin_lock_irqsave(&new_base->lock, flags);
-                        spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
-                }
-        } else {
+        /*
+         * If we're in the hotplug path, kill the system if there's a running
+         * timer. It's ok to have a running timer in the isolation case - the
+         * currently running or just expired timers are off of the timer wheel
+         * and so everything else can be migrated off.
+         */
+        if (!cpu_online(cpu))
                 BUG_ON(old_base->running_timer);
-        }
 
         for (i = 0; i < TVR_SIZE; i++)
                 migrate_timer_list(new_base, old_base->tv1.vec + i,
@@ -1692,12 +1688,12 @@ static void __migrate_timers(int cpu, bool wait, bool remove_pinned)
 static void migrate_timers(int cpu)
 {
         BUG_ON(cpu_online(cpu));
-        __migrate_timers(cpu, false, true);
+        __migrate_timers(cpu, true);
 }
 
 void timer_quiesce_cpu(void *cpup)
 {
-        __migrate_timers(*(int *)cpup, true, false);
+        __migrate_timers(*(int *)cpup, false);
 }
 
 static int timer_cpu_notify(struct notifier_block *self,
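[Editor's illustration] The resulting split of callers: migrate_timers() remains the hotplug path, where the CPU is already offline and a still-running timer is a bug, while timer_quiesce_cpu() serves isolation and deliberately takes a void * so it can be used as a cross-call callback. A caller on the isolation side might look like the following sketch; the function name and the choice of smp_call_function_any() are assumptions for illustration, not part of this patch.

/* Hypothetical isolation-path caller -- not part of this patch. */
static void isolate_cpu_timers(int cpu)
{
        /*
         * Quiesce timers from any online CPU: pending (non-expired)
         * timers are pulled off the isolated CPU's wheel, while timers
         * that already expired are left to finish running there.
         */
        smp_call_function_any(cpu_online_mask, timer_quiesce_cpu, &cpu, 1);
}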