diff options
author | Linux Build Service Account <lnxbuild@localhost> | 2016-11-10 22:49:41 -0800 |
---|---|---|
committer | Gerrit - the friendly Code Review server <code-review@localhost> | 2016-11-10 22:49:40 -0800 |
commit | 1787801211de1e4a299feccd5f401077f5b05264 (patch) | |
tree | 6ef5f7c7863548588dd707159d10590216cf823e /kernel | |
parent | befd24230391166ccdf67e0e394e35dd7bf04857 (diff) | |
parent | 4142e30898e344a7b86782821ca200ca7d97ff76 (diff) |
Merge "timer: Don't wait for running timers when migrating during isolation"
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/time/hrtimer.c | 7 | ||||
-rw-r--r-- | kernel/time/timer.c | 24 |
2 files changed, 17 insertions(+), 14 deletions(-)
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 18d2fe271cf7..c3914e8f87b0 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -49,6 +49,7 @@ #include <linux/sched/deadline.h> #include <linux/timer.h> #include <linux/freezer.h> +#include <linux/delay.h> #include <asm/uaccess.h> @@ -1648,6 +1649,12 @@ static void migrate_hrtimer_list(struct hrtimer_cpu_base *old_base, raw_spin_unlock(&old_base->lock); raw_spin_unlock(&new_base->lock); cpu_relax(); + /* + * cpu_relax may just be a barrier. Grant the + * run_hrtimer_list code some time to obtain the + * spinlock. + */ + udelay(2); raw_spin_lock(&new_base->lock); raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 748eefb72a91..5ebefc7cfa4f 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -1640,7 +1640,7 @@ static void migrate_timer_list(struct tvec_base *new_base, } } -static void __migrate_timers(int cpu, bool wait, bool remove_pinned) +static void __migrate_timers(int cpu, bool remove_pinned) { struct tvec_base *old_base; struct tvec_base *new_base; @@ -1656,18 +1656,14 @@ static void __migrate_timers(int cpu, bool wait, bool remove_pinned) spin_lock_irqsave(&new_base->lock, flags); spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); - if (wait) { - /* Ensure timers are done running before continuing */ - while (old_base->running_timer) { - spin_unlock(&old_base->lock); - spin_unlock_irqrestore(&new_base->lock, flags); - cpu_relax(); - spin_lock_irqsave(&new_base->lock, flags); - spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); - } - } else { + /* + * If we're in the hotplug path, kill the system if there's a running + * timer. It's ok to have a running timer in the isolation case - the + * currently running or just expired timers are off of the timer wheel + * and so everything else can be migrated off. 
+ */ + if (!cpu_online(cpu)) BUG_ON(old_base->running_timer); - } for (i = 0; i < TVR_SIZE; i++) migrate_timer_list(new_base, old_base->tv1.vec + i, @@ -1692,12 +1688,12 @@ static void __migrate_timers(int cpu, bool wait, bool remove_pinned) static void migrate_timers(int cpu) { BUG_ON(cpu_online(cpu)); - __migrate_timers(cpu, false, true); + __migrate_timers(cpu, true); } void timer_quiesce_cpu(void *cpup) { - __migrate_timers(*(int *)cpup, true, false); + __migrate_timers(*(int *)cpup, false); } static int timer_cpu_notify(struct notifier_block *self, |