author    Pavankumar Kondeti <pkondeti@codeaurora.org>    2017-05-16 15:29:04 +0530
committer Pavankumar Kondeti <pkondeti@codeaurora.org>    2017-05-22 13:29:16 +0530
commit    3b8631c0d5bd7d0a4dfd6acfa20f2138f9262fd4 (patch)
tree      6d80620461f628889f3a9e2ddc8146bbbaf97695 /kernel/time/hrtimer.c
parent    60be71604a84d2e047215cb702d6324379a353bb (diff)
hrtimer: Don't drop the base lock when migrating during isolation
The current code drops the base lock and waits for the running hrtimer's expiry event to be processed on the isolated CPU. This leaves a window in which the running hrtimer can migrate to a different CPU or even get freed. The pinned hrtimers maintained on a temporary list can also get freed while the lock is dropped.

The only reason for waiting on the running hrtimer is to make sure it gets migrated away from the isolated CPU, and that matters only if the hrtimer is rearmed from its own callback. As this race is very rare, it is better to accept this limitation than to fix the above bugs with more intrusive changes.

Change-Id: I14ba67cacb321d8b561195935592bb9979996a27
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
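For reference, the wait loop being removed looked roughly like this (simplified from the hunk below, with old_base and new_base pointing at the old and new hrtimer_cpu_base); the comment marks the unsafe window the message describes:

	while (hrtimer_callback_running(timer)) {
		raw_spin_unlock(&old_base->lock);
		raw_spin_unlock(&new_base->lock);
		/*
		 * Window: with both base locks dropped, "timer" can be
		 * rearmed on another CPU or freed by its owner, and the
		 * temporary "pinned" list can go stale as well.
		 */
		cpu_relax();
		udelay(2);
		raw_spin_lock(&new_base->lock);
		raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
	}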
Diffstat (limited to 'kernel/time/hrtimer.c')
-rw-r--r--    kernel/time/hrtimer.c    49
1 file changed, 14 insertions(+), 35 deletions(-)
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index bf7fc4989e5c..3acba89f32d9 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -49,7 +49,6 @@
#include <linux/sched/deadline.h>
#include <linux/timer.h>
#include <linux/freezer.h>
-#include <linux/delay.h>
#include <asm/uaccess.h>
@@ -1631,42 +1630,22 @@ static void init_hrtimers_cpu(int cpu)
}
#if defined(CONFIG_HOTPLUG_CPU)
-static void migrate_hrtimer_list(struct hrtimer_cpu_base *old_base,
- struct hrtimer_cpu_base *new_base,
- unsigned int i,
- bool wait,
+static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
+ struct hrtimer_clock_base *new_base,
bool remove_pinned)
{
struct hrtimer *timer;
struct timerqueue_node *node;
struct timerqueue_head pinned;
int is_pinned;
- struct hrtimer_clock_base *old_c_base = &old_base->clock_base[i];
- struct hrtimer_clock_base *new_c_base = &new_base->clock_base[i];
+ bool is_hotplug = !cpu_online(old_base->cpu_base->cpu);
timerqueue_init_head(&pinned);
- while ((node = timerqueue_getnext(&old_c_base->active))) {
+ while ((node = timerqueue_getnext(&old_base->active))) {
timer = container_of(node, struct hrtimer, node);
- if (wait) {
- /* Ensure timers are done running before continuing */
- while (hrtimer_callback_running(timer)) {
- raw_spin_unlock(&old_base->lock);
- raw_spin_unlock(&new_base->lock);
- cpu_relax();
- /*
- * cpu_relax may just be a barrier. Grant the
- * run_hrtimer_list code some time to obtain the
- * spinlock.
- */
- udelay(2);
- raw_spin_lock(&new_base->lock);
- raw_spin_lock_nested(&old_base->lock,
- SINGLE_DEPTH_NESTING);
- }
- } else {
+ if (is_hotplug)
BUG_ON(hrtimer_callback_running(timer));
- }
debug_deactivate(timer);
/*
@@ -1674,7 +1653,7 @@ static void migrate_hrtimer_list(struct hrtimer_cpu_base *old_base,
* timer could be seen as !active and just vanish away
* under us on another CPU
*/
- __remove_hrtimer(timer, old_c_base, HRTIMER_STATE_ENQUEUED, 0);
+ __remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);
is_pinned = timer->state & HRTIMER_STATE_PINNED;
if (!remove_pinned && is_pinned) {
@@ -1682,7 +1661,7 @@ static void migrate_hrtimer_list(struct hrtimer_cpu_base *old_base,
continue;
}
- timer->base = new_c_base;
+ timer->base = new_base;
/*
* Enqueue the timers on the new cpu. This does not
* reprogram the event device in case the timer
@@ -1691,7 +1670,7 @@ static void migrate_hrtimer_list(struct hrtimer_cpu_base *old_base,
* sort out already expired timers and reprogram the
* event device.
*/
- enqueue_hrtimer(timer, new_c_base);
+ enqueue_hrtimer(timer, new_base);
}
/* Re-queue pinned timers for non-hotplug usecase */
@@ -1699,11 +1678,11 @@ static void migrate_hrtimer_list(struct hrtimer_cpu_base *old_base,
timer = container_of(node, struct hrtimer, node);
timerqueue_del(&pinned, &timer->node);
- enqueue_hrtimer(timer, old_c_base);
+ enqueue_hrtimer(timer, old_base);
}
}
-static void __migrate_hrtimers(int scpu, bool wait, bool remove_pinned)
+static void __migrate_hrtimers(int scpu, bool remove_pinned)
{
struct hrtimer_cpu_base *old_base, *new_base;
unsigned long flags;
@@ -1720,8 +1699,8 @@ static void __migrate_hrtimers(int scpu, bool wait, bool remove_pinned)
raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
- migrate_hrtimer_list(old_base, new_base, i, wait,
- remove_pinned);
+ migrate_hrtimer_list(&old_base->clock_base[i],
+ &new_base->clock_base[i], remove_pinned);
}
raw_spin_unlock(&old_base->lock);
@@ -1737,12 +1716,12 @@ static void migrate_hrtimers(int scpu)
BUG_ON(cpu_online(scpu));
tick_cancel_sched_timer(scpu);
- __migrate_hrtimers(scpu, false, true);
+ __migrate_hrtimers(scpu, true);
}
void hrtimer_quiesce_cpu(void *cpup)
{
- __migrate_hrtimers(*(int *)cpup, true, false);
+ __migrate_hrtimers(*(int *)cpup, false);
}
#endif /* CONFIG_HOTPLUG_CPU */
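The limitation the commit message accepts is a pinned hrtimer that rearms itself from its own callback: if that callback happens to be running on the CPU being isolated while the queues are migrated, its HRTIMER_RESTART re-enqueue can leave the timer on that CPU. A minimal sketch of such a callback (my_rearm_cb is a hypothetical name, not part of this patch):

	/* Hypothetical self-rearming callback; not part of this patch. */
	static enum hrtimer_restart my_rearm_cb(struct hrtimer *timer)
	{
		/* Push the expiry forward and ask the core to re-enqueue. */
		hrtimer_forward_now(timer, ms_to_ktime(10));
		return HRTIMER_RESTART;
	}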