author		Saravana Kannan <skannan@codeaurora.org>	2014-01-28 19:14:57 -0800
committer	David Keitel <dkeitel@codeaurora.org>		2016-03-23 19:58:43 -0700
commit		6f13a3351a4c06f89ea7954aae69e5aaf0cea3b0 (patch)
tree		a89ae7f68e3448737fa82d61791a9efe2fd28e4c /drivers/cpufreq
parent		df74672faf8421bf0f133d7fe7422a7cd2f757a1 (diff)
cpufreq: cpu-boost: Fix queue_delayed_work_on() race with hotplug
Calling queue_delayed_work_on() on a CPU that's in the process of getting
hotplugged out can result in that CPU infinitely looping in
msm_pm_wait_cpu_shutdown(). If queue_delayed_work_on() is called after the
CPU is hotplugged out, it could wake up the CPU without going through the
hotplug path and cause instability.

To avoid this, make sure the CPU is, and stays, online while queueing work
on it.

Change-Id: I1b4aae3db803e476b1a7676d08f495c1f38bb154
Signed-off-by: Saravana Kannan <skannan@codeaurora.org>
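The fix follows a standard hotplug-safe queueing pattern. The sketch below is
illustrative only and is not part of the patch; the helper name
queue_on_cpu_if_online() and its parameters are hypothetical. The idea is to
hold the CPU hotplug read lock so the target CPU cannot be unplugged between
the cpu_online() check and the queue_delayed_work_on() call:

/*
 * Illustrative sketch (not from cpu-boost.c): queue a delayed work item on
 * a CPU only while hotplug is blocked, so the CPU cannot disappear between
 * the online check and the queueing call.
 */
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void queue_on_cpu_if_online(int cpu, struct workqueue_struct *wq,
				   struct delayed_work *dwork,
				   unsigned int delay_ms)
{
	get_online_cpus();		/* take the hotplug read lock */
	if (cpu_online(cpu))		/* CPU stays online until we unlock */
		queue_delayed_work_on(cpu, wq, dwork,
				      msecs_to_jiffies(delay_ms));
	put_online_cpus();		/* allow hotplug again */
}

In boost_mig_sync_thread() the offline branch additionally clears s->boost_min,
because the delayed boost_rem work that would normally reset it is not queued
in that case.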
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--	drivers/cpufreq/cpu-boost.c	17
1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/drivers/cpufreq/cpu-boost.c b/drivers/cpufreq/cpu-boost.c
index f9ec03a92bdb..445b1798aa4a 100644
--- a/drivers/cpufreq/cpu-boost.c
+++ b/drivers/cpufreq/cpu-boost.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/notifier.h>
 #include <linux/cpufreq.h>
+#include <linux/cpu.h>
 #include <linux/sched.h>
 #include <linux/jiffies.h>
 #include <linux/kthread.h>
@@ -164,9 +165,15 @@ static int boost_mig_sync_thread(void *data)
 		s->boost_min = src_policy.cur;
 
 		/* Force policy re-evaluation to trigger adjust notifier. */
-		cpufreq_update_policy(dest_cpu);
-		queue_delayed_work_on(s->cpu, cpu_boost_wq,
-			&s->boost_rem, msecs_to_jiffies(boost_ms));
+		get_online_cpus();
+		if (cpu_online(dest_cpu)) {
+			cpufreq_update_policy(dest_cpu);
+			queue_delayed_work_on(dest_cpu, cpu_boost_wq,
+				&s->boost_rem, msecs_to_jiffies(boost_ms));
+		} else {
+			s->boost_min = 0;
+		}
+		put_online_cpus();
 	}
 
 	return 0;
@@ -205,6 +212,7 @@ static void do_input_boost(struct work_struct *work)
 	struct cpu_sync *i_sync_info;
 	struct cpufreq_policy policy;
 
+	get_online_cpus();
 	for_each_online_cpu(i) {
 
 		i_sync_info = &per_cpu(sync_info, i);
@@ -221,6 +229,7 @@ static void do_input_boost(struct work_struct *work)
 			&i_sync_info->input_boost_rem,
 			msecs_to_jiffies(input_boost_ms));
 	}
+	put_online_cpus();
 }
 
 static void cpuboost_input_event(struct input_handle *handle,