path: root/kernel/sched/hmp.c
author     Pavankumar Kondeti <pkondeti@codeaurora.org>   2016-11-28 20:55:58 +0530
committer  Pavankumar Kondeti <pkondeti@codeaurora.org>   2016-11-28 22:15:56 +0530
commit     d0ff1c04e8e6b56eb75bf6c221a98ab939ff5e13 (patch)
tree       e34e6f2e149983394bd4b075a1f821109f78fc05 /kernel/sched/hmp.c
parent     841264c5051b8cbd44b2e1b12db94f8cbcd0c43a (diff)
sched: Disable interrupts while holding related_thread_group_lock
There is a potential deadlock condition if interrupts are enabled
while holding the related_thread_group_lock. Prevent this.

----------------                        --------------------
CPU 0                                   CPU 1
----------------                        --------------------
check_for_migration()                   cgroup_file_write(p)
check_for_freq_change()                 cgroup_attach_task(p)
send_notification()                     schedtune_attach(p)
read_lock(&related_thread_group_lock)   sched_set_group_id(p)
                                        raw_spin_lock_irqsave(
                                             &p->pi_lock, flags)
                                        write_lock_irqsave(
                                             &related_thread_group_lock)
                                        waiting on CPU#0
raw_spin_lock_irqsave(&rq->lock, flags)
raw_spin_unlock_irqrestore(&rq->lock, flags)
--> interrupt()
----> ttwu(p)
-------> waiting for p's pi_lock on CPU#1

Change-Id: I6f0f8f742d6e1b3ff735dcbeabd54ef101329cdf
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
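For readers skimming the diff below, here is a minimal kernel-style sketch of the rule the patch enforces in send_notification(): interrupts are turned off together with the read side of the rwlock, so an interrupt taken on this CPU can no longer run try_to_wake_up() inside the read-side critical section while the remote CPU already holds p->pi_lock and spins on the write lock. The names example_group_lock, example_rq_lock and example_notify() are invented for illustration; this is a sketch of the pattern, not the hmp.c code itself.

/* Illustrative only: the example_* names are hypothetical stand-ins. */
#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_group_lock);       /* ~ related_thread_group_lock */
static DEFINE_RAW_SPINLOCK(example_rq_lock);    /* ~ rq->lock                  */

static void example_notify(void)
{
        unsigned long flags;

        /* IRQs are disabled for the whole read-side section ... */
        read_lock_irqsave(&example_group_lock, flags);

        /*
         * ... so the inner rq-style lock no longer needs its own
         * irqsave; a plain raw_spin_lock() is enough here.
         */
        raw_spin_lock(&example_rq_lock);
        /* read group load / prev_runnable_sum under both locks */
        raw_spin_unlock(&example_rq_lock);

        read_unlock_irqrestore(&example_group_lock, flags);
}

This mirrors the first hunk of the patch: the irqsave moves from rq->lock to related_thread_group_lock, and the unlock order mirrors the lock order.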
Diffstat (limited to 'kernel/sched/hmp.c')
-rw-r--r--  kernel/sched/hmp.c  17
1 files changed, 10 insertions, 7 deletions
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 968a41e0e81e..ad5d115bdd7f 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -1767,20 +1767,20 @@ static int send_notification(struct rq *rq, int check_pred, int check_groups)
                 if (freq_required < cur_freq + sysctl_sched_pred_alert_freq)
                         return 0;
         } else {
-                read_lock(&related_thread_group_lock);
+                read_lock_irqsave(&related_thread_group_lock, flags);
                 /*
                  * Protect from concurrent update of rq->prev_runnable_sum and
                  * group cpu load
                  */
-                raw_spin_lock_irqsave(&rq->lock, flags);
+                raw_spin_lock(&rq->lock);
                 if (check_groups)
                         _group_load_in_cpu(cpu_of(rq), &group_load, NULL);
 
                 new_load = rq->prev_runnable_sum + group_load;
                 new_load = freq_policy_load(rq, new_load);
 
-                raw_spin_unlock_irqrestore(&rq->lock, flags);
-                read_unlock(&related_thread_group_lock);
+                raw_spin_unlock(&rq->lock);
+                read_unlock_irqrestore(&related_thread_group_lock, flags);
 
                 cur_freq = load_to_freq(rq, rq->old_busy_time);
                 freq_required = load_to_freq(rq, new_load);
@@ -3202,14 +3202,16 @@ void sched_get_cpus_busy(struct sched_load *busy,
         if (unlikely(cpus == 0))
                 return;
 
+        local_irq_save(flags);
+
+        read_lock(&related_thread_group_lock);
+
         /*
          * This function could be called in timer context, and the
          * current task may have been executing for a long time. Ensure
          * that the window stats are current by doing an update.
          */
-        read_lock(&related_thread_group_lock);
-        local_irq_save(flags);
         for_each_cpu(cpu, query_cpus)
                 raw_spin_lock(&cpu_rq(cpu)->lock);
@@ -3309,10 +3311,11 @@ skip_early:
         for_each_cpu(cpu, query_cpus)
                 raw_spin_unlock(&(cpu_rq(cpu))->lock);
-        local_irq_restore(flags);
         read_unlock(&related_thread_group_lock);
+        local_irq_restore(flags);
+
         i = 0;
         for_each_cpu(cpu, query_cpus) {
                 rq = cpu_rq(cpu);
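The sched_get_cpus_busy() hunks apply the same rule where several rq locks have to nest under the group lock: interrupts are disabled first, then the rwlock is read-locked, then every queried CPU's rq lock is taken, and everything is dropped in strict reverse order. Below is a hedged sketch of that ordering only; example_collect_busy() and example_cpu_lock[] are invented names standing in for the real per-CPU rq locks, not hmp.c code.

/*
 * Illustrative only: the example_* names are hypothetical stand-ins,
 * and example_cpu_lock[] is assumed to have been initialised with
 * raw_spin_lock_init() during setup.
 */
#include <linux/cpumask.h>
#include <linux/spinlock.h>

#define EXAMPLE_NR_CPUS 8

static DEFINE_RWLOCK(example_group_lock);                /* ~ related_thread_group_lock */
static raw_spinlock_t example_cpu_lock[EXAMPLE_NR_CPUS]; /* ~ cpu_rq(cpu)->lock         */

static void example_collect_busy(const struct cpumask *query_cpus)
{
        unsigned long flags;
        int cpu;

        local_irq_save(flags);                  /* 1. interrupts off first       */
        read_lock(&example_group_lock);         /* 2. then the group rwlock      */

        for_each_cpu(cpu, query_cpus)           /* 3. then each queried rq lock  */
                raw_spin_lock(&example_cpu_lock[cpu]);

        /* ... sample per-CPU load while everything is pinned ... */

        for_each_cpu(cpu, query_cpus)           /* release in reverse order      */
                raw_spin_unlock(&example_cpu_lock[cpu]);

        read_unlock(&example_group_lock);
        local_irq_restore(flags);               /* interrupts back on last       */
}

Releasing in the reverse of the acquisition order is exactly what the third hunk changes: local_irq_restore() now comes after read_unlock(), so interrupts stay disabled for as long as the read lock is held.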