author     Syed Rameez Mustafa <rameezmustafa@codeaurora.org>    2017-01-03 15:29:14 -0800
committer  Syed Rameez Mustafa <rameezmustafa@codeaurora.org>    2017-01-03 19:22:23 -0800
commit     3997e768ac8a807af810e038af0ae1f0712a066b (patch)
tree       2725a24a4b4cc77c1e6b16b0df7b1dee70d78944 /kernel/sched
parent     2f817b68f06ed866aa2c4cc35b2e4293aeb1ef99 (diff)
sched: Fix new task accounting bug in transfer_busy_time()
In transfer_busy_time(), the new_task flag is set based on the active window count prior to the call to update_task_ravg(). update_task_ravg(), however, can then increment the active window count, and consequently the new_task flag above becomes stale. This in turn leads to inaccurate accounting whereby update_task_ravg() does its accounting on the basis that the task is not new, whereas transfer_busy_time() then continues to do further accounting assuming that the task is new. The resulting accounting discrepancies are sometimes caught by some of the scheduler BUG_ON checks.

Fix the described problem by moving the is_new_task() check after the call to update_task_ravg(). Also add two missing BUG_ON checks that would catch the problem sooner rather than later.

Change-Id: I8dc4822e97cc03ebf2ca1ee2de95eb4e5851f459
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/hmp.c  7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index a8bf39c6d7d7..0c3ab3d45a2f 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -3897,7 +3897,7 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
struct migration_sum_data d;
int migrate_type;
int cpu = cpu_of(rq);
- bool new_task = is_new_task(p);
+ bool new_task;
int i;
if (!sched_freq_aggregate)
@@ -3907,6 +3907,7 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
update_task_ravg(p, rq, TASK_UPDATE, wallclock, 0);
+ new_task = is_new_task(p);
/* cpu_time protected by related_thread_group_lock, grp->lock rq_lock */
cpu_time = _group_cpu_time(grp, cpu);
@@ -4001,6 +4002,8 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
BUG_ON((s64)*src_curr_runnable_sum < 0);
BUG_ON((s64)*src_prev_runnable_sum < 0);
+ BUG_ON((s64)*src_nt_curr_runnable_sum < 0);
+ BUG_ON((s64)*src_nt_prev_runnable_sum < 0);
}
static inline struct group_cpu_time *
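
For readers unfamiliar with the HMP scheduler internals, the pattern this patch corrects is a general one: a boolean snapshot derived from mutable state goes stale if a later call can mutate that state before the snapshot is used. Below is a minimal, self-contained sketch of that hazard and of the fixed ordering. It is only an illustration under assumptions; demo_task, demo_update_window_count(), demo_is_new_task(), and the window threshold are hypothetical stand-ins, not the scheduler's real structures or values.

/*
 * Simplified illustration of the stale-flag hazard fixed above.
 * All names here are hypothetical stand-ins for the scheduler code.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_NEW_TASK_WINDOWS 5   /* arbitrary "new task" threshold */

struct demo_task {
	unsigned int active_windows;
};

/* Stand-in for update_task_ravg(): may advance the window count. */
static void demo_update_window_count(struct demo_task *p)
{
	p->active_windows++;
}

/* Stand-in for is_new_task(): "new" means few active windows so far. */
static bool demo_is_new_task(const struct demo_task *p)
{
	return p->active_windows < DEMO_NEW_TASK_WINDOWS;
}

int main(void)
{
	struct demo_task p = { .active_windows = DEMO_NEW_TASK_WINDOWS - 1 };

	/*
	 * Buggy ordering: snapshot the flag, then mutate the state it was
	 * derived from. The snapshot can now disagree with reality.
	 */
	bool stale = demo_is_new_task(&p);
	demo_update_window_count(&p);

	/*
	 * Fixed ordering: evaluate the flag only after the update, as the
	 * patch does by moving is_new_task() below update_task_ravg().
	 */
	bool fresh = demo_is_new_task(&p);

	printf("stale flag: %d, fresh flag: %d\n", stale, fresh);
	return 0;
}

In the run above the stale snapshot still reports the task as new while the fresh evaluation does not, which mirrors the disagreement between transfer_busy_time() and update_task_ravg() that the added BUG_ON checks are meant to catch early.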