author    Syed Rameez Mustafa <rameezmustafa@codeaurora.org>  2016-11-01 17:34:01 -0700
committer Syed Rameez Mustafa <rameezmustafa@codeaurora.org>  2016-11-07 14:46:19 -0800
commit    54052c3658daebb1dfe5c4a78dd8a1eeb34694e4 (patch)
tree      b64290b17759361cabfa8d9d1c03bdf87d6c113a
parent    85d7e134cc5d95dfd3a1a5ee5a1d1435633288cd (diff)
sched/hmp: Remove capping when reporting load to the cpufreq governor
Capping load when reporting to the governor was important prior to the new
scheduler-guided frequency changes, as intra-cluster migrations would
sometimes lead to CPU loads well in excess of 100%. With the new top-task
approach, however, load greater than 100% is no longer possible except under
the same conditions that were previously exempted from the cap (i.e.
inter-cluster migrations and frequency aggregation).

Change-Id: I3e4f5e39ec9ae7eeaba9a567efd245a7aec1b7ad
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
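For context, the branch removed below first scaled load to the current
frequency, capped it at one full window (i.e. 100% @ Fcur), and only then
rescaled it to the maximum possible frequency; the new code scales directly
with no intermediate cap. A minimal standalone sketch of that before/after
arithmetic, assuming a simple linear scale_load_to_freq() helper (the
function names report_load_capped/report_load_uncapped and the helper body
are illustrative assumptions, not the kernel's implementation):

#include <stdint.h>

/* Illustrative linear rescaling between frequency domains (assumption). */
static uint64_t scale_load_to_freq(uint64_t load, unsigned int src_freq,
				   unsigned int dst_freq)
{
	return load * dst_freq / src_freq;
}

/* Old path: scale to Fcur, cap at one full window (100%), then rescale. */
static uint64_t report_load_capped(uint64_t load, unsigned int max_freq,
				   unsigned int cur_freq,
				   unsigned int max_possible_freq,
				   uint64_t window_size)
{
	load = scale_load_to_freq(load, max_freq, cur_freq);
	if (load > window_size)		/* never report more than 100% @ Fcur */
		load = window_size;
	return scale_load_to_freq(load, cur_freq, max_possible_freq);
}

/* New path: a single direct scaling; no intermediate cap is applied. */
static uint64_t report_load_uncapped(uint64_t load, unsigned int max_freq,
				     unsigned int max_possible_freq)
{
	return scale_load_to_freq(load, max_freq, max_possible_freq);
}

Under the top-task accounting this patch relies on, a CPU's load can exceed
one window only in the inter-cluster migration and frequency-aggregation
cases that the old code already exempted, so the capped path no longer pays
for its complexity.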
-rw-r--r--  kernel/sched/hmp.c | 36
1 file changed, 6 insertions(+), 30 deletions(-)
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 627da2346337..102fd2c27cbc 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -3296,7 +3296,7 @@ void sched_get_cpus_busy(struct sched_load *busy,
 	u64 load[cpus], group_load[cpus];
 	u64 nload[cpus], ngload[cpus];
 	u64 pload[cpus];
-	unsigned int cur_freq[cpus], max_freq[cpus];
+	unsigned int max_freq[cpus];
 	int notifier_sent = 0;
 	int early_detection[cpus];
 	int cpu, i = 0;
@@ -3336,7 +3336,6 @@ void sched_get_cpus_busy(struct sched_load *busy,
 		update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_ktime_clock(),
 				 0);
-		cur_freq[i] = cpu_cycles_to_freq(rq->cc.cycles, rq->cc.time);
 		account_load_subtractions(rq);
 		load[i] = rq->old_busy_time = rq->prev_runnable_sum;
@@ -3360,7 +3359,6 @@ void sched_get_cpus_busy(struct sched_load *busy,
 			rq->cluster->notifier_sent = 0;
 		}
 		early_detection[i] = (rq->ed_task != NULL);
-		cur_freq[i] = cpu_cur_freq(cpu);
 		max_freq[i] = cpu_max_freq(cpu);
 		i++;
 	}
@@ -3433,33 +3431,11 @@ skip_early:
 			goto exit_early;
 		}

-		/*
-		 * When the load aggregation is controlled by
-		 * sched_freq_aggregate_threshold, allow reporting loads
-		 * greater than 100 @ Fcur to ramp up the frequency
-		 * faster.
-		 */
-		if (notifier_sent || (aggregate_load &&
-				      sched_freq_aggregate_threshold)) {
-			load[i] = scale_load_to_freq(load[i], max_freq[i],
-						     cpu_max_possible_freq(cpu));
-			nload[i] = scale_load_to_freq(nload[i], max_freq[i],
-						      cpu_max_possible_freq(cpu));
-		} else {
-			load[i] = scale_load_to_freq(load[i], max_freq[i],
-						     cur_freq[i]);
-			nload[i] = scale_load_to_freq(nload[i], max_freq[i],
-						      cur_freq[i]);
-			if (load[i] > window_size)
-				load[i] = window_size;
-			if (nload[i] > window_size)
-				nload[i] = window_size;
-
-			load[i] = scale_load_to_freq(load[i], cur_freq[i],
-						     cpu_max_possible_freq(cpu));
-			nload[i] = scale_load_to_freq(nload[i], cur_freq[i],
-						     cpu_max_possible_freq(cpu));
-		}
+		load[i] = scale_load_to_freq(load[i], max_freq[i],
+					     cpu_max_possible_freq(cpu));
+		nload[i] = scale_load_to_freq(nload[i], max_freq[i],
+					      cpu_max_possible_freq(cpu));
+
 		pload[i] = scale_load_to_freq(pload[i], max_freq[i],
 					      rq->cluster->max_possible_freq);