author     Syed Rameez Mustafa <rameezmustafa@codeaurora.org>  2016-08-02 15:08:13 -0700
committer  Syed Rameez Mustafa <rameezmustafa@codeaurora.org>  2016-08-22 14:06:33 -0700
commit     67e0df6e338edbf119ae2cb7abe52a460cb33851 (patch)
tree       8508a1da431a3f36a80ed44bc59aa8c73232a35d
parent     9095a09ab1d2bd5b1ab123d4714003ee77aa4fa1 (diff)
sched: Move notify_migration() under CONFIG_SCHED_HMP
notify_migration() is an HMP-specific function that relies on all of the
functions it calls being stubbed out for !CONFIG_SCHED_HMP. However, it
still takes rcu_read_lock()/rcu_read_unlock(), and in the !HMP case these
calls are simply redundant. Move the function under CONFIG_SCHED_HMP and
add a stub for when the config is not defined, so that there is no
overhead.

Change-Id: Iad914f31b629e81e403b0e89796b2b0f1d081695
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
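For reference, the !CONFIG_SCHED_HMP stub added here follows the usual kernel pattern of pairing an extern declaration under the config option with an empty static inline otherwise, so that call sites in generic code compile away to nothing. Below is a minimal, illustrative sketch of that pattern only; the names example_notify() and example_caller() are hypothetical and do not appear in the kernel.

    #ifdef CONFIG_SCHED_HMP
    /* Real implementation lives in the HMP-specific file (here, hmp.c). */
    extern void example_notify(int src_cpu, int dest_cpu);
    #else
    /* Empty stub: the call below generates no code when HMP is disabled. */
    static inline void example_notify(int src_cpu, int dest_cpu) { }
    #endif

    static void example_caller(int src_cpu, int dest_cpu)
    {
        /* With the stub, no rcu_read_lock()/unlock() or other overhead
         * remains in the !CONFIG_SCHED_HMP build. */
        example_notify(src_cpu, dest_cpu);
    }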
-rw-r--r--  kernel/sched/core.c  | 19
-rw-r--r--  kernel/sched/hmp.c   | 19
-rw-r--r--  kernel/sched/sched.h |  6
3 files changed, 25 insertions(+), 19 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6836851d416f..a790d101d120 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1134,25 +1134,6 @@ static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_
 	return rq;
 }
 
-static void notify_migration(int src_cpu, int dest_cpu, bool src_cpu_dead,
-			     struct task_struct *p)
-{
-	bool check_groups;
-
-	rcu_read_lock();
-	check_groups = task_in_related_thread_group(p);
-	rcu_read_unlock();
-
-	if (!same_freq_domain(src_cpu, dest_cpu)) {
-		if (!src_cpu_dead)
-			check_for_freq_change(cpu_rq(src_cpu), false,
-					      check_groups);
-		check_for_freq_change(cpu_rq(dest_cpu), false, check_groups);
-	} else {
-		check_for_freq_change(cpu_rq(dest_cpu), true, check_groups);
-	}
-}
-
 /*
  * migration_cpu_stop - this will be executed by a highprio stopper thread
  * and performs thread migration by bumping thread off CPU then
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 84cce75c6e50..162d4a0c950c 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -1786,6 +1786,25 @@ void check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups)
 			(void *)(long)cpu);
 }
 
+void notify_migration(int src_cpu, int dest_cpu, bool src_cpu_dead,
+		      struct task_struct *p)
+{
+	bool check_groups;
+
+	rcu_read_lock();
+	check_groups = task_in_related_thread_group(p);
+	rcu_read_unlock();
+
+	if (!same_freq_domain(src_cpu, dest_cpu)) {
+		if (!src_cpu_dead)
+			check_for_freq_change(cpu_rq(src_cpu), false,
+					      check_groups);
+		check_for_freq_change(cpu_rq(dest_cpu), false, check_groups);
+	} else {
+		check_for_freq_change(cpu_rq(dest_cpu), true, check_groups);
+	}
+}
+
 static int account_busy_for_cpu_time(struct rq *rq, struct task_struct *p,
 				      u64 irqtime, int event)
 {
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 0bae93891647..b09d3a1a026f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1300,6 +1300,9 @@ struct related_thread_group *task_related_thread_group(struct task_struct *p)
 extern void
 check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups);
 
+extern void notify_migration(int src_cpu, int dest_cpu,
+			     bool src_cpu_dead, struct task_struct *p);
+
 struct group_cpu_time {
 	u64 curr_runnable_sum;
 	u64 prev_runnable_sum;
@@ -1580,6 +1583,9 @@ static inline int update_preferred_cluster(struct related_thread_group *grp,
 static inline void
 check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups) { }
 
+static inline void notify_migration(int src_cpu, int dest_cpu,
+				    bool src_cpu_dead, struct task_struct *p) { }
+
 static inline int same_freq_domain(int src_cpu, int dst_cpu)
 {
 	return 1;