| author | Olav Haugan <ohaugan@codeaurora.org> | 2016-03-05 13:47:52 -0800 |
|---|---|---|
| committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 21:25:21 -0700 |
| commit | b29f9a7a8403e7245ed9f939da67caa3fae7dce9 (patch) | |
| tree | 1a589372e46a6df4e219559f0b0a87e868cde284 /kernel | |
| parent | 615b6f6221f4a3e0df5bd403f3a14b0afe75f821 (diff) | |
sched/core: Add protection against null-pointer dereference
p->grp is accessed outside of the lock, which can cause a null-pointer
dereference. Fix this, and also add an RCU critical section around
accesses to this data structure.
CRs-fixed: 985379
Change-Id: Ic82de6ae2821845d704f0ec18046cc6a24f98e39
Signed-off-by: Olav Haugan <ohaugan@codeaurora.org>
[joonwoop@codeaurora.org: fixed conflict in init_new_task_load().]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
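
The pattern the patch adopts is the standard RCU publish/read idiom: writers publish or clear the pointer with rcu_assign_pointer() while holding the task's runqueue lock, and readers bracket the access with rcu_read_lock()/rcu_read_unlock(), fetching the pointer through rcu_dereference() and checking it for NULL before use. Below is a minimal sketch of that idiom; the `example_*` names are hypothetical and not part of the scheduler.

```c
/*
 * Illustrative sketch only -- not part of the patch. The example_*
 * names are hypothetical; this just shows the RCU publish/read idiom
 * the patch applies to p->grp.
 */
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct example_group {
	unsigned int id;
};

struct example_task {
	struct example_group __rcu *grp;	/* RCU-protected pointer */
	spinlock_t lock;			/* serializes writers */
};

/* Writer side: publish (or clear) the pointer under the writer lock. */
static void example_set_group(struct example_task *t,
			      struct example_group *g)
{
	spin_lock(&t->lock);
	rcu_assign_pointer(t->grp, g);	/* orders initialization before publish */
	spin_unlock(&t->lock);
}

/* Reader side: lockless, valid only inside the read-side critical section. */
static unsigned int example_get_group_id(struct example_task *t)
{
	struct example_group *g;
	unsigned int id;

	rcu_read_lock();
	g = rcu_dereference(t->grp);	/* may be NULL -- always check */
	id = g ? g->id : 0;
	rcu_read_unlock();		/* g must not be dereferenced after this */

	return id;
}
```

This mirrors the sched_get_group_id() change in the diff below: the pi_lock pair becomes an RCU read-side section, and the pointer (plus anything read through it, such as grp->id) is only guaranteed valid inside that section. For the idiom to be safe, whoever frees a group must defer the free past outstanding readers (e.g., synchronize_rcu() or kfree_rcu()); the group-teardown path is not shown in this diff.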
Diffstat (limited to 'kernel')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/sched/core.c | 23 |
| -rw-r--r-- | kernel/sched/fair.c | 20 |
| -rw-r--r-- | kernel/sched/sched.h | 2 |

3 files changed, 29 insertions, 16 deletions
```diff
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4e5abc3bb294..d1f62e394714 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3367,7 +3367,7 @@ static void remove_task_from_group(struct task_struct *p)
 	rq = __task_rq_lock(p);
 	list_del_init(&p->grp_list);
-	p->grp = NULL;
+	rcu_assign_pointer(p->grp, NULL);
 	__task_rq_unlock(rq);
 
 	if (!list_empty(&grp->tasks)) {
@@ -3397,7 +3397,7 @@ add_task_to_group(struct task_struct *p, struct related_thread_group *grp)
 	 * reference of p->grp in various hot-paths
 	 */
 	rq = __task_rq_lock(p);
-	p->grp = grp;
+	rcu_assign_pointer(p->grp, grp);
 	list_add(&p->grp_list, &grp->tasks);
 	__task_rq_unlock(rq);
@@ -3466,12 +3466,13 @@ done:
 unsigned int sched_get_group_id(struct task_struct *p)
 {
-	unsigned long flags;
 	unsigned int group_id;
+	struct related_thread_group *grp;
 
-	raw_spin_lock_irqsave(&p->pi_lock, flags);
-	group_id = p->grp ? p->grp->id : 0;
-	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+	rcu_read_lock();
+	grp = task_related_thread_group(p);
+	group_id = grp ? grp->id : 0;
+	rcu_read_unlock();
 
 	return group_id;
 }
@@ -3661,7 +3662,7 @@ static inline int update_preferred_cluster(struct related_thread_group *grp,
 	 * has passed since we last updated preference
 	 */
 	if (abs(new_load - old_load) > sched_ravg_window / 4 ||
-			sched_ktime_clock() - p->grp->last_update > sched_ravg_window)
+			sched_ktime_clock() - grp->last_update > sched_ravg_window)
 		return 1;
 
 	return 0;
@@ -4692,15 +4693,17 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 		raw_spin_lock(&rq->lock);
 		old_load = task_load(p);
-		grp = task_related_thread_group(p);
 		wallclock = sched_ktime_clock();
 		update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
 		heavy_task = heavy_task_wakeup(p, rq, TASK_WAKE);
 		update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
 		raw_spin_unlock(&rq->lock);
 
+		rcu_read_lock();
+		grp = task_related_thread_group(p);
 		if (update_preferred_cluster(grp, p, old_load))
 			set_preferred_cluster(grp);
+		rcu_read_unlock();
 
 	p->sched_contributes_to_load = !!task_contributes_to_load(p);
 	p->state = TASK_WAKING;
@@ -5699,7 +5702,6 @@ void scheduler_tick(void)
 	raw_spin_lock(&rq->lock);
 	old_load = task_load(curr);
-	grp = task_related_thread_group(curr);
 	set_window_start(rq);
 	update_rq_clock(rq);
 	curr->sched_class->task_tick(rq, curr, 0);
@@ -5722,8 +5724,11 @@ void scheduler_tick(void)
 #endif
 	rq_last_tick_reset(rq);
 
+	rcu_read_lock();
+	grp = task_related_thread_group(curr);
 	if (update_preferred_cluster(grp, curr, old_load))
 		set_preferred_cluster(grp);
+	rcu_read_unlock();
 
 	if (curr->sched_class == &fair_sched_class)
 		check_for_migration(rq, curr);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ce71727b5725..205ce2ea283a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3143,7 +3143,7 @@ preferred_cluster(struct sched_cluster *cluster, struct task_struct *p)
 	rcu_read_lock();
-	grp = p->grp;
+	grp = task_related_thread_group(p);
 	if (!grp || !sysctl_sched_enable_colocation)
 		rc = 1;
 	else
@@ -3516,7 +3516,7 @@ static int select_best_cpu(struct task_struct *p, int target, int reason,
 	rcu_read_lock();
-	grp = p->grp;
+	grp = task_related_thread_group(p);
 
 	if (grp && grp->preferred_cluster) {
 		pref_cluster = grp->preferred_cluster;
@@ -4060,6 +4060,7 @@ static inline void reset_balance_interval(int cpu)
 static inline int migration_needed(struct task_struct *p, int cpu)
 {
 	int nice;
+	struct related_thread_group *grp;
 
 	if (!sched_enable_hmp || p->state != TASK_RUNNING)
 		return 0;
@@ -4072,12 +4073,19 @@ static inline int migration_needed(struct task_struct *p, int cpu)
 		return IRQLOAD_MIGRATION;
 
 	nice = task_nice(p);
-	if (!p->grp && (nice > sched_upmigrate_min_nice ||
-			upmigrate_discouraged(p)) && cpu_capacity(cpu) > min_capacity)
+	rcu_read_lock();
+	grp = task_related_thread_group(p);
+	if (!grp && (nice > sched_upmigrate_min_nice ||
+			upmigrate_discouraged(p)) && cpu_capacity(cpu) > min_capacity) {
+		rcu_read_unlock();
 		return DOWN_MIGRATION;
+	}
 
-	if (!p->grp && !task_will_fit(p, cpu))
+	if (!grp && !task_will_fit(p, cpu)) {
+		rcu_read_unlock();
 		return UP_MIGRATION;
+	}
+	rcu_read_unlock();
 
 	return 0;
 }
@@ -4624,7 +4632,7 @@ void init_new_task_load(struct task_struct *p)
 	u32 init_load_pct = current->init_load_pct;
 
 	p->init_load_pct = 0;
-	p->grp = NULL;
+	rcu_assign_pointer(p->grp, NULL);
 	INIT_LIST_HEAD(&p->grp_list);
 	memset(&p->ravg, 0, sizeof(struct ravg));
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 96336313f251..a11e74c191f3 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1247,7 +1247,7 @@ static inline int sched_cpu_high_irqload(int cpu)
 static inline struct related_thread_group
 				*task_related_thread_group(struct task_struct *p)
 {
-	return p->grp;
+	return rcu_dereference(p->grp);
 }
 
 #else /* CONFIG_SCHED_HMP */
```
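
One detail worth noting in the migration_needed() hunk: once the function enters the RCU read-side section, every return path must drop it, which is why the early returns gained braces and their own rcu_read_unlock() calls. A common alternative is to compute the result into a local and leave through a single exit, so one unlock covers all outcomes. Below is a sketch of that variant, reusing the hypothetical `example_*` types from the sketch above; cond_a()/cond_b() are placeholders for the real capacity/fit checks, not scheduler functions.

```c
/* Placeholder predicates standing in for the real DOWN/UP-migration tests. */
static bool cond_a(struct example_task *t) { return true; }
static bool cond_b(struct example_task *t) { return false; }

/* Hypothetical single-exit variant of the patched logic. */
static int example_migration_needed(struct example_task *t)
{
	struct example_group *g;
	int ret = 0;

	rcu_read_lock();
	g = rcu_dereference(t->grp);
	if (!g && cond_a(t))
		ret = 1;		/* e.g. DOWN_MIGRATION */
	else if (!g && cond_b(t))
		ret = 2;		/* e.g. UP_MIGRATION */
	rcu_read_unlock();		/* one unlock covers every path */

	return ret;
}
```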