author	Syed Rameez Mustafa <rameezmustafa@codeaurora.org>	2015-04-13 18:36:39 -0700
committer	David Keitel <dkeitel@codeaurora.org>	2016-03-23 20:02:02 -0700
commit	1e920c599577d778119e149814bc7906c68fcc69
tree	9aa9f7f341b82e5db0c597d1d26ab0d8ba52ab26 /kernel
parent	09417ad30eeee22816471313bf13417c3039b930
sched/deadline: Add basic HMP extensions
Some HMP extensions have to be supported by all scheduling classes, irrespective of whether they use HMP task placement. Add these basic extensions to make deadline scheduling work.

Also, during the tick, if a deadline task gets throttled, its HMP stats are decremented as part of the dequeue. The throttled task, however, does not update its on_rq flag, so the HMP stats get decremented a second time when update_history() is called as part of a window rollover. Avoid this by checking for throttled deadline tasks before subtracting the deadline task's load from, or adding it to, the rq's cumulative runnable average.

Change-Id: I9e2ed6675a730f2ec830f764f911e71c00a7d87a
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
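For illustration, here is a minimal self-contained userspace C sketch (not kernel code; struct and function names are invented for this example) of the double decrement the patch prevents: the throttle path dequeues the task and decrements the stats, but leaves on_rq set, so an unguarded window-rollover path decrements the same load again.

#include <stdio.h>
#include <stdbool.h>

struct fake_task {
	bool on_rq;        /* stays true while the dl task is throttled */
	bool dl_throttled; /* set when the dl task gets throttled */
	int demand;        /* the task's windowed load contribution */
};

static int cumulative_runnable_avg; /* stand-in for rq->hmp_stats */

static void dequeue_throttled(struct fake_task *p)
{
	/* dequeue decrements the HMP stats but does not clear on_rq */
	cumulative_runnable_avg -= p->demand;
	p->dl_throttled = true;
}

static void window_rollover(struct fake_task *p, bool guarded)
{
	/* unguarded: trusts on_rq alone and decrements a second time */
	if (p->on_rq && (!guarded || !p->dl_throttled))
		cumulative_runnable_avg -= p->demand;
}

int main(void)
{
	struct fake_task p = { .on_rq = true, .demand = 10 };

	cumulative_runnable_avg = 10;
	dequeue_throttled(&p);
	window_rollover(&p, false);
	printf("unguarded: %d (double decrement)\n", cumulative_runnable_avg);

	cumulative_runnable_avg = 10;
	p.dl_throttled = false;
	dequeue_throttled(&p);
	window_rollover(&p, true);
	printf("guarded:   %d (single decrement)\n", cumulative_runnable_avg);
	return 0;
}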
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/core.c	| 10
-rw-r--r--	kernel/sched/deadline.c	| 30
2 files changed, 38 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7b3be71b6e2f..56253e1281a7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1689,7 +1689,13 @@ static void update_history(struct rq *rq, struct task_struct *p,
 	}
 
 	p->ravg.sum = 0;
-	if (p->on_rq)
+
+	/*
+	 * A throttled deadline sched class task gets dequeued without
+	 * changing p->on_rq. Since the dequeue decrements hmp stats
+	 * avoid decrementing it here again.
+	 */
+	if (p->on_rq && (!task_has_dl_policy(p) || !p->dl.dl_throttled))
 		p->sched_class->dec_hmp_sched_stats(rq, p);
 
 	avg = div64_u64(sum, sched_ravg_hist_size);
@@ -1705,7 +1711,7 @@ static void update_history(struct rq *rq, struct task_struct *p,
 	p->ravg.demand = demand;
 
-	if (p->on_rq)
+	if (p->on_rq && (!task_has_dl_policy(p) || !p->dl.dl_throttled))
 		p->sched_class->inc_hmp_sched_stats(rq, p);
 
 done:
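The guard added above appears verbatim at both the dec and inc sites in update_history(). Read as a predicate, it says: only touch the HMP stats of a task that is genuinely contributing to the runqueue, i.e. one that is on_rq and not a throttled deadline task (whose stats were already removed at dequeue time). As a sketch, the condition could be factored into a helper like this (hmp_task_accounted() is a hypothetical name; the patch open-codes the test at both sites):

static inline bool hmp_task_accounted(struct task_struct *p)
{
	/* throttled dl tasks are dequeued but keep on_rq set */
	if (task_has_dl_policy(p) && p->dl.dl_throttled)
		return false;
	return p->on_rq;
}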
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 8b0a15e285f9..ad92418a8fee 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -851,6 +851,30 @@ static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_SCHED_HMP
+
+static void
+inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
+{
+	inc_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
+{
+	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+#else /* CONFIG_SCHED_HMP */
+
+static inline void
+inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { }
+
+static inline void
+dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { }
+
+#endif /* CONFIG_SCHED_HMP */
+
 static inline
 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
@@ -860,6 +884,7 @@ void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 	WARN_ON(!dl_prio(prio));
 	dl_rq->dl_nr_running++;
 	add_nr_running(rq_of_dl_rq(dl_rq), 1);
+	inc_hmp_sched_stats_dl(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
 
 	inc_dl_deadline(dl_rq, deadline);
 	inc_dl_migration(dl_se, dl_rq);
@@ -874,6 +899,7 @@ void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 	WARN_ON(!dl_rq->dl_nr_running);
 	dl_rq->dl_nr_running--;
 	sub_nr_running(rq_of_dl_rq(dl_rq), 1);
+	dec_hmp_sched_stats_dl(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
 
 	dec_dl_deadline(dl_rq, dl_se->deadline);
 	dec_dl_migration(dl_se, dl_rq);
@@ -1846,6 +1872,10 @@ const struct sched_class dl_sched_class = {
 	.switched_to = switched_to_dl,
 
 	.update_curr = update_curr_dl,
+#ifdef CONFIG_SCHED_HMP
+	.inc_hmp_sched_stats = inc_hmp_sched_stats_dl,
+	.dec_hmp_sched_stats = dec_hmp_sched_stats_dl,
+#endif
 };
 
 #ifdef CONFIG_SCHED_DEBUG
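Registering the hooks in dl_sched_class lets generic code such as update_history() adjust HMP stats through the class table, e.g. p->sched_class->inc_hmp_sched_stats(rq, p), without knowing the task's policy. The cumulative-runnable-avg helpers called by the new dl hooks are not part of this diff; a plausible sketch of their shape, assuming they mirror the fair/rt HMP accounting (the field names here are assumptions, not taken from this patch):

/* assumed shape of the helpers used by inc/dec_hmp_sched_stats_dl() */
static inline void
inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
			    struct task_struct *p)
{
	/* assumed: a task contributes its windowed demand to the rq total */
	stats->cumulative_runnable_avg += p->ravg.demand;
}

static inline void
dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
			    struct task_struct *p)
{
	stats->cumulative_runnable_avg -= p->ravg.demand;
	/* the total must never go negative; a double decrement would trip this */
	BUG_ON((s64)stats->cumulative_runnable_avg < 0);
}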