summaryrefslogtreecommitdiff
path: root/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched/core.c10
-rw-r--r--kernel/sched/deadline.c30
2 files changed, 38 insertions, 2 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7b3be71b6e2f..56253e1281a7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1689,7 +1689,13 @@ static void update_history(struct rq *rq, struct task_struct *p,
}
p->ravg.sum = 0;
- if (p->on_rq)
+
+ /*
+ * A throttled deadline sched class task gets dequeued without
+ * changing p->on_rq. Since the dequeue decrements hmp stats,
+ * avoid decrementing them here again.
+ */
+ if (p->on_rq && (!task_has_dl_policy(p) || !p->dl.dl_throttled))
p->sched_class->dec_hmp_sched_stats(rq, p);
avg = div64_u64(sum, sched_ravg_hist_size);
@@ -1705,7 +1711,7 @@ static void update_history(struct rq *rq, struct task_struct *p,
p->ravg.demand = demand;
- if (p->on_rq)
+ if (p->on_rq && (!task_has_dl_policy(p) || !p->dl.dl_throttled))
p->sched_class->inc_hmp_sched_stats(rq, p);
done:
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 8b0a15e285f9..ad92418a8fee 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -851,6 +851,30 @@ static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
#endif /* CONFIG_SMP */
+#ifdef CONFIG_SCHED_HMP
+
+static void
+inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
+{
+ inc_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
+{
+ dec_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+#else /* CONFIG_SCHED_HMP */
+
+static inline void
+inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { }
+
+static inline void
+dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { }
+
+#endif /* CONFIG_SCHED_HMP */
+
static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
@@ -860,6 +884,7 @@ void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
WARN_ON(!dl_prio(prio));
dl_rq->dl_nr_running++;
add_nr_running(rq_of_dl_rq(dl_rq), 1);
+ inc_hmp_sched_stats_dl(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
inc_dl_deadline(dl_rq, deadline);
inc_dl_migration(dl_se, dl_rq);
@@ -874,6 +899,7 @@ void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
WARN_ON(!dl_rq->dl_nr_running);
dl_rq->dl_nr_running--;
sub_nr_running(rq_of_dl_rq(dl_rq), 1);
+ dec_hmp_sched_stats_dl(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
dec_dl_deadline(dl_rq, dl_se->deadline);
dec_dl_migration(dl_se, dl_rq);
@@ -1846,6 +1872,10 @@ const struct sched_class dl_sched_class = {
.switched_to = switched_to_dl,
.update_curr = update_curr_dl,
+#ifdef CONFIG_SCHED_HMP
+ .inc_hmp_sched_stats = inc_hmp_sched_stats_dl,
+ .dec_hmp_sched_stats = dec_hmp_sched_stats_dl,
+#endif
};
#ifdef CONFIG_SCHED_DEBUG