Diffstat (limited to 'kernel/sched')
 kernel/sched/core.c | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 18d607f9a417..4f11b84eaf0a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -91,6 +91,8 @@
 #include <trace/events/sched.h>
 
 #include "walt.h"
 
+static bool have_sched_energy_data(void);
+
 DEFINE_MUTEX(sched_domains_mutex);
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
@@ -193,6 +195,10 @@ static int sched_feat_set(char *cmp)
 				sysctl_sched_features &= ~(1UL << i);
 				sched_feat_disable(i);
 			} else {
+				if (i == __SCHED_FEAT_ENERGY_AWARE)
+					WARN(!have_sched_energy_data(),
+					     "Missing sched energy data\n");
+
 				sysctl_sched_features |= (1UL << i);
 				sched_feat_enable(i);
 			}
@@ -6649,6 +6655,19 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
 	atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight);
 }
 
+static bool have_sched_energy_data(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		if (!rcu_dereference(per_cpu(sd_scs, cpu)) ||
+		    !rcu_dereference(per_cpu(sd_ea, cpu)))
+			return false;
+	}
+
+	return true;
+}
+
 /*
  * Check that the per-cpu provided sd energy data is consistent for all cpus
  * within the mask.
@@ -7461,6 +7480,9 @@ static int build_sched_domains(const struct cpumask *cpu_map,
 	}
 	rcu_read_unlock();
 
+	WARN(sched_feat(ENERGY_AWARE) && !have_sched_energy_data(),
+	     "Missing data for energy aware scheduling\n");
+
 	ret = 0;
 error:
 	__free_domain_allocs(&d, alloc_state, cpu_map);
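
For illustration only, here is a minimal standalone C sketch of the pattern this patch adds: before the ENERGY_AWARE feature bit is enabled, every possible CPU must have its per-CPU energy-model domain pointers (sd_ea, sd_scs) populated; if any is missing, the kernel warns once rather than failing. The names below (NR_CPUS as a constant, cpu_energy_model, per_cpu_data, have_energy_data) are illustrative stand-ins for this sketch, not kernel APIs:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

/* Stand-in for the per-CPU sd_ea/sd_scs pointers checked by the patch;
 * a NULL entry means that CPU has no energy-model data attached. */
static const struct cpu_energy_model { int nr_cap_states; } *per_cpu_data[NR_CPUS];

/* Mirrors have_sched_energy_data(): every CPU must have data, or the
 * feature is considered unusable. */
static bool have_energy_data(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (!per_cpu_data[cpu])
			return false;
	return true;
}

int main(void)
{
	bool energy_aware = true;	/* the feature bit being switched on */

	/* Analogue of the WARN() calls added to sched_feat_set() and
	 * build_sched_domains(): complain, but do not hard-fail. */
	if (energy_aware && !have_energy_data())
		fprintf(stderr, "Missing data for energy aware scheduling\n");
	return 0;
}

With all per_cpu_data entries left NULL, the sketch prints the warning, matching the patch's behavior of flagging a missing energy model while still letting domain construction complete.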