author     Pavankumar Kondeti <pkondeti@codeaurora.org>  2017-04-13 16:58:57 +0530
committer  Pavankumar Kondeti <pkondeti@codeaurora.org>  2017-04-15 17:48:48 +0530
commit     706e1daf94ff70d62243d5db81be2ffaaf5801af (patch)
tree       f091b232de4972be2f2a3d788c157b07766657bb /kernel
parent     e0f82761c280d159481f7938b5d75786502c135c (diff)
core_ctl: Harden the adjustment_possible() check for unisolation
When the need for CPUs is more than the active CPUs and there are some isolated CPUs, we wake up the core_ctl thread to unisolate some of them. The core_ctl task can't unisolate any CPU if all of them are isolated by other clients. Track the number of CPUs isolated by core_ctl itself and wake up the core_ctl task only when an adjustment is really possible.

Change-Id: I11ef10860532df25cbde572aabd4b925320db8fe
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
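As a minimal illustration of the hardened check, the sketch below models it in userspace C (the cluster struct, the hotplug_event() helper, and the pthread mutex standing in for state_lock are simplified stand-ins, not the kernel's types): the nr_isolated_cpus update and the wakeup decision are made under the same lock, so the waker never acts on a stale isolation count.

/* Userspace model of the hardened adjustment_possible() check.
 * Simplified stand-in types; a pthread mutex replaces the kernel's
 * state_lock. This mirrors the idea of the patch, not its code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct cluster {
	unsigned int active_cpus;
	unsigned int nr_isolated_cpus;	/* CPUs isolated by core_ctl itself */
	pthread_mutex_t state_lock;
};

/* A wakeup helps only if CPUs can be dropped (need < active) or if CPUs
 * that core_ctl itself isolated can be brought back (need > active). */
static bool adjustment_possible(const struct cluster *c, unsigned int need)
{
	return need < c->active_cpus ||
	       (need > c->active_cpus && c->nr_isolated_cpus);
}

/* Model of the hotplug-callback path: adjust the counter and evaluate
 * the check under one lock so the decision sees a consistent view. */
static bool hotplug_event(struct cluster *c, unsigned int need, bool unisolated)
{
	bool do_wakeup;

	pthread_mutex_lock(&c->state_lock);
	if (unisolated)
		c->nr_isolated_cpus--;
	do_wakeup = adjustment_possible(c, need);
	pthread_mutex_unlock(&c->state_lock);
	return do_wakeup;
}

int main(void)
{
	struct cluster c = {
		.active_cpus = 2,
		.nr_isolated_cpus = 0,	/* all isolation done by other clients */
		.state_lock = PTHREAD_MUTEX_INITIALIZER,
	};

	/* need > active, but core_ctl owns no isolated CPUs: no wakeup. */
	printf("wakeup: %d\n", hotplug_event(&c, 4, false));
	return 0;
}

Built with gcc -pthread, this prints "wakeup: 0": the need exceeds the active count, but every isolated CPU belongs to another client, so waking core_ctl could not help.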
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core_ctl.c | 25 +++++++++++++++++++++++--
1 file changed, 23 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index e094cba26ea5..0b5f2dea18a1 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -35,6 +35,7 @@ struct cluster_data {
unsigned int busy_down_thres[MAX_CPUS_PER_CLUSTER];
unsigned int active_cpus;
unsigned int num_cpus;
+ unsigned int nr_isolated_cpus;
cpumask_t cpu_mask;
unsigned int need_cpus;
unsigned int task_thres;
@@ -294,6 +295,9 @@ static ssize_t show_global_state(const struct cluster_data *state, char *buf)
count += snprintf(buf + count, PAGE_SIZE - count,
"\tNeed CPUs: %u\n", cluster->need_cpus);
count += snprintf(buf + count, PAGE_SIZE - count,
+ "\tNr isolated CPUs: %u\n",
+ cluster->nr_isolated_cpus);
+ count += snprintf(buf + count, PAGE_SIZE - count,
"\tBoost: %u\n", (unsigned int) cluster->boost);
}
spin_unlock_irq(&state_lock);
@@ -527,7 +531,7 @@ static bool adjustment_possible(const struct cluster_data *cluster,
unsigned int need)
{
return (need < cluster->active_cpus || (need > cluster->active_cpus &&
- sched_isolate_count(&cluster->cpu_mask, false)));
+ cluster->nr_isolated_cpus));
}
static bool eval_need(struct cluster_data *cluster)
@@ -718,6 +722,7 @@ static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
struct cpu_data *c, *tmp;
unsigned long flags;
unsigned int num_cpus = cluster->num_cpus;
+ unsigned int nr_isolated = 0;
/*
* Protect against entry being removed (and added at tail) by other
@@ -742,12 +747,14 @@ static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
if (!sched_isolate_cpu(c->cpu)) {
c->isolated_by_us = true;
move_cpu_lru(c);
+ nr_isolated++;
} else {
pr_debug("Unable to isolate CPU%u\n", c->cpu);
}
cluster->active_cpus = get_active_cpu_count(cluster);
spin_lock_irqsave(&state_lock, flags);
}
+ cluster->nr_isolated_cpus += nr_isolated;
spin_unlock_irqrestore(&state_lock, flags);
/*
@@ -757,6 +764,7 @@ static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
if (cluster->active_cpus <= cluster->max_cpus)
return;
+ nr_isolated = 0;
num_cpus = cluster->num_cpus;
spin_lock_irqsave(&state_lock, flags);
list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
@@ -774,12 +782,14 @@ static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
if (!sched_isolate_cpu(c->cpu)) {
c->isolated_by_us = true;
move_cpu_lru(c);
+ nr_isolated++;
} else {
pr_debug("Unable to isolate CPU%u\n", c->cpu);
}
cluster->active_cpus = get_active_cpu_count(cluster);
spin_lock_irqsave(&state_lock, flags);
}
+ cluster->nr_isolated_cpus += nr_isolated;
spin_unlock_irqrestore(&state_lock, flags);
}
@@ -790,6 +800,7 @@ static void __try_to_unisolate(struct cluster_data *cluster,
struct cpu_data *c, *tmp;
unsigned long flags;
unsigned int num_cpus = cluster->num_cpus;
+ unsigned int nr_unisolated = 0;
/*
* Protect against entry being removed (and added at tail) by other
@@ -814,12 +825,14 @@ static void __try_to_unisolate(struct cluster_data *cluster,
if (!sched_unisolate_cpu(c->cpu)) {
c->isolated_by_us = false;
move_cpu_lru(c);
+ nr_unisolated++;
} else {
pr_debug("Unable to unisolate CPU%u\n", c->cpu);
}
cluster->active_cpus = get_active_cpu_count(cluster);
spin_lock_irqsave(&state_lock, flags);
}
+ cluster->nr_isolated_cpus -= nr_unisolated;
spin_unlock_irqrestore(&state_lock, flags);
}
@@ -885,6 +898,8 @@ static int __ref cpu_callback(struct notifier_block *nfb,
struct cpu_data *state = &per_cpu(cpu_state, cpu);
struct cluster_data *cluster = state->cluster;
unsigned int need;
+ bool do_wakeup, unisolated = false;
+ unsigned long flags;
if (unlikely(!cluster || !cluster->inited))
return NOTIFY_DONE;
@@ -910,6 +925,7 @@ static int __ref cpu_callback(struct notifier_block *nfb,
if (state->isolated_by_us) {
sched_unisolate_cpu_unlocked(cpu);
state->isolated_by_us = false;
+ unisolated = true;
}
/* Move a CPU to the end of the LRU when it goes offline. */
@@ -923,7 +939,12 @@ static int __ref cpu_callback(struct notifier_block *nfb,
}
need = apply_limits(cluster, cluster->need_cpus);
- if (adjustment_possible(cluster, need))
+ spin_lock_irqsave(&state_lock, flags);
+ if (unisolated)
+ cluster->nr_isolated_cpus--;
+ do_wakeup = adjustment_possible(cluster, need);
+ spin_unlock_irqrestore(&state_lock, flags);
+ if (do_wakeup)
wake_up_core_ctl_thread(cluster);
return NOTIFY_OK;
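
A detail worth noting in the loops above: state_lock is dropped around each sched_isolate_cpu()/sched_unisolate_cpu() call, so bumping the shared counter per iteration would race with other updaters. The patch instead counts successes in a local nr_isolated/nr_unisolated variable and folds the batch into cluster->nr_isolated_cpus in a single locked update. A rough userspace model of that pattern follows (assumed names; a pthread mutex stands in for the kernel spinlock, and isolate_cpu() is a hypothetical stub for sched_isolate_cpu()):

/* Sketch of the batched-counter pattern from try_to_isolate().
 * Names are simplified stand-ins; this is not the kernel code. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int nr_isolated_cpus;	/* shared, guarded by state_lock */

/* Hypothetical stub for sched_isolate_cpu(): returns 0 on success. */
static int isolate_cpu(unsigned int cpu) { (void)cpu; return 0; }

static void try_to_isolate(const unsigned int *cpus, unsigned int n)
{
	unsigned int i, nr_isolated = 0;

	pthread_mutex_lock(&state_lock);
	for (i = 0; i < n; i++) {
		/* Drop the lock around the potentially blocking call,
		 * as the patch does around sched_isolate_cpu(). */
		pthread_mutex_unlock(&state_lock);
		if (!isolate_cpu(cpus[i]))
			nr_isolated++;	/* local count: no lock needed */
		pthread_mutex_lock(&state_lock);
	}
	/* Fold the batch into the shared counter in one locked update. */
	nr_isolated_cpus += nr_isolated;
	pthread_mutex_unlock(&state_lock);
}

int main(void)
{
	unsigned int cpus[] = { 1, 2, 3 };

	try_to_isolate(cpus, 3);
	printf("nr_isolated_cpus = %u\n", nr_isolated_cpus);
	return 0;
}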