author     Todd Kjos <tkjos@google.com>    2017-11-08 00:43:37 +0000
committer  Todd Kjos <tkjos@google.com>    2017-11-08 00:43:53 +0000
commit     3822fe484cef0ef3e37e7106bfd684639f64e77b (patch)
tree       2b3565657d82fcf1c42a750fbcdf16aa8c0b1f53 /kernel/sched
parent     df147c9e336cfcb4183db1eb9552b0429060cd0d (diff)
Revert "ANDROID: sched/rt: schedtune: Add boost retention to RT"
This reverts commit d194ba5d712f051ff6c025f3484bb72f219764e3.

Reason for revert: Broke some builds. Will fix and resubmit.

Change-Id: I4e6fa1562346eda1bbf058f1d5ace5ba6256ce07
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/core.c      1
-rw-r--r--  kernel/sched/rt.c      154
-rw-r--r--  kernel/sched/sched.h     1
3 files changed, 0 insertions, 156 deletions
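For orientation before the hunks: the code being removed implemented a 50 ms "boost retention" window for RT tasks, so that a boosted task which briefly sleeps does not immediately lose its schedtune boost. The fragment below is a condensed, non-authoritative sketch of that retention logic, reassembled from the removed lines; the helper names rt_defer_deboost() and rt_boost_retained() are illustrative only, and the schedtune_timer/schedtune_enqueued fields of struct sched_rt_entity come from the original patch (their removal is outside this diff, which is limited to kernel/sched).

#include <linux/hrtimer.h>
#include <linux/sched.h>

/* 50 ms window during which a sleeping RT task keeps its schedtune boost. */
#define RT_SCHEDTUNE_INTERVAL	50000000ULL

/*
 * Dequeue-on-sleep path (was in dequeue_task_rt()): instead of dropping the
 * boost immediately, take a task reference and arm a pinned relative hrtimer;
 * the deboost happens only if the timer expires before the task is re-enqueued.
 */
static void rt_defer_deboost(struct task_struct *p, struct sched_rt_entity *rt_se)
{
	get_task_struct(p);			/* released by the timer callback */
	hrtimer_start(&rt_se->schedtune_timer,
		      ns_to_ktime(RT_SCHEDTUNE_INTERVAL),
		      HRTIMER_MODE_REL_PINNED);
}

/*
 * Re-enqueue path (was in enqueue_task_rt()): hrtimer_try_to_cancel() returns
 * 1 only if a queued timer was removed before its callback ran, so the timer's
 * task reference is dropped here and the existing boost is simply kept.
 */
static bool rt_boost_retained(struct task_struct *p, struct sched_rt_entity *rt_se)
{
	if (hrtimer_try_to_cancel(&rt_se->schedtune_timer) == 1)
		put_task_struct(p);
	return rt_se->schedtune_enqueued;	/* true: boost still in place */
}

When the timer does fire, the removed rt_schedtune_timer() callback rechecks, under rq->lock, that the task is still off the runqueue and still RT before clearing the boost and kicking cpufreq; see the kernel/sched/rt.c hunks below.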
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 889fb1aff1e0..1eb91a696069 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2200,7 +2200,6 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
init_dl_task_timer(&p->dl);
__dl_clear_params(p);
- init_rt_schedtune_timer(&p->rt);
INIT_LIST_HEAD(&p->rt.run_list);
#ifdef CONFIG_PREEMPT_NOTIFIERS
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index ebf0d9329c86..c8322ab130eb 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -7,7 +7,6 @@
#include <linux/slab.h>
#include <linux/irq_work.h>
-#include <linux/hrtimer.h>
#include "walt.h"
#include "tune.h"
@@ -987,73 +986,6 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
return 0;
}
-#define RT_SCHEDTUNE_INTERVAL 50000000ULL
-
-static void sched_rt_update_capacity_req(struct rq *rq);
-
-static enum hrtimer_restart rt_schedtune_timer(struct hrtimer *timer)
-{
- struct sched_rt_entity *rt_se = container_of(timer,
- struct sched_rt_entity,
- schedtune_timer);
- struct task_struct *p = rt_task_of(rt_se);
- struct rq *rq = task_rq(p);
-
- raw_spin_lock(&rq->lock);
-
- /*
- * Nothing to do if:
- * - task has switched runqueues
- * - task isn't RT anymore
- */
- if (rq != task_rq(p) || (p->sched_class != &rt_sched_class))
- goto out;
-
- /*
- * If the task got enqueued back while the callback was pending, we
- * raced with an enqueue on another CPU. That's OK; do nothing, since
- * the enqueue path would have tried to cancel us and we shouldn't run.
- * Also check the schedtune_enqueued flag, since a class switch on a
- * sleeping task may already have canceled the timer and dequeued the boost.
- */
- if (p->on_rq || !rt_se->schedtune_enqueued)
- goto out;
-
- /*
- * RT task is no longer active, cancel boost
- */
- rt_se->schedtune_enqueued = false;
- schedtune_dequeue_task(p, cpu_of(rq));
- sched_rt_update_capacity_req(rq);
- cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
-out:
- raw_spin_unlock(&rq->lock);
-
- /*
- * This can free the task_struct if no more references.
- */
- put_task_struct(p);
-
- return HRTIMER_NORESTART;
-}
-
-void init_rt_schedtune_timer(struct sched_rt_entity *rt_se)
-{
- struct hrtimer *timer = &rt_se->schedtune_timer;
-
- hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- timer->function = rt_schedtune_timer;
- rt_se->schedtune_enqueued = false;
-}
-
-static void start_schedtune_timer(struct sched_rt_entity *rt_se)
-{
- struct hrtimer *timer = &rt_se->schedtune_timer;
-
- hrtimer_start(timer, ns_to_ktime(RT_SCHEDTUNE_INTERVAL),
- HRTIMER_MODE_REL_PINNED);
-}
-
/*
* Update the current task's runtime statistics. Skip current tasks that
* are not in our scheduling class.
@@ -1391,33 +1323,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
enqueue_pushable_task(rq, p);
- if (!schedtune_task_boost(p))
- return;
-
- /*
- * If the schedtune timer is active, a boost was already done;
- * just cancel the timer so that the deboost doesn't happen.
- * Otherwise, increase the boost. If an enqueued timer was
- * cancelled, put the task reference.
- */
- if (hrtimer_try_to_cancel(&rt_se->schedtune_timer) == 1)
- put_task_struct(p);
-
- /*
- * schedtune_enqueued can still be true here if enqueue_task_rt
- * grabbed the rq lock before the timer fired, or before the
- * timer callback acquired the rq lock.
- * schedtune_enqueued can be false if the timer callback is
- * running and has just released the rq lock, or if the timer
- * already finished running and canceled the boost.
- */
- if (rt_se->schedtune_enqueued)
- return;
-
- rt_se->schedtune_enqueued = true;
schedtune_enqueue_task(p, cpu_of(rq));
- sched_rt_update_capacity_req(rq);
- cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
}
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
@@ -1429,20 +1335,7 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
walt_dec_cumulative_runnable_avg(rq, p);
dequeue_pushable_task(rq, p);
-
- if (!rt_se->schedtune_enqueued)
- return;
-
- if (flags == DEQUEUE_SLEEP) {
- get_task_struct(p);
- start_schedtune_timer(rt_se);
- return;
- }
-
- rt_se->schedtune_enqueued = false;
schedtune_dequeue_task(p, cpu_of(rq));
- sched_rt_update_capacity_req(rq);
- cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
}
/*
@@ -1482,33 +1375,6 @@ static void yield_task_rt(struct rq *rq)
#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);
-/*
- * Perform a schedtune dequeue and cancelation of boost timers if needed.
- * Should be called only with the rq->lock held.
- */
-static void schedtune_dequeue_rt(struct rq *rq, struct task_struct *p)
-{
- struct sched_rt_entity *rt_se = &p->rt;
-
- BUG_ON(!raw_spin_is_locked(&rq->lock));
-
- if (!rt_se->schedtune_enqueued)
- return;
-
- /*
- * In case of a class change, cancel any active timers. If an
- * enqueued timer was cancelled, put the task reference.
- */
- if (hrtimer_try_to_cancel(&rt_se->schedtune_timer) == 1)
- put_task_struct(p);
-
- /* schedtune_enqueued is true, deboost it */
- rt_se->schedtune_enqueued = false;
- schedtune_dequeue_task(p, task_cpu(p));
- sched_rt_update_capacity_req(rq);
- cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
-}
-
static int
select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags,
int sibling_count_hint)
@@ -1563,19 +1429,6 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags,
rcu_read_unlock();
out:
- /*
- * If previous CPU was different, make sure to cancel any active
- * schedtune timers and deboost.
- */
- if (task_cpu(p) != cpu) {
- unsigned long fl;
- struct rq *prq = task_rq(p);
-
- raw_spin_lock_irqsave(&prq->lock, fl);
- schedtune_dequeue_rt(prq, p);
- raw_spin_unlock_irqrestore(&prq->lock, fl);
- }
-
return cpu;
}
@@ -2309,13 +2162,6 @@ static void rq_offline_rt(struct rq *rq)
static void switched_from_rt(struct rq *rq, struct task_struct *p)
{
/*
- * On class switch from rt, always cancel active schedtune timers;
- * this handles the cases where we switch class for a task that is
- * already rt-dequeued but has a running timer.
- */
- schedtune_dequeue_rt(rq, p);
-
- /*
* If there are other RT tasks then we will reschedule
* and the scheduling of the other RT tasks will handle
* the balancing. But if we are the last RT task
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 782746140711..203d64a0c947 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1398,7 +1398,6 @@ extern void resched_cpu(int cpu);
extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
-extern void init_rt_schedtune_timer(struct sched_rt_entity *rt_se);
extern struct dl_bandwidth def_dl_bandwidth;
extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);