author    Linux Build Service Account <lnxbuild@localhost>    2017-02-01 19:23:56 -0800
committer Gerrit - the friendly Code Review server <code-review@localhost>    2017-02-01 19:23:56 -0800
commit    70c97d9407d656f7c3c1a37d836306165fa09831 (patch)
tree      ba7fbbe8741ca1f602e6c0df59ddce003190fa5e
parent    62e878fd30f71eb559b8e395619ae53b4233348a (diff)
parent    b559daa261b6b12958259f06ae776f140afa92db (diff)
Merge "sched: maintain group busy time counters in runqueue"
-rw-r--r--  include/linux/sched.h          |   2
-rw-r--r--  include/trace/events/sched.h   |  97
-rw-r--r--  kernel/sched/core.c            |   1
-rw-r--r--  kernel/sched/hmp.c             | 282
-rw-r--r--  kernel/sched/sched.h           |  22
5 files changed, 131 insertions(+), 273 deletions(-)
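
What this merge does, in short: group busy time accounting moves out of per-related_thread_group percpu storage and into a struct group_cpu_time embedded in every runqueue (rq->grp_time), which lets the per-group window_start, sync_window_start() and the related_thread_group_lock read sections on the hot paths be removed. Below is a minimal sketch of the resulting layout, reduced to the fields this patch touches; the real structs in kernel/sched/sched.h carry far more state.

	/* Simplified view of the data-structure change; see the sched.h hunk below. */
	struct group_cpu_time {
		u64 curr_runnable_sum;
		u64 prev_runnable_sum;
		u64 nt_curr_runnable_sum;	/* "new task" variants */
		u64 nt_prev_runnable_sum;
		/*
		 * The old per-group window_start field is gone:
		 * rq->window_start is now the only authority.
		 */
	};

	struct rq {
		/* ... */
		u64 curr_runnable_sum;
		u64 prev_runnable_sum;
		u64 nt_curr_runnable_sum;
		u64 nt_prev_runnable_sum;
		struct group_cpu_time grp_time;	/* new: rolled over together with the rq sums */
		/* ... */
	};
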
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0d1d21e9f081..aca5c5694e09 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -335,8 +335,6 @@ enum task_event {
enum migrate_types {
GROUP_TO_RQ,
RQ_TO_GROUP,
- RQ_TO_RQ,
- GROUP_TO_GROUP,
};
#include <linux/spinlock.h>
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 9d58d703527c..0cd236442864 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -292,6 +292,55 @@ const char *__window_print(struct trace_seq *p, const u32 *buf, int buf_len)
return ret;
}
+
+static inline s64 __rq_update_sum(struct rq *rq, bool curr, bool new)
+{
+ if (curr)
+ if (new)
+ return rq->nt_curr_runnable_sum;
+ else
+ return rq->curr_runnable_sum;
+ else
+ if (new)
+ return rq->nt_prev_runnable_sum;
+ else
+ return rq->prev_runnable_sum;
+}
+
+static inline s64 __grp_update_sum(struct rq *rq, bool curr, bool new)
+{
+ if (curr)
+ if (new)
+ return rq->grp_time.nt_curr_runnable_sum;
+ else
+ return rq->grp_time.curr_runnable_sum;
+ else
+ if (new)
+ return rq->grp_time.nt_prev_runnable_sum;
+ else
+ return rq->grp_time.prev_runnable_sum;
+}
+
+static inline s64
+__get_update_sum(struct rq *rq, enum migrate_types migrate_type,
+ bool src, bool new, bool curr)
+{
+ switch (migrate_type) {
+ case RQ_TO_GROUP:
+ if (src)
+ return __rq_update_sum(rq, curr, new);
+ else
+ return __grp_update_sum(rq, curr, new);
+ case GROUP_TO_RQ:
+ if (src)
+ return __grp_update_sum(rq, curr, new);
+ else
+ return __rq_update_sum(rq, curr, new);
+ default:
+ WARN_ON_ONCE(1);
+ return -1;
+ }
+}
#endif
TRACE_EVENT(sched_update_task_ravg,
@@ -534,17 +583,13 @@ TRACE_EVENT(sched_update_pred_demand,
TRACE_EVENT(sched_migration_update_sum,
- TP_PROTO(struct task_struct *p, enum migrate_types migrate_type, struct migration_sum_data *d),
+ TP_PROTO(struct task_struct *p, enum migrate_types migrate_type, struct rq *rq),
- TP_ARGS(p, migrate_type, d),
+ TP_ARGS(p, migrate_type, rq),
TP_STRUCT__entry(
__field(int, tcpu )
__field(int, pid )
- __field( u64, cs )
- __field( u64, ps )
- __field( s64, nt_cs )
- __field( s64, nt_ps )
__field(enum migrate_types, migrate_type )
__field( s64, src_cs )
__field( s64, src_ps )
@@ -560,30 +605,22 @@ TRACE_EVENT(sched_migration_update_sum,
__entry->tcpu = task_cpu(p);
__entry->pid = p->pid;
__entry->migrate_type = migrate_type;
- __entry->src_cs = d->src_rq ?
- d->src_rq->curr_runnable_sum :
- d->src_cpu_time->curr_runnable_sum;
- __entry->src_ps = d->src_rq ?
- d->src_rq->prev_runnable_sum :
- d->src_cpu_time->prev_runnable_sum;
- __entry->dst_cs = d->dst_rq ?
- d->dst_rq->curr_runnable_sum :
- d->dst_cpu_time->curr_runnable_sum;
- __entry->dst_ps = d->dst_rq ?
- d->dst_rq->prev_runnable_sum :
- d->dst_cpu_time->prev_runnable_sum;
- __entry->src_nt_cs = d->src_rq ?
- d->src_rq->nt_curr_runnable_sum :
- d->src_cpu_time->nt_curr_runnable_sum;
- __entry->src_nt_ps = d->src_rq ?
- d->src_rq->nt_prev_runnable_sum :
- d->src_cpu_time->nt_prev_runnable_sum;
- __entry->dst_nt_cs = d->dst_rq ?
- d->dst_rq->nt_curr_runnable_sum :
- d->dst_cpu_time->nt_curr_runnable_sum;
- __entry->dst_nt_ps = d->dst_rq ?
- d->dst_rq->nt_prev_runnable_sum :
- d->dst_cpu_time->nt_prev_runnable_sum;
+ __entry->src_cs = __get_update_sum(rq, migrate_type,
+ true, false, true);
+ __entry->src_ps = __get_update_sum(rq, migrate_type,
+ true, false, false);
+ __entry->dst_cs = __get_update_sum(rq, migrate_type,
+ false, false, true);
+ __entry->dst_ps = __get_update_sum(rq, migrate_type,
+ false, false, false);
+ __entry->src_nt_cs = __get_update_sum(rq, migrate_type,
+ true, true, true);
+ __entry->src_nt_ps = __get_update_sum(rq, migrate_type,
+ true, true, false);
+ __entry->dst_nt_cs = __get_update_sum(rq, migrate_type,
+ false, true, true);
+ __entry->dst_nt_ps = __get_update_sum(rq, migrate_type,
+ false, true, false);
),
TP_printk("pid %d task_cpu %d migrate_type %s src_cs %llu src_ps %llu dst_cs %lld dst_ps %lld src_nt_cs %llu src_nt_ps %llu dst_nt_cs %lld dst_nt_ps %lld",
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 519aee32e122..672ed90fc879 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8362,6 +8362,7 @@ void __init sched_init(void)
rq->cluster = &init_cluster;
rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
+ memset(&rq->grp_time, 0, sizeof(struct group_cpu_time));
rq->old_busy_time = 0;
rq->old_estimated_time = 0;
rq->old_busy_time_group = 0;
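
The single line added here pairs with the identical memset() added in reset_all_window_stats() further down: because rq->grp_time is embedded rather than allocated per group, it must be cleared wherever the rq's own runnable sums are cleared. A sketch of that invariant as a hypothetical helper (the patch open-codes it in both places):

	static void reset_cpu_busy_counters(struct rq *rq)
	{
		rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
		rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
		memset(&rq->grp_time, 0, sizeof(rq->grp_time));	/* keep group sums in step */
	}
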
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 95125c5518e2..0a74c8d23552 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -28,8 +28,7 @@ const char *task_event_names[] = {"PUT_PREV_TASK", "PICK_NEXT_TASK",
"TASK_WAKE", "TASK_MIGRATE", "TASK_UPDATE",
"IRQ_UPDATE"};
-const char *migrate_type_names[] = {"GROUP_TO_RQ", "RQ_TO_GROUP",
- "RQ_TO_RQ", "GROUP_TO_GROUP"};
+const char *migrate_type_names[] = {"GROUP_TO_RQ", "RQ_TO_GROUP"};
static ktime_t ktime_last;
static bool sched_ktime_suspended;
@@ -1713,45 +1712,19 @@ static inline unsigned int load_to_freq(struct rq *rq, u64 load)
return freq;
}
-static inline struct group_cpu_time *
-_group_cpu_time(struct related_thread_group *grp, int cpu);
-
-/*
- * Return load from all related group in given cpu.
- * Caller must ensure that related_thread_group_lock is held.
- */
-static void _group_load_in_cpu(int cpu, u64 *grp_load, u64 *new_grp_load)
-{
- struct related_thread_group *grp;
-
- for_each_related_thread_group(grp) {
- struct group_cpu_time *cpu_time;
-
- cpu_time = _group_cpu_time(grp, cpu);
- *grp_load += cpu_time->prev_runnable_sum;
- if (new_grp_load)
- *new_grp_load += cpu_time->nt_prev_runnable_sum;
- }
-}
-
/*
* Return load from all related groups in given frequency domain.
- * Caller must ensure that related_thread_group_lock is held.
*/
static void group_load_in_freq_domain(struct cpumask *cpus,
u64 *grp_load, u64 *new_grp_load)
{
- struct related_thread_group *grp;
int j;
- for_each_related_thread_group(grp) {
- for_each_cpu(j, cpus) {
- struct group_cpu_time *cpu_time;
+ for_each_cpu(j, cpus) {
+ struct rq *rq = cpu_rq(j);
- cpu_time = _group_cpu_time(grp, j);
- *grp_load += cpu_time->prev_runnable_sum;
- *new_grp_load += cpu_time->nt_prev_runnable_sum;
- }
+ *grp_load += rq->grp_time.prev_runnable_sum;
+ *new_grp_load += rq->grp_time.nt_prev_runnable_sum;
}
}
@@ -1796,20 +1769,18 @@ static int send_notification(struct rq *rq, int check_pred, int check_groups)
if (freq_required < cur_freq + sysctl_sched_pred_alert_freq)
return 0;
} else {
- read_lock_irqsave(&related_thread_group_lock, flags);
/*
* Protect from concurrent update of rq->prev_runnable_sum and
* group cpu load
*/
- raw_spin_lock(&rq->lock);
+ raw_spin_lock_irqsave(&rq->lock, flags);
if (check_groups)
- _group_load_in_cpu(cpu_of(rq), &group_load, NULL);
+ group_load = rq->grp_time.prev_runnable_sum;
new_load = rq->prev_runnable_sum + group_load;
new_load = freq_policy_load(rq, new_load);
- raw_spin_unlock(&rq->lock);
- read_unlock_irqrestore(&related_thread_group_lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
cur_freq = load_to_freq(rq, rq->old_busy_time);
freq_required = load_to_freq(rq, new_load);
@@ -2283,6 +2254,31 @@ static void rollover_task_window(struct task_struct *p, bool full_window)
}
}
+static void rollover_cpu_window(struct rq *rq, bool full_window)
+{
+ u64 curr_sum = rq->curr_runnable_sum;
+ u64 nt_curr_sum = rq->nt_curr_runnable_sum;
+ u64 grp_curr_sum = rq->grp_time.curr_runnable_sum;
+ u64 grp_nt_curr_sum = rq->grp_time.nt_curr_runnable_sum;
+
+ if (unlikely(full_window)) {
+ curr_sum = 0;
+ nt_curr_sum = 0;
+ grp_curr_sum = 0;
+ grp_nt_curr_sum = 0;
+ }
+
+ rq->prev_runnable_sum = curr_sum;
+ rq->nt_prev_runnable_sum = nt_curr_sum;
+ rq->grp_time.prev_runnable_sum = grp_curr_sum;
+ rq->grp_time.nt_prev_runnable_sum = grp_nt_curr_sum;
+
+ rq->curr_runnable_sum = 0;
+ rq->nt_curr_runnable_sum = 0;
+ rq->grp_time.curr_runnable_sum = 0;
+ rq->grp_time.nt_curr_runnable_sum = 0;
+}
+
/*
* Account cpu activity in its busy time counters (rq->curr/prev_runnable_sum)
*/
@@ -2299,8 +2295,6 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
u64 *prev_runnable_sum = &rq->prev_runnable_sum;
u64 *nt_curr_runnable_sum = &rq->nt_curr_runnable_sum;
u64 *nt_prev_runnable_sum = &rq->nt_prev_runnable_sum;
- int flip_counters = 0;
- int prev_sum_reset = 0;
bool new_task;
struct related_thread_group *grp;
int cpu = rq->cpu;
@@ -2315,51 +2309,6 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
new_task = is_new_task(p);
- grp = p->grp;
- if (grp && sched_freq_aggregate) {
- /* cpu_time protected by rq_lock */
- struct group_cpu_time *cpu_time =
- _group_cpu_time(grp, cpu_of(rq));
-
- curr_runnable_sum = &cpu_time->curr_runnable_sum;
- prev_runnable_sum = &cpu_time->prev_runnable_sum;
-
- nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
- nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
-
- if (cpu_time->window_start != rq->window_start) {
- int nr_windows;
-
- delta = rq->window_start - cpu_time->window_start;
- nr_windows = div64_u64(delta, window_size);
- if (nr_windows > 1)
- prev_sum_reset = 1;
-
- cpu_time->window_start = rq->window_start;
- flip_counters = 1;
- }
-
- if (p_is_curr_task && new_window) {
- u64 curr_sum = rq->curr_runnable_sum;
- u64 nt_curr_sum = rq->nt_curr_runnable_sum;
-
- if (full_window)
- curr_sum = nt_curr_sum = 0;
-
- rq->prev_runnable_sum = curr_sum;
- rq->nt_prev_runnable_sum = nt_curr_sum;
-
- rq->curr_runnable_sum = 0;
- rq->nt_curr_runnable_sum = 0;
- }
- } else {
- if (p_is_curr_task && new_window) {
- flip_counters = 1;
- if (full_window)
- prev_sum_reset = 1;
- }
- }
-
/*
* Handle per-task window rollover. We don't care about the idle
* task or exiting tasks.
@@ -2369,26 +2318,25 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
rollover_task_window(p, full_window);
}
- if (flip_counters) {
- u64 curr_sum = *curr_runnable_sum;
- u64 nt_curr_sum = *nt_curr_runnable_sum;
+ if (p_is_curr_task && new_window) {
+ rollover_cpu_window(rq, full_window);
+ rollover_top_tasks(rq, full_window);
+ }
- if (prev_sum_reset)
- curr_sum = nt_curr_sum = 0;
+ if (!account_busy_for_cpu_time(rq, p, irqtime, event))
+ goto done;
- *prev_runnable_sum = curr_sum;
- *nt_prev_runnable_sum = nt_curr_sum;
+ grp = p->grp;
+ if (grp && sched_freq_aggregate) {
+ struct group_cpu_time *cpu_time = &rq->grp_time;
- *curr_runnable_sum = 0;
- *nt_curr_runnable_sum = 0;
+ curr_runnable_sum = &cpu_time->curr_runnable_sum;
+ prev_runnable_sum = &cpu_time->prev_runnable_sum;
- if (p_is_curr_task)
- rollover_top_tasks(rq, full_window);
+ nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+ nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
}
- if (!account_busy_for_cpu_time(rq, p, irqtime, event))
- goto done;
-
if (!new_window) {
/*
* account_busy_for_cpu_time() = 1 so busy time needs
@@ -2905,7 +2853,7 @@ void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
done:
trace_sched_update_task_ravg(p, rq, event, wallclock, irqtime,
rq->cc.cycles, rq->cc.time,
- _group_cpu_time(p->grp, cpu_of(rq)));
+ p->grp ? &rq->grp_time : NULL);
p->ravg.mark_start = wallclock;
}
@@ -3063,7 +3011,6 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
u64 start_ts = sched_ktime_clock();
int reason = WINDOW_CHANGE;
unsigned int old = 0, new = 0;
- struct related_thread_group *grp;
local_irq_save(flags);
@@ -3081,19 +3028,6 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
read_unlock(&tasklist_lock);
- list_for_each_entry(grp, &active_related_thread_groups, list) {
- int j;
-
- for_each_possible_cpu(j) {
- struct group_cpu_time *cpu_time;
- /* Protected by rq lock */
- cpu_time = _group_cpu_time(grp, j);
- memset(cpu_time, 0, sizeof(struct group_cpu_time));
- if (window_start)
- cpu_time->window_start = window_start;
- }
- }
-
if (window_size) {
sched_ravg_window = window_size * TICK_NSEC;
set_hmp_defaults();
@@ -3109,6 +3043,7 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
rq->window_start = window_start;
rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
+ memset(&rq->grp_time, 0, sizeof(struct group_cpu_time));
for (i = 0; i < NUM_TRACKED_WINDOWS; i++) {
memset(&rq->load_subs[i], 0,
sizeof(struct load_subtractions));
@@ -3204,9 +3139,6 @@ static inline u64 freq_policy_load(struct rq *rq, u64 load)
return load;
}
-static inline void
-sync_window_start(struct rq *rq, struct group_cpu_time *cpu_time);
-
void sched_get_cpus_busy(struct sched_load *busy,
const struct cpumask *query_cpus)
{
@@ -3223,7 +3155,6 @@ void sched_get_cpus_busy(struct sched_load *busy,
unsigned int window_size;
u64 max_prev_sum = 0;
int max_busy_cpu = cpumask_first(query_cpus);
- struct related_thread_group *grp;
u64 total_group_load = 0, total_ngload = 0;
bool aggregate_load = false;
struct sched_cluster *cluster = cpu_cluster(cpumask_first(query_cpus));
@@ -3233,8 +3164,6 @@ void sched_get_cpus_busy(struct sched_load *busy,
local_irq_save(flags);
- read_lock(&related_thread_group_lock);
-
/*
* This function could be called in timer context, and the
* current task may have been executing for a long time. Ensure
@@ -3287,15 +3216,6 @@ void sched_get_cpus_busy(struct sched_load *busy,
raw_spin_unlock(&cluster->load_lock);
- for_each_related_thread_group(grp) {
- for_each_cpu(cpu, query_cpus) {
- /* Protected by rq_lock */
- struct group_cpu_time *cpu_time =
- _group_cpu_time(grp, cpu);
- sync_window_start(cpu_rq(cpu), cpu_time);
- }
- }
-
group_load_in_freq_domain(
&cpu_rq(max_busy_cpu)->freq_domain_cpumask,
&total_group_load, &total_ngload);
@@ -3316,7 +3236,8 @@ void sched_get_cpus_busy(struct sched_load *busy,
ngload[i] = total_ngload;
}
} else {
- _group_load_in_cpu(cpu, &group_load[i], &ngload[i]);
+ group_load[i] = rq->grp_time.prev_runnable_sum;
+ ngload[i] = rq->grp_time.nt_prev_runnable_sum;
}
load[i] += group_load[i];
@@ -3341,8 +3262,6 @@ skip_early:
for_each_cpu(cpu, query_cpus)
raw_spin_unlock(&(cpu_rq(cpu))->lock);
- read_unlock(&related_thread_group_lock);
-
local_irq_restore(flags);
i = 0;
@@ -3659,18 +3578,17 @@ void fixup_busy_time(struct task_struct *p, int new_cpu)
if (grp && sched_freq_aggregate) {
struct group_cpu_time *cpu_time;
- cpu_time = _group_cpu_time(grp, cpu_of(src_rq));
+ cpu_time = &src_rq->grp_time;
src_curr_runnable_sum = &cpu_time->curr_runnable_sum;
src_prev_runnable_sum = &cpu_time->prev_runnable_sum;
src_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
src_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
- cpu_time = _group_cpu_time(grp, cpu_of(dest_rq));
+ cpu_time = &dest_rq->grp_time;
dst_curr_runnable_sum = &cpu_time->curr_runnable_sum;
dst_prev_runnable_sum = &cpu_time->prev_runnable_sum;
dst_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
dst_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
- sync_window_start(dest_rq, cpu_time);
if (p->ravg.curr_window) {
*src_curr_runnable_sum -= p->ravg.curr_window;
@@ -3799,61 +3717,6 @@ void set_preferred_cluster(struct related_thread_group *grp)
#define DEFAULT_CGROUP_COLOC_ID 1
-static inline void free_group_cputime(struct related_thread_group *grp)
-{
- free_percpu(grp->cpu_time);
-}
-
-static int alloc_group_cputime(struct related_thread_group *grp)
-{
- int i;
- struct group_cpu_time *cpu_time;
- int cpu = raw_smp_processor_id();
- struct rq *rq = cpu_rq(cpu);
- u64 window_start = rq->window_start;
-
- grp->cpu_time = alloc_percpu_gfp(struct group_cpu_time, GFP_ATOMIC);
- if (!grp->cpu_time)
- return -ENOMEM;
-
- for_each_possible_cpu(i) {
- cpu_time = per_cpu_ptr(grp->cpu_time, i);
- memset(cpu_time, 0, sizeof(struct group_cpu_time));
- cpu_time->window_start = window_start;
- }
-
- return 0;
-}
-
-/*
- * A group's window_start may be behind. When moving it forward, flip prev/curr
- * counters. When moving forward > 1 window, prev counter is set to 0
- */
-static inline void
-sync_window_start(struct rq *rq, struct group_cpu_time *cpu_time)
-{
- u64 delta;
- int nr_windows;
- u64 curr_sum = cpu_time->curr_runnable_sum;
- u64 nt_curr_sum = cpu_time->nt_curr_runnable_sum;
-
- delta = rq->window_start - cpu_time->window_start;
- if (!delta)
- return;
-
- nr_windows = div64_u64(delta, sched_ravg_window);
- if (nr_windows > 1)
- curr_sum = nt_curr_sum = 0;
-
- cpu_time->prev_runnable_sum = curr_sum;
- cpu_time->curr_runnable_sum = 0;
-
- cpu_time->nt_prev_runnable_sum = nt_curr_sum;
- cpu_time->nt_curr_runnable_sum = 0;
-
- cpu_time->window_start = rq->window_start;
-}
-
/*
* Task's cpu usage is accounted in:
* rq->curr/prev_runnable_sum, when its ->grp is NULL
@@ -3871,7 +3734,6 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
u64 *src_prev_runnable_sum, *dst_prev_runnable_sum;
u64 *src_nt_curr_runnable_sum, *dst_nt_curr_runnable_sum;
u64 *src_nt_prev_runnable_sum, *dst_nt_prev_runnable_sum;
- struct migration_sum_data d;
int migrate_type;
int cpu = cpu_of(rq);
bool new_task;
@@ -3886,15 +3748,10 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
update_task_ravg(p, rq, TASK_UPDATE, wallclock, 0);
new_task = is_new_task(p);
- /* cpu_time protected by related_thread_group_lock, grp->lock rq_lock */
- cpu_time = _group_cpu_time(grp, cpu);
+ cpu_time = &rq->grp_time;
if (event == ADD_TASK) {
- sync_window_start(rq, cpu_time);
migrate_type = RQ_TO_GROUP;
- d.src_rq = rq;
- d.src_cpu_time = NULL;
- d.dst_rq = NULL;
- d.dst_cpu_time = cpu_time;
+
src_curr_runnable_sum = &rq->curr_runnable_sum;
dst_curr_runnable_sum = &cpu_time->curr_runnable_sum;
src_prev_runnable_sum = &rq->prev_runnable_sum;
@@ -3919,17 +3776,7 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
} else {
migrate_type = GROUP_TO_RQ;
- d.src_rq = NULL;
- d.src_cpu_time = cpu_time;
- d.dst_rq = rq;
- d.dst_cpu_time = NULL;
- /*
- * In case of REM_TASK, cpu_time->window_start would be
- * uptodate, because of the update_task_ravg() we called
- * above on the moving task. Hence no need for
- * sync_window_start()
- */
src_curr_runnable_sum = &cpu_time->curr_runnable_sum;
dst_curr_runnable_sum = &rq->curr_runnable_sum;
src_prev_runnable_sum = &cpu_time->prev_runnable_sum;
@@ -3975,7 +3822,7 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
p->ravg.curr_window_cpu[cpu] = p->ravg.curr_window;
p->ravg.prev_window_cpu[cpu] = p->ravg.prev_window;
- trace_sched_migration_update_sum(p, migrate_type, &d);
+ trace_sched_migration_update_sum(p, migrate_type, rq);
BUG_ON((s64)*src_curr_runnable_sum < 0);
BUG_ON((s64)*src_prev_runnable_sum < 0);
@@ -3983,18 +3830,6 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
BUG_ON((s64)*src_nt_prev_runnable_sum < 0);
}
-static inline struct group_cpu_time *
-task_group_cpu_time(struct task_struct *p, int cpu)
-{
- return _group_cpu_time(rcu_dereference(p->grp), cpu);
-}
-
-static inline struct group_cpu_time *
-_group_cpu_time(struct related_thread_group *grp, int cpu)
-{
- return grp ? per_cpu_ptr(grp->cpu_time, cpu) : NULL;
-}
-
static inline struct related_thread_group*
lookup_related_thread_group(unsigned int group_id)
{
@@ -4014,12 +3849,6 @@ int alloc_related_thread_groups(void)
goto err;
}
- if (alloc_group_cputime(grp)) {
- kfree(grp);
- ret = -ENOMEM;
- goto err;
- }
-
grp->id = i;
INIT_LIST_HEAD(&grp->tasks);
INIT_LIST_HEAD(&grp->list);
@@ -4034,7 +3863,6 @@ err:
for (i = 1; i < MAX_NUM_CGROUP_COLOC_ID; i++) {
grp = lookup_related_thread_group(i);
if (grp) {
- free_group_cputime(grp);
kfree(grp);
related_thread_groups[i] = NULL;
} else {
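
Net effect of the hmp.c changes: rollover_cpu_window() flips the group counters together with the rq counters whenever the current task starts a new window, so the per-group window_start, sync_window_start(), alloc_group_cputime()/free_group_cputime() and the related_thread_group_lock read sections can all be dropped. A hypothetical reader sketch of the simplified locking, modelled on the send_notification() hunk above (the helper itself is not in the patch):

	static u64 cpu_plus_group_load(struct rq *rq)
	{
		unsigned long flags;
		u64 load;

		/*
		 * rq->lock alone now serializes both the rq sums and the
		 * embedded group counters; no related_thread_group_lock needed.
		 */
		raw_spin_lock_irqsave(&rq->lock, flags);
		load = rq->prev_runnable_sum + rq->grp_time.prev_runnable_sum;
		raw_spin_unlock_irqrestore(&rq->lock, flags);

		return load;
	}
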
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index d907eeb297a3..3e2ef7b0df3e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -366,6 +366,13 @@ struct load_subtractions {
u64 new_subs;
};
+struct group_cpu_time {
+ u64 curr_runnable_sum;
+ u64 prev_runnable_sum;
+ u64 nt_curr_runnable_sum;
+ u64 nt_prev_runnable_sum;
+};
+
struct sched_cluster {
raw_spinlock_t load_lock;
struct list_head list;
@@ -407,12 +414,6 @@ struct related_thread_group {
struct sched_cluster *preferred_cluster;
struct rcu_head rcu;
u64 last_update;
- struct group_cpu_time __percpu *cpu_time; /* one per cluster */
-};
-
-struct migration_sum_data {
- struct rq *src_rq, *dst_rq;
- struct group_cpu_time *src_cpu_time, *dst_cpu_time;
};
extern struct list_head cluster_head;
@@ -776,6 +777,7 @@ struct rq {
u64 prev_runnable_sum;
u64 nt_curr_runnable_sum;
u64 nt_prev_runnable_sum;
+ struct group_cpu_time grp_time;
struct load_subtractions load_subs[NUM_TRACKED_WINDOWS];
DECLARE_BITMAP_ARRAY(top_tasks_bitmap,
NUM_TRACKED_WINDOWS, NUM_LOAD_INDICES);
@@ -1350,14 +1352,6 @@ check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups);
extern void notify_migration(int src_cpu, int dest_cpu,
bool src_cpu_dead, struct task_struct *p);
-struct group_cpu_time {
- u64 curr_runnable_sum;
- u64 prev_runnable_sum;
- u64 nt_curr_runnable_sum;
- u64 nt_prev_runnable_sum;
- u64 window_start;
-};
-
/* Is frequency of two cpus synchronized with each other? */
static inline int same_freq_domain(int src_cpu, int dst_cpu)
{