diff options
author		Joonwoo Park <joonwoop@codeaurora.org>	2016-05-25 12:35:45 -0700
committer	Kyle Yan <kyan@codeaurora.org>	2016-06-03 14:48:05 -0700
commit		9103cfbaa112a6fc8f587a861a917fcc37ad9e75 (patch)
tree		d1975ec0126599e9bed102d37f1b090b3d3f90f7 /kernel
parent		11ad3c4f920a4df566d033a3858afbfce6c72460 (diff)
Revert "sched: add scheduling latency tracking procfs node"
This reverts commit b40bf941f61756bcc ("sched: add scheduling latency
tracking procfs node") as this feature is no longer used.
Change-Id: I5de789b6349e6ea78ae3725af2a3ffa72b7b7f11
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/fair.c	| 63 -----------------------------------------
-rw-r--r--	kernel/sysctl.c		|  5 -----
2 files changed, 0 insertions(+), 68 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6c5d393da122..67bbbb7f4a55 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -125,14 +125,6 @@ unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
 #ifdef CONFIG_SCHEDSTATS
 unsigned int sysctl_sched_latency_panic_threshold;
 unsigned int sysctl_sched_latency_warn_threshold;
-
-struct sched_max_latency {
-	unsigned int latency_us;
-	char comm[TASK_COMM_LEN];
-	pid_t pid;
-};
-
-static DEFINE_PER_CPU(struct sched_max_latency, sched_max_latency);
 #endif /* CONFIG_SCHEDSTATS */
 
 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
@@ -763,54 +755,6 @@ static void update_curr_fair(struct rq *rq)
 }
 
 #ifdef CONFIG_SCHEDSTATS
-int sched_max_latency_sysctl(struct ctl_table *table, int write,
-			     void __user *buffer, size_t *lenp, loff_t *ppos)
-{
-	int ret = 0;
-	int i, cpu = nr_cpu_ids;
-	char msg[256];
-	unsigned long flags;
-	struct rq *rq;
-	struct sched_max_latency max, *lat;
-
-	if (!write) {
-		max.latency_us = 0;
-		for_each_possible_cpu(i) {
-			rq = cpu_rq(i);
-			raw_spin_lock_irqsave(&rq->lock, flags);
-
-			lat = &per_cpu(sched_max_latency, i);
-			if (max.latency_us < lat->latency_us) {
-				max = *lat;
-				cpu = i;
-			}
-
-			raw_spin_unlock_irqrestore(&rq->lock, flags);
-		}
-
-		if (cpu != nr_cpu_ids) {
-			table->maxlen =
-			    snprintf(msg, sizeof(msg),
-				     "cpu%d comm=%s pid=%u latency=%u(us)",
-				     cpu, max.comm, max.pid, max.latency_us);
-			table->data = msg;
-			ret = proc_dostring(table, write, buffer, lenp, ppos);
-		}
-	} else {
-		for_each_possible_cpu(i) {
-			rq = cpu_rq(i);
-			raw_spin_lock_irqsave(&rq->lock, flags);
-
-			memset(&per_cpu(sched_max_latency, i), 0,
-			       sizeof(struct sched_max_latency));
-
-			raw_spin_unlock_irqrestore(&rq->lock, flags);
-		}
-	}
-
-	return ret;
-}
-
 static inline void check_for_high_latency(struct task_struct *p, u64 latency_us)
 {
 	int do_warn, do_panic;
@@ -847,7 +791,6 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	struct task_struct *p;
 	u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start;
-	struct sched_max_latency *max;
 
 	if (entity_is_task(se)) {
 		p = task_of(se);
@@ -861,12 +804,6 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		return;
 	}
 	trace_sched_stat_wait(p, delta);
-	max = this_cpu_ptr(&sched_max_latency);
-	if (max->latency_us < delta >> 10) {
-		max->latency_us = delta;
-		max->pid = task_of(se)->pid;
-		memcpy(max->comm, task_of(se)->comm, TASK_COMM_LEN);
-	}
 	check_for_high_latency(p, delta >> 10);
 }
 
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 3bfefc464569..99b413d75d73 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -611,11 +611,6 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
 	},
-	{
-		.procname	= "sched_max_latency_us",
-		.mode		= 0644,
-		.proc_handler	= sched_max_latency_sysctl,
-	},
 #endif
 #ifdef CONFIG_PROVE_LOCKING
 	{