diff options
author | Tapas Kumar Kundu <tkundu@codeaurora.org> | 2015-04-21 19:10:13 -0700 |
---|---|---|
committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 21:22:30 -0700 |
commit | c022987819f67a15a2fcde5b5a0f8bdb93a2bdd7 (patch) | |
tree | c56d0a9367365934285e54d79ca2748ad78182b4 | |
parent | 9ccb569bd7ec6245b89ce69b3ede551eec535960 (diff) |
soc: qcom: msm_perf: Add support for enter/exit cycle for io detection
Add support for enter/exit cycle sysfs nodes for IO detection.
There are some use cases which may benefit from different enter/exit
cycle load criteria for IO load. This change adds support for
that.
Change-Id: Iff135ed11b92becc374ace4578e0efc212d2b731
Signed-off-by: Tapas Kumar Kundu <tkundu@codeaurora.org>
-rw-r--r-- | drivers/soc/qcom/msm_performance.c | 51 | ||||
-rw-r--r-- | include/trace/events/power.h | 28 |
2 files changed, 58 insertions, 21 deletions
diff --git a/drivers/soc/qcom/msm_performance.c b/drivers/soc/qcom/msm_performance.c index 326f3aeffba8..24ffa44f82ca 100644 --- a/drivers/soc/qcom/msm_performance.c +++ b/drivers/soc/qcom/msm_performance.c @@ -38,7 +38,8 @@ struct cluster { /* stats for load detection */ /* IO */ u64 last_io_check_ts; - unsigned int iowait_cycle_cnt; + unsigned int iowait_enter_cycle_cnt; + unsigned int iowait_exit_cycle_cnt; spinlock_t iowait_lock; unsigned int cur_io_busy; bool io_change; @@ -101,6 +102,7 @@ static unsigned int workload_detect; /* IOwait related tunables */ static unsigned int io_enter_cycles = 4; +static unsigned int io_exit_cycles = 4; static u64 iowait_ceiling_pct = 25; static u64 iowait_floor_pct = 8; #define LAST_IO_CHECK_TOL (3 * USEC_PER_MSEC) @@ -921,6 +923,29 @@ static const struct kernel_param_ops param_ops_io_enter_cycles = { }; device_param_cb(io_enter_cycles, ¶m_ops_io_enter_cycles, NULL, 0644); +static int set_io_exit_cycles(const char *buf, const struct kernel_param *kp) +{ + unsigned int val; + + if (sscanf(buf, "%u\n", &val) != 1) + return -EINVAL; + + io_exit_cycles = val; + + return 0; +} + +static int get_io_exit_cycles(char *buf, const struct kernel_param *kp) +{ + return snprintf(buf, PAGE_SIZE, "%u", io_exit_cycles); +} + +static const struct kernel_param_ops param_ops_io_exit_cycles = { + .set = set_io_exit_cycles, + .get = get_io_exit_cycles, +}; +device_param_cb(io_exit_cycles, ¶m_ops_io_exit_cycles, NULL, 0644); + static int set_iowait_floor_pct(const char *buf, const struct kernel_param *kp) { u64 val; @@ -993,7 +1018,8 @@ static int set_workload_detect(const char *buf, const struct kernel_param *kp) for (i = 0; i < num_clusters; i++) { i_cl = managed_clusters[i]; spin_lock_irqsave(&i_cl->iowait_lock, flags); - i_cl->iowait_cycle_cnt = 0; + i_cl->iowait_enter_cycle_cnt = 0; + i_cl->iowait_exit_cycle_cnt = 0; i_cl->cur_io_busy = 0; i_cl->io_change = true; spin_unlock_irqrestore(&i_cl->iowait_lock, flags); @@ -1188,24 +1214,29 @@ static 
void check_cluster_iowait(struct cluster *cl, u64 now) if (!cl->cur_io_busy) { if (max_iowait > iowait_ceiling_pct) { - cl->iowait_cycle_cnt++; - if (cl->iowait_cycle_cnt >= io_enter_cycles) + cl->iowait_enter_cycle_cnt++; + if (cl->iowait_enter_cycle_cnt >= io_enter_cycles) { cl->cur_io_busy = 1; + cl->iowait_enter_cycle_cnt = 0; + } } else { - cl->iowait_cycle_cnt = 0; + cl->iowait_enter_cycle_cnt = 0; } } else { if (max_iowait < iowait_floor_pct) { - cl->iowait_cycle_cnt--; - if (!cl->iowait_cycle_cnt) + cl->iowait_exit_cycle_cnt++; + if (cl->iowait_exit_cycle_cnt >= io_exit_cycles) { cl->cur_io_busy = 0; + cl->iowait_exit_cycle_cnt = 0; + } } else { - cl->iowait_cycle_cnt = io_enter_cycles; + cl->iowait_exit_cycle_cnt = 0; } } + cl->last_io_check_ts = now; - trace_track_iowait(cpumask_first(cl->cpus), cl->iowait_cycle_cnt, - cl->cur_io_busy, max_iowait); + trace_track_iowait(cpumask_first(cl->cpus), cl->iowait_enter_cycle_cnt, + cl->iowait_exit_cycle_cnt, cl->cur_io_busy, max_iowait); if (temp_iobusy != cl->cur_io_busy) { cl->io_change = true; diff --git a/include/trace/events/power.h b/include/trace/events/power.h index be385800f9b4..5d9bb61be49c 100644 --- a/include/trace/events/power.h +++ b/include/trace/events/power.h @@ -797,34 +797,40 @@ DEFINE_EVENT(kpm_module, reevaluate_hotplug, DECLARE_EVENT_CLASS(kpm_module2, - TP_PROTO(unsigned int cpu, unsigned int cycles, unsigned int io_busy, - u64 iowait), + TP_PROTO(unsigned int cpu, unsigned int enter_cycle_cnt, + unsigned int exit_cycle_cnt, + unsigned int io_busy, u64 iowait), - TP_ARGS(cpu, cycles, io_busy, iowait), + TP_ARGS(cpu, enter_cycle_cnt, exit_cycle_cnt, io_busy, iowait), TP_STRUCT__entry( __field(u32, cpu) - __field(u32, cycles) + __field(u32, enter_cycle_cnt) + __field(u32, exit_cycle_cnt) __field(u32, io_busy) __field(u64, iowait) ), TP_fast_assign( __entry->cpu = cpu; - __entry->cycles = cycles; + __entry->enter_cycle_cnt = enter_cycle_cnt; + __entry->exit_cycle_cnt = exit_cycle_cnt; 
__entry->io_busy = io_busy; __entry->iowait = iowait; ), - TP_printk("CPU:%u cycles=%u io_busy=%u iowait=%lu", - (unsigned int)__entry->cpu, (unsigned int)__entry->cycles, - (unsigned int)__entry->io_busy, (unsigned long)__entry->iowait) + TP_printk("CPU:%u enter_cycles=%u exit_cycles=%u io_busy=%u iowait=%lu", + (unsigned int)__entry->cpu, + (unsigned int)__entry->enter_cycle_cnt, + (unsigned int)__entry->exit_cycle_cnt, + (unsigned int)__entry->io_busy, + (unsigned long)__entry->iowait) ); DEFINE_EVENT(kpm_module2, track_iowait, - TP_PROTO(unsigned int cpu, unsigned int cycles, unsigned int io_busy, - u64 iowait), - TP_ARGS(cpu, cycles, io_busy, iowait) + TP_PROTO(unsigned int cpu, unsigned int enter_cycle_cnt, + unsigned int exit_cycle_cnt, unsigned int io_busy, u64 iowait), + TP_ARGS(cpu, enter_cycle_cnt, exit_cycle_cnt, io_busy, iowait) ); DECLARE_EVENT_CLASS(cpu_modes, |