author     Neil Leeder <nleeder@codeaurora.org>     2014-08-29 15:55:59 -0400
committer  David Keitel <dkeitel@codeaurora.org>    2016-03-23 20:58:13 -0700
commit     182eeb0c0daf70acd8ebf739f77078f0c9425540 (patch)
tree       ac5d1c624125f8a9bb46f221ed10c8d8bcd69b15 /drivers
parent     6470f7956ac6b479a1697e567a47bebd90095bb2 (diff)
Perf: arm64: stop counters when going into hotplug
Hotplug disables the PMU IRQ, but if counters are running in the window before the CPU is hotplugged off they can overflow and generate an interrupt. Because the interrupt is disabled, this prevents the CPU from going down.

Events are stopped during hotplug processing. However, perf is hooked into the timer tick and restarts enabled events on every tick, even if they were stopped. Change the event state to OFF to prevent this.

CPUs can still be power-collapsed while being hotplugged off, but hotplug processing will save and restore the correct state, so don't process power-collapse save/restore while hotplug is in progress.

Processing for stop reads the counters, so a separate call is no longer needed. Processing for start re-enables events, so the from_idle flag is not needed during pmu_enable.

Change-Id: I6a7f5b04955ebba8c4d76547f24e2be4071d7539
Signed-off-by: Neil Leeder <nleeder@codeaurora.org>
[satyap: merge conflict resolution and move changes in arch/arm64/kernel/perf_event.c to drivers/perf/arm_pmu.c to align with kernel 4.4]
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
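For readers unfamiliar with the pre-4.10 hotplug interface this patch builds on, the sketch below shows the general notifier pattern under the kernel 4.4 API (register_cpu_notifier(), the CPU_DOWN_PREPARE / CPU_DOWN_FAILED / CPU_STARTING actions, and CPU_TASKS_FROZEN masking). The example_* helpers are hypothetical stand-ins for the armpmu_hotplug_disable()/armpmu_hotplug_enable() routines added in the diff below; it illustrates the mechanism, not the driver code itself.

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/notifier.h>

/* Hypothetical stand-ins for armpmu_hotplug_disable()/armpmu_hotplug_enable(). */
static void example_stop_and_save_events(unsigned int cpu) { }
static void example_restore_and_start_events(unsigned int cpu) { }

static int example_cpu_notify(struct notifier_block *nb,
			      unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	/* Mask off CPU_TASKS_FROZEN so suspend/resume transitions match too. */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_PREPARE:
		/*
		 * CPU is about to go offline: stop events and mark them OFF
		 * so the timer-tick path cannot re-enable them.
		 */
		example_stop_and_save_events(cpu);
		break;
	case CPU_DOWN_FAILED:	/* offlining was aborted, undo the stop */
	case CPU_STARTING:	/* CPU is coming (back) online */
		example_restore_and_start_events(cpu);
		break;
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_nb = {
	.notifier_call = example_cpu_notify,
};

static int __init example_register(void)
{
	return register_cpu_notifier(&example_cpu_nb);
}
device_initcall(example_register);

Note that in the actual driver CPU_DOWN_PREPARE fires on a CPU other than the one going offline, so the stop path is pushed onto the dying CPU with smp_call_function_single(), as the hunks below show.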
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/perf/arm_pmu.c  53
1 file changed, 51 insertions(+), 2 deletions(-)
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 67b6b5bc3482..9795842e700a 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -30,6 +30,7 @@
#include <asm/irq_regs.h>
static DEFINE_PER_CPU(u32, from_idle);
+static DEFINE_PER_CPU(u32, hotplug_down);
static int
armpmu_map_cache_event(const unsigned (*cache_map)
@@ -764,6 +765,48 @@ static void armpmu_update_counters(void *x)
}
}
+static void armpmu_hotplug_enable(void *parm_pmu)
+{
+ struct arm_pmu *armpmu = parm_pmu;
+ struct pmu *pmu = &(armpmu->pmu);
+ struct pmu_hw_events *hw_events = armpmu->hw_events;
+ int idx;
+
+ for (idx = 0; idx <= armpmu->num_events; ++idx) {
+ struct perf_event *event = hw_events->events[idx];
+ if (!event)
+ continue;
+
+ event->state = event->hotplug_save_state;
+ pmu->start(event, 0);
+ }
+ per_cpu(hotplug_down, smp_processor_id()) = 0;
+}
+
+static void armpmu_hotplug_disable(void *parm_pmu)
+{
+ struct arm_pmu *armpmu = parm_pmu;
+ struct pmu *pmu = &(armpmu->pmu);
+ struct pmu_hw_events *hw_events = armpmu->hw_events;
+ int idx;
+
+ for (idx = 0; idx <= armpmu->num_events; ++idx) {
+ struct perf_event *event = hw_events->events[idx];
+ if (!event)
+ continue;
+
+ event->hotplug_save_state = event->state;
+ /*
+ * Prevent timer tick handler perf callback from enabling
+ * this event and potentially generating an interrupt
+ * before the CPU goes down.
+ */
+ event->state = PERF_EVENT_STATE_OFF;
+ pmu->stop(event, 0);
+ }
+ per_cpu(hotplug_down, smp_processor_id()) = 1;
+}
+
/*
* PMU hardware loses all context when a CPU goes offline.
* When a CPU is hotplugged back in, since some hardware registers are
@@ -781,6 +824,7 @@ static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
int ret = NOTIFY_DONE;
if ((masked_action != CPU_DOWN_PREPARE) &&
+ (masked_action != CPU_DOWN_FAILED) &&
(masked_action != CPU_STARTING))
return NOTIFY_DONE;
@@ -801,7 +845,7 @@ static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
if (cpu_pmu->pmu_state != ARM_PMU_STATE_OFF) {
if (cpu_has_active_perf(cpu, cpu_pmu))
smp_call_function_single(cpu,
- armpmu_update_counters, cpu_pmu, 1);
+ armpmu_hotplug_disable, cpu_pmu, 1);
/* Disarm the PMU IRQ before disappearing. */
if (cpu_pmu->plat_device) {
irq = cpu_pmu->percpu_irq;
@@ -812,6 +856,7 @@ static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
break;
case CPU_STARTING:
+ case CPU_DOWN_FAILED:
/* Reset PMU to clear counters for ftrace buffer */
if (cpu_pmu->reset)
cpu_pmu->reset(NULL);
@@ -824,7 +869,7 @@ static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
cpu_pmu_enable_percpu_irq(&irq);
}
if (cpu_has_active_perf(cpu, cpu_pmu)) {
- get_cpu_var(from_idle) = 1;
+ armpmu_hotplug_enable(cpu_pmu);
pmu = &cpu_pmu->pmu;
pmu->pmu_enable(pmu);
}
@@ -845,6 +890,10 @@ static int perf_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
if (!cpu_pmu)
return NOTIFY_OK;
+ /* If the cpu is going down, don't do anything here */
+ if (per_cpu(hotplug_down, cpu))
+ return NOTIFY_OK;
+
switch (cmd) {
case CPU_PM_ENTER:
if (cpu_pmu->save_pm_registers)
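The last hunk (cut off above) is the CPU PM notifier, which handles power-collapse save/restore, being taught to bail out while a hotplug is in flight. A minimal sketch of that guard follows, assuming the kernel 4.4 cpu_pm API (cpu_pm_register_notifier(), CPU_PM_ENTER / CPU_PM_ENTER_FAILED / CPU_PM_EXIT); the example_* per-CPU flag and helpers are hypothetical stand-ins for hotplug_down and the driver's save_pm_registers/restore_pm_registers hooks.

#include <linux/cpu_pm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/smp.h>

/* Hypothetical stand-in for the hotplug_down per-CPU flag in the patch. */
static DEFINE_PER_CPU(u32, example_hotplug_down);

static void example_save_pm_registers(unsigned int cpu) { }
static void example_restore_pm_registers(unsigned int cpu) { }

static int example_cpu_pm_notify(struct notifier_block *nb,
				 unsigned long cmd, void *v)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * Hotplug processing already stopped and saved the events;
	 * don't let the idle (power-collapse) path touch them again.
	 */
	if (per_cpu(example_hotplug_down, cpu))
		return NOTIFY_OK;

	switch (cmd) {
	case CPU_PM_ENTER:
		example_save_pm_registers(cpu);
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		example_restore_pm_registers(cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_pm_nb = {
	.notifier_call = example_cpu_pm_notify,
};

/* Registered once at probe time with cpu_pm_register_notifier(&example_cpu_pm_nb). */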