25 files changed, 424 insertions, 449 deletions
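Before the per-file diffs, a short illustrative sketch of the new platform-data interface documented in the arch/arm/include/asm/pmu.h change below: a board file can now supply runtime_resume/runtime_suspend callbacks in struct arm_pmu_platdata and let runtime PM gate the PMU hardware. Everything in this sketch (the board_* names, the idea of a dedicated PMU clock) is an assumption made for illustration only; it is not code from this series.

	/*
	 * Hypothetical board-support sketch: gate an assumed PMU clock from the
	 * new runtime PM callbacks in struct arm_pmu_platdata.
	 */
	#include <linux/clk.h>
	#include <linux/platform_device.h>
	#include <asm/pmu.h>

	/* Assumed clock feeding the PMU; obtained with clk_get() at board init. */
	static struct clk *board_pmu_clk;

	static int board_pmu_runtime_resume(struct device *dev)
	{
		/* Called once when the first pm_runtime_get() powers the device up. */
		return clk_enable(board_pmu_clk);
	}

	static int board_pmu_runtime_suspend(struct device *dev)
	{
		/* Called once, after the final pm_runtime_put(). */
		clk_disable(board_pmu_clk);
		return 0;
	}

	static struct arm_pmu_platdata board_pmu_platdata = {
		.runtime_resume		= board_pmu_runtime_resume,
		.runtime_suspend	= board_pmu_runtime_suspend,
	};

	static struct platform_device board_pmu_device = {
		.name			= "arm-pmu",
		.id			= -1,	/* ARM_PMU_DEVICE_CPU no longer exists */
		.dev.platform_data	= &board_pmu_platdata,
	};

The db8500 hunk near the end of this series shows the real pattern this sketch is modelled on (an "arm-pmu" platform device with platform data and .id = -1).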
diff --git a/Documentation/devicetree/bindings/arm/pmu.txt b/Documentation/devicetree/bindings/arm/pmu.txt index 1c044eb320cc..343781b9f246 100644 --- a/Documentation/devicetree/bindings/arm/pmu.txt +++ b/Documentation/devicetree/bindings/arm/pmu.txt @@ -7,8 +7,12 @@ representation in the device tree should be done as under:- Required properties: - compatible : should be one of + "arm,cortex-a15-pmu" "arm,cortex-a9-pmu" "arm,cortex-a8-pmu" + "arm,cortex-a7-pmu" + "arm,cortex-a5-pmu" + "arm,arm11mpcore-pmu" "arm,arm1176-pmu" "arm,arm1136-pmu" - interrupts : 1 combined interrupt or 1 per core. diff --git a/MAINTAINERS b/MAINTAINERS index fdc0119963e7..437a7dd36843 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -595,7 +595,6 @@ M: Will Deacon <will.deacon@arm.com> S: Maintained F: arch/arm/kernel/perf_event* F: arch/arm/oprofile/common.c -F: arch/arm/kernel/pmu.c F: arch/arm/include/asm/pmu.h F: arch/arm/kernel/hw_breakpoint.c F: arch/arm/include/asm/hw_breakpoint.h diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 5f6928565324..fcedbb3c922d 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1169,12 +1169,6 @@ config XSCALE_PMU depends on CPU_XSCALE default y -config CPU_HAS_PMU - depends on (CPU_V6 || CPU_V6K || CPU_V7 || XSCALE_PMU) && \ - (!ARCH_OMAP3 || OMAP3_EMU) - default y - bool - config MULTI_IRQ_HANDLER bool help @@ -1747,7 +1741,7 @@ config HIGHPTE config HW_PERF_EVENTS bool "Enable hardware performance counter support for perf events" - depends on PERF_EVENTS && CPU_HAS_PMU + depends on PERF_EVENTS default y help Enable hardware performance counter support for perf events. If diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h index e074948d8143..625cd621a436 100644 --- a/arch/arm/include/asm/perf_event.h +++ b/arch/arm/include/asm/perf_event.h @@ -12,6 +12,13 @@ #ifndef __ARM_PERF_EVENT_H__ #define __ARM_PERF_EVENT_H__ -/* Nothing to see here... */ +/* + * The ARMv7 CPU PMU supports up to 32 event counters. + */ +#define ARMPMU_MAX_HWEVENTS 32 + +#define HW_OP_UNSUPPORTED 0xFFFF +#define C(_x) PERF_COUNT_HW_CACHE_##_x +#define CACHE_OP_UNSUPPORTED 0xFFFF #endif /* __ARM_PERF_EVENT_H__ */ diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h index 4432305f4a2a..a26170dce02e 100644 --- a/arch/arm/include/asm/pmu.h +++ b/arch/arm/include/asm/pmu.h @@ -16,69 +16,30 @@ #include <linux/perf_event.h> /* - * Types of PMUs that can be accessed directly and require mutual - * exclusion between profiling tools. - */ -enum arm_pmu_type { - ARM_PMU_DEVICE_CPU = 0, - ARM_NUM_PMU_DEVICES, -}; - -/* * struct arm_pmu_platdata - ARM PMU platform data * * @handle_irq: an optional handler which will be called from the * interrupt and passed the address of the low level handler, * and can be used to implement any platform specific handling * before or after calling it. - * @enable_irq: an optional handler which will be called after - * request_irq and be used to handle some platform specific - * irq enablement - * @disable_irq: an optional handler which will be called before - * free_irq and be used to handle some platform specific - * irq disablement + * @runtime_resume: an optional handler which will be called by the + * runtime PM framework following a call to pm_runtime_get(). + * Note that if pm_runtime_get() is called more than once in + * succession this handler will only be called once. + * @runtime_suspend: an optional handler which will be called by the + * runtime PM framework following a call to pm_runtime_put(). 
+ * Note that if pm_runtime_get() is called more than once in + * succession this handler will only be called following the + * final call to pm_runtime_put() that actually disables the + * hardware. */ struct arm_pmu_platdata { irqreturn_t (*handle_irq)(int irq, void *dev, irq_handler_t pmu_handler); - void (*enable_irq)(int irq); - void (*disable_irq)(int irq); + int (*runtime_resume)(struct device *dev); + int (*runtime_suspend)(struct device *dev); }; -#ifdef CONFIG_CPU_HAS_PMU - -/** - * reserve_pmu() - reserve the hardware performance counters - * - * Reserve the hardware performance counters in the system for exclusive use. - * Returns 0 on success or -EBUSY if the lock is already held. - */ -extern int -reserve_pmu(enum arm_pmu_type type); - -/** - * release_pmu() - Relinquish control of the performance counters - * - * Release the performance counters and allow someone else to use them. - */ -extern void -release_pmu(enum arm_pmu_type type); - -#else /* CONFIG_CPU_HAS_PMU */ - -#include <linux/err.h> - -static inline int -reserve_pmu(enum arm_pmu_type type) -{ - return -ENODEV; -} - -static inline void -release_pmu(enum arm_pmu_type type) { } - -#endif /* CONFIG_CPU_HAS_PMU */ - #ifdef CONFIG_HW_PERF_EVENTS /* The events for a given PMU register set. */ @@ -103,7 +64,6 @@ struct pmu_hw_events { struct arm_pmu { struct pmu pmu; - enum arm_pmu_type type; cpumask_t active_irqs; char *name; irqreturn_t (*handle_irq)(int irq_num, void *dev); @@ -118,6 +78,8 @@ struct arm_pmu { void (*start)(void); void (*stop)(void); void (*reset)(void *); + int (*request_irq)(irq_handler_t handler); + void (*free_irq)(void); int (*map_event)(struct perf_event *event); int num_events; atomic_t active_events; @@ -129,7 +91,9 @@ struct arm_pmu { #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) -int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type); +extern const struct dev_pm_ops armpmu_dev_pm_ops; + +int armpmu_register(struct arm_pmu *armpmu, char *name, int type); u64 armpmu_event_update(struct perf_event *event, struct hw_perf_event *hwc, @@ -139,6 +103,13 @@ int armpmu_event_set_period(struct perf_event *event, struct hw_perf_event *hwc, int idx); +int armpmu_map_event(struct perf_event *event, + const unsigned (*event_map)[PERF_COUNT_HW_MAX], + const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX], + u32 raw_event_mask); + #endif /* CONFIG_HW_PERF_EVENTS */ #endif /* __ARM_PMU_H__ */ diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index 7ad2d5cf7008..1c4321430737 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -69,8 +69,7 @@ obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o obj-$(CONFIG_CPU_MOHAWK) += xscale-cp0.o obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o obj-$(CONFIG_IWMMXT) += iwmmxt.o -obj-$(CONFIG_CPU_HAS_PMU) += pmu.o -obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o +obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o perf_event_cpu.o AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index ab243b87118d..93971b1a4f0b 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -12,68 +12,15 @@ */ #define pr_fmt(fmt) "hw perfevents: " fmt -#include <linux/bitmap.h> -#include <linux/interrupt.h> #include <linux/kernel.h> -#include <linux/export.h> -#include <linux/perf_event.h> #include <linux/platform_device.h> -#include <linux/spinlock.h> +#include 
<linux/pm_runtime.h> #include <linux/uaccess.h> -#include <asm/cputype.h> -#include <asm/irq.h> #include <asm/irq_regs.h> #include <asm/pmu.h> #include <asm/stacktrace.h> -/* - * ARMv6 supports a maximum of 3 events, starting from index 0. If we add - * another platform that supports more, we need to increase this to be the - * largest of all platforms. - * - * ARMv7 supports up to 32 events: - * cycle counter CCNT + 31 events counters CNT0..30. - * Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters. - */ -#define ARMPMU_MAX_HWEVENTS 32 - -static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events); -static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask); -static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events); - -#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) - -/* Set at runtime when we know what CPU type we are. */ -static struct arm_pmu *cpu_pmu; - -const char *perf_pmu_name(void) -{ - if (!cpu_pmu) - return NULL; - - return cpu_pmu->pmu.name; -} -EXPORT_SYMBOL_GPL(perf_pmu_name); - -int perf_num_counters(void) -{ - int max_events = 0; - - if (cpu_pmu != NULL) - max_events = cpu_pmu->num_events; - - return max_events; -} -EXPORT_SYMBOL_GPL(perf_num_counters); - -#define HW_OP_UNSUPPORTED 0xFFFF - -#define C(_x) \ - PERF_COUNT_HW_CACHE_##_x - -#define CACHE_OP_UNSUPPORTED 0xFFFF - static int armpmu_map_cache_event(const unsigned (*cache_map) [PERF_COUNT_HW_CACHE_MAX] @@ -104,7 +51,7 @@ armpmu_map_cache_event(const unsigned (*cache_map) } static int -armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) +armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) { int mapping = (*event_map)[config]; return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; @@ -116,19 +63,20 @@ armpmu_map_raw_event(u32 raw_event_mask, u64 config) return (int)(config & raw_event_mask); } -static int map_cpu_event(struct perf_event *event, - const unsigned (*event_map)[PERF_COUNT_HW_MAX], - const unsigned (*cache_map) - [PERF_COUNT_HW_CACHE_MAX] - [PERF_COUNT_HW_CACHE_OP_MAX] - [PERF_COUNT_HW_CACHE_RESULT_MAX], - u32 raw_event_mask) +int +armpmu_map_event(struct perf_event *event, + const unsigned (*event_map)[PERF_COUNT_HW_MAX], + const unsigned (*cache_map) + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX], + u32 raw_event_mask) { u64 config = event->attr.config; switch (event->attr.type) { case PERF_TYPE_HARDWARE: - return armpmu_map_event(event_map, config); + return armpmu_map_hw_event(event_map, config); case PERF_TYPE_HW_CACHE: return armpmu_map_cache_event(cache_map, config); case PERF_TYPE_RAW: @@ -222,7 +170,6 @@ armpmu_stop(struct perf_event *event, int flags) */ if (!(hwc->state & PERF_HES_STOPPED)) { armpmu->disable(hwc, hwc->idx); - barrier(); /* why? 
*/ armpmu_event_update(event, hwc, hwc->idx); hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; } @@ -350,99 +297,41 @@ validate_group(struct perf_event *event) return 0; } -static irqreturn_t armpmu_platform_irq(int irq, void *dev) +static irqreturn_t armpmu_dispatch_irq(int irq, void *dev) { struct arm_pmu *armpmu = (struct arm_pmu *) dev; struct platform_device *plat_device = armpmu->plat_device; struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev); - return plat->handle_irq(irq, dev, armpmu->handle_irq); + if (plat && plat->handle_irq) + return plat->handle_irq(irq, dev, armpmu->handle_irq); + else + return armpmu->handle_irq(irq, dev); } static void armpmu_release_hardware(struct arm_pmu *armpmu) { - int i, irq, irqs; - struct platform_device *pmu_device = armpmu->plat_device; - struct arm_pmu_platdata *plat = - dev_get_platdata(&pmu_device->dev); - - irqs = min(pmu_device->num_resources, num_possible_cpus()); - - for (i = 0; i < irqs; ++i) { - if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs)) - continue; - irq = platform_get_irq(pmu_device, i); - if (irq >= 0) { - if (plat && plat->disable_irq) - plat->disable_irq(irq); - free_irq(irq, armpmu); - } - } - - release_pmu(armpmu->type); + armpmu->free_irq(); + pm_runtime_put_sync(&armpmu->plat_device->dev); } static int armpmu_reserve_hardware(struct arm_pmu *armpmu) { - struct arm_pmu_platdata *plat; - irq_handler_t handle_irq; - int i, err, irq, irqs; + int err; struct platform_device *pmu_device = armpmu->plat_device; if (!pmu_device) return -ENODEV; - err = reserve_pmu(armpmu->type); + pm_runtime_get_sync(&pmu_device->dev); + err = armpmu->request_irq(armpmu_dispatch_irq); if (err) { - pr_warning("unable to reserve pmu\n"); + armpmu_release_hardware(armpmu); return err; } - plat = dev_get_platdata(&pmu_device->dev); - if (plat && plat->handle_irq) - handle_irq = armpmu_platform_irq; - else - handle_irq = armpmu->handle_irq; - - irqs = min(pmu_device->num_resources, num_possible_cpus()); - if (irqs < 1) { - pr_err("no irqs for PMUs defined\n"); - return -ENODEV; - } - - for (i = 0; i < irqs; ++i) { - err = 0; - irq = platform_get_irq(pmu_device, i); - if (irq < 0) - continue; - - /* - * If we have a single PMU interrupt that we can't shift, - * assume that we're running on a uniprocessor machine and - * continue. Otherwise, continue without this interrupt. 
- */ - if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) { - pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", - irq, i); - continue; - } - - err = request_irq(irq, handle_irq, - IRQF_DISABLED | IRQF_NOBALANCING, - "arm-pmu", armpmu); - if (err) { - pr_err("unable to request IRQ%d for ARM PMU counters\n", - irq); - armpmu_release_hardware(armpmu); - return err; - } else if (plat && plat->enable_irq) - plat->enable_irq(irq); - - cpumask_set_cpu(i, &armpmu->active_irqs); - } - return 0; } @@ -581,6 +470,32 @@ static void armpmu_disable(struct pmu *pmu) armpmu->stop(); } +#ifdef CONFIG_PM_RUNTIME +static int armpmu_runtime_resume(struct device *dev) +{ + struct arm_pmu_platdata *plat = dev_get_platdata(dev); + + if (plat && plat->runtime_resume) + return plat->runtime_resume(dev); + + return 0; +} + +static int armpmu_runtime_suspend(struct device *dev) +{ + struct arm_pmu_platdata *plat = dev_get_platdata(dev); + + if (plat && plat->runtime_suspend) + return plat->runtime_suspend(dev); + + return 0; +} +#endif + +const struct dev_pm_ops armpmu_dev_pm_ops = { + SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL) +}; + static void __init armpmu_init(struct arm_pmu *armpmu) { atomic_set(&armpmu->active_events, 0); @@ -598,174 +513,14 @@ static void __init armpmu_init(struct arm_pmu *armpmu) }; } -int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type) +int armpmu_register(struct arm_pmu *armpmu, char *name, int type) { armpmu_init(armpmu); + pr_info("enabled with %s PMU driver, %d counters available\n", + armpmu->name, armpmu->num_events); return perf_pmu_register(&armpmu->pmu, name, type); } -/* Include the PMU-specific implementations. */ -#include "perf_event_xscale.c" -#include "perf_event_v6.c" -#include "perf_event_v7.c" - -/* - * Ensure the PMU has sane values out of reset. - * This requires SMP to be available, so exists as a separate initcall. - */ -static int __init -cpu_pmu_reset(void) -{ - if (cpu_pmu && cpu_pmu->reset) - return on_each_cpu(cpu_pmu->reset, NULL, 1); - return 0; -} -arch_initcall(cpu_pmu_reset); - -/* - * PMU platform driver and devicetree bindings. - */ -static struct of_device_id armpmu_of_device_ids[] = { - {.compatible = "arm,cortex-a9-pmu"}, - {.compatible = "arm,cortex-a8-pmu"}, - {.compatible = "arm,arm1136-pmu"}, - {.compatible = "arm,arm1176-pmu"}, - {}, -}; - -static struct platform_device_id armpmu_plat_device_ids[] = { - {.name = "arm-pmu"}, - {}, -}; - -static int __devinit armpmu_device_probe(struct platform_device *pdev) -{ - if (!cpu_pmu) - return -ENODEV; - - cpu_pmu->plat_device = pdev; - return 0; -} - -static struct platform_driver armpmu_driver = { - .driver = { - .name = "arm-pmu", - .of_match_table = armpmu_of_device_ids, - }, - .probe = armpmu_device_probe, - .id_table = armpmu_plat_device_ids, -}; - -static int __init register_pmu_driver(void) -{ - return platform_driver_register(&armpmu_driver); -} -device_initcall(register_pmu_driver); - -static struct pmu_hw_events *armpmu_get_cpu_events(void) -{ - return &__get_cpu_var(cpu_hw_events); -} - -static void __init cpu_pmu_init(struct arm_pmu *armpmu) -{ - int cpu; - for_each_possible_cpu(cpu) { - struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu); - events->events = per_cpu(hw_events, cpu); - events->used_mask = per_cpu(used_mask, cpu); - raw_spin_lock_init(&events->pmu_lock); - } - armpmu->get_hw_events = armpmu_get_cpu_events; - armpmu->type = ARM_PMU_DEVICE_CPU; -} - -/* - * PMU hardware loses all context when a CPU goes offline. 
- * When a CPU is hotplugged back in, since some hardware registers are - * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading - * junk values out of them. - */ -static int __cpuinit pmu_cpu_notify(struct notifier_block *b, - unsigned long action, void *hcpu) -{ - if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING) - return NOTIFY_DONE; - - if (cpu_pmu && cpu_pmu->reset) - cpu_pmu->reset(NULL); - - return NOTIFY_OK; -} - -static struct notifier_block __cpuinitdata pmu_cpu_notifier = { - .notifier_call = pmu_cpu_notify, -}; - -/* - * CPU PMU identification and registration. - */ -static int __init -init_hw_perf_events(void) -{ - unsigned long cpuid = read_cpuid_id(); - unsigned long implementor = (cpuid & 0xFF000000) >> 24; - unsigned long part_number = (cpuid & 0xFFF0); - - /* ARM Ltd CPUs. */ - if (0x41 == implementor) { - switch (part_number) { - case 0xB360: /* ARM1136 */ - case 0xB560: /* ARM1156 */ - case 0xB760: /* ARM1176 */ - cpu_pmu = armv6pmu_init(); - break; - case 0xB020: /* ARM11mpcore */ - cpu_pmu = armv6mpcore_pmu_init(); - break; - case 0xC080: /* Cortex-A8 */ - cpu_pmu = armv7_a8_pmu_init(); - break; - case 0xC090: /* Cortex-A9 */ - cpu_pmu = armv7_a9_pmu_init(); - break; - case 0xC050: /* Cortex-A5 */ - cpu_pmu = armv7_a5_pmu_init(); - break; - case 0xC0F0: /* Cortex-A15 */ - cpu_pmu = armv7_a15_pmu_init(); - break; - case 0xC070: /* Cortex-A7 */ - cpu_pmu = armv7_a7_pmu_init(); - break; - } - /* Intel CPUs [xscale]. */ - } else if (0x69 == implementor) { - part_number = (cpuid >> 13) & 0x7; - switch (part_number) { - case 1: - cpu_pmu = xscale1pmu_init(); - break; - case 2: - cpu_pmu = xscale2pmu_init(); - break; - } - } - - if (cpu_pmu) { - pr_info("enabled with %s PMU driver, %d counters available\n", - cpu_pmu->name, cpu_pmu->num_events); - cpu_pmu_init(cpu_pmu); - register_cpu_notifier(&pmu_cpu_notifier); - armpmu_register(cpu_pmu, cpu_pmu->name, PERF_TYPE_RAW); - } else { - pr_info("no hardware support available\n"); - } - - return 0; -} -early_initcall(init_hw_perf_events); - /* * Callchain handling code. */ diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c new file mode 100644 index 000000000000..8d7d8d4de9d6 --- /dev/null +++ b/arch/arm/kernel/perf_event_cpu.c @@ -0,0 +1,295 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) 2012 ARM Limited + * + * Author: Will Deacon <will.deacon@arm.com> + */ +#define pr_fmt(fmt) "CPU PMU: " fmt + +#include <linux/bitmap.h> +#include <linux/export.h> +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> + +#include <asm/cputype.h> +#include <asm/irq_regs.h> +#include <asm/pmu.h> + +/* Set at runtime when we know what CPU type we are. 
*/ +static struct arm_pmu *cpu_pmu; + +static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events); +static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask); +static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events); + +/* + * Despite the names, these two functions are CPU-specific and are used + * by the OProfile/perf code. + */ +const char *perf_pmu_name(void) +{ + if (!cpu_pmu) + return NULL; + + return cpu_pmu->pmu.name; +} +EXPORT_SYMBOL_GPL(perf_pmu_name); + +int perf_num_counters(void) +{ + int max_events = 0; + + if (cpu_pmu != NULL) + max_events = cpu_pmu->num_events; + + return max_events; +} +EXPORT_SYMBOL_GPL(perf_num_counters); + +/* Include the PMU-specific implementations. */ +#include "perf_event_xscale.c" +#include "perf_event_v6.c" +#include "perf_event_v7.c" + +static struct pmu_hw_events *cpu_pmu_get_cpu_events(void) +{ + return &__get_cpu_var(cpu_hw_events); +} + +static void cpu_pmu_free_irq(void) +{ + int i, irq, irqs; + struct platform_device *pmu_device = cpu_pmu->plat_device; + + irqs = min(pmu_device->num_resources, num_possible_cpus()); + + for (i = 0; i < irqs; ++i) { + if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs)) + continue; + irq = platform_get_irq(pmu_device, i); + if (irq >= 0) + free_irq(irq, cpu_pmu); + } +} + +static int cpu_pmu_request_irq(irq_handler_t handler) +{ + int i, err, irq, irqs; + struct platform_device *pmu_device = cpu_pmu->plat_device; + + if (!pmu_device) + return -ENODEV; + + irqs = min(pmu_device->num_resources, num_possible_cpus()); + if (irqs < 1) { + pr_err("no irqs for PMUs defined\n"); + return -ENODEV; + } + + for (i = 0; i < irqs; ++i) { + err = 0; + irq = platform_get_irq(pmu_device, i); + if (irq < 0) + continue; + + /* + * If we have a single PMU interrupt that we can't shift, + * assume that we're running on a uniprocessor machine and + * continue. Otherwise, continue without this interrupt. + */ + if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) { + pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", + irq, i); + continue; + } + + err = request_irq(irq, handler, IRQF_NOBALANCING, "arm-pmu", + cpu_pmu); + if (err) { + pr_err("unable to request IRQ%d for ARM PMU counters\n", + irq); + return err; + } + + cpumask_set_cpu(i, &cpu_pmu->active_irqs); + } + + return 0; +} + +static void __devinit cpu_pmu_init(struct arm_pmu *cpu_pmu) +{ + int cpu; + for_each_possible_cpu(cpu) { + struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu); + events->events = per_cpu(hw_events, cpu); + events->used_mask = per_cpu(used_mask, cpu); + raw_spin_lock_init(&events->pmu_lock); + } + + cpu_pmu->get_hw_events = cpu_pmu_get_cpu_events; + cpu_pmu->request_irq = cpu_pmu_request_irq; + cpu_pmu->free_irq = cpu_pmu_free_irq; + + /* Ensure the PMU has sane values out of reset. */ + if (cpu_pmu && cpu_pmu->reset) + on_each_cpu(cpu_pmu->reset, NULL, 1); +} + +/* + * PMU hardware loses all context when a CPU goes offline. + * When a CPU is hotplugged back in, since some hardware registers are + * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading + * junk values out of them. 
+ */ +static int __cpuinit cpu_pmu_notify(struct notifier_block *b, + unsigned long action, void *hcpu) +{ + if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING) + return NOTIFY_DONE; + + if (cpu_pmu && cpu_pmu->reset) + cpu_pmu->reset(NULL); + + return NOTIFY_OK; +} + +static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = { + .notifier_call = cpu_pmu_notify, +}; + +/* + * PMU platform driver and devicetree bindings. + */ +static struct of_device_id __devinitdata cpu_pmu_of_device_ids[] = { + {.compatible = "arm,cortex-a15-pmu", .data = armv7_a15_pmu_init}, + {.compatible = "arm,cortex-a9-pmu", .data = armv7_a9_pmu_init}, + {.compatible = "arm,cortex-a8-pmu", .data = armv7_a8_pmu_init}, + {.compatible = "arm,cortex-a7-pmu", .data = armv7_a7_pmu_init}, + {.compatible = "arm,cortex-a5-pmu", .data = armv7_a5_pmu_init}, + {.compatible = "arm,arm11mpcore-pmu", .data = armv6mpcore_pmu_init}, + {.compatible = "arm,arm1176-pmu", .data = armv6pmu_init}, + {.compatible = "arm,arm1136-pmu", .data = armv6pmu_init}, + {}, +}; + +static struct platform_device_id __devinitdata cpu_pmu_plat_device_ids[] = { + {.name = "arm-pmu"}, + {}, +}; + +/* + * CPU PMU identification and probing. + */ +static struct arm_pmu *__devinit probe_current_pmu(void) +{ + struct arm_pmu *pmu = NULL; + int cpu = get_cpu(); + unsigned long cpuid = read_cpuid_id(); + unsigned long implementor = (cpuid & 0xFF000000) >> 24; + unsigned long part_number = (cpuid & 0xFFF0); + + pr_info("probing PMU on CPU %d\n", cpu); + + /* ARM Ltd CPUs. */ + if (0x41 == implementor) { + switch (part_number) { + case 0xB360: /* ARM1136 */ + case 0xB560: /* ARM1156 */ + case 0xB760: /* ARM1176 */ + pmu = armv6pmu_init(); + break; + case 0xB020: /* ARM11mpcore */ + pmu = armv6mpcore_pmu_init(); + break; + case 0xC080: /* Cortex-A8 */ + pmu = armv7_a8_pmu_init(); + break; + case 0xC090: /* Cortex-A9 */ + pmu = armv7_a9_pmu_init(); + break; + case 0xC050: /* Cortex-A5 */ + pmu = armv7_a5_pmu_init(); + break; + case 0xC0F0: /* Cortex-A15 */ + pmu = armv7_a15_pmu_init(); + break; + case 0xC070: /* Cortex-A7 */ + pmu = armv7_a7_pmu_init(); + break; + } + /* Intel CPUs [xscale]. 
*/ + } else if (0x69 == implementor) { + part_number = (cpuid >> 13) & 0x7; + switch (part_number) { + case 1: + pmu = xscale1pmu_init(); + break; + case 2: + pmu = xscale2pmu_init(); + break; + } + } + + put_cpu(); + return pmu; +} + +static int __devinit cpu_pmu_device_probe(struct platform_device *pdev) +{ + const struct of_device_id *of_id; + struct arm_pmu *(*init_fn)(void); + struct device_node *node = pdev->dev.of_node; + + if (cpu_pmu) { + pr_info("attempt to register multiple PMU devices!"); + return -ENOSPC; + } + + if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) { + init_fn = of_id->data; + cpu_pmu = init_fn(); + } else { + cpu_pmu = probe_current_pmu(); + } + + if (!cpu_pmu) + return -ENODEV; + + cpu_pmu->plat_device = pdev; + cpu_pmu_init(cpu_pmu); + register_cpu_notifier(&cpu_pmu_hotplug_notifier); + armpmu_register(cpu_pmu, cpu_pmu->name, PERF_TYPE_RAW); + + return 0; +} + +static struct platform_driver cpu_pmu_driver = { + .driver = { + .name = "arm-pmu", + .pm = &armpmu_dev_pm_ops, + .of_match_table = cpu_pmu_of_device_ids, + }, + .probe = cpu_pmu_device_probe, + .id_table = cpu_pmu_plat_device_ids, +}; + +static int __init register_pmu_driver(void) +{ + return platform_driver_register(&cpu_pmu_driver); +} +device_initcall(register_pmu_driver); diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c index c90fcb2b6967..6ccc07971745 100644 --- a/arch/arm/kernel/perf_event_v6.c +++ b/arch/arm/kernel/perf_event_v6.c @@ -645,7 +645,7 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc, static int armv6_map_event(struct perf_event *event) { - return map_cpu_event(event, &armv6_perf_map, + return armpmu_map_event(event, &armv6_perf_map, &armv6_perf_cache_map, 0xFF); } @@ -664,7 +664,7 @@ static struct arm_pmu armv6pmu = { .max_period = (1LLU << 32) - 1, }; -static struct arm_pmu *__init armv6pmu_init(void) +static struct arm_pmu *__devinit armv6pmu_init(void) { return &armv6pmu; } @@ -679,7 +679,7 @@ static struct arm_pmu *__init armv6pmu_init(void) static int armv6mpcore_map_event(struct perf_event *event) { - return map_cpu_event(event, &armv6mpcore_perf_map, + return armpmu_map_event(event, &armv6mpcore_perf_map, &armv6mpcore_perf_cache_map, 0xFF); } @@ -698,17 +698,17 @@ static struct arm_pmu armv6mpcore_pmu = { .max_period = (1LLU << 32) - 1, }; -static struct arm_pmu *__init armv6mpcore_pmu_init(void) +static struct arm_pmu *__devinit armv6mpcore_pmu_init(void) { return &armv6mpcore_pmu; } #else -static struct arm_pmu *__init armv6pmu_init(void) +static struct arm_pmu *__devinit armv6pmu_init(void) { return NULL; } -static struct arm_pmu *__init armv6mpcore_pmu_init(void) +static struct arm_pmu *__devinit armv6mpcore_pmu_init(void) { return NULL; } diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index f04070bd2183..bd4b090ebcfd 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -1204,31 +1204,31 @@ static void armv7pmu_reset(void *info) static int armv7_a8_map_event(struct perf_event *event) { - return map_cpu_event(event, &armv7_a8_perf_map, + return armpmu_map_event(event, &armv7_a8_perf_map, &armv7_a8_perf_cache_map, 0xFF); } static int armv7_a9_map_event(struct perf_event *event) { - return map_cpu_event(event, &armv7_a9_perf_map, + return armpmu_map_event(event, &armv7_a9_perf_map, &armv7_a9_perf_cache_map, 0xFF); } static int armv7_a5_map_event(struct perf_event *event) { - return map_cpu_event(event, &armv7_a5_perf_map, + return 
armpmu_map_event(event, &armv7_a5_perf_map, &armv7_a5_perf_cache_map, 0xFF); } static int armv7_a15_map_event(struct perf_event *event) { - return map_cpu_event(event, &armv7_a15_perf_map, + return armpmu_map_event(event, &armv7_a15_perf_map, &armv7_a15_perf_cache_map, 0xFF); } static int armv7_a7_map_event(struct perf_event *event) { - return map_cpu_event(event, &armv7_a7_perf_map, + return armpmu_map_event(event, &armv7_a7_perf_map, &armv7_a7_perf_cache_map, 0xFF); } @@ -1245,7 +1245,7 @@ static struct arm_pmu armv7pmu = { .max_period = (1LLU << 32) - 1, }; -static u32 __init armv7_read_num_pmnc_events(void) +static u32 __devinit armv7_read_num_pmnc_events(void) { u32 nb_cnt; @@ -1256,7 +1256,7 @@ static u32 __init armv7_read_num_pmnc_events(void) return nb_cnt + 1; } -static struct arm_pmu *__init armv7_a8_pmu_init(void) +static struct arm_pmu *__devinit armv7_a8_pmu_init(void) { armv7pmu.name = "ARMv7 Cortex-A8"; armv7pmu.map_event = armv7_a8_map_event; @@ -1264,7 +1264,7 @@ static struct arm_pmu *__init armv7_a8_pmu_init(void) return &armv7pmu; } -static struct arm_pmu *__init armv7_a9_pmu_init(void) +static struct arm_pmu *__devinit armv7_a9_pmu_init(void) { armv7pmu.name = "ARMv7 Cortex-A9"; armv7pmu.map_event = armv7_a9_map_event; @@ -1272,7 +1272,7 @@ static struct arm_pmu *__init armv7_a9_pmu_init(void) return &armv7pmu; } -static struct arm_pmu *__init armv7_a5_pmu_init(void) +static struct arm_pmu *__devinit armv7_a5_pmu_init(void) { armv7pmu.name = "ARMv7 Cortex-A5"; armv7pmu.map_event = armv7_a5_map_event; @@ -1280,7 +1280,7 @@ static struct arm_pmu *__init armv7_a5_pmu_init(void) return &armv7pmu; } -static struct arm_pmu *__init armv7_a15_pmu_init(void) +static struct arm_pmu *__devinit armv7_a15_pmu_init(void) { armv7pmu.name = "ARMv7 Cortex-A15"; armv7pmu.map_event = armv7_a15_map_event; @@ -1289,7 +1289,7 @@ static struct arm_pmu *__init armv7_a15_pmu_init(void) return &armv7pmu; } -static struct arm_pmu *__init armv7_a7_pmu_init(void) +static struct arm_pmu *__devinit armv7_a7_pmu_init(void) { armv7pmu.name = "ARMv7 Cortex-A7"; armv7pmu.map_event = armv7_a7_map_event; @@ -1298,27 +1298,27 @@ static struct arm_pmu *__init armv7_a7_pmu_init(void) return &armv7pmu; } #else -static struct arm_pmu *__init armv7_a8_pmu_init(void) +static struct arm_pmu *__devinit armv7_a8_pmu_init(void) { return NULL; } -static struct arm_pmu *__init armv7_a9_pmu_init(void) +static struct arm_pmu *__devinit armv7_a9_pmu_init(void) { return NULL; } -static struct arm_pmu *__init armv7_a5_pmu_init(void) +static struct arm_pmu *__devinit armv7_a5_pmu_init(void) { return NULL; } -static struct arm_pmu *__init armv7_a15_pmu_init(void) +static struct arm_pmu *__devinit armv7_a15_pmu_init(void) { return NULL; } -static struct arm_pmu *__init armv7_a7_pmu_init(void) +static struct arm_pmu *__devinit armv7_a7_pmu_init(void) { return NULL; } diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c index f759fe0bab63..426e19f380a2 100644 --- a/arch/arm/kernel/perf_event_xscale.c +++ b/arch/arm/kernel/perf_event_xscale.c @@ -430,7 +430,7 @@ xscale1pmu_write_counter(int counter, u32 val) static int xscale_map_event(struct perf_event *event) { - return map_cpu_event(event, &xscale_perf_map, + return armpmu_map_event(event, &xscale_perf_map, &xscale_perf_cache_map, 0xFF); } @@ -449,7 +449,7 @@ static struct arm_pmu xscale1pmu = { .max_period = (1LLU << 32) - 1, }; -static struct arm_pmu *__init xscale1pmu_init(void) +static struct arm_pmu *__devinit xscale1pmu_init(void) { 
return &xscale1pmu; } @@ -816,17 +816,17 @@ static struct arm_pmu xscale2pmu = { .max_period = (1LLU << 32) - 1, }; -static struct arm_pmu *__init xscale2pmu_init(void) +static struct arm_pmu *__devinit xscale2pmu_init(void) { return &xscale2pmu; } #else -static struct arm_pmu *__init xscale1pmu_init(void) +static struct arm_pmu *__devinit xscale1pmu_init(void) { return NULL; } -static struct arm_pmu *__init xscale2pmu_init(void) +static struct arm_pmu *__devinit xscale2pmu_init(void) { return NULL; } diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c deleted file mode 100644 index 2334bf8a650a..000000000000 --- a/arch/arm/kernel/pmu.c +++ /dev/null @@ -1,36 +0,0 @@ -/* - * linux/arch/arm/kernel/pmu.c - * - * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles - * Copyright (C) 2010 ARM Ltd, Will Deacon - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - */ - -#include <linux/err.h> -#include <linux/kernel.h> -#include <linux/module.h> - -#include <asm/pmu.h> - -/* - * PMU locking to ensure mutual exclusion between different subsystems. - */ -static unsigned long pmu_lock[BITS_TO_LONGS(ARM_NUM_PMU_DEVICES)]; - -int -reserve_pmu(enum arm_pmu_type type) -{ - return test_and_set_bit_lock(type, pmu_lock) ? -EBUSY : 0; -} -EXPORT_SYMBOL_GPL(reserve_pmu); - -void -release_pmu(enum arm_pmu_type type) -{ - clear_bit_unlock(type, pmu_lock); -} -EXPORT_SYMBOL_GPL(release_pmu); diff --git a/arch/arm/mach-bcmring/arch.c b/arch/arm/mach-bcmring/arch.c index 7799e910e271..c18a5048b6c5 100644 --- a/arch/arm/mach-bcmring/arch.c +++ b/arch/arm/mach-bcmring/arch.c @@ -29,7 +29,6 @@ #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/mach/time.h> -#include <asm/pmu.h> #include <asm/mach/arch.h> #include <mach/dma.h> @@ -116,7 +115,7 @@ static struct resource pmu_resource = { static struct platform_device pmu_device = { .name = "arm-pmu", - .id = ARM_PMU_DEVICE_CPU, + .id = -1, .resource = &pmu_resource, .num_resources = 1, }; diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c index c00c68961bb8..02b9478b786f 100644 --- a/arch/arm/mach-omap2/devices.c +++ b/arch/arm/mach-omap2/devices.c @@ -23,7 +23,6 @@ #include <mach/irqs.h> #include <asm/mach-types.h> #include <asm/mach/map.h> -#include <asm/pmu.h> #include "iomap.h" #include <plat/board.h> @@ -448,7 +447,7 @@ static struct resource omap3_pmu_resource = { static struct platform_device omap_pmu_device = { .name = "arm-pmu", - .id = ARM_PMU_DEVICE_CPU, + .id = -1, .num_resources = 1, }; diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c index 166eee5b8a70..c1f3b1279d97 100644 --- a/arch/arm/mach-pxa/devices.c +++ b/arch/arm/mach-pxa/devices.c @@ -6,7 +6,6 @@ #include <linux/spi/pxa2xx_spi.h> #include <linux/i2c/pxa-i2c.h> -#include <asm/pmu.h> #include <mach/udc.h> #include <mach/pxa3xx-u2d.h> #include <mach/pxafb.h> @@ -42,7 +41,7 @@ static struct resource pxa_resource_pmu = { struct platform_device pxa_device_pmu = { .name = "arm-pmu", - .id = ARM_PMU_DEVICE_CPU, + .id = -1, .resource = &pxa_resource_pmu, .num_resources = 1, }; diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c index baf382c5e776..d7a6e9cebba4 100644 --- a/arch/arm/mach-realview/realview_eb.c +++ b/arch/arm/mach-realview/realview_eb.c @@ -32,7 +32,6 @@ #include <asm/irq.h> #include <asm/leds.h> #include <asm/mach-types.h> -#include <asm/pmu.h> 
#include <asm/pgtable.h> #include <asm/hardware/gic.h> #include <asm/hardware/cache-l2x0.h> @@ -297,7 +296,7 @@ static struct resource pmu_resources[] = { static struct platform_device pmu_device = { .name = "arm-pmu", - .id = ARM_PMU_DEVICE_CPU, + .id = -1, .num_resources = ARRAY_SIZE(pmu_resources), .resource = pmu_resources, }; diff --git a/arch/arm/mach-realview/realview_pb1176.c b/arch/arm/mach-realview/realview_pb1176.c index b1d7cafa1a6d..361f898884c8 100644 --- a/arch/arm/mach-realview/realview_pb1176.c +++ b/arch/arm/mach-realview/realview_pb1176.c @@ -34,7 +34,6 @@ #include <asm/irq.h> #include <asm/leds.h> #include <asm/mach-types.h> -#include <asm/pmu.h> #include <asm/pgtable.h> #include <asm/hardware/gic.h> #include <asm/hardware/cache-l2x0.h> @@ -280,7 +279,7 @@ static struct resource pmu_resource = { static struct platform_device pmu_device = { .name = "arm-pmu", - .id = ARM_PMU_DEVICE_CPU, + .id = -1, .num_resources = 1, .resource = &pmu_resource, }; diff --git a/arch/arm/mach-realview/realview_pb11mp.c b/arch/arm/mach-realview/realview_pb11mp.c index a98c536e3327..c56bc8d4d11b 100644 --- a/arch/arm/mach-realview/realview_pb11mp.c +++ b/arch/arm/mach-realview/realview_pb11mp.c @@ -32,7 +32,6 @@ #include <asm/irq.h> #include <asm/leds.h> #include <asm/mach-types.h> -#include <asm/pmu.h> #include <asm/pgtable.h> #include <asm/hardware/gic.h> #include <asm/hardware/cache-l2x0.h> @@ -263,7 +262,7 @@ static struct resource pmu_resources[] = { static struct platform_device pmu_device = { .name = "arm-pmu", - .id = ARM_PMU_DEVICE_CPU, + .id = -1, .num_resources = ARRAY_SIZE(pmu_resources), .resource = pmu_resources, }; diff --git a/arch/arm/mach-realview/realview_pba8.c b/arch/arm/mach-realview/realview_pba8.c index 59650174e6ed..040937582453 100644 --- a/arch/arm/mach-realview/realview_pba8.c +++ b/arch/arm/mach-realview/realview_pba8.c @@ -31,7 +31,6 @@ #include <asm/irq.h> #include <asm/leds.h> #include <asm/mach-types.h> -#include <asm/pmu.h> #include <asm/pgtable.h> #include <asm/hardware/gic.h> @@ -241,7 +240,7 @@ static struct resource pmu_resource = { static struct platform_device pmu_device = { .name = "arm-pmu", - .id = ARM_PMU_DEVICE_CPU, + .id = -1, .num_resources = 1, .resource = &pmu_resource, }; diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c index 3f2f605624e9..97885dc11e8c 100644 --- a/arch/arm/mach-realview/realview_pbx.c +++ b/arch/arm/mach-realview/realview_pbx.c @@ -30,7 +30,6 @@ #include <asm/irq.h> #include <asm/leds.h> #include <asm/mach-types.h> -#include <asm/pmu.h> #include <asm/smp_twd.h> #include <asm/pgtable.h> #include <asm/hardware/gic.h> @@ -280,7 +279,7 @@ static struct resource pmu_resources[] = { static struct platform_device pmu_device = { .name = "arm-pmu", - .id = ARM_PMU_DEVICE_CPU, + .id = -1, .num_resources = ARRAY_SIZE(pmu_resources), .resource = pmu_resources, }; diff --git a/arch/arm/mach-tegra/devices.c b/arch/arm/mach-tegra/devices.c index c70e65ffa36b..61e9603744a7 100644 --- a/arch/arm/mach-tegra/devices.c +++ b/arch/arm/mach-tegra/devices.c @@ -23,7 +23,6 @@ #include <linux/fsl_devices.h> #include <linux/serial_8250.h> #include <linux/i2c-tegra.h> -#include <asm/pmu.h> #include <mach/irqs.h> #include <mach/iomap.h> #include <mach/dma.h> @@ -516,7 +515,7 @@ static struct resource tegra_pmu_resources[] = { struct platform_device tegra_pmu_device = { .name = "arm-pmu", - .id = ARM_PMU_DEVICE_CPU, + .id = -1, .num_resources = ARRAY_SIZE(tegra_pmu_resources), .resource = tegra_pmu_resources, }; 
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c index db3c52d56ca4..3ee761d3a86f 100644 --- a/arch/arm/mach-ux500/cpu-db8500.c +++ b/arch/arm/mach-ux500/cpu-db8500.c @@ -19,7 +19,6 @@ #include <linux/mfd/abx500/ab8500.h> #include <asm/mach/map.h> -#include <asm/pmu.h> #include <plat/gpio-nomadik.h> #include <mach/hardware.h> #include <mach/setup.h> @@ -122,7 +121,7 @@ struct arm_pmu_platdata db8500_pmu_platdata = { static struct platform_device db8500_pmu_device = { .name = "arm-pmu", - .id = ARM_PMU_DEVICE_CPU, + .id = -1, .num_resources = ARRAY_SIZE(db8500_pmu_resources), .resource = db8500_pmu_resources, .dev.platform_data = &db8500_pmu_platdata, diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c index 61c492403b05..e4073a60a864 100644 --- a/arch/arm/mach-vexpress/ct-ca9x4.c +++ b/arch/arm/mach-vexpress/ct-ca9x4.c @@ -13,7 +13,6 @@ #include <asm/hardware/arm_timer.h> #include <asm/hardware/cache-l2x0.h> #include <asm/hardware/gic.h> -#include <asm/pmu.h> #include <asm/smp_scu.h> #include <asm/smp_twd.h> @@ -144,7 +143,7 @@ static struct resource pmu_resources[] = { static struct platform_device pmu_device = { .name = "arm-pmu", - .id = ARM_PMU_DEVICE_CPU, + .id = -1, .num_resources = ARRAY_SIZE(pmu_resources), .resource = pmu_resources, }; diff --git a/arch/arm/plat-iop/pmu.c b/arch/arm/plat-iop/pmu.c index a2024b8685a1..ad9f9744a82d 100644 --- a/arch/arm/plat-iop/pmu.c +++ b/arch/arm/plat-iop/pmu.c @@ -9,7 +9,6 @@ */ #include <linux/platform_device.h> -#include <asm/pmu.h> #include <mach/irqs.h> static struct resource pmu_resource = { @@ -26,7 +25,7 @@ static struct resource pmu_resource = { static struct platform_device pmu_device = { .name = "arm-pmu", - .id = ARM_PMU_DEVICE_CPU, + .id = -1, .resource = &pmu_resource, .num_resources = 1, }; diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c index 74e31ce35538..8154fab70de8 100644 --- a/arch/arm/plat-samsung/devs.c +++ b/arch/arm/plat-samsung/devs.c @@ -33,7 +33,6 @@ #include <linux/platform_data/s3c-hsotg.h> #include <asm/irq.h> -#include <asm/pmu.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> @@ -1105,7 +1104,7 @@ static struct resource s5p_pmu_resource[] = { static struct platform_device s5p_device_pmu = { .name = "arm-pmu", - .id = ARM_PMU_DEVICE_CPU, + .id = -1, .num_resources = ARRAY_SIZE(s5p_pmu_resource), .resource = s5p_pmu_resource, }; |
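A closing sketch of the other interface change visible above: struct arm_pmu now carries request_irq/free_irq callbacks, so each PMU backend owns its interrupt wiring instead of serialising through the removed reserve_pmu()/release_pmu() lock. The backend, IRQ number and names below are hypothetical; the real user added by this series is the CPU PMU code in perf_event_cpu.c.

	/*
	 * Hypothetical backend sketch (not from this series): fill in the new
	 * request_irq/free_irq hooks of struct arm_pmu for a single-IRQ PMU.
	 */
	#include <linux/interrupt.h>
	#include <asm/pmu.h>

	#define EXAMPLE_PMU_IRQ	42	/* assumed IRQ number */

	static struct arm_pmu example_pmu;

	static int example_pmu_request_irq(irq_handler_t handler)
	{
		/* 'handler' is armpmu_dispatch_irq, passed in by armpmu_reserve_hardware(). */
		return request_irq(EXAMPLE_PMU_IRQ, handler, IRQF_NOBALANCING,
				   "example-pmu", &example_pmu);
	}

	static void example_pmu_free_irq(void)
	{
		free_irq(EXAMPLE_PMU_IRQ, &example_pmu);
	}

	static struct arm_pmu example_pmu = {
		.name		= "example-pmu",
		.request_irq	= example_pmu_request_irq,
		.free_irq	= example_pmu_free_irq,
		/* .handle_irq, .enable, .disable, .map_event, ... as usual */
	};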