author	Venkatesh Yadav Abbarapu <vabbar@codeaurora.org>	2016-01-12 11:51:24 +0530
committer	David Keitel <dkeitel@codeaurora.org>	2016-03-23 21:17:17 -0700
commit	491b50336d6751d1d019f49f1b80d3245fd5aae8 (patch)
tree	2151552faf2e006235960d28514cd59d73e4ab45 /arch
parent	63b89ec01e7dec6476574b395a7c747e70a1e81b (diff)
msm: platsmp: snapshot of the smp ops
Add smp related files like platsmp, headsmp and hotplug.
Copied from the below caf link
https://www.codeaurora.org/cgit/quic/la/kernel/msm-3.18/tree/arch/arm/mach-msm?h=caf/3.10/msm-3.10&id=5724b421fc2db7413048fe5b18135d481d68597a

Change-Id: I2b0be05f939dd97efa6bd187f65f6805d73cb64e
Signed-off-by: Venkatesh Yadav Abbarapu <vabbar@codeaurora.org>
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/mach-msm/headsmp.S	48
-rw-r--r--	arch/arm/mach-msm/hotplug.c	147
-rw-r--r--	arch/arm/mach-msm/platsmp.c	533
-rw-r--r--	arch/arm/mach-msm/platsmp.h	27
4 files changed, 755 insertions, 0 deletions
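
The four files below implement the standard ARM "holding pen" bring-up for MSM SoCs. As an orientation before the patch itself, here is a condensed sketch (kernel-style C) of the boot-CPU side of that handshake; it mirrors release_from_pen() in platsmp.c below, and the function name is made up for illustration only.

/*
 * Sketch only: condensed boot-CPU side of the pen-release handshake.
 * See release_from_pen() in platsmp.c below for the real sequence.
 */
static int release_cpu_from_pen_sketch(unsigned int cpu)
{
	unsigned long timeout;

	/* Publish the hardware ID of the CPU being let out of the pen. */
	write_pen_release(cpu_logical_map(cpu));

	/* Wake the secondary; it spins in headsmp.S comparing pen_release
	 * against its own MPIDR until the two values match. */
	arch_send_wakeup_ipi_mask(cpumask_of(cpu));

	/* The secondary acknowledges by writing -1 in msm_secondary_init(). */
	timeout = jiffies + HZ;
	while (time_before(jiffies, timeout) && pen_release != -1)
		udelay(10);

	return pen_release != -1 ? -ENOSYS : 0;
}
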
diff --git a/arch/arm/mach-msm/headsmp.S b/arch/arm/mach-msm/headsmp.S
new file mode 100644
index 000000000000..df084e7d8485
--- /dev/null
+++ b/arch/arm/mach-msm/headsmp.S
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2003 ARM Limited
+ * All Rights Reserved
+ * Copyright (c) 2010, 2012, 2014, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+ .arm
+
+__CPUINIT
+
+/*
+ * MSM specific entry point for secondary CPUs. This provides
+ * a "holding pen" into which all secondary cores are held until we're
+ * ready for them to initialise.
+ *
+ * This is executing in physical space with caches off.
+ */
+ENTRY(msm_secondary_startup)
+THUMB( adr r9, BSYM(2f) ) @ Kernel is always entered in ARM.
+THUMB( bx r9 ) @ If this is a Thumb-2 kernel,
+THUMB( .thumb ) @ switch to Thumb now.
+THUMB(2: )
+ mrc p15, 0, r0, c0, c0, 5 @ MPIDR
+ bic r0, #0xff000000 @ What CPU am I
+ adr r4, 1f @ address of
+ ldmia r4, {r5, r6} @ load curr addr and pen_rel addr
+ sub r4, r4, r5 @ determine virtual/phys offsets
+ add r6, r6, r4 @ apply
+pen:
+ ldr r7, [r6] @ pen_rel has cpu to remove from reset
+ cmp r7, r0 @ are we lucky?
+ bne pen
+
+ /*
+ * we've been released from the holding pen: secondary_stack
+ * should now contain the SVC stack for this core
+ */
+ b secondary_startup
+ENDPROC(msm_secondary_startup)
+
+1: .long .
+ .long pen_release
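
For reference, the CPU self-identification done above ("mrc p15, 0, r0, c0, c0, 5" followed by "bic r0, #0xff000000") has a direct C counterpart, shown below as a sketch; the helper name is invented and is not part of the patch.

#include <linux/types.h>
#include <asm/cputype.h>

/*
 * Sketch: C equivalent of the CPU self-identification in msm_secondary_startup.
 * read_cpuid_mpidr() wraps the same MRC instruction; clearing the top byte
 * keeps only the affinity fields that pen_release is compared against.
 */
static inline u32 msm_hw_cpu_id_sketch(void)
{
	return read_cpuid_mpidr() & ~0xff000000u;
}
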
diff --git a/arch/arm/mach-msm/hotplug.c b/arch/arm/mach-msm/hotplug.c
new file mode 100644
index 000000000000..c217ded0cd3d
--- /dev/null
+++ b/arch/arm/mach-msm/hotplug.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2002 ARM Ltd.
+ * All Rights Reserved
+ * Copyright (c) 2011-2014, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+#include <linux/notifier.h>
+#include <soc/qcom/spm.h>
+#include <soc/qcom/pm.h>
+#include <linux/irqchip/arm-gic.h>
+
+#include <asm/smp_plat.h>
+#include <asm/vfp.h>
+
+#include <soc/qcom/jtag.h>
+
+static cpumask_t cpu_dying_mask;
+
+static DEFINE_PER_CPU(unsigned int, warm_boot_flag);
+
+static inline void cpu_enter_lowpower(void)
+{
+}
+
+static inline void cpu_leave_lowpower(void)
+{
+}
+
+static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
+{
+ /* Just enter wfi for now. TODO: Properly shut off the cpu. */
+ for (;;) {
+
+ lpm_cpu_hotplug_enter(cpu);
+ if (pen_release == cpu_logical_map(cpu)) {
+ /*
+ * OK, proper wakeup, we're done
+ */
+ break;
+ }
+
+ /*
+ * Getting here means that we have come out of WFI without
+ * having been woken up - this shouldn't happen.
+ *
+ * The trouble is, letting people know about this is not really
+ * possible, since we are currently running incoherently, and
+ * therefore cannot safely call printk() or anything else.
+ * Read the pending interrupts to understand why we woke up.
+ */
+#ifdef CONFIG_MSM_PM
+ gic_show_pending_irq();
+#endif
+ (*spurious)++;
+ }
+}
+
+int msm_cpu_kill(unsigned int cpu)
+{
+ int ret = 0;
+
+ if (cpumask_test_and_clear_cpu(cpu, &cpu_dying_mask))
+ ret = msm_pm_wait_cpu_shutdown(cpu);
+
+ return ret ? 0 : 1;
+}
+
+/*
+ * platform-specific code to shutdown a CPU
+ *
+ * Called with IRQs disabled
+ */
+void __ref msm_cpu_die(unsigned int cpu)
+{
+ int spurious = 0;
+
+ if (unlikely(cpu != smp_processor_id())) {
+ pr_crit("%s: running on %u, should be %u\n",
+ __func__, smp_processor_id(), cpu);
+ BUG();
+ }
+ /*
+ * we're ready for shutdown now, so do it
+ */
+ cpu_enter_lowpower();
+ platform_do_lowpower(cpu, &spurious);
+
+ pr_debug("CPU%u: %s: normal wakeup\n", cpu, __func__);
+ cpu_leave_lowpower();
+
+ if (spurious)
+ pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
+}
+
+static int hotplug_dying_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ switch (action & (~CPU_TASKS_FROZEN)) {
+ case CPU_DYING:
+ cpumask_set_cpu((unsigned long)hcpu, &cpu_dying_mask);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+static struct notifier_block hotplug_dying_notifier = {
+ .notifier_call = hotplug_dying_callback,
+};
+
+int msm_platform_secondary_init(unsigned int cpu)
+{
+ int ret;
+ unsigned int *warm_boot = &__get_cpu_var(warm_boot_flag);
+
+ if (!(*warm_boot)) {
+ *warm_boot = 1;
+ /*
+ * All CPU0 boots are considered warm boots (restore needed)
+ * since CPU0 is the system boot CPU and never cold-booted
+ * by the kernel.
+ */
+ if (cpu)
+ return 0;
+ }
+ msm_jtag_restore_state();
+#if defined(CONFIG_VFP) && defined(CONFIG_CPU_PM)
+ vfp_pm_resume();
+#endif
+ ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
+
+ return ret;
+}
+
+static int __init init_hotplug_dying(void)
+{
+ return register_hotcpu_notifier(&hotplug_dying_notifier);
+}
+early_initcall(init_hotplug_dying);
diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c
new file mode 100644
index 000000000000..67ea733de1b9
--- /dev/null
+++ b/arch/arm/mach-msm/platsmp.c
@@ -0,0 +1,533 @@
+/*
+ * Copyright (C) 2002 ARM Ltd.
+ * All Rights Reserved
+ * Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/regulator/krait-regulator.h>
+#include <soc/qcom/pm.h>
+#include <soc/qcom/scm-boot.h>
+#include <soc/qcom/cpu_pwr_ctl.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <asm/mach-types.h>
+#include <asm/smp_plat.h>
+
+#include <soc/qcom/socinfo.h>
+#include <mach/hardware.h>
+#include <mach/msm_iomap.h>
+
+#include "platsmp.h"
+
+#define VDD_SC1_ARRAY_CLAMP_GFS_CTL 0x15A0
+#define SCSS_CPU1CORE_RESET 0xD80
+#define SCSS_DBG_STATUS_CORE_PWRDUP 0xE64
+#define MSM8960_SAW2_BASE_ADDR 0x02089000
+#define APCS_ALIAS0_BASE_ADDR 0xF9088000
+
+/*
+ * Write pen_release in a way that is guaranteed to be visible to all
+ * observers, irrespective of whether they're taking part in coherency
+ * or not. This is necessary for the hotplug code to work reliably.
+ */
+void __cpuinit write_pen_release(int val)
+{
+ pen_release = val;
+ smp_wmb();
+ __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
+ outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
+}
+
+static DEFINE_SPINLOCK(boot_lock);
+
+void __cpuinit msm_secondary_init(unsigned int cpu)
+{
+ WARN_ON(msm_platform_secondary_init(cpu));
+
+ /*
+ * let the primary processor know we're out of the
+ * pen, then head off into the C entry point
+ */
+ write_pen_release(-1);
+
+ /*
+ * Synchronise with the boot thread.
+ */
+ spin_lock(&boot_lock);
+ spin_unlock(&boot_lock);
+}
+
+static int __cpuinit release_secondary_sim(unsigned long base, unsigned int cpu)
+{
+ void *base_ptr = ioremap_nocache(base + (cpu * 0x10000), SZ_4K);
+ if (!base_ptr)
+ return -ENODEV;
+
+ writel_relaxed(0x800, base_ptr+0x04);
+ writel_relaxed(0x3FFF, base_ptr+0x14);
+
+ mb();
+ iounmap(base_ptr);
+ return 0;
+}
+
+static int __cpuinit scorpion_release_secondary(void)
+{
+ void *base_ptr = ioremap_nocache(0x00902000, SZ_4K*2);
+ if (!base_ptr)
+ return -EINVAL;
+
+ writel_relaxed(0, base_ptr + VDD_SC1_ARRAY_CLAMP_GFS_CTL);
+ dmb();
+ writel_relaxed(0, base_ptr + SCSS_CPU1CORE_RESET);
+ writel_relaxed(3, base_ptr + SCSS_DBG_STATUS_CORE_PWRDUP);
+ mb();
+ iounmap(base_ptr);
+
+ return 0;
+}
+
+static int __cpuinit msm8960_release_secondary(unsigned long base,
+ unsigned int cpu)
+{
+ void *base_ptr = ioremap_nocache(base + (cpu * 0x10000), SZ_4K);
+ if (!base_ptr)
+ return -ENODEV;
+
+ writel_relaxed(0x109, base_ptr+0x04);
+ writel_relaxed(0x101, base_ptr+0x04);
+ mb();
+ ndelay(300);
+
+ writel_relaxed(0x121, base_ptr+0x04);
+ mb();
+ udelay(2);
+
+ writel_relaxed(0x120, base_ptr+0x04);
+ mb();
+ udelay(2);
+
+ writel_relaxed(0x100, base_ptr+0x04);
+ mb();
+ udelay(100);
+
+ writel_relaxed(0x180, base_ptr+0x04);
+ mb();
+ iounmap(base_ptr);
+ return 0;
+}
+
+static int __cpuinit msm8974_release_secondary(unsigned long base,
+ unsigned int cpu)
+{
+ void *base_ptr = ioremap_nocache(base + (cpu * 0x10000), SZ_4K);
+
+ if (!base_ptr)
+ return -ENODEV;
+
+ secondary_cpu_hs_init(base_ptr, cpu);
+
+ writel_relaxed(0x021, base_ptr+0x04);
+ mb();
+ udelay(2);
+
+ writel_relaxed(0x020, base_ptr+0x04);
+ mb();
+ udelay(2);
+
+ writel_relaxed(0x000, base_ptr+0x04);
+ mb();
+
+ writel_relaxed(0x080, base_ptr+0x04);
+ mb();
+ iounmap(base_ptr);
+ return 0;
+}
+
+static int __cpuinit arm_release_secondary(unsigned long base, unsigned int cpu)
+{
+ void *base_ptr = ioremap_nocache(base + (cpu * 0x10000), SZ_4K);
+ if (!base_ptr)
+ return -ENODEV;
+
+ writel_relaxed(0x00000033, base_ptr+0x04);
+ mb();
+
+ writel_relaxed(0x10000001, base_ptr+0x14);
+ mb();
+ udelay(2);
+
+ writel_relaxed(0x00000031, base_ptr+0x04);
+ mb();
+
+ writel_relaxed(0x00000039, base_ptr+0x04);
+ mb();
+ udelay(2);
+
+ writel_relaxed(0x00020038, base_ptr+0x04);
+ mb();
+ udelay(2);
+
+ writel_relaxed(0x00020008, base_ptr+0x04);
+ mb();
+
+ writel_relaxed(0x00020088, base_ptr+0x04);
+ mb();
+
+ iounmap(base_ptr);
+ return 0;
+}
+
+static int __cpuinit release_from_pen(unsigned int cpu)
+{
+ unsigned long timeout;
+
+ /* Set preset_lpj to avoid subsequent lpj recalculations */
+ preset_lpj = loops_per_jiffy;
+
+ /*
+ * set synchronisation state between this boot processor
+ * and the secondary one
+ */
+ spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+ * the holding pen - release it, then wait for it to flag
+ * that it has been released by resetting pen_release.
+ *
+ * Note that "pen_release" is the hardware CPU ID, whereas
+ * "cpu" is Linux's internal ID.
+ */
+ write_pen_release(cpu_logical_map(cpu));
+
+ /*
+ * Send the secondary CPU a soft interrupt, thereby causing
+ * the boot monitor to read the system wide flags register,
+ * and branch to the address found there.
+ */
+ arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+
+ timeout = jiffies + (1 * HZ);
+ while (time_before(jiffies, timeout)) {
+ smp_rmb();
+ if (pen_release == -1)
+ break;
+
+ udelay(10);
+ }
+
+ /*
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+ spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+}
+
+DEFINE_PER_CPU(int, cold_boot_done);
+
+int __cpuinit scorpion_boot_secondary(unsigned int cpu,
+ struct task_struct *idle)
+{
+ pr_debug("Starting secondary CPU %d\n", cpu);
+
+ if (per_cpu(cold_boot_done, cpu) == false) {
+ scorpion_release_secondary();
+ per_cpu(cold_boot_done, cpu) = true;
+ }
+ return release_from_pen(cpu);
+}
+
+int __cpuinit msm8960_boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+ pr_debug("Starting secondary CPU %d\n", cpu);
+
+ if (per_cpu(cold_boot_done, cpu) == false) {
+ msm8960_release_secondary(0x02088000, cpu);
+ per_cpu(cold_boot_done, cpu) = true;
+ }
+ return release_from_pen(cpu);
+}
+
+int __cpuinit msm8974_boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+ pr_debug("Starting secondary CPU %d\n", cpu);
+
+ if (per_cpu(cold_boot_done, cpu) == false) {
+ if (of_board_is_sim())
+ release_secondary_sim(APCS_ALIAS0_BASE_ADDR, cpu);
+ else if (!of_board_is_rumi())
+ msm8974_release_secondary(APCS_ALIAS0_BASE_ADDR, cpu);
+
+ per_cpu(cold_boot_done, cpu) = true;
+ }
+ return release_from_pen(cpu);
+}
+
+static int __cpuinit msm8916_boot_secondary(unsigned int cpu,
+ struct task_struct *idle)
+{
+ pr_debug("Starting secondary CPU %d\n", cpu);
+
+ if (per_cpu(cold_boot_done, cpu) == false) {
+ if (of_board_is_sim())
+ release_secondary_sim(0xb088000, cpu);
+ else if (!of_board_is_rumi())
+ arm_release_secondary(0xb088000, cpu);
+
+ per_cpu(cold_boot_done, cpu) = true;
+ }
+ return release_from_pen(cpu);
+}
+
+static int __cpuinit msm8936_boot_secondary(unsigned int cpu,
+ struct task_struct *idle)
+{
+ int ret = 0;
+
+ pr_debug("Starting secondary CPU %d\n", cpu);
+
+ if (per_cpu(cold_boot_done, cpu) == false) {
+ if (of_board_is_sim()) {
+ ret = msm_unclamp_secondary_arm_cpu_sim(cpu);
+ if (ret)
+ return ret;
+ } else if (!of_board_is_rumi()) {
+ ret = msm_unclamp_secondary_arm_cpu(cpu);
+ if (ret)
+ return ret;
+ }
+ per_cpu(cold_boot_done, cpu) = true;
+ }
+ return release_from_pen(cpu);
+}
+
+static int __cpuinit msm8976_boot_secondary(unsigned int cpu,
+ struct task_struct *idle)
+{
+ int ret = 0;
+ u32 mpidr = cpu_logical_map(cpu);
+
+ pr_debug("Starting secondary CPU %d\n", cpu);
+
+ if (per_cpu(cold_boot_done, cpu) == false) {
+ if (of_board_is_sim()) {
+ ret = msm_unclamp_secondary_arm_cpu_sim(cpu);
+ if (ret)
+ return ret;
+ } else if (!of_board_is_rumi()) {
+ ret = msm8976_unclamp_secondary_arm_cpu(cpu);
+ if (ret)
+ return ret;
+ }
+ if (MPIDR_AFFINITY_LEVEL(mpidr, 1)) {
+ ret = msm8976_cpu_ldo_config(cpu);
+ if (ret)
+ return ret;
+ }
+ per_cpu(cold_boot_done, cpu) = true;
+ }
+ return release_from_pen(cpu);
+}
+
+int __cpuinit arm_boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+ pr_debug("Starting secondary CPU %d\n", cpu);
+
+ if (per_cpu(cold_boot_done, cpu) == false) {
+ if (of_board_is_sim())
+ release_secondary_sim(APCS_ALIAS0_BASE_ADDR, cpu);
+ else if (!of_board_is_rumi())
+ arm_release_secondary(APCS_ALIAS0_BASE_ADDR, cpu);
+
+ per_cpu(cold_boot_done, cpu) = true;
+ }
+ return release_from_pen(cpu);
+}
+
+/*
+ * Initialise the CPU possible map early - this describes the CPUs
+ * which may be present or become present in the system.
+ */
+static void __init msm_smp_init_cpus(void)
+{
+ unsigned int i, ncores = get_core_count();
+
+ if (ncores > nr_cpu_ids) {
+ pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+ ncores, nr_cpu_ids);
+ ncores = nr_cpu_ids;
+ }
+
+ for (i = 0; i < ncores; i++)
+ set_cpu_possible(i, true);
+}
+
+static void __init arm_smp_init_cpus(void)
+{
+ unsigned int i, ncores;
+
+ ncores = (__raw_readl(MSM_APCS_GCC_BASE + 0x30)) & 0xF;
+
+ if (ncores > nr_cpu_ids) {
+ pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+ ncores, nr_cpu_ids);
+ ncores = nr_cpu_ids;
+ }
+
+ for (i = 0; i < ncores; i++)
+ set_cpu_possible(i, true);
+}
+
+static int cold_boot_flags[] __initdata = {
+ 0,
+ SCM_FLAG_COLDBOOT_CPU1,
+ SCM_FLAG_COLDBOOT_CPU2,
+ SCM_FLAG_COLDBOOT_CPU3,
+};
+
+static void __init msm_platform_smp_prepare_cpus_mc(unsigned int max_cpus)
+{
+ int cpu, map;
+ u32 aff0_mask = 0;
+ u32 aff1_mask = 0;
+ u32 aff2_mask = 0;
+
+ for_each_present_cpu(cpu) {
+ map = cpu_logical_map(cpu);
+ aff0_mask |= BIT(MPIDR_AFFINITY_LEVEL(map, 0));
+ aff1_mask |= BIT(MPIDR_AFFINITY_LEVEL(map, 1));
+ aff2_mask |= BIT(MPIDR_AFFINITY_LEVEL(map, 2));
+ }
+
+ if (scm_set_boot_addr_mc(virt_to_phys(msm_secondary_startup),
+ aff0_mask, aff1_mask, aff2_mask, SCM_FLAG_COLDBOOT_MC))
+ pr_warn("Failed to set CPU boot address\n");
+
+ /* Mark CPU0 cold boot flag as done */
+ per_cpu(cold_boot_done, 0) = true;
+}
+
+static void __init msm_platform_smp_prepare_cpus(unsigned int max_cpus)
+{
+ int cpu, map;
+ unsigned int flags = 0;
+
+ if (scm_is_mc_boot_available())
+ return msm_platform_smp_prepare_cpus_mc(max_cpus);
+
+ for_each_present_cpu(cpu) {
+ map = cpu_logical_map(cpu);
+ if (map >= ARRAY_SIZE(cold_boot_flags)) {
+ set_cpu_present(cpu, false);
+ __WARN();
+ continue;
+ }
+ flags |= cold_boot_flags[map];
+ }
+
+ if (scm_set_boot_addr(virt_to_phys(msm_secondary_startup), flags))
+ pr_warn("Failed to set CPU boot address\n");
+
+ /* Mark CPU0 cold boot flag as done */
+ per_cpu(cold_boot_done, 0) = true;
+}
+
+int msm_cpu_disable(unsigned int cpu)
+{
+ return 0; /* support hotplugging any cpu */
+}
+
+struct smp_operations arm_smp_ops __initdata = {
+ .smp_init_cpus = arm_smp_init_cpus,
+ .smp_prepare_cpus = msm_platform_smp_prepare_cpus,
+ .smp_secondary_init = msm_secondary_init,
+ .smp_boot_secondary = arm_boot_secondary,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_die = msm_cpu_die,
+ .cpu_kill = msm_cpu_kill,
+#endif
+};
+
+struct smp_operations msm8916_smp_ops __initdata = {
+ .smp_init_cpus = arm_smp_init_cpus,
+ .smp_prepare_cpus = msm_platform_smp_prepare_cpus,
+ .smp_secondary_init = msm_secondary_init,
+ .smp_boot_secondary = msm8916_boot_secondary,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_die = msm_cpu_die,
+ .cpu_kill = msm_cpu_kill,
+ .cpu_disable = msm_cpu_disable,
+#endif
+};
+
+struct smp_operations msm8976_smp_ops __initdata = {
+ .smp_init_cpus = arm_smp_init_cpus,
+ .smp_prepare_cpus = msm_platform_smp_prepare_cpus_mc,
+ .smp_secondary_init = msm_secondary_init,
+ .smp_boot_secondary = msm8976_boot_secondary,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_die = msm_cpu_die,
+ .cpu_kill = msm_cpu_kill,
+#endif
+};
+
+struct smp_operations msm8936_smp_ops __initdata = {
+ .smp_init_cpus = arm_smp_init_cpus,
+ .smp_prepare_cpus = msm_platform_smp_prepare_cpus,
+ .smp_secondary_init = msm_secondary_init,
+ .smp_boot_secondary = msm8936_boot_secondary,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_die = msm_cpu_die,
+ .cpu_kill = msm_cpu_kill,
+ .cpu_disable = msm_cpu_disable,
+#endif
+};
+
+struct smp_operations msm8974_smp_ops __initdata = {
+ .smp_init_cpus = msm_smp_init_cpus,
+ .smp_prepare_cpus = msm_platform_smp_prepare_cpus,
+ .smp_secondary_init = msm_secondary_init,
+ .smp_boot_secondary = msm8974_boot_secondary,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_die = msm_cpu_die,
+ .cpu_kill = msm_cpu_kill,
+#endif
+};
+
+struct smp_operations msm8960_smp_ops __initdata = {
+ .smp_init_cpus = msm_smp_init_cpus,
+ .smp_prepare_cpus = msm_platform_smp_prepare_cpus,
+ .smp_secondary_init = msm_secondary_init,
+ .smp_boot_secondary = msm8960_boot_secondary,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_die = msm_cpu_die,
+ .cpu_kill = msm_cpu_kill,
+#endif
+};
+
+struct smp_operations scorpion_smp_ops __initdata = {
+ .smp_init_cpus = msm_smp_init_cpus,
+ .smp_prepare_cpus = msm_platform_smp_prepare_cpus,
+ .smp_secondary_init = msm_secondary_init,
+ .smp_boot_secondary = scorpion_boot_secondary,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_die = msm_cpu_die,
+ .cpu_kill = msm_cpu_kill,
+#endif
+};
diff --git a/arch/arm/mach-msm/platsmp.h b/arch/arm/mach-msm/platsmp.h
new file mode 100644
index 000000000000..960d42384852
--- /dev/null
+++ b/arch/arm/mach-msm/platsmp.h
@@ -0,0 +1,27 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+void msm_secondary_startup(void);
+void write_pen_release(int val);
+
+void msm_cpu_die(unsigned int cpu);
+int msm_cpu_kill(unsigned int cpu);
+
+extern struct smp_operations arm_smp_ops;
+extern struct smp_operations msm8960_smp_ops;
+extern struct smp_operations msm8974_smp_ops;
+extern struct smp_operations msm8962_smp_ops;
+extern struct smp_operations msm8625_smp_ops;
+extern struct smp_operations scorpion_smp_ops;
+extern struct smp_operations msm8916_smp_ops;
+extern struct smp_operations msm8936_smp_ops;
+extern struct smp_operations msm8976_smp_ops;
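
These smp_operations tables are intended to be hooked into the kernel through a machine descriptor in the corresponding board file. A minimal sketch follows, assuming a hypothetical 32-bit MSM8916 board file; the machine name and compatible string are placeholders and are not part of this patch.

#include <asm/mach/arch.h>
#include "platsmp.h"

/* Hypothetical board-file wiring: hand msm8916_smp_ops to the SMP core. */
static const char *msm8916_dt_match[] __initconst = {
	"qcom,msm8916",
	NULL
};

DT_MACHINE_START(MSM8916_DT, "Qualcomm MSM 8916 (Flattened Device Tree)")
	.smp		= smp_ops(msm8916_smp_ops),
	.dt_compat	= msm8916_dt_match,
MACHINE_END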