author	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2013-04-16 14:34:45 -0400
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2013-04-16 16:05:16 -0400
commit	70dd4998cb85f0ecd6ac892cc7232abefa432efb (patch)
tree	4dc18c8b7b61cc4118ae14c1cc35a71335d2ad85 /arch/x86/xen
parent	cb9c6f15f318aa3aeb62fe525aa5c6dcf6eee159 (diff)
xen/spinlock: Disable IRQ spinlock (PV) allocation on PVHVM
See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23 (xen: disable PV
spinlocks on HVM) for details. But we did not disable it everywhere, which
means that when we boot as PVHVM we end up allocating a per-CPU IRQ line
for spinlocks. This fixes that.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
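For context, here is a minimal, stand-alone sketch of the guard pattern this patch applies: each PV-spinlock setup path returns early when the kernel detects it is running as a Xen HVM/PVHVM guest, so the per-CPU kicker IRQ is never allocated. The xen_hvm_domain() stub and the setup_pv_spinlock() helper below are hypothetical stand-ins for illustration only; the real hunks are in the diff that follows.

/*
 * Simplified, hypothetical illustration of the guard added by this patch.
 * xen_hvm_domain() and setup_pv_spinlock() are stand-ins, not the kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's xen_hvm_domain() check. */
static bool xen_hvm_domain(void)
{
	return true;	/* pretend we booted as a PVHVM guest */
}

/* Stand-in for xen_init_lock_cpu()/xen_init_spinlocks(). */
static void setup_pv_spinlock(int cpu)
{
	/* Same idea as the patch: bail out before any per-CPU IRQ is bound. */
	if (xen_hvm_domain())
		return;

	printf("binding spinlock kicker IRQ for CPU%d\n", cpu);
}

int main(void)
{
	setup_pv_spinlock(0);	/* prints nothing on an HVM domain */
	return 0;
}

With the early return in place, a PVHVM guest never binds the per-CPU lock_kicker_irq and keeps using the native ticket spinlocks instead of the PV ones.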
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--	arch/x86/xen/spinlock.c	21
1 file changed, 21 insertions, 0 deletions
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 47ae032f8d97..8b54603ce816 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -367,6 +367,13 @@ void __cpuinit xen_init_lock_cpu(int cpu)
 	WARN(per_cpu(lock_kicker_irq, cpu) > 0, "spinlock on CPU%d exists on IRQ%d!\n",
 	     cpu, per_cpu(lock_kicker_irq, cpu));
 
+	/*
+	 * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
+	 * (xen: disable PV spinlocks on HVM)
+	 */
+	if (xen_hvm_domain())
+		return;
+
 	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
 	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
 				     cpu,
@@ -385,12 +392,26 @@ void __cpuinit xen_init_lock_cpu(int cpu)
 
 void xen_uninit_lock_cpu(int cpu)
 {
+	/*
+	 * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
+	 * (xen: disable PV spinlocks on HVM)
+	 */
+	if (xen_hvm_domain())
+		return;
+
 	unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
 	per_cpu(lock_kicker_irq, cpu) = -1;
 }
 
 void __init xen_init_spinlocks(void)
 {
+	/*
+	 * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
+	 * (xen: disable PV spinlocks on HVM)
+	 */
+	if (xen_hvm_domain())
+		return;
+
 	BUILD_BUG_ON(sizeof(struct xen_spinlock) > sizeof(arch_spinlock_t));
 	pv_lock_ops.spin_is_locked = xen_spin_is_locked;