author    Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2013-06-04 16:42:29 -0400
committer Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2013-06-10 08:43:31 -0400
commit    ee336e10d5650d408efb66f634d462b9eb39c191 (patch)
tree      cc9e3ef418118d97f83bfcea09e85bf500ed2e6f /arch/x86/xen
parent    9547689fcdf0b223967edcbbe588d9f0489ee5aa (diff)
xen/smp: Set the per-cpu IRQ number to a valid default.
When we free it we want to make sure to set it to a default value of -1 so that we don't double-free it (in case somebody calls us twice).

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
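A minimal user-space sketch (not kernel code) of the pattern the commit message describes: once an IRQ has been released, leave the sentinel -1 in its slot so a second pass through the free path finds nothing to unbind. The helper fake_unbind() is only a stand-in for unbind_from_irqhandler(), not a real API.

/* Idempotent teardown: freeing twice is harmless because the slot is
 * reset to -1 the first time around. */
#include <stdio.h>

struct xen_common_irq {
	int irq;
	char *name;
};

static void fake_unbind(int irq)	/* stand-in for unbind_from_irqhandler() */
{
	printf("unbinding irq %d\n", irq);
}

static void intr_free(struct xen_common_irq *slot)
{
	if (slot->irq >= 0) {
		fake_unbind(slot->irq);
		slot->irq = -1;		/* mark the slot as freed */
	}
}

int main(void)
{
	struct xen_common_irq resched = { .irq = 17, .name = "resched" };

	intr_free(&resched);	/* unbinds irq 17 and resets the slot */
	intr_free(&resched);	/* second call is a no-op */
	return 0;
}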
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--  arch/x86/xen/smp.c | 28
1 file changed, 19 insertions(+), 9 deletions(-)
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index f5b29ecdf18d..6a483cdd28c9 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -43,10 +43,10 @@ struct xen_common_irq {
 	int irq;
 	char *name;
 };
-static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq);
-static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq);
-static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq);
-static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work);
+static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
+static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
+static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
+static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 };
 static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };
 
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
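The hunk above matters because DEFINE_PER_CPU() without an explicit initializer leaves the structure zero-filled, and 0 is a valid IRQ number as far as the ">= 0" checks in the free path are concerned; the designated initializer puts the -1 sentinel in place before any CPU has bound anything. A small user-space illustration of that default (plain statics standing in for per-cpu data):

#include <assert.h>

struct xen_common_irq {
	int irq;
	char *name;
};

static struct xen_common_irq zeroed;			/* no initializer: .irq == 0 */
static struct xen_common_irq marked = { .irq = -1 };	/* sentinel: .irq == -1 */

int main(void)
{
	assert(zeroed.irq == 0);	/* would wrongly pass an ">= 0" "in use" check */
	assert(marked.irq == -1);	/* clearly marked as not allocated */
	return 0;
}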
@@ -104,20 +104,30 @@ static void __cpuinit cpu_bringup_and_idle(void)
 
 static void xen_smp_intr_free(unsigned int cpu)
 {
-	if (per_cpu(xen_resched_irq, cpu).irq >= 0)
+	if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
 		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
-	if (per_cpu(xen_callfunc_irq, cpu).irq >= 0)
+		per_cpu(xen_resched_irq, cpu).irq = -1;
+	}
+	if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
 		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
-	if (per_cpu(xen_debug_irq, cpu).irq >= 0)
+		per_cpu(xen_callfunc_irq, cpu).irq = -1;
+	}
+	if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
 		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
-	if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0)
+		per_cpu(xen_debug_irq, cpu).irq = -1;
+	}
+	if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
 		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
 				       NULL);
+		per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
+	}
 	if (xen_hvm_domain())
 		return;
-	if (per_cpu(xen_irq_work, cpu).irq >= 0)
+	if (per_cpu(xen_irq_work, cpu).irq >= 0) {
 		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
+		per_cpu(xen_irq_work, cpu).irq = -1;
+	}
 };
 static int xen_smp_intr_init(unsigned int cpu)
 {
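The diff stops at the signature of xen_smp_intr_init(); its body is not shown above. For the teardown to work, the init side presumably records each bound IRQ number in the matching per-cpu slot (and keeps or restores the -1 sentinel on failure) so that the ">= 0" checks know whether anything needs unbinding. A hedged user-space sketch of that bookkeeping, with fake_bind() as an illustrative stub rather than a kernel API:

#include <stdio.h>

struct xen_common_irq {
	int irq;
	char *name;
};

/* Stub bind call: returns a made-up IRQ number, or -1 on failure. */
static int fake_bind(const char *name, int fail)
{
	if (fail)
		return -1;
	printf("bound %s\n", name);
	return 42;
}

/* Record the bound IRQ so the free path's ">= 0" check sees it;
 * keep the -1 sentinel if binding failed. */
static int intr_init(struct xen_common_irq *slot, int fail)
{
	int rc = fake_bind(slot->name, fail);

	if (rc < 0) {
		slot->irq = -1;
		return rc;
	}
	slot->irq = rc;
	return 0;
}

int main(void)
{
	struct xen_common_irq resched = { .irq = -1, .name = "resched0" };

	if (intr_init(&resched, 0) == 0)
		printf("slot now holds irq %d\n", resched.irq);
	return 0;
}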