Diffstat (limited to 'kernel/smp.c')
-rw-r--r--  kernel/smp.c | 68
1 file changed, 57 insertions(+), 11 deletions(-)
diff --git a/kernel/smp.c b/kernel/smp.c
index 96fc7c0edc59..f362a8553777 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -135,7 +135,8 @@ void generic_smp_call_function_interrupt(void)
 			 */
 			smp_wmb();
 			data->csd.flags &= ~CSD_FLAG_WAIT;
-		} else
+		}
+		if (data->csd.flags & CSD_FLAG_ALLOC)
 			call_rcu(&data->rcu_head, rcu_free_call_data);
 	}
 	rcu_read_unlock();
@@ -209,8 +210,10 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 {
 	struct call_single_data d;
 	unsigned long flags;
-	/* prevent preemption and reschedule on another processor */
+	/* prevent preemption and reschedule on another processor,
+	   as well as CPU removal */
 	int me = get_cpu();
+	int err = 0;
 
 	/* Can deadlock when called with interrupts disabled */
 	WARN_ON(irqs_disabled());
@@ -219,7 +222,7 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 		local_irq_save(flags);
 		func(info);
 		local_irq_restore(flags);
-	} else {
+	} else if ((unsigned)cpu < NR_CPUS && cpu_online(cpu)) {
 		struct call_single_data *data = NULL;
 
 		if (!wait) {
@@ -235,10 +238,12 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 		data->func = func;
 		data->info = info;
 		generic_exec_single(cpu, data);
+	} else {
+		err = -ENXIO;	/* CPU not online */
 	}
 
 	put_cpu();
-	return 0;
+	return err;
 }
 EXPORT_SYMBOL(smp_call_function_single);
 
@@ -260,6 +265,42 @@ void __smp_call_function_single(int cpu, struct call_single_data *data)
 	generic_exec_single(cpu, data);
 }
 
+/* Dummy function */
+static void quiesce_dummy(void *unused)
+{
+}
+
+/*
+ * Ensure stack based data used in call function mask is safe to free.
+ *
+ * This is needed by smp_call_function_mask when using on-stack data, because
+ * a single call function queue is shared by all CPUs, and any CPU may pick up
+ * the data item on the queue at any time before it is deleted. So we need to
+ * ensure that all CPUs have transitioned through a quiescent state after
+ * this call.
+ *
+ * This is a very slow function, implemented by sending synchronous IPIs to
+ * all possible CPUs. For this reason, we have to alloc data rather than use
+ * stack based data even in the case of synchronous calls. The stack based
+ * data is then just used for deadlock/oom fallback which will be very rare.
+ *
+ * If a faster scheme can be made, we could go back to preferring stack based
+ * data -- the data allocation/free is non-zero cost.
+ */
+static void smp_call_function_mask_quiesce_stack(cpumask_t mask)
+{
+	struct call_single_data data;
+	int cpu;
+
+	data.func = quiesce_dummy;
+	data.info = NULL;
+
+	for_each_cpu_mask(cpu, mask) {
+		data.flags = CSD_FLAG_WAIT;
+		generic_exec_single(cpu, &data);
+	}
+}
+
 /**
  * smp_call_function_mask(): Run a function on a set of other CPUs.
  * @mask: The set of cpus to run on.
@@ -285,6 +326,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
 	cpumask_t allbutself;
 	unsigned long flags;
 	int cpu, num_cpus;
+	int slowpath = 0;
 
 	/* Can deadlock when called with interrupts disabled */
 	WARN_ON(irqs_disabled());
@@ -306,15 +348,16 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
 		return smp_call_function_single(cpu, func, info, wait);
 	}
 
-	if (!wait) {
-		data = kmalloc(sizeof(*data), GFP_ATOMIC);
-		if (data)
-			data->csd.flags = CSD_FLAG_ALLOC;
-	}
-	if (!data) {
+	data = kmalloc(sizeof(*data), GFP_ATOMIC);
+	if (data) {
+		data->csd.flags = CSD_FLAG_ALLOC;
+		if (wait)
+			data->csd.flags |= CSD_FLAG_WAIT;
+	} else {
 		data = &d;
 		data->csd.flags = CSD_FLAG_WAIT;
 		wait = 1;
+		slowpath = 1;
 	}
 
 	spin_lock_init(&data->lock);
@@ -331,8 +374,11 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
 	arch_send_call_function_ipi(mask);
 
 	/* optionally wait for the CPUs to complete */
-	if (wait)
+	if (wait) {
 		csd_flag_wait(&data->csd);
+		if (unlikely(slowpath))
+			smp_call_function_mask_quiesce_stack(mask);
+	}
 
 	return 0;
 }
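
A minimal caller sketch, not part of the patch above: with this change, smp_call_function_single() reports -ENXIO when the target CPU is not online instead of silently returning 0, so callers can act on the result. remote_tick() and poke_cpu() are hypothetical names used only for illustration; the four-argument (cpu, func, info, wait) form matches the call site visible in the @@ -306 hunk.

#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/kernel.h>

/* Hypothetical callback: runs on the target CPU from the IPI handler. */
static void remote_tick(void *info)
{
	unsigned long *count = info;

	(*count)++;
}

/* Hypothetical helper: run remote_tick() on 'cpu' and wait for it. */
static int poke_cpu(int cpu)
{
	unsigned long count = 0;
	int err;

	/* wait == 1: block until remote_tick() has finished on 'cpu' */
	err = smp_call_function_single(cpu, remote_tick, &count, 1);
	if (err == -ENXIO)
		printk(KERN_WARNING "poke_cpu: cpu %d is not online\n", cpu);

	return err;
}

Because wait is set, the on-stack count stays valid until the callback has completed on the remote CPU; the stack quiescing added above applies only to the shared-queue smp_call_function_mask() path.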