Commit 66d16ed4 authored by Laurent Vivier, committed by Thomas Gleixner

x86: implement missing x86_64 function smp_call_function_mask()

This patch defines the missing function smp_call_function_mask() for x86_64;
it is more or less a cut&paste of the i386 function. It also removes some
duplicate code.

This function is needed by KVM to execute a function on some CPUs.
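
As an illustration (not part of this patch), a KVM-style caller could use the
new interface roughly as below; the callback name and the surrounding helper
are hypothetical, only smp_call_function_mask() itself is defined by this patch:

    #include <linux/smp.h>
    #include <linux/cpumask.h>

    /* Hypothetical callback: runs on each CPU of the mask in IPI context,
     * so it must be fast and must not sleep. */
    static void kvm_do_flush(void *info)
    {
            /* per-cpu work goes here */
    }

    /* Hypothetical helper: interrupts must be enabled in the caller and
     * the current CPU must not be in 'mask'.  With wait == 1 the call
     * returns only after kvm_do_flush() has completed on every CPU in
     * the mask. */
    static void kvm_flush_cpus(cpumask_t mask)
    {
            smp_call_function_mask(mask, kvm_do_flush, NULL, 1);
    }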

AK: Fixed description
AK: Moved WARN_ON(irqs_disabled()) one level up to not warn in the panic case.
[ tglx: arch/x86 adaptation ]
Signed-off-by: Laurent Vivier <Laurent.Vivier@bull.net>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 9d1c6e7c
@@ -322,17 +322,27 @@ void unlock_ipi_call_lock(void)
 }
 
 /*
- * this function sends a 'generic call function' IPI to one other CPU
- * in the system.
- *
- * cpu is a standard Linux logical CPU number.
+ * this function sends a 'generic call function' IPI to all other CPU
+ * of the system defined in the mask.
  */
-static void
-__smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-				int nonatomic, int wait)
+static int
+__smp_call_function_mask(cpumask_t mask,
+			 void (*func)(void *), void *info,
+			 int wait)
 {
 	struct call_data_struct data;
-	int cpus = 1;
+	cpumask_t allbutself;
+	int cpus;
+
+	allbutself = cpu_online_map;
+	cpu_clear(smp_processor_id(), allbutself);
+
+	cpus_and(mask, mask, allbutself);
+	cpus = cpus_weight(mask);
+
+	if (!cpus)
+		return 0;
 
 	data.func = func;
 	data.info = info;
@@ -343,19 +353,55 @@ __smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 	call_data = &data;
 	wmb();
-	/* Send a message to all other CPUs and wait for them to respond */
-	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
+
+	/* Send a message to other CPUs */
+	if (cpus_equal(mask, allbutself))
+		send_IPI_allbutself(CALL_FUNCTION_VECTOR);
+	else
+		send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
 
 	/* Wait for response */
 	while (atomic_read(&data.started) != cpus)
 		cpu_relax();
 
 	if (!wait)
-		return;
+		return 0;
 
 	while (atomic_read(&data.finished) != cpus)
 		cpu_relax();
+
+	return 0;
+}
+
+/**
+ * smp_call_function_mask(): Run a function on a set of other CPUs.
+ * @mask: The set of cpus to run on.  Must not include the current cpu.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function_mask(cpumask_t mask,
+			   void (*func)(void *), void *info,
+			   int wait)
+{
+	int ret;
+
+	/* Can deadlock when called with interrupts disabled */
+	WARN_ON(irqs_disabled());
+
+	spin_lock(&call_lock);
+	ret = __smp_call_function_mask(mask, func, info, wait);
+	spin_unlock(&call_lock);
+
+	return ret;
 }
+EXPORT_SYMBOL(smp_call_function_mask);
 
 /*
  * smp_call_function_single - Run a function on a specific CPU
@@ -374,6 +420,7 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
 	int nonatomic, int wait)
 {
 	/* prevent preemption and reschedule on another processor */
+	int ret;
 	int me = get_cpu();
 
 	/* Can deadlock when called with interrupts disabled */
@@ -387,50 +434,13 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
 		return 0;
 	}
 
-	spin_lock(&call_lock);
-	__smp_call_function_single(cpu, func, info, nonatomic, wait);
-	spin_unlock(&call_lock);
+	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
+
 	put_cpu();
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(smp_call_function_single);
 
-/*
- * this function sends a 'generic call function' IPI to all other CPUs
- * in the system.
- */
-static void __smp_call_function (void (*func) (void *info), void *info,
-				int nonatomic, int wait)
-{
-	struct call_data_struct data;
-	int cpus = num_online_cpus()-1;
-
-	if (!cpus)
-		return;
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	wmb();
-	/* Send a message to all other CPUs and wait for them to respond */
-	send_IPI_allbutself(CALL_FUNCTION_VECTOR);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (!wait)
-		return;
-
-	while (atomic_read(&data.finished) != cpus)
-		cpu_relax();
-}
-
 /*
  * smp_call_function - run a function on all other CPUs.
  * @func: The function to run. This must be fast and non-blocking.
@@ -449,10 +459,7 @@ static void __smp_call_function (void (*func) (void *info), void *info,
 int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
 			int wait)
 {
-	spin_lock(&call_lock);
-	__smp_call_function(func,info,nonatomic,wait);
-	spin_unlock(&call_lock);
-	return 0;
+	return smp_call_function_mask(cpu_online_map, func, info, wait);
 }
 EXPORT_SYMBOL(smp_call_function);
@@ -479,7 +486,7 @@ void smp_send_stop(void)
 	/* Don't deadlock on the call lock in panic */
 	nolock = !spin_trylock(&call_lock);
 	local_irq_save(flags);
-	__smp_call_function(stop_this_cpu, NULL, 0, 0);
+	__smp_call_function_mask(cpu_online_map, stop_this_cpu, NULL, 0);
 	if (!nolock)
 		spin_unlock(&call_lock);
 	disable_local_APIC();
......
@@ -37,6 +37,8 @@ extern void lock_ipi_call_lock(void);
 extern void unlock_ipi_call_lock(void);
 extern int smp_num_siblings;
 extern void smp_send_reschedule(int cpu);
+extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
+				  void *info, int wait);
 
 /*
  * cpu_sibling_map and cpu_core_map now live
......
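
For readers following the wait loops in the smp.c hunks above, here is a
condensed, non-authoritative sketch of the call_data_struct handshake they
rely on (the real structure, locking and IPI handler live elsewhere in the
x86_64 SMP code; only the shape of the protocol is shown):

    /* Sender: publishes call_data under call_lock, issues the IPI, then
     * spins until 'started' (and, if wait was requested, 'finished')
     * reaches the number of targeted CPUs.  Each receiving CPU increments
     * 'started', calls func(info), and increments 'finished' when the
     * sender asked to wait. */
    struct call_data_struct {
            void (*func)(void *info);
            void *info;
            atomic_t started;
            atomic_t finished;
            int wait;
    };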