Commit e545a614 authored by Manfred Spraul, committed by Ingo Molnar

kernel/cpu.c: create a CPU_STARTING cpu_chain notifier

Right now, there is no notifier that is called on a new cpu before the new
cpu begins processing interrupts/softirqs.
Various kernel functions need such a notification: e.g. kvm works around its
absence by calling smp_call_function_single(), and rcu polls cpu_online_map.

The patch adds a CPU_STARTING notification. It also adds a helper function
that sends the message to all cpu_chain handlers.

Tested on x86-64.
All other archs are untested. Especially on sparc, I'm not sure if I got
it right.
Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 7686ad56
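
For illustration only (none of this code is part of the patch): a minimal sketch of how a subsystem could consume the new event instead of the smp_call_function_single()/cpu_online_map workarounds mentioned above. The example_* names are hypothetical.

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

/* Hypothetical handler: CPU_STARTING runs on the starting cpu with
 * interrupts still disabled, so it must not sleep and must not fail. */
static int example_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		/* per-cpu setup that previously needed an IPI or polling */
		pr_debug("cpu %u is starting\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier = {
	.notifier_call = example_cpu_callback,
};

static int __init example_register(void)
{
	register_cpu_notifier(&example_cpu_notifier);
	return 0;
}
core_initcall(example_register);
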
......@@ -149,6 +149,9 @@ smp_callin(void)
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
/* inform the notifiers about the new cpu */
notify_cpu_starting(cpuid);
/* Must have completely accurate bogos. */
local_irq_enable();
......
......@@ -277,6 +277,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
/*
* Enable local interrupts.
*/
notify_cpu_starting(cpu);
local_irq_enable();
local_fiq_enable();
......
......@@ -178,6 +178,7 @@ void __init smp_callin(void)
unmask_irq(IPI_INTR_VECT);
unmask_irq(TIMER0_INTR_VECT);
preempt_disable();
notify_cpu_starting(cpu);
local_irq_enable();
cpu_set(cpu, cpu_online_map);
......
......@@ -401,6 +401,7 @@ smp_callin (void)
spin_lock(&vector_lock);
/* Setup the per cpu irq handling data structures */
__setup_vector_irq(cpuid);
notify_cpu_starting(cpuid);
cpu_set(cpuid, cpu_online_map);
per_cpu(cpu_state, cpuid) = CPU_ONLINE;
spin_unlock(&vector_lock);
......
......@@ -498,6 +498,8 @@ static void __init smp_online(void)
{
int cpu_id = smp_processor_id();
notify_cpu_starting(cpu_id);
local_irq_enable();
/* Get our bogomips. */
......
......@@ -121,6 +121,8 @@ asmlinkage __cpuinit void start_secondary(void)
cpu = smp_processor_id();
cpu_data[cpu].udelay_val = loops_per_jiffy;
notify_cpu_starting(cpu);
mp_ops->smp_finish();
set_cpu_sibling_map(cpu);
......
......@@ -453,6 +453,7 @@ int __devinit start_secondary(void *unused)
secondary_cpu_time_init();
ipi_call_lock();
notify_cpu_starting(cpu);
cpu_set(cpu, cpu_online_map);
/* Update sibling maps */
base = cpu_first_thread_in_core(cpu);
......
......@@ -585,6 +585,8 @@ int __cpuinit start_secondary(void *cpuvoid)
/* Enable pfault pseudo page faults on this cpu. */
pfault_init();
/* call cpu notifiers */
notify_cpu_starting(smp_processor_id());
/* Mark this cpu as online */
spin_lock(&call_lock);
cpu_set(smp_processor_id(), cpu_online_map);
......
......@@ -82,6 +82,8 @@ asmlinkage void __cpuinit start_secondary(void)
preempt_disable();
notify_cpu_starting(smp_processor_id());
local_irq_enable();
calibrate_delay();
......
......@@ -88,6 +88,7 @@ void __init smp4d_callin(void)
local_flush_cache_all();
local_flush_tlb_all();
notify_cpu_starting(cpuid);
/*
* Unblock the master CPU _only_ when the scheduler state
* of all secondary CPUs will be up-to-date, so after
......
......@@ -71,6 +71,8 @@ void __cpuinit smp4m_callin(void)
local_flush_cache_all();
local_flush_tlb_all();
notify_cpu_starting(cpuid);
/* Get our local ticker going. */
smp_setup_percpu_timer();
......
......@@ -85,6 +85,7 @@ static int idle_proc(void *cpup)
while (!cpu_isset(cpu, smp_commenced_mask))
cpu_relax();
notify_cpu_starting(cpu);
cpu_set(cpu, cpu_online_map);
default_idle();
return 0;
......
......@@ -257,6 +257,7 @@ static void __cpuinit smp_callin(void)
end_local_APIC_setup();
map_cpu_to_logical_apicid();
notify_cpu_starting(cpuid);
/*
* Get our bogomips.
*
......
......@@ -448,6 +448,8 @@ static void __init start_secondary(void *unused)
VDEBUG(("VOYAGER SMP: CPU%d, stack at about %p\n", cpuid, &cpuid));
notify_cpu_starting(cpuid);
/* enable interrupts */
local_irq_enable();
......
......@@ -69,6 +69,7 @@ static inline void unregister_cpu_notifier(struct notifier_block *nb)
#endif
int cpu_up(unsigned int cpu);
void notify_cpu_starting(unsigned int cpu);
extern void cpu_hotplug_init(void);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);
......
......@@ -213,9 +213,16 @@ static inline int notifier_to_errno(int ret)
#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */
#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
#define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task,
* not handling interrupts, soon dead.
* Called on the dying cpu, interrupts
* are already disabled. Must not
* sleep, must not fail */
#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug
* lock is dropped */
#define CPU_STARTING 0x000A /* CPU (unsigned)v soon running.
* Called on the new cpu, just before
* enabling interrupts. Must not sleep,
* must not fail */
/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
* operation in progress
......@@ -229,6 +236,7 @@ static inline int notifier_to_errno(int ret)
#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN)
#define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN)
#define CPU_STARTING_FROZEN (CPU_STARTING | CPU_TASKS_FROZEN)
/* Hibernation and suspend events */
#define PM_HIBERNATION_PREPARE 0x0001 /* Going to hibernate */
......
......@@ -453,6 +453,25 @@ out:
}
#endif /* CONFIG_PM_SLEEP_SMP */
/**
* notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
* @cpu: cpu that just started
*
* This function calls the cpu_chain notifiers with CPU_STARTING.
* It must be called by the arch code on the new cpu, before the new cpu
* enables interrupts and before the "boot" cpu returns from __cpu_up().
*/
void notify_cpu_starting(unsigned int cpu)
{
unsigned long val = CPU_STARTING;
#ifdef CONFIG_PM_SLEEP_SMP
if (cpu_isset(cpu, frozen_cpus))
val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu);
}
#endif /* CONFIG_SMP */
/*
......
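
To make the ordering contract from the comment above concrete, here is a schematic (hypothetical, not taken from any of the arch hunks in this patch) of where the call sits in a secondary-cpu bringup path:

/* Schematic only; everything except notify_cpu_starting() and the
 * ordering constraints differs from architecture to architecture. */
void __cpuinit example_start_secondary(void)
{
	unsigned int cpu = smp_processor_id();

	/* ... per-cpu setup (mmu, local timer, ...), interrupts still off ... */

	notify_cpu_starting(cpu);	/* CPU_STARTING: must not sleep or fail */

	cpu_set(cpu, cpu_online_map);	/* lets the boot cpu's __cpu_up() return */
	local_irq_enable();		/* only now start taking interrupts */

	cpu_idle();			/* enter the idle loop */
}
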