Commit d331e739 authored by Venkatesh Pallipadi's avatar Venkatesh Pallipadi Committed by Andi Kleen

[PATCH] x86-64: Fix interrupt race in idle callback (3rd try)

Idle callbacks have some races: enter_idle() sets isidle, but subsequent
interrupts can arrive on that CPU before it actually goes idle. Because of
this, an IDLE_END callback can fire before IDLE_START. To avoid these races,
disable interrupts before enter_idle() and make sure that no idle routine
re-enables interrupts before entering idle.

Note that poll_idle() still has this race, as it has to enable interrupts
before going to idle. But all other idle routines have the race fixed.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andi Kleen <ak@suse.de>
parent a0429d0d
...@@ -127,6 +127,7 @@ static void default_idle(void) ...@@ -127,6 +127,7 @@ static void default_idle(void)
*/ */
/*
 * poll_idle - busy-wait idle loop.
 *
 * Interrupts must be re-enabled before spinning, otherwise the CPU
 * would poll with interrupts permanently off.  Because of that, the
 * enter_idle()/exit_idle() ordering race described in the changelog
 * still exists for poll_idle - a known, documented limitation.
 */
static void poll_idle (void)
{
	local_irq_enable();
	cpu_relax();
}
...@@ -208,6 +209,12 @@ void cpu_idle (void) ...@@ -208,6 +209,12 @@ void cpu_idle (void)
idle = default_idle; idle = default_idle;
if (cpu_is_offline(smp_processor_id())) if (cpu_is_offline(smp_processor_id()))
play_dead(); play_dead();
/*
* Idle routines should keep interrupts disabled
* from here on, until they go to idle.
* Otherwise, idle callbacks can misfire.
*/
local_irq_disable();
enter_idle(); enter_idle();
idle(); idle();
/* In many cases the interrupt that ended idle /* In many cases the interrupt that ended idle
...@@ -245,8 +252,16 @@ void mwait_idle_with_hints(unsigned long eax, unsigned long ecx) ...@@ -245,8 +252,16 @@ void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
/* Default MONITOR/MWAIT with no hints, used for default C1 state */ /* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void) static void mwait_idle(void)
{ {
local_irq_enable(); if (!need_resched()) {
mwait_idle_with_hints(0,0); __monitor((void *)&current_thread_info()->flags, 0, 0);
smp_mb();
if (!need_resched())
__sti_mwait(0, 0);
else
local_irq_enable();
} else {
local_irq_enable();
}
} }
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
......
...@@ -475,6 +475,14 @@ static inline void __mwait(unsigned long eax, unsigned long ecx) ...@@ -475,6 +475,14 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
: :"a" (eax), "c" (ecx)); : :"a" (eax), "c" (ecx));
} }
/*
 * Enable interrupts and enter MWAIT as one step.  STI blocks interrupt
 * delivery until after the following instruction completes, so no
 * interrupt can slip in between enabling interrupts and the MWAIT
 * itself -- this is what lets idle routines keep interrupts disabled
 * right up to the idle entry point.
 */
static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	/* ".byte 0x0f,0x01,0xc9" is "mwait %eax,%ecx" hand-encoded for
	 * assemblers that do not know the mnemonic. */
	asm volatile(
		"sti; .byte 0x0f,0x01,0xc9;"
		: :"a" (eax), "c" (ecx));
}
extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
#define stack_current() \ #define stack_current() \
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment