Commit 2469057e authored by Ingo Molnar, committed by Thomas Gleixner

x86: preempt-rt preparatory patches for x86 (32bit)

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 7f36d9de
@@ -298,9 +298,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
 #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
 #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define __raw_spin_relax(lock)	cpu_relax()
+#define __raw_read_relax(lock)	cpu_relax()
+#define __raw_write_relax(lock)	cpu_relax()
 
 /* The {read|write|spin}_lock() on x86 are full memory barriers. */
 static inline void smp_mb__after_lock(void) { }
...
@@ -17,7 +17,9 @@
 
 static inline void __native_flush_tlb(void)
 {
+	preempt_disable();
 	native_write_cr3(native_read_cr3());
+	preempt_enable();
 }
 
 static inline void __native_flush_tlb_global(void)
...
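Note (not part of the commit): the preempt_disable()/preempt_enable() pair above is needed because, on a fully preemptible kernel, the task could otherwise be preempted and migrated between native_read_cr3() and native_write_cr3(), so the CR3 writeback would flush a different CPU's TLB than intended. A minimal sketch of the same pattern, assuming the kernel's usual preempt and CR3 accessor helpers; the function name is illustrative only:

/* Illustrative sketch: keep the CR3 read-modify-write on one CPU
 * so the reload flushes the TLB of the CPU we are running on. */
static inline void flush_local_tlb_preempt_safe(void)
{
	preempt_disable();			/* no migration from here on */
	native_write_cr3(native_read_cr3());	/* CR3 reload flushes non-global TLB entries */
	preempt_enable();			/* preemption allowed again */
}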
@@ -90,7 +90,9 @@ static inline unsigned int get_timer_irqs(int cpu)
  */
 static __init void nmi_cpu_busy(void *data)
 {
+#ifndef CONFIG_PREEMPT_RT
 	local_irq_enable_in_hardirq();
+#endif
 	/*
 	 * Intentionally don't use cpu_relax here. This is
 	 * to make sure that the performance counter really ticks,
...
@@ -881,7 +881,7 @@ static int __initdata early_console_initialized;
 
 asmlinkage void early_printk(const char *fmt, ...)
 {
-	char buf[512];
+	static char buf[512];
 	int n;
 	va_list ap;
 
...
@@ -30,7 +30,11 @@ static void __init zap_identity_mappings(void)
 {
 	pgd_t *pgd = pgd_offset_k(0UL);
 	pgd_clear(pgd);
-	__flush_tlb_all();
+	/*
+	 * preempt_disable/enable does not work this early in the
+	 * bootup yet:
+	 */
+	write_cr3(read_cr3());
 }
 
 /* Don't add a printk in there. printk relies on the PDA which is not initialized
...
@@ -152,9 +152,11 @@ void cpu_idle(void)
 		}
 
 		tick_nohz_restart_sched_tick();
+		local_irq_disable();
 		__preempt_enable_no_resched();
-		schedule();
+		__schedule();
 		preempt_disable();
+		local_irq_enable();
 	}
 }
 
...
@@ -782,6 +782,13 @@ static void do_signal(struct pt_regs *regs)
 	int signr;
 	sigset_t *oldset;
 
+#ifdef CONFIG_PREEMPT_RT
+	/*
+	 * Fully-preemptible kernel does not need interrupts disabled:
+	 */
+	local_irq_enable();
+	preempt_check_resched();
+#endif
 	/*
 	 * We want the common case to go fast, which is why we may in certain
 	 * cases get here from kernel mode. Just return without doing anything
...
@@ -120,6 +120,16 @@ static void native_smp_send_reschedule(int cpu)
 	apic->send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
 }
 
+/*
+ * this function sends a 'reschedule' IPI to all other CPUs.
+ * This is used when RT tasks are starving and other CPUs
+ * might be able to run them:
+ */
+void smp_send_reschedule_allbutself(void)
+{
+	apic->send_IPI_allbutself(RESCHEDULE_VECTOR);
+}
+
 void native_send_call_func_single_ipi(int cpu)
 {
 	apic->send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR);
...
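Note (hypothetical usage, not part of the commit): a caller of the new helper would simply broadcast the reschedule vector whenever it decides other CPUs should run through their schedulers, for example when RT tasks are starving locally. The wrapper name below is illustrative only:

/* Illustrative sketch: ask every other CPU to reschedule so one of
 * them may pick up a starving RT task. */
static void kick_other_cpus_for_rt(void)
{
	smp_send_reschedule_allbutself();
}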