Commit 514e0e29 authored by Thomas Gleixner

sched: Debug missed preemption checks

Developers use preempt_enable_no_resched() in places where the code
calls schedule() immediately afterwards, which is correct. But there
are also places where preempt_enable_no_resched() is not followed by
schedule(), which risks missing a pending reschedule until the next
preemption check.

Add debug infrastructure to find the offending code. The identified
correct users are converted to use __preempt_enable_no_resched().
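
As a hedged illustration of the kind of offender the check flags (the
function and helper below are hypothetical, not part of this patch):

    /*
     * Hypothetical offender: drops the last preempt count without a
     * subsequent schedule(). With CONFIG_DEBUG_PREEMPT the out-of-line
     * preempt_enable_no_resched() fires WARN_ONCE here, because the
     * count reaches zero while a pending reschedule may go unnoticed.
     */
    static void suspicious_user(void)
    {
            preempt_disable();
            do_some_work();                 /* hypothetical helper */
            preempt_enable_no_resched();    /* count -> 0, no schedule() */
    }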

For the ever-repeating "preempt_enable_no_resched(); schedule();"
sequence a convenience macro preempt_enable_and_schedule() is
introduced.
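
A minimal sketch of the conversion, modeled on the ksoftirqd and
sched_yield hunks below:

    /* Before: correct, but the debug check cannot prove it: */
    preempt_enable_no_resched();
    schedule();

    /* After: marked as a known-correct user via the helper, which
     * uses the non-warning __preempt_enable_no_resched() internally:
     */
    preempt_enable_and_schedule();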

Based on a previous patch from Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 030dc4ad
@@ -33,12 +33,24 @@ do { \
 	barrier(); \
 } while (0)
 
-#define preempt_enable_no_resched() \
+#define __preempt_enable_no_resched() \
 do { \
 	barrier(); \
 	dec_preempt_count(); \
 } while (0)
 
+#ifdef CONFIG_DEBUG_PREEMPT
+extern void notrace preempt_enable_no_resched(void);
+#else
+# define preempt_enable_no_resched() __preempt_enable_no_resched()
+#endif
+
+#define preempt_enable_and_schedule() \
+do { \
+	__preempt_enable_no_resched(); \
+	schedule(); \
+} while (0)
+
 #define preempt_check_resched() \
 do { \
 	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
@@ -47,7 +59,7 @@ do { \
 
 #define preempt_enable() \
 do { \
-	preempt_enable_no_resched(); \
+	__preempt_enable_no_resched(); \
 	barrier(); \
 	preempt_check_resched(); \
 } while (0)
@@ -84,6 +96,8 @@ do { \
 
 #define preempt_disable()		do { } while (0)
 #define preempt_enable_no_resched()	do { } while (0)
+#define __preempt_enable_no_resched()	do { } while (0)
+#define preempt_enable_and_schedule()	schedule()
 #define preempt_enable()		do { } while (0)
 #define preempt_check_resched()		do { } while (0)
...
@@ -40,7 +40,8 @@
 	do { preempt_enable(); __release(lock); (void)(lock); } while (0)
 
 #define __UNLOCK_BH(lock) \
-	do { preempt_enable_no_resched(); local_bh_enable(); __release(lock); (void)(lock); } while (0)
+	do { __preempt_enable_no_resched(); local_bh_enable(); __release(lock); \
+	     (void)(lock); } while (0)
 
 #define __UNLOCK_IRQ(lock) \
 	do { local_irq_enable(); __UNLOCK(lock); } while (0)
...
@@ -463,8 +463,7 @@ static noinline void __init_refok rest_init(void)
 	 */
 	init_idle_bootup_task(current);
 	rcu_scheduler_starting();
-	preempt_enable_no_resched();
-	schedule();
+	preempt_enable_and_schedule();
 	preempt_disable();
 
 	/* Call into cpu_idle with preempt disabled */
...
@@ -249,8 +249,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 
 		/* didnt get the lock, go to sleep: */
 		spin_unlock_mutex(&lock->wait_lock, flags);
-		preempt_enable_no_resched();
-		schedule();
+		preempt_enable_and_schedule();
 		preempt_disable();
 		spin_lock_mutex(&lock->wait_lock, flags);
 	}
...
@@ -5163,6 +5163,19 @@ notrace unsigned long get_parent_ip(unsigned long addr)
 	return addr;
 }
 
+#ifdef CONFIG_DEBUG_PREEMPT
+void notrace preempt_enable_no_resched(void)
+{
+	barrier();
+	dec_preempt_count();
+	WARN_ONCE(!preempt_count(),
+		  KERN_ERR "BUG: %s:%d task might have lost a preemption check!\n",
+		  current->comm, current->pid);
+}
+EXPORT_SYMBOL(preempt_enable_no_resched);
+#endif
+
 #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
 				defined(CONFIG_PREEMPT_TRACER))
@@ -5381,7 +5394,7 @@ need_resched_nonpreemptible:
 	if (unlikely(reacquire_kernel_lock(current) < 0))
 		goto need_resched_nonpreemptible;
 
-	preempt_enable_no_resched();
+	__preempt_enable_no_resched();
 	if (need_resched())
 		goto need_resched;
 }
@@ -6555,9 +6568,8 @@ SYSCALL_DEFINE0(sched_yield)
 	__release(rq->lock);
 	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
 	_raw_spin_unlock(&rq->lock);
-	preempt_enable_no_resched();
-	schedule();
+	preempt_enable_and_schedule();
 
 	return 0;
 }
...
@@ -1622,8 +1622,7 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
 		 */
 		preempt_disable();
 		read_unlock(&tasklist_lock);
-		preempt_enable_no_resched();
-		schedule();
+		preempt_enable_and_schedule();
 	} else {
 		/*
 		 * By the time we got the lock, our tracer went away.
...
@@ -308,7 +308,7 @@ void irq_exit(void)
 	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
 		tick_nohz_stop_sched_tick(0);
 #endif
-	preempt_enable_no_resched();
+	__preempt_enable_no_resched();
 }
 
 /*
@@ -642,8 +642,7 @@ static int ksoftirqd(void * __bind_cpu)
 
 	while (!kthread_should_stop()) {
 		preempt_disable();
 		if (!local_softirq_pending()) {
-			preempt_enable_no_resched();
-			schedule();
+			preempt_enable_and_schedule();
 			preempt_disable();
 		}
@@ -656,7 +655,7 @@ static int ksoftirqd(void * __bind_cpu)
 			if (cpu_is_offline((long)__bind_cpu))
 				goto wait_to_die;
 			do_softirq();
-			preempt_enable_no_resched();
+			__preempt_enable_no_resched();
 			cond_resched();
 			preempt_disable();
 			rcu_qsctr_inc((long)__bind_cpu);
...
@@ -364,7 +364,7 @@ void __lockfunc _spin_unlock_bh(spinlock_t *lock)
 {
 	spin_release(&lock->dep_map, 1, _RET_IP_);
 	_raw_spin_unlock(lock);
-	preempt_enable_no_resched();
+	__preempt_enable_no_resched();
 	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
 }
 EXPORT_SYMBOL(_spin_unlock_bh);
@@ -391,7 +391,7 @@ void __lockfunc _read_unlock_bh(rwlock_t *lock)
 {
 	rwlock_release(&lock->dep_map, 1, _RET_IP_);
 	_raw_read_unlock(lock);
-	preempt_enable_no_resched();
+	__preempt_enable_no_resched();
 	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
 }
 EXPORT_SYMBOL(_read_unlock_bh);
@@ -418,7 +418,7 @@ void __lockfunc _write_unlock_bh(rwlock_t *lock)
 {
 	rwlock_release(&lock->dep_map, 1, _RET_IP_);
 	_raw_write_unlock(lock);
-	preempt_enable_no_resched();
+	__preempt_enable_no_resched();
 	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
 }
 EXPORT_SYMBOL(_write_unlock_bh);
@@ -432,7 +432,7 @@ int __lockfunc _spin_trylock_bh(spinlock_t *lock)
 		return 1;
 	}
 
-	preempt_enable_no_resched();
+	__preempt_enable_no_resched();
 	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
 	return 0;
 }
...
@@ -50,7 +50,7 @@ int __lockfunc __reacquire_kernel_lock(void)
 void __lockfunc __release_kernel_lock(void)
 {
 	_raw_spin_unlock(&kernel_flag);
-	preempt_enable_no_resched();
+	__preempt_enable_no_resched();
 }
 
 /*
...