Commit 0a930ce9 authored by Ingo Molnar, committed by Thomas Gleixner

sched: preempt-rt support

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 42cd561b
@@ -95,19 +95,6 @@
 */
#define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != 0)
#ifdef CONFIG_PREEMPT
# define PREEMPT_CHECK_OFFSET 1
#else
# define PREEMPT_CHECK_OFFSET 0
#endif
/*
* Check whether we were atomic before we did preempt_disable():
* (used by the scheduler)
*/
#define in_atomic_preempt_off() \
((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
#ifdef CONFIG_PREEMPT
# define preemptible() (preempt_count() == 0 && !irqs_disabled())
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
......
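The in_atomic() test kept above masks out PREEMPT_ACTIVE, so a task that is merely in the middle of being rescheduled is not reported as atomic; only real preempt_disable()/irq nesting counts. Below is a minimal user-space sketch of that masking; the flag value and helper names are illustrative stand-ins, not the kernel's asm definitions.

/* Illustrative user-space model only -- not kernel code. */
#include <stdio.h>

#define MY_PREEMPT_ACTIVE 0x10000000UL  /* assumed flag bit, for illustration */

static unsigned long preempt_count_sim;

/* in_atomic()-style test: ignore the "reschedule in progress" flag bit */
static int in_atomic_sim(void)
{
        return (preempt_count_sim & ~MY_PREEMPT_ACTIVE) != 0;
}

int main(void)
{
        preempt_count_sim = MY_PREEMPT_ACTIVE;  /* only the flag is set */
        printf("active only : in_atomic=%d\n", in_atomic_sim());  /* prints 0 */

        preempt_count_sim |= 1;                 /* one preempt_disable() level */
        printf("disable once: in_atomic=%d\n", in_atomic_sim());  /* prints 1 */
        return 0;
}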
@@ -100,6 +100,17 @@ struct fs_struct;
struct bts_context;
struct perf_counter_context;
#ifdef CONFIG_PREEMPT
extern int kernel_preemption;
#else
# define kernel_preemption 0
#endif
#ifdef CONFIG_PREEMPT_VOLUNTARY
extern int voluntary_preemption;
#else
# define voluntary_preemption 0
#endif
#ifdef CONFIG_PREEMPT_SOFTIRQS
extern int softirq_preemption;
#else
@@ -225,6 +236,28 @@ extern struct semaphore kernel_sem;
#define set_task_state(tsk, state_value) \
set_mb((tsk)->state, (state_value))
// #define PREEMPT_DIRECT
#ifdef CONFIG_X86_LOCAL_APIC
extern void nmi_show_all_regs(void);
#else
# define nmi_show_all_regs() do { } while (0)
#endif
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/securebits.h>
#include <linux/fs_struct.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/seccomp.h>
struct exec_domain;
/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
@@ -354,6 +387,11 @@ extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void __schedule(void);
asmlinkage void schedule(void);
extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
/*
* This one can be called with interrupts disabled, only
* to be used by lowlevel arch code!
*/
asmlinkage void __sched __schedule(void);
struct nsproxy;
struct user_namespace;
@@ -1686,6 +1724,15 @@ extern struct pid *cad_pid;
extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
#ifdef CONFIG_PREEMPT_RT
extern void __put_task_struct_cb(struct rcu_head *rhp);
static inline void put_task_struct(struct task_struct *t)
{
if (atomic_dec_and_test(&t->usage))
call_rcu(&t->rcu, __put_task_struct_cb);
}
#else
extern void __put_task_struct(struct task_struct *t);
static inline void put_task_struct(struct task_struct *t)
@@ -1693,6 +1740,7 @@ static inline void put_task_struct(struct task_struct *t)
if (atomic_dec_and_test(&t->usage))
__put_task_struct(t);
}
#endif
extern cputime_t task_utime(struct task_struct *p);
extern cputime_t task_stime(struct task_struct *p);
@@ -1910,6 +1958,7 @@ extern struct task_struct *curr_task(int cpu);
extern void set_curr_task(int cpu, struct task_struct *p);
void yield(void);
void __yield(void);
/*
 * The default (Linux) execution domain.
......
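The CONFIG_PREEMPT_RT variant of put_task_struct() above never frees the task inline: the final reference drop only queues __put_task_struct_cb() through call_rcu(), so the actual teardown runs later from callback context. A small user-space analogue of that "defer the last put" pattern follows; defer_free() and run_deferred() are made-up helpers standing in for the RCU machinery, not kernel APIs.

/* User-space sketch only -- not the kernel implementation. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
        atomic_int usage;
        struct obj *next_deferred;      /* stands in for struct rcu_head */
};

static struct obj *deferred_list;

static void defer_free(struct obj *o)   /* ~ call_rcu() */
{
        o->next_deferred = deferred_list;
        deferred_list = o;
}

static void run_deferred(void)          /* ~ RCU callback processing */
{
        while (deferred_list) {
                struct obj *o = deferred_list;
                deferred_list = o->next_deferred;
                free(o);                /* ~ __put_task_struct_cb() */
        }
}

static void put_obj(struct obj *o)      /* ~ put_task_struct() */
{
        if (atomic_fetch_sub(&o->usage, 1) == 1)
                defer_free(o);          /* last put: never frees inline */
}

int main(void)
{
        struct obj *o = calloc(1, sizeof(*o));
        atomic_store(&o->usage, 2);

        put_obj(o);             /* still referenced, nothing happens */
        put_obj(o);             /* last put: only queued */
        run_deferred();         /* reclaimed later, outside the hot path */
        return 0;
}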
@@ -249,8 +249,13 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
/* didnt get the lock, go to sleep: */
spin_unlock_mutex(&lock->wait_lock, flags);
preempt_enable_and_schedule();
local_irq_disable();
__preempt_enable_no_resched();
__schedule();
preempt_disable();
local_irq_enable();
spin_lock_mutex(&lock->wait_lock, flags);
}
......
@@ -860,6 +860,48 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
}
}
static inline void incr_rt_nr_uninterruptible(struct task_struct *p,
struct rq *rq)
{
rq->rt.rt_nr_uninterruptible++;
}
static inline void decr_rt_nr_uninterruptible(struct task_struct *p,
struct rq *rq)
{
rq->rt.rt_nr_uninterruptible--;
}
unsigned long rt_nr_running(void)
{
unsigned long i, sum = 0;
for_each_online_cpu(i)
sum += cpu_rq(i)->rt.rt_nr_running;
return sum;
}
unsigned long rt_nr_running_cpu(int cpu)
{
return cpu_rq(cpu)->rt.rt_nr_running;
}
unsigned long rt_nr_uninterruptible(void)
{
unsigned long i, sum = 0;
for_each_online_cpu(i)
sum += cpu_rq(i)->rt.rt_nr_uninterruptible;
return sum;
}
unsigned long rt_nr_uninterruptible_cpu(int cpu)
{
return cpu_rq(cpu)->rt.rt_nr_uninterruptible;
}
/*
 * Adding/removing a task to/from a priority array:
 */
@@ -872,6 +914,9 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
enqueue_rt_entity(rt_se);
if (p->state == TASK_UNINTERRUPTIBLE)
decr_rt_nr_uninterruptible(p, rq);
if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
enqueue_pushable_task(rq, p);
@@ -883,6 +928,10 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
struct sched_rt_entity *rt_se = &p->rt;
update_curr_rt(rq);
if (p->state == TASK_UNINTERRUPTIBLE)
incr_rt_nr_uninterruptible(p, rq);
dequeue_rt_entity(rt_se);
dequeue_pushable_task(rq, p);
@@ -1462,8 +1511,10 @@ static int pull_rt_task(struct rq *this_rq)
static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
{
/* Try to pull RT tasks here if we lower this rq's prio */
if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio) {
pull_rt_task(rq);
schedstat_inc(rq, rto_schedule);
}
}
/*
@@ -1545,7 +1596,6 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 */
if (weight > 1)
enqueue_pushable_task(rq, p);
} }
if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
......
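rt_nr_running() and rt_nr_uninterruptible(), added above, simply sum a per-runqueue counter over the online CPUs. A minimal user-space analogue follows (a fixed NCPUS array replaces the kernel's per-CPU runqueues; the names are illustrative only).

/* Illustrative user-space sketch -- not kernel code. */
#include <stdio.h>

#define NCPUS 4

static unsigned long rt_running[NCPUS];   /* ~ cpu_rq(i)->rt.rt_nr_running */

static unsigned long rt_nr_running_sim(void)
{
        unsigned long i, sum = 0;

        for (i = 0; i < NCPUS; i++)       /* ~ for_each_online_cpu(i) */
                sum += rt_running[i];
        return sum;
}

int main(void)
{
        rt_running[0] = 2;                /* two RT tasks queued on CPU 0 */
        rt_running[3] = 1;                /* one on CPU 3 */
        printf("rt tasks runnable: %lu\n", rt_nr_running_sim());  /* prints 3 */
        return 0;
}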
@@ -41,15 +41,15 @@ int __lockfunc __reacquire_kernel_lock(void)
struct task_struct *task = current;
int saved_lock_depth = task->lock_depth;
local_irq_enable();
BUG_ON(saved_lock_depth < 0);
task->lock_depth = -1;
__preempt_enable_no_resched();
down(&kernel_sem);
preempt_disable();
task->lock_depth = saved_lock_depth;
local_irq_enable();
return 0;
}
......