Commit 529d35d4 authored by Gregory Haskins, committed by Thomas Gleixner

sched: make task->oncpu available in all configurations

We will use this later in the series to eliminate the need for a function
call.

[ Steven Rostedt: added task_is_current function ]
Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 23ebb33f
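
In short, task_is_current() ends up looking roughly like this after the patch (a condensed sketch of the hunks below, with file attribution inferred from the hunk contexts; CONFIG_SMP selects the inline variant that avoids the call into kernel/sched.c):

#ifdef CONFIG_SMP
/* include/linux/sched.h: a plain field read that the compiler can inline */
static inline int task_is_current(struct task_struct *task)
{
	return task->oncpu;
}
#else
/* kernel/sched.c: the existing out-of-line helper, kept for UP builds */
int task_is_current(struct task_struct *task)
{
	return task_rq(task)->curr == task;
}
#endif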
include/linux/sched.h
@@ -1220,9 +1220,7 @@ struct task_struct {
 	int lock_depth;		/* BKL lock depth */
 
 #ifdef CONFIG_SMP
-#ifdef __ARCH_WANT_UNLOCKED_CTXSW
 	int oncpu;
 #endif
-#endif
 
 	int prio, static_prio, normal_prio;
@@ -2586,7 +2584,12 @@ static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
 #define TASK_STATE_TO_CHAR_STR "RMSDTtZX"
 
 extern int task_is_current(struct task_struct *task);
+#ifdef CONFIG_SMP
+static inline int task_is_current(struct task_struct *task)
+{
+	return task->oncpu;
+}
+#endif
 
 #endif /* __KERNEL__ */
kernel/sched.c
@@ -752,10 +752,12 @@ inline void update_rq_clock(struct rq *rq)
 	rq->clock = sched_clock_cpu(cpu_of(rq));
 }
 
+#ifndef CONFIG_SMP
 int task_is_current(struct task_struct *task)
 {
 	return task_rq(task)->curr == task;
 }
+#endif
 
 /*
  * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
@@ -969,18 +971,39 @@ static inline int task_current(struct rq *rq, struct task_struct *p)
 	return rq->curr == p;
 }
 
-#ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline int task_running(struct rq *rq, struct task_struct *p)
 {
+#ifdef CONFIG_SMP
+	return p->oncpu;
+#else
 	return task_current(rq, p);
+#endif
 }
 
+#ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 {
+#ifdef CONFIG_SMP
+	/*
+	 * We can optimise this out completely for !SMP, because the
+	 * SMP rebalancing from interrupt is the only thing that cares
+	 * here.
+	 */
+	next->oncpu = 1;
+#endif
 }
 
 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 {
+#ifdef CONFIG_SMP
+	/*
+	 * After ->oncpu is cleared, the task can be moved to a different CPU.
+	 * We must ensure this doesn't happen until the switch is completely
+	 * finished.
+	 */
+	smp_wmb();
+	prev->oncpu = 0;
+#endif
 #ifdef CONFIG_DEBUG_SPINLOCK
 	/* this is a valid case when another task releases the spinlock */
 	rq->lock.owner = current;
@@ -996,14 +1019,6 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 }
 
 #else /* __ARCH_WANT_UNLOCKED_CTXSW */
-static inline int task_running(struct rq *rq, struct task_struct *p)
-{
-#ifdef CONFIG_SMP
-	return p->oncpu;
-#else
-	return task_current(rq, p);
-#endif
-}
 
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 {
@@ -2759,7 +2774,7 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	if (likely(sched_info_on()))
 		memset(&p->sched_info, 0, sizeof(p->sched_info));
 #endif
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#if defined(CONFIG_SMP)
 	p->oncpu = 0;
 #endif
 #ifdef CONFIG_PREEMPT
@@ -7235,7 +7250,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	__set_task_cpu(idle, cpu);
 
 	rq->curr = rq->idle = idle;
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#if defined(CONFIG_SMP)
 	idle->oncpu = 1;
 #endif
 	atomic_spin_unlock_irqrestore(&rq->lock, flags);
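
To illustrate the "eliminate the need for a function call" note in the changelog: with CONFIG_SMP, a check like the hypothetical caller below now compiles down to a single load of p->oncpu instead of an out-of-line call into kernel/sched.c. The function name and surrounding logic are illustrative only and are not part of this patch or the series.

/*
 * Hypothetical caller (illustrative only): on SMP kernels the
 * task_is_current() check below is an inline read of p->oncpu,
 * so no call into kernel/sched.c is emitted on this path.
 */
static void example_fastpath(struct task_struct *p)
{
	if (task_is_current(p))
		return;		/* p is running on a CPU right now */

	/* ... handling for a task that is not currently running ... */
}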