Commit 00597c3e authored by Ingo Molnar

sched: remove leftover debugging

remove leftover debugging.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 6e1938d3
@@ -253,8 +253,6 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 	struct list_head *queue;
 	int idx;
 
-	assert_spin_locked(&rq->lock);
-
 	if (likely(rq->rt.rt_nr_running < 2))
 		return NULL;
 
@@ -500,8 +498,6 @@ static int push_rt_task(struct rq *rq)
 	int ret = 0;
 	int paranoid = RT_MAX_TRIES;
 
-	assert_spin_locked(&rq->lock);
-
 	if (!rq->rt.overloaded)
 		return 0;
 
@@ -546,8 +542,6 @@ static int push_rt_task(struct rq *rq)
 		goto out;
 	}
 
-	assert_spin_locked(&lowest_rq->lock);
-
 	deactivate_task(rq, next_task, 0);
 	set_task_cpu(next_task, lowest_rq->cpu);
 	activate_task(lowest_rq, next_task, 0);
 
@@ -589,8 +583,6 @@ static int pull_rt_task(struct rq *this_rq)
 	int cpu;
 	int ret = 0;
 
-	assert_spin_locked(&this_rq->lock);
-
 	/*
 	 * If cpusets are used, and we have overlapping
 	 * run queue cpusets, then this algorithm may not catch all.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment