Commit fe2eea3f authored by Christoph Lameter, committed by Linus Torvalds

[PATCH] sched: disable interrupts for locking in load_balance()

Interrupts must be disabled for request queue locks if we want to run
load_balance() with interrupts enabled.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Peter Williams <pwil3058@bigpond.net.au>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: "Siddha, Suresh B" <suresh.b.siddha@intel.com>
Cc: "Chen, Kenneth W" <kenneth.w.chen@intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 4211a9a2
...@@ -2546,8 +2546,6 @@ static inline unsigned long minus_1_or_zero(unsigned long n) ...@@ -2546,8 +2546,6 @@ static inline unsigned long minus_1_or_zero(unsigned long n)
/* /*
* Check this_cpu to ensure it is balanced within domain. Attempt to move * Check this_cpu to ensure it is balanced within domain. Attempt to move
* tasks if there is an imbalance. * tasks if there is an imbalance.
*
* Called with this_rq unlocked.
*/ */
static int load_balance(int this_cpu, struct rq *this_rq, static int load_balance(int this_cpu, struct rq *this_rq,
struct sched_domain *sd, enum idle_type idle) struct sched_domain *sd, enum idle_type idle)
...@@ -2557,6 +2555,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, ...@@ -2557,6 +2555,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
unsigned long imbalance; unsigned long imbalance;
struct rq *busiest; struct rq *busiest;
cpumask_t cpus = CPU_MASK_ALL; cpumask_t cpus = CPU_MASK_ALL;
unsigned long flags;
/* /*
* When power savings policy is enabled for the parent domain, idle * When power savings policy is enabled for the parent domain, idle
...@@ -2596,11 +2595,13 @@ redo: ...@@ -2596,11 +2595,13 @@ redo:
* still unbalanced. nr_moved simply stays zero, so it is * still unbalanced. nr_moved simply stays zero, so it is
* correctly treated as an imbalance. * correctly treated as an imbalance.
*/ */
local_irq_save(flags);
double_rq_lock(this_rq, busiest); double_rq_lock(this_rq, busiest);
nr_moved = move_tasks(this_rq, this_cpu, busiest, nr_moved = move_tasks(this_rq, this_cpu, busiest,
minus_1_or_zero(busiest->nr_running), minus_1_or_zero(busiest->nr_running),
imbalance, sd, idle, &all_pinned); imbalance, sd, idle, &all_pinned);
double_rq_unlock(this_rq, busiest); double_rq_unlock(this_rq, busiest);
local_irq_restore(flags);
/* All tasks on this runqueue were pinned by CPU affinity */ /* All tasks on this runqueue were pinned by CPU affinity */
if (unlikely(all_pinned)) { if (unlikely(all_pinned)) {
...@@ -2617,13 +2618,13 @@ redo: ...@@ -2617,13 +2618,13 @@ redo:
if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) { if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
spin_lock(&busiest->lock); spin_lock_irqsave(&busiest->lock, flags);
/* don't kick the migration_thread, if the curr /* don't kick the migration_thread, if the curr
* task on busiest cpu can't be moved to this_cpu * task on busiest cpu can't be moved to this_cpu
*/ */
if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) { if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
spin_unlock(&busiest->lock); spin_unlock_irqrestore(&busiest->lock, flags);
all_pinned = 1; all_pinned = 1;
goto out_one_pinned; goto out_one_pinned;
} }
...@@ -2633,7 +2634,7 @@ redo: ...@@ -2633,7 +2634,7 @@ redo:
busiest->push_cpu = this_cpu; busiest->push_cpu = this_cpu;
active_balance = 1; active_balance = 1;
} }
spin_unlock(&busiest->lock); spin_unlock_irqrestore(&busiest->lock, flags);
if (active_balance) if (active_balance)
wake_up_process(busiest->migration_thread); wake_up_process(busiest->migration_thread);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment