Commit 5e710e37 authored by Peter Zijlstra, committed by Ingo Molnar

lockdep: change scheduler annotation

While thinking about David's graph-walk lockdep patch, it _finally_
dawned on me that there is no reason to have a lock class per CPU ...

Sorry for being dense :-/

The below changes the annotation from a lock class per cpu, to a single
nested lock, as the scheduler never holds more than two rq locks at a time
anyway.

If there were code that required holding all rq locks at once, this would
not work and the original annotation would be the only option; but since
that is not the case, this is a much lighter annotation.

Compiles and boots on a 2-way x86_64.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: David Miller <davem@davemloft.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent e0fdace1
@@ -600,7 +600,6 @@ struct rq {
 	/* BKL stats */
 	unsigned int bkl_count;
 #endif
-	struct lock_class_key rq_lock_key;
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
@@ -2759,10 +2758,10 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
 	} else {
 		if (rq1 < rq2) {
 			spin_lock(&rq1->lock);
-			spin_lock(&rq2->lock);
+			spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
 		} else {
 			spin_lock(&rq2->lock);
-			spin_lock(&rq1->lock);
+			spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
 		}
 	}
 	update_rq_clock(rq1);
@@ -2805,10 +2804,10 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 		if (busiest < this_rq) {
 			spin_unlock(&this_rq->lock);
 			spin_lock(&busiest->lock);
-			spin_lock(&this_rq->lock);
+			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
 			ret = 1;
 		} else
-			spin_lock(&busiest->lock);
+			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
 	}
 	return ret;
 }
@@ -7998,7 +7997,6 @@ void __init sched_init(void)
 		rq = cpu_rq(i);
 		spin_lock_init(&rq->lock);
-		lockdep_set_class(&rq->lock, &rq->rq_lock_key);
 		rq->nr_running = 0;
 		init_cfs_rq(&rq->cfs, rq);
 		init_rt_rq(&rq->rt, rq);
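
For contrast, the removed lines implemented the heavier per-instance scheme:
each runqueue embedded its own struct lock_class_key and registered it with
lockdep_set_class(), so every rq->lock became a distinct lockdep class and
cross-CPU nesting never looked like same-class recursion. A hedged sketch of
that removed pattern (simplified types; the init function name is illustrative):

#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* Simplified sketch of the removed per-CPU lock-class annotation. */
struct rq {
	spinlock_t lock;
	struct lock_class_key rq_lock_key;	/* one key per instance */
};

static void init_rq_lock_sketch(struct rq *rq)
{
	spin_lock_init(&rq->lock);
	/*
	 * Give this rq->lock its own lockdep class.  With one class
	 * per CPU, lockdep never sees same-class nesting, but it must
	 * track a class for every runqueue -- the bookkeeping cost
	 * this patch removes.
	 */
	lockdep_set_class(&rq->lock, &rq->rq_lock_key);
}

Address-ordered acquisition plus SINGLE_DEPTH_NESTING gives the same deadlock
coverage for pairwise locking with a single shared class.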