Commit 50db04dd authored by Vegard Nossum, committed by Thomas Gleixner

debugobjects: fix lockdep warning

Daniel J Blueman reported:
| =======================================================
| [ INFO: possible circular locking dependency detected ]
| 2.6.26-rc5-201c #1
| -------------------------------------------------------
| nscd/3669 is trying to acquire lock:
|  (&n->list_lock){.+..}, at: [<ffffffff802bab03>] deactivate_slab+0x173/0x1e0
|
| but task is already holding lock:
|  (&obj_hash[i].lock){++..}, at: [<ffffffff803fa56f>]
| __debug_object_init+0x2f/0x350
|
| which lock already depends on the new lock.

There are two locks involved here; the first is a SLUB-local lock, and
the second is a debugobjects-local lock. They are basically taken in two
different orders:

1. SLUB { debugobjects { ... } }
2. debugobjects { SLUB { ... } }

This patch changes pattern #2 by trying to fill the memory pool (i.e. the
call into SLUB/kmalloc()) outside the debugobjects lock, so the two
patterns now look like this:

1. SLUB { debugobjects { ... } }
2. SLUB { } debugobjects { ... }

[ daniel.blueman@gmail.com: pool_lock needs to be taken irq safe in fill_pool ]
Reported-by: Daniel J Blueman <daniel.blueman@gmail.com>
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
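
For illustration, the reordered call path can be sketched roughly as below. This is a condensed sketch assembled from the hunks that follow, not the complete code: the full functions carry additional early-exit checks and object-state handling that are omitted here, and obj_cache / kmem_cache_zalloc() come from the surrounding file rather than from this patch.

/*
 * Condensed sketch: after this patch, the allocation into SLUB happens
 * before any debugobjects lock is taken.
 */
static int fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new;
	unsigned long flags;

	while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {
		new = kmem_cache_zalloc(obj_cache, gfp);	/* SLUB { } */
		if (!new)
			break;

		/* pool_lock is now irq safe, see the bracketed note above */
		spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		obj_pool_free++;
		spin_unlock_irqrestore(&pool_lock, flags);
	}
	return obj_pool_free;
}

static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	struct debug_bucket *db;
	unsigned long flags;

	fill_pool();			/* refill before taking obj_hash[i].lock */

	db = get_bucket((unsigned long) addr);
	spin_lock_irqsave(&db->lock, flags);	/* debugobjects { ... } */
	/* lookup_object()/alloc_object() and state handling as before */
	spin_unlock_irqrestore(&db->lock, flags);
}

In other words, no debugobjects lock is held around the kmem_cache_zalloc() call anymore: pool_lock is taken only after the allocation, and obj_hash[i].lock is taken only once the pool has already been refilled.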
parent 952f4a0a
@@ -68,6 +68,7 @@ static int fill_pool(void)
 {
 	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
 	struct debug_obj *new;
+	unsigned long flags;
 
 	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
 		return obj_pool_free;
@@ -81,10 +82,10 @@ static int fill_pool(void)
 		if (!new)
 			return obj_pool_free;
 
-		spin_lock(&pool_lock);
+		spin_lock_irqsave(&pool_lock, flags);
 		hlist_add_head(&new->node, &obj_pool);
 		obj_pool_free++;
-		spin_unlock(&pool_lock);
+		spin_unlock_irqrestore(&pool_lock, flags);
 	}
 	return obj_pool_free;
 }
@@ -110,16 +111,13 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
 }
 
 /*
- * Allocate a new object. If the pool is empty and no refill possible,
- * switch off the debugger.
+ * Allocate a new object. If the pool is empty, switch off the debugger.
  */
 static struct debug_obj *
 alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 {
 	struct debug_obj *obj = NULL;
-	int retry = 0;
 
-repeat:
 	spin_lock(&pool_lock);
 	if (obj_pool.first) {
 		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
@@ -141,9 +139,6 @@ repeat:
 	}
 	spin_unlock(&pool_lock);
 
-	if (fill_pool() && !obj && !retry++)
-		goto repeat;
-
 	return obj;
 }
 
@@ -261,6 +256,8 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
 	struct debug_obj *obj;
 	unsigned long flags;
 
+	fill_pool();
+
 	db = get_bucket((unsigned long) addr);
 
 	spin_lock_irqsave(&db->lock, flags);