Commit 056c6241 authored by Ravikiran G Thirumalai, committed by Linus Torvalds

[PATCH] slab: fix lockdep warnings

Place the alien array cache locks of on-slab malloc slab caches in a
separate lockdep class. This avoids false positives from lockdep.

[akpm@osdl.org: build fix]
Signed-off-by: Ravikiran Thirumalai <kiran@scalex86.org>
Signed-off-by: Shai Fultheim <shai@scalex86.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Arjan van de Ven <arjan@linux.intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Christoph Lameter <clameter@engr.sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 2ed3a4ef
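Background on the annotation used below: lockdep groups locks by the source
location that initializes them, so all on-slab l3 list locks and alien array
cache locks land in one class, and their intentional nesting looks like a
self-deadlock ("class A -> class A"). The following is a minimal standalone
sketch, not part of the patch, of the general lockdep_set_class() pattern the
commit applies; the helper and key names here are hypothetical.

#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* Hypothetical key; each distinct key address defines a lock class. */
static struct lock_class_key inner_lock_key;

static void split_lock_classes(spinlock_t *outer, spinlock_t *inner)
{
	/*
	 * Both locks were initialized by the same code, so they share
	 * one class by default.  Re-keying the inner lock turns the
	 * nested acquisition below into "class A -> class B", which
	 * lockdep no longer reports as recursive locking.
	 */
	lockdep_set_class(inner, &inner_lock_key);

	spin_lock(outer);
	spin_lock(inner);	/* intentional nesting, no false positive */
	spin_unlock(inner);
	spin_unlock(outer);
}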
@@ -674,6 +674,8 @@ static struct kmem_cache cache_cache = {
 #endif
 };
 
+#define BAD_ALIEN_MAGIC 0x01020304ul
+
 #ifdef CONFIG_LOCKDEP
 
 /*
@@ -682,29 +684,53 @@ static struct kmem_cache cache_cache = {
  * The locking for this is tricky in that it nests within the locks
  * of all other slabs in a few places; to deal with this special
  * locking we put on-slab caches into a separate lock-class.
+ *
+ * We set lock class for alien array caches which are up during init.
+ * The lock annotation will be lost if all cpus of a node goes down and
+ * then comes back up during hotplug
  */
-static struct lock_class_key on_slab_key;
-
-static inline void init_lock_keys(struct cache_sizes *s)
+static struct lock_class_key on_slab_l3_key;
+static struct lock_class_key on_slab_alc_key;
+
+static inline void init_lock_keys(void)
 {
 	int q;
-
-	for (q = 0; q < MAX_NUMNODES; q++) {
-		if (!s->cs_cachep->nodelists[q] || OFF_SLAB(s->cs_cachep))
-			continue;
-		lockdep_set_class(&s->cs_cachep->nodelists[q]->list_lock,
-				  &on_slab_key);
+	struct cache_sizes *s = malloc_sizes;
+
+	while (s->cs_size != ULONG_MAX) {
+		for_each_node(q) {
+			struct array_cache **alc;
+			int r;
+			struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
+			if (!l3 || OFF_SLAB(s->cs_cachep))
+				continue;
+			lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
+			alc = l3->alien;
+			/*
+			 * FIXME: This check for BAD_ALIEN_MAGIC
+			 * should go away when common slab code is taught to
+			 * work even without alien caches.
+			 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
+			 * for alloc_alien_cache,
+			 */
+			if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
+				continue;
+			for_each_node(r) {
+				if (alc[r])
+					lockdep_set_class(&alc[r]->lock,
+							  &on_slab_alc_key);
+			}
+		}
+		s++;
 	}
 }
 #else
-static inline void init_lock_keys(struct cache_sizes *s)
+static inline void init_lock_keys(void)
 {
 }
 #endif
 
 /* Guard access to the cache-chain. */
 static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
@@ -1091,7 +1117,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 
 static inline struct array_cache **alloc_alien_cache(int node, int limit)
 {
-	return (struct array_cache **) 0x01020304ul;
+	return (struct array_cache **)BAD_ALIEN_MAGIC;
 }
 
 static inline void free_alien_cache(struct array_cache **ac_ptr)
@@ -1421,7 +1447,6 @@ void __init kmem_cache_init(void)
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
 					NULL, NULL);
 		}
-		init_lock_keys(sizes);
 
 		sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
 					sizes->cs_size,
@@ -1495,6 +1520,10 @@ void __init kmem_cache_init(void)
 		mutex_unlock(&cache_chain_mutex);
 	}
 
+	/* Annotate slab for lockdep -- annotate the malloc caches */
+	init_lock_keys();
+
 	/* Done! */
 	g_cpucache_up = FULL;
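A note on BAD_ALIEN_MAGIC: with CONFIG_NUMA disabled, alloc_alien_cache() is a
stub that returns a poison pointer rather than NULL or a real array, and the
patch names that constant so init_lock_keys() can filter it out. Below is a
minimal sketch of the resulting check; the constant and struct name come from
the patch, while the helper itself is hypothetical.

struct array_cache;			/* opaque here; defined in mm/slab.c */

#define BAD_ALIEN_MAGIC 0x01020304ul	/* sentinel returned by the !NUMA stub */

/* Hypothetical helper: true only for a real alien-cache array. */
static inline int alien_caches_valid(struct array_cache **alc)
{
	return alc && (unsigned long)alc != BAD_ALIEN_MAGIC;
}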