Commit 07a7ee82 authored by Nick Piggin, committed by Pekka Enberg

slqb: don't enable interrupts during early boot

Sebastian discovered that SLQB can enable interrupts early in boot due
to taking the rwsem. It should not be contended here, so XADD algorithm
implementations should not be affected, however spinlock algorithm
implementations do a spin_lock_irq in down_write fastpath and would be
affected.

Move the lock out of early init path, comment why. This also covers
a very small (and basically insignificant) race where kmem_cache_create_ok
checks succeed but kmem_cache_create still creates a duplicate named
cache because the lock was dropped and retaken.
Reported-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
Tested-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
parent fd8e43fd
...@@ -2234,6 +2234,10 @@ static void kmem_cache_dyn_array_free(void *array) ...@@ -2234,6 +2234,10 @@ static void kmem_cache_dyn_array_free(void *array)
} }
#endif #endif
/*
* Except in early boot, this should be called with slqb_lock held for write
* to lock out hotplug, and protect list modifications.
*/
static int kmem_cache_open(struct kmem_cache *s, static int kmem_cache_open(struct kmem_cache *s,
const char *name, size_t size, size_t align, const char *name, size_t size, size_t align,
unsigned long flags, void (*ctor)(void *), int alloc) unsigned long flags, void (*ctor)(void *), int alloc)
...@@ -2259,16 +2263,10 @@ static int kmem_cache_open(struct kmem_cache *s, ...@@ -2259,16 +2263,10 @@ static int kmem_cache_open(struct kmem_cache *s,
s->colour_range = 0; s->colour_range = 0;
} }
/*
* Protect all alloc_kmem_cache_cpus/nodes allocations with slqb_lock
* to lock out hotplug, just in case (probably not strictly needed
* here).
*/
down_write(&slqb_lock);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
s->cpu_slab = kmem_cache_dyn_array_alloc(nr_cpu_ids); s->cpu_slab = kmem_cache_dyn_array_alloc(nr_cpu_ids);
if (!s->cpu_slab) if (!s->cpu_slab)
goto error_lock; goto error;
# ifdef CONFIG_NUMA # ifdef CONFIG_NUMA
s->node_slab = kmem_cache_dyn_array_alloc(nr_node_ids); s->node_slab = kmem_cache_dyn_array_alloc(nr_node_ids);
if (!s->node_slab) if (!s->node_slab)
...@@ -2286,7 +2284,6 @@ static int kmem_cache_open(struct kmem_cache *s, ...@@ -2286,7 +2284,6 @@ static int kmem_cache_open(struct kmem_cache *s,
sysfs_slab_add(s); sysfs_slab_add(s);
list_add(&s->list, &slab_caches); list_add(&s->list, &slab_caches);
up_write(&slqb_lock);
return 1; return 1;
...@@ -2299,9 +2296,7 @@ error_cpu_array: ...@@ -2299,9 +2296,7 @@ error_cpu_array:
#endif #endif
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
kmem_cache_dyn_array_free(s->cpu_slab); kmem_cache_dyn_array_free(s->cpu_slab);
error_lock:
#endif #endif
up_write(&slqb_lock);
error: error:
if (flags & SLAB_PANIC) if (flags & SLAB_PANIC)
panic("%s: failed to create slab `%s'\n", __func__, name); panic("%s: failed to create slab `%s'\n", __func__, name);
...@@ -2870,6 +2865,12 @@ void __init kmem_cache_init(void) ...@@ -2870,6 +2865,12 @@ void __init kmem_cache_init(void)
* All the ifdefs are rather ugly here, but it's just the setup code, * All the ifdefs are rather ugly here, but it's just the setup code,
* so it doesn't have to be too readable :) * so it doesn't have to be too readable :)
*/ */
/*
* No need to take slqb_lock here: there should be no concurrency
* anyway, and spin_unlock_irq in rwsem code could enable interrupts
* too early.
*/
kmem_cache_open(&kmem_cache_cache, "kmem_cache", kmem_cache_open(&kmem_cache_cache, "kmem_cache",
sizeof(struct kmem_cache), 0, flags, NULL, 0); sizeof(struct kmem_cache), 0, flags, NULL, 0);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
...@@ -3011,8 +3012,6 @@ static int kmem_cache_create_ok(const char *name, size_t size, ...@@ -3011,8 +3012,6 @@ static int kmem_cache_create_ok(const char *name, size_t size,
return 0; return 0;
} }
down_read(&slqb_lock);
list_for_each_entry(tmp, &slab_caches, list) { list_for_each_entry(tmp, &slab_caches, list) {
char x; char x;
int res; int res;
...@@ -3034,14 +3033,11 @@ static int kmem_cache_create_ok(const char *name, size_t size, ...@@ -3034,14 +3033,11 @@ static int kmem_cache_create_ok(const char *name, size_t size,
printk(KERN_ERR printk(KERN_ERR
"SLAB: duplicate cache %s\n", name); "SLAB: duplicate cache %s\n", name);
dump_stack(); dump_stack();
up_read(&slqb_lock);
return 0; return 0;
} }
} }
up_read(&slqb_lock);
WARN_ON(strchr(name, ' ')); /* It confuses parsers */ WARN_ON(strchr(name, ' ')); /* It confuses parsers */
if (flags & SLAB_DESTROY_BY_RCU) if (flags & SLAB_DESTROY_BY_RCU)
WARN_ON(flags & SLAB_POISON); WARN_ON(flags & SLAB_POISON);
...@@ -3054,6 +3050,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, ...@@ -3054,6 +3050,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
{ {
struct kmem_cache *s; struct kmem_cache *s;
down_write(&slqb_lock);
if (!kmem_cache_create_ok(name, size, align, flags)) if (!kmem_cache_create_ok(name, size, align, flags))
goto err; goto err;
...@@ -3061,12 +3058,15 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, ...@@ -3061,12 +3058,15 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
if (!s) if (!s)
goto err; goto err;
if (kmem_cache_open(s, name, size, align, flags, ctor, 1)) if (kmem_cache_open(s, name, size, align, flags, ctor, 1)) {
up_write(&slqb_lock);
return s; return s;
}
kmem_cache_free(&kmem_cache_cache, s); kmem_cache_free(&kmem_cache_cache, s);
err: err:
up_write(&slqb_lock);
if (flags & SLAB_PANIC) if (flags & SLAB_PANIC)
panic("%s: failed to create slab `%s'\n", __func__, name); panic("%s: failed to create slab `%s'\n", __func__, name);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment