Commit a492b25d authored by Pekka Enberg

SLQB: Coding style cleanups

This patch cleans up SLQB to make the code checkpatch clean.
Acked-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
parent 474d29eb
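
The hunks below address the usual checkpatch complaints: explicit "= 0" initializers on static storage, redundant parentheses around return values, over-long lines, and function names hard-coded into format strings instead of __func__. As a minimal, standalone C sketch of the preferred forms (the example_* identifiers are hypothetical and not taken from the patch):

#include <stdio.h>

/* Static storage is zero-initialized by the language, so checkpatch
 * flags an explicit "= 0" as redundant (hypothetical variable). */
static int example_min_order;

static unsigned long example_flags(unsigned long flags)
{
        /* "return" is not a function: no parentheses around the value. */
        return flags & 0xffUL;
}

static void example_report(const char *name)
{
        /* __func__ keeps the message correct if the function is renamed. */
        printf("%s: failed to create slab `%s'\n", __func__, name);
}

int main(void)
{
        example_report("demo-cache");
        return (int)example_flags((unsigned long)example_min_order);
}

The same warnings can be reproduced with the kernel tree's scripts/checkpatch.pl, run over a patch or over a source file with the -f option.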
@@ -59,7 +59,7 @@ static inline void struct_slqb_page_wrong_size(void)
 /*
  * slqb_min_order: minimum allocation order for slabs
  */
-static int slqb_min_order = 0;
+static int slqb_min_order;
 
 /*
  * slqb_min_objects: minimum number of objects per slab. Increasing this
@@ -193,12 +193,12 @@ static inline void __free_slqb_pages(struct slqb_page *page, unsigned int order)
 #ifdef CONFIG_SLQB_DEBUG
 static inline int slab_debug(struct kmem_cache *s)
 {
-        return (s->flags &
+        return s->flags &
                 (SLAB_DEBUG_FREE |
                  SLAB_RED_ZONE |
                  SLAB_POISON |
                  SLAB_STORE_USER |
-                 SLAB_TRACE));
+                 SLAB_TRACE);
 }
 static inline int slab_poison(struct kmem_cache *s)
 {
@@ -945,7 +945,7 @@ static inline unsigned long kmem_cache_flags(unsigned long objsize,
         return flags;
 }
 
-static const int slqb_debug = 0;
+static const int slqb_debug;
 #endif
 
 /*
@@ -1954,8 +1954,11 @@ static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
                                         int cpu)
 {
         struct kmem_cache_cpu *c;
+        int node;
+
+        node = cpu_to_node(cpu);
 
-        c = kmem_cache_alloc_node(&kmem_cpu_cache, GFP_KERNEL, cpu_to_node(cpu));
+        c = kmem_cache_alloc_node(&kmem_cpu_cache, GFP_KERNEL, node);
         if (!c)
                 return NULL;
@@ -2292,7 +2295,7 @@ error_lock:
         up_write(&slqb_lock);
 error:
         if (flags & SLAB_PANIC)
-                panic("kmem_cache_create(): failed to create slab `%s'\n", name);
+                panic("%s: failed to create slab `%s'\n", __func__, name);
         return 0;
 }
@@ -3006,7 +3009,7 @@ static int kmem_cache_create_ok(const char *name, size_t size,
                 if (!strcmp(tmp->name, name)) {
                         printk(KERN_ERR
-                                "kmem_cache_create(): duplicate cache %s\n", name);
+                                "SLAB: duplicate cache %s\n", name);
                         dump_stack();
                         up_read(&slqb_lock);
@@ -3042,7 +3045,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 err:
         if (flags & SLAB_PANIC)
-                panic("kmem_cache_create(): failed to create slab `%s'\n", name);
+                panic("%s: failed to create slab `%s'\n", __func__, name);
         return NULL;
 }
@@ -3676,7 +3679,7 @@ static struct kset_uevent_ops slab_uevent_ops = {
 static struct kset *slab_kset;
-static int sysfs_available __read_mostly = 0;
+static int sysfs_available __read_mostly;
 
 static int sysfs_slab_add(struct kmem_cache *s)
 {
...