Commit a737b3e2 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] slab cleanup

slab.c has become a bit revolting again.  Try to repair it.

- Coding style fixes

- Don't do assignments-in-if-statements.

- Don't typecast assignments to/from void*

Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent f30cf7d1
@@ -50,7 +50,7 @@
* The head array is strictly LIFO and should improve the cache hit rates.
* On SMP, it additionally reduces the spinlock operations.
*
* The c_cpuarray may not be read with enabled local interrupts -
* it's changed with a smp_call_function().
*
* SMP synchronization:
@@ -266,16 +266,17 @@ struct array_cache {
unsigned int batchcount;
unsigned int touched;
spinlock_t lock;
void *entry[0]; /*
* Must have this definition in here for the proper
* alignment of array_cache. Also simplifies accessing
* the entries.
* [0] is for gcc 2.95. It should really be [].
*/
};
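The zero-length entry[] above is the classic trailing-array trick: one allocation carries both the bookkeeping fields and the pointer storage, and entry stays correctly aligned inside the struct. A minimal userspace sketch of the same layout, with illustrative names (my_array_cache and alloc_cache are not the kernel's):

#include <stdlib.h>

struct my_array_cache {
	unsigned int avail;	/* entries currently stored */
	unsigned int limit;	/* capacity of entry[] */
	void *entry[0];		/* [0] for old gcc; C99 would write [] */
};

static struct my_array_cache *alloc_cache(unsigned int limit)
{
	/* one allocation: header plus 'limit' object pointers */
	struct my_array_cache *ac =
		malloc(sizeof(*ac) + limit * sizeof(void *));

	if (ac) {
		ac->avail = 0;
		ac->limit = limit;
	}
	return ac;
}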
/* bootstrap: The caches do not work without cpuarrays anymore,
* but the cpuarrays are allocated from the generic caches...
/*
* bootstrap: The caches do not work without cpuarrays anymore, but the
* cpuarrays are allocated from the generic caches...
*/
#define BOOT_CPUCACHE_ENTRIES 1
struct arraycache_init {
@@ -310,10 +311,8 @@ struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
#define SIZE_L3 (1 + MAX_NUMNODES)
/*
* This function must be completely optimized away if
* a constant is passed to it. Mostly the same as
* what is in linux/slab.h except it returns an
* index.
* This function must be completely optimized away if a constant is passed to
* it. Mostly the same as what is in linux/slab.h except it returns an index.
*/
static __always_inline int index_of(const size_t size)
{
@@ -351,14 +350,14 @@ static void kmem_list3_init(struct kmem_list3 *parent)
parent->free_touched = 0;
}
#define MAKE_LIST(cachep, listp, slab, nodeid) \
do { \
INIT_LIST_HEAD(listp); \
list_splice(&(cachep->nodelists[nodeid]->slab), listp); \
} while (0)
#define MAKE_ALL_LISTS(cachep, ptr, nodeid) \
do { \
MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \
MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \
@@ -379,8 +378,8 @@ struct kmem_cache {
unsigned int buffer_size;
/* 2) touched by every alloc & free from the backend */
struct kmem_list3 *nodelists[MAX_NUMNODES];
unsigned int flags; /* constant flags */
unsigned int num; /* # of objs per slab */
spinlock_t spinlock;
/* 3) cache_grow/shrink */
@@ -390,11 +389,11 @@ struct kmem_cache {
/* force GFP flags, e.g. GFP_DMA */
gfp_t gfpflags;
size_t colour; /* cache colouring range */
unsigned int colour_off; /* colour offset */
struct kmem_cache *slabp_cache;
unsigned int slab_size;
unsigned int dflags; /* dynamic flags */
/* constructor func */
void (*ctor) (void *, struct kmem_cache *, unsigned long);
@@ -438,8 +437,9 @@ struct kmem_cache {
#define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB)
#define BATCHREFILL_LIMIT 16
/* Optimization question: fewer reaps means less
* probability for unnecessary cpucache drain/refill cycles.
/*
* Optimization question: fewer reaps means less probability for unnecessary
* cpucache drain/refill cycles.
*
* OTOH the cpuarrays can contain lots of objects,
* which could lock up otherwise freeable slabs.
@@ -453,17 +453,19 @@ struct kmem_cache {
#define STATS_INC_ALLOCED(x) ((x)->num_allocations++)
#define STATS_INC_GROWN(x) ((x)->grown++)
#define STATS_INC_REAPED(x) ((x)->reaped++)
#define STATS_SET_HIGH(x) do { if ((x)->num_active > (x)->high_mark) \
(x)->high_mark = (x)->num_active; \
} while (0)
#define STATS_SET_HIGH(x) \
do { \
if ((x)->num_active > (x)->high_mark) \
(x)->high_mark = (x)->num_active; \
} while (0)
#define STATS_INC_ERR(x) ((x)->errors++)
#define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++)
#define STATS_INC_NODEFREES(x) ((x)->node_frees++)
#define STATS_SET_FREEABLE(x, i) \
do { if ((x)->max_freeable < i) \
(x)->max_freeable = i; \
} while (0)
#define STATS_SET_FREEABLE(x, i) \
do { \
if ((x)->max_freeable < i) \
(x)->max_freeable = i; \
} while (0)
#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
@@ -478,9 +480,7 @@ struct kmem_cache {
#define STATS_INC_ERR(x) do { } while (0)
#define STATS_INC_NODEALLOCS(x) do { } while (0)
#define STATS_INC_NODEFREES(x) do { } while (0)
#define STATS_SET_FREEABLE(x, i) \
do { } while (0)
#define STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x) do { } while (0)
#define STATS_INC_ALLOCMISS(x) do { } while (0)
#define STATS_INC_FREEHIT(x) do { } while (0)
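Both variants of these macros rely on the do { } while (0) idiom: the statistics build wraps a multi-statement body so it behaves as a single statement under an unbraced if/else, and the stubbed build keeps the call-site semicolon parsing while generating no code. A hedged standalone sketch (struct stats and its fields are illustrative):

#include <stdio.h>

#define STATS 1

struct stats { int num_active, high_mark; };

#if STATS
#define STATS_SET_HIGH(x)					\
	do {							\
		if ((x)->num_active > (x)->high_mark)		\
			(x)->high_mark = (x)->num_active;	\
	} while (0)
#else
#define STATS_SET_HIGH(x)	do { } while (0)	/* compiles away */
#endif

int main(void)
{
	struct stats s = { .num_active = 7, .high_mark = 3 };

	/* safe even without braces: the macro is one statement */
	if (s.num_active)
		STATS_SET_HIGH(&s);
	else
		printf("idle\n");
	printf("high_mark=%d\n", s.high_mark);
	return 0;
}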
@@ -488,7 +488,8 @@ struct kmem_cache {
#endif
#if DEBUG
/* Magic nums for obj red zoning.
/*
* Magic nums for obj red zoning.
* Placed in the first word before and the first word after an obj.
*/
#define RED_INACTIVE 0x5A2CF071UL /* when obj is inactive */
@@ -499,7 +500,8 @@ struct kmem_cache {
#define POISON_FREE 0x6b /* for use-after-free poisoning */
#define POISON_END 0xa5 /* end-byte of poisoning */
/* memory layout of objects:
/*
* memory layout of objects:
* 0 : objp
* 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
* the end of an object is aligned with the end of the real
@@ -508,7 +510,8 @@ struct kmem_cache {
* redzone word.
* cachep->obj_offset: The real object.
* cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
* cachep->buffer_size - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
* cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
* [BYTES_PER_WORD long]
*/
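A hedged sketch of the offsets that layout implies, assuming a word is sizeof(unsigned long); redzone1, redzone2 and last_user are illustrative stand-ins for the dbg_* helpers defined next:

#include <stddef.h>

#define MY_BYTES_PER_WORD	sizeof(unsigned long)

/* word immediately before the real object */
static unsigned long *redzone1(char *objp, size_t obj_offset)
{
	return (unsigned long *)(objp + obj_offset - MY_BYTES_PER_WORD);
}

/* second-to-last word of the buffer, just past the object */
static unsigned long *redzone2(char *objp, size_t buffer_size)
{
	return (unsigned long *)(objp + buffer_size - 2 * MY_BYTES_PER_WORD);
}

/* last word: the caller address stored by SLAB_STORE_USER */
static void **last_user(char *objp, size_t buffer_size)
{
	return (void **)(objp + buffer_size - MY_BYTES_PER_WORD);
}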
static int obj_offset(struct kmem_cache *cachep)
{
@@ -552,8 +555,8 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
#endif
/*
* Maximum size of an obj (in 2^order pages)
* and absolute limit for the gfp order.
* Maximum size of an obj (in 2^order pages) and absolute limit for the gfp
* order.
*/
#if defined(CONFIG_LARGE_ALLOCS)
#define MAX_OBJ_ORDER 13 /* up to 32Mb */
@@ -573,9 +576,10 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
#define BREAK_GFP_ORDER_LO 0
static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;
/* Functions for storing/retrieving the cachep and or slab from the
* global 'mem_map'. These are used to find the slab an obj belongs to.
* With kfree(), these are used to find the cache which an obj belongs to.
/*
* Functions for storing/retrieving the cachep and or slab from the page
* allocator. These are used to find the slab an obj belongs to. With kfree(),
* these are used to find the cache which an obj belongs to.
*/
static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
{
@@ -621,7 +625,9 @@ static inline unsigned int obj_to_index(struct kmem_cache *cache,
return (unsigned)(obj - slab->s_mem) / cache->buffer_size;
}
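obj_to_index above is plain pointer arithmetic: objects are laid out back to back from s_mem, one every buffer_size bytes. A standalone sketch of the mapping and its inverse (illustrative names, not the slab.c helpers themselves):

#include <stddef.h>

static unsigned int my_obj_to_index(char *s_mem, size_t buffer_size,
				    char *obj)
{
	return (unsigned int)(obj - s_mem) / buffer_size;
}

static char *my_index_to_obj(char *s_mem, size_t buffer_size,
			     unsigned int idx)
{
	return s_mem + idx * buffer_size;
}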
/* These are the default caches for kmalloc. Custom caches can have other sizes. */
/*
* These are the default caches for kmalloc. Custom caches can have other sizes.
*/
struct cache_sizes malloc_sizes[] = {
#define CACHE(x) { .cs_size = (x) },
#include <linux/kmalloc_sizes.h>
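The CACHE(x) pattern here is the classic x-macro: kmalloc_sizes.h is just a list of CACHE(n) lines, and each including site redefines CACHE() to expand that list into a different table (sizes here, cache name strings elsewhere). A self-contained sketch with an inline list standing in for the shared header:

/* stand-in for the contents of <linux/kmalloc_sizes.h> */
#define MY_SIZE_LIST	\
	CACHE(32)	\
	CACHE(64)	\
	CACHE(128)

struct my_cache_size { unsigned long cs_size; };

#define CACHE(x) { .cs_size = (x) },
static struct my_cache_size my_malloc_sizes[] = {
	MY_SIZE_LIST
	{ 0 }		/* terminator */
};
#undef CACHE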
@@ -667,8 +673,8 @@ static DEFINE_MUTEX(cache_chain_mutex);
static struct list_head cache_chain;
/*
* vm_enough_memory() looks at this to determine how many
* slab-allocated pages are possibly freeable under pressure
* vm_enough_memory() looks at this to determine how many slab-allocated pages
* are possibly freeable under pressure
*
* SLAB_RECLAIM_ACCOUNT turns this on per-slab
*/
@@ -687,7 +693,8 @@ static enum {
static DEFINE_PER_CPU(struct work_struct, reap_work);
static void free_block(struct kmem_cache *cachep, void **objpp, int len, int node);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
int node);
static void enable_cpucache(struct kmem_cache *cachep);
static void cache_reap(void *unused);
static int __node_shrink(struct kmem_cache *cachep, int node);
@@ -697,7 +704,8 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
return cachep->array[smp_processor_id()];
}
static inline struct kmem_cache *__find_general_cachep(size_t size, gfp_t gfpflags)
static inline struct kmem_cache *__find_general_cachep(size_t size,
gfp_t gfpflags)
{
struct cache_sizes *csizep = malloc_sizes;
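__find_general_cachep simply walks this ascending size table until it hits the first cache that is large enough; the table ends in a huge sentinel size, so the loop always terminates. A hedged sketch with illustrative names:

#include <stddef.h>

struct my_cache_size {
	unsigned long cs_size;
	void *cs_cachep;	/* kmalloc cache of that size */
};

static void *my_find_general_cachep(const struct my_cache_size *csizep,
				    size_t size)
{
	/* sorted ascending, terminated by a ULONG_MAX-style sentinel */
	while (size > csizep->cs_size)
		csizep++;
	return csizep->cs_cachep;
}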
@@ -732,8 +740,9 @@ static size_t slab_mgmt_size(size_t nr_objs, size_t align)
return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
}
/* Calculate the number of objects and left-over bytes for a given
buffer size. */
/*
* Calculate the number of objects and left-over bytes for a given buffer size.
*/
static void cache_estimate(unsigned long gfporder, size_t buffer_size,
size_t align, int flags, size_t *left_over,
unsigned int *num)
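A hedged model of the on-slab case this function handles: every object costs buffer_size bytes plus one bufctl entry of management data after the slab header; alignment details are deliberately omitted:

#include <stddef.h>

typedef unsigned int my_bufctl_t;

static void my_cache_estimate(unsigned long slab_bytes, size_t buffer_size,
			      size_t mgmt_hdr, size_t *left_over,
			      unsigned int *num)
{
	unsigned int n = 0;

	/* fit objects until header + bufctls + objects overflow the slab */
	while (mgmt_hdr + (n + 1) * (sizeof(my_bufctl_t) + buffer_size)
			<= slab_bytes)
		n++;
	*num = n;
	*left_over = slab_bytes -
		(mgmt_hdr + n * (sizeof(my_bufctl_t) + buffer_size));
}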
@@ -794,7 +803,8 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
#define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)
static void __slab_error(const char *function, struct kmem_cache *cachep, char *msg)
static void __slab_error(const char *function, struct kmem_cache *cachep,
char *msg)
{
printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
function, cachep->name, msg);
@@ -918,10 +928,8 @@ static void free_alien_cache(struct array_cache **ac_ptr)
if (!ac_ptr)
return;
for_each_node(i)
kfree(ac_ptr[i]);
kfree(ac_ptr);
}
@@ -955,7 +963,8 @@ static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
}
}
static void drain_alien_cache(struct kmem_cache *cachep, struct array_cache **alien)
static void drain_alien_cache(struct kmem_cache *cachep,
struct array_cache **alien)
{
int i = 0;
struct array_cache *ac;
@@ -998,20 +1007,22 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
switch (action) {
case CPU_UP_PREPARE:
mutex_lock(&cache_chain_mutex);
/* we need to do this right in the beginning since
/*
* We need to do this right in the beginning since
* alloc_arraycache's are going to use this list.
* kmalloc_node allows us to add the slab to the right
* kmem_list3 and not this cpu's kmem_list3
*/
list_for_each_entry(cachep, &cache_chain, next) {
/* setup the size64 kmemlist for cpu before we can
/*
* Set up the size64 kmemlist for cpu before we can
* begin anything. Make sure some other cpu on this
* node has not already allocated this
*/
if (!cachep->nodelists[node]) {
if (!(l3 = kmalloc_node(memsize,
GFP_KERNEL, node)))
l3 = kmalloc_node(memsize, GFP_KERNEL, node);
if (!l3)
goto bad;
kmem_list3_init(l3);
l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
@@ -1027,13 +1038,15 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
spin_lock_irq(&cachep->nodelists[node]->list_lock);
cachep->nodelists[node]->free_limit =
(1 + nr_cpus_node(node)) *
cachep->batchcount + cachep->num;
spin_unlock_irq(&cachep->nodelists[node]->list_lock);
}
/* Now we can go ahead with allocating the shared array's
& array cache's */
/*
* Now we can go ahead with allocating the shared arrays and
* array caches
*/
list_for_each_entry(cachep, &cache_chain, next) {
struct array_cache *nc;
struct array_cache *shared;
@@ -1053,7 +1066,6 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
if (!alien)
goto bad;
cachep->array[cpu] = nc;
l3 = cachep->nodelists[node];
BUG_ON(!l3);
@@ -1073,7 +1085,6 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
}
#endif
spin_unlock_irq(&l3->list_lock);
kfree(shared);
free_alien_cache(alien);
}
@@ -1095,7 +1106,6 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
/* fall thru */
case CPU_UP_CANCELED:
mutex_lock(&cache_chain_mutex);
list_for_each_entry(cachep, &cache_chain, next) {
struct array_cache *nc;
struct array_cache *shared;
@@ -1162,7 +1172,7 @@ free_array_cache:
#endif
}
return NOTIFY_OK;
bad:
mutex_unlock(&cache_chain_mutex);
return NOTIFY_BAD;
}
@@ -1172,7 +1182,8 @@ static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 };
/*
* swap the static kmem_list3 with kmalloced memory
*/
static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list, int nodeid)
static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
int nodeid)
{
struct kmem_list3 *ptr;
@@ -1187,8 +1198,9 @@ static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list, int no
local_irq_enable();
}
/* Initialisation.
* Called after the gfp() functions have been enabled, and before smp_init().
/*
* Initialisation. Called after the page allocator have been initialised and
* before smp_init().
*/
void __init kmem_cache_init(void)
{
@@ -1213,9 +1225,9 @@ void __init kmem_cache_init(void)
/* Bootstrap is tricky, because several objects are allocated
* from caches that do not exist yet:
* 1) initialize the cache_cache cache: it contains the struct kmem_cache
* structures of all caches, except cache_cache itself: cache_cache
* is statically allocated.
* 1) initialize the cache_cache cache: it contains the struct
* kmem_cache structures of all caches, except cache_cache itself:
* cache_cache is statically allocated.
* Initially an __init data area is used for the head array and the
* kmem_list3 structures, it's replaced with a kmalloc allocated
* array at the end of the bootstrap.
@@ -1238,7 +1250,8 @@ void __init kmem_cache_init(void)
cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
cache_cache.nodelists[numa_node_id()] = &initkmem_list3[CACHE_CACHE];
cache_cache.buffer_size = ALIGN(cache_cache.buffer_size, cache_line_size());
cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
cache_line_size());
for (order = 0; order < MAX_ORDER; order++) {
cache_estimate(order, cache_cache.buffer_size,
@@ -1257,24 +1270,26 @@ void __init kmem_cache_init(void)
sizes = malloc_sizes;
names = cache_names;
/* Initialize the caches that provide memory for the array cache
* and the kmem_list3 structures first.
* Without this, further allocations will bug
/*
* Initialize the caches that provide memory for the array cache and the
* kmem_list3 structures first. Without this, further allocations will
* bug.
*/
sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
sizes[INDEX_AC].cs_size,
ARCH_KMALLOC_MINALIGN,
(ARCH_KMALLOC_FLAGS |
SLAB_PANIC), NULL, NULL);
sizes[INDEX_AC].cs_size,
ARCH_KMALLOC_MINALIGN,
ARCH_KMALLOC_FLAGS|SLAB_PANIC,
NULL, NULL);
if (INDEX_AC != INDEX_L3)
if (INDEX_AC != INDEX_L3) {
sizes[INDEX_L3].cs_cachep =
kmem_cache_create(names[INDEX_L3].name,
sizes[INDEX_L3].cs_size,
ARCH_KMALLOC_MINALIGN,
(ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL,
NULL);
kmem_cache_create(names[INDEX_L3].name,
sizes[INDEX_L3].cs_size,
ARCH_KMALLOC_MINALIGN,
ARCH_KMALLOC_FLAGS|SLAB_PANIC,
NULL, NULL);
}
while (sizes->cs_size != ULONG_MAX) {
/*
@@ -1284,13 +1299,13 @@ void __init kmem_cache_init(void)
* Note for systems short on memory removing the alignment will
* allow tighter packing of the smaller caches.
*/
if (!sizes->cs_cachep)
if (!sizes->cs_cachep) {
sizes->cs_cachep = kmem_cache_create(names->name,
sizes->cs_size,
ARCH_KMALLOC_MINALIGN,
(ARCH_KMALLOC_FLAGS
| SLAB_PANIC),
NULL, NULL);
sizes->cs_size,
ARCH_KMALLOC_MINALIGN,
ARCH_KMALLOC_FLAGS|SLAB_PANIC,
NULL, NULL);
}
/* Inc off-slab bufctl limit until the ceiling is hit. */
if (!(OFF_SLAB(sizes->cs_cachep))) {
@@ -1299,13 +1314,11 @@ void __init kmem_cache_init(void)
}
sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
sizes->cs_size,
ARCH_KMALLOC_MINALIGN,
(ARCH_KMALLOC_FLAGS |
SLAB_CACHE_DMA |
SLAB_PANIC), NULL,
NULL);
sizes->cs_size,
ARCH_KMALLOC_MINALIGN,
ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
SLAB_PANIC,
NULL, NULL);
sizes++;
names++;
}
@@ -1357,20 +1370,22 @@ void __init kmem_cache_init(void)
struct kmem_cache *cachep;
mutex_lock(&cache_chain_mutex);
list_for_each_entry(cachep, &cache_chain, next)
enable_cpucache(cachep);
mutex_unlock(&cache_chain_mutex);
}
/* Done! */
g_cpucache_up = FULL;
/* Register a cpu startup notifier callback
* that initializes cpu_cache_get for all new cpus
/*
* Register a cpu startup notifier callback that initializes
* cpu_cache_get for all new cpus
*/
register_cpu_notifier(&cpucache_notifier);
/* The reap timers are started later, with a module init call:
* That part of the kernel is not yet operational.
/*
* The reap timers are started later, with a module init call: That part
* of the kernel is not yet operational.
*/
}
@@ -1378,16 +1393,13 @@ static int __init cpucache_init(void)
{
int cpu;
/*
* Register the timers that return unneeded
* pages to gfp.
/*
* Register the timers that return unneeded pages to the page allocator
*/
for_each_online_cpu(cpu)
start_cpu_timer(cpu);
return 0;
}
__initcall(cpucache_init);
/*
@@ -1501,9 +1513,8 @@ static void dump_line(char *data, int offset, int limit)
{
int i;
printk(KERN_ERR "%03x:", offset);
for (i = 0; i < limit; i++) {
for (i = 0; i < limit; i++)
printk(" %02x", (unsigned char)data[offset + i]);
}
printk("\n");
}
#endif
@@ -1517,15 +1528,15 @@ static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
if (cachep->flags & SLAB_RED_ZONE) {
printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n",
*dbg_redzone1(cachep, objp),
*dbg_redzone2(cachep, objp));
}
if (cachep->flags & SLAB_STORE_USER) {
printk(KERN_ERR "Last user: [<%p>]",
*dbg_userword(cachep, objp));
print_symbol("(%s)",
(unsigned long)*dbg_userword(cachep, objp));
printk("\n");
}
realobj = (char *)objp + obj_offset(cachep);
@@ -1558,8 +1569,8 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
/* Print header */
if (lines == 0) {
printk(KERN_ERR
"Slab corruption: start=%p, len=%d\n",
realobj, size);
"Slab corruption: start=%p, len=%d\n",
realobj, size);
print_objinfo(cachep, objp, 0);
}
/* Hexdump the affected line */
@@ -1614,11 +1625,10 @@ static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
if ((cachep->buffer_size % PAGE_SIZE) == 0
&& OFF_SLAB(cachep))
if (cachep->buffer_size % PAGE_SIZE == 0 &&
OFF_SLAB(cachep))
kernel_map_pages(virt_to_page(objp),
cachep->buffer_size / PAGE_SIZE,
1);
cachep->buffer_size / PAGE_SIZE, 1);
else
check_poison_obj(cachep, objp);
#else
@@ -1650,10 +1660,10 @@ static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
}
#endif
/**
/*
* Destroy all the objs in a slab, and release the mem back to the system.
* Before calling the slab must have been unlinked from the cache.
* The cache-lock is not held/needed.
* Before calling the slab must have been unlinked from the cache. The
* cache-lock is not held/needed.
*/
static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
{
@@ -1674,8 +1684,10 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
}
}
/* For setting up all the kmem_list3s for cache whose buffer_size is same
as size of kmem_list3. */
/*
* For setting up all the kmem_list3s for cache whose buffer_size is same as
* size of kmem_list3.
*/
static void set_up_list3s(struct kmem_cache *cachep, int index)
{
int node;
@@ -1701,13 +1713,13 @@ static void set_up_list3s(struct kmem_cache *cachep, int index)
* high order pages for slabs. When the gfp() functions are more friendly
* towards high-order requests, this should be changed.
*/
static inline size_t calculate_slab_order(struct kmem_cache *cachep,
static size_t calculate_slab_order(struct kmem_cache *cachep,
size_t size, size_t align, unsigned long flags)
{
size_t left_over = 0;
int gfporder;
for (gfporder = 0 ; gfporder <= MAX_GFP_ORDER; gfporder++) {
for (gfporder = 0; gfporder <= MAX_GFP_ORDER; gfporder++) {
unsigned int num;
size_t remainder;
@@ -1742,7 +1754,7 @@ static inline size_t calculate_slab_order(struct kmem_cache *cachep,
/*
* Acceptable internal fragmentation?
*/
if ((left_over * 8) <= (PAGE_SIZE << gfporder))
if (left_over * 8 <= (PAGE_SIZE << gfporder))
break;
}
return left_over;
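The acceptance test above tolerates internal fragmentation of at most one eighth of the slab. A standalone illustration of the loop (management overhead ignored): a 1500-byte object wastes 1096 of 4096 bytes at order 0, but only 692 of 8192 bytes at order 1, so order 1 is chosen:

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096, size = 1500;
	int order;

	for (order = 0; order <= 3; order++) {
		unsigned long slab = page_size << order;
		unsigned long num = slab / size;
		unsigned long left_over = slab - num * size;

		printf("order %d: %lu objs, %lu left over (%s)\n",
		       order, num, left_over,
		       left_over * 8 <= slab ? "acceptable"
					     : "too wasteful");
		if (left_over * 8 <= slab)
			break;
	}
	return 0;
}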
@@ -1817,9 +1829,8 @@ static void setup_cpu_cache(struct kmem_cache *cachep)
* and the @dtor is run before the pages are handed back.
*
* @name must be valid until the cache is destroyed. This implies that
* the module calling this has to destroy the cache before getting
* unloaded.
*
* the module calling this has to destroy the cache before getting unloaded.
*
* The flags are
*
* %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
@@ -1837,7 +1848,8 @@ static void setup_cpu_cache(struct kmem_cache *cachep)
*/
struct kmem_cache *
kmem_cache_create (const char *name, size_t size, size_t align,
unsigned long flags, void (*ctor)(void*, struct kmem_cache *, unsigned long),
unsigned long flags,
void (*ctor)(void*, struct kmem_cache *, unsigned long),
void (*dtor)(void*, struct kmem_cache *, unsigned long))
{
size_t left_over, slab_size, ralign;
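For reference, a hypothetical call against the six-argument signature above (the cache name, my_obj, and my_obj_init are illustrative; with SLAB_PANIC the NULL check would be redundant):

#include <linux/slab.h>

struct my_obj {
	int id;
	char payload[60];
};

static struct kmem_cache *my_obj_cache;

static int __init my_obj_init(void)
{
	my_obj_cache = kmem_cache_create("my_obj", sizeof(struct my_obj),
					 0, SLAB_HWCACHE_ALIGN,
					 NULL, NULL);	/* no ctor, no dtor */
	if (!my_obj_cache)
		return -ENOMEM;
	return 0;
}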
@@ -1847,12 +1859,10 @@ kmem_cache_create (const char *name, size_t size, size_t align,
/*
* Sanity checks... these are all serious usage bugs.
*/
if ((!name) ||
in_interrupt() ||
(size < BYTES_PER_WORD) ||
if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
(size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) {
printk(KERN_ERR "%s: Early error in slab %s\n",
__FUNCTION__, name);
printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
name);
BUG();
}
@@ -1906,8 +1916,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
* above the next power of two: caches with object sizes just above a
* power of two have a significant amount of internal fragmentation.
*/
if ((size < 4096
|| fls(size - 1) == fls(size - 1 + 3 * BYTES_PER_WORD)))
if (size < 4096 || fls(size - 1) == fls(size-1 + 3 * BYTES_PER_WORD))
flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
if (!(flags & SLAB_DESTROY_BY_RCU))
flags |= SLAB_POISON;
@@ -1919,13 +1928,14 @@ kmem_cache_create (const char *name, size_t size, size_t align,
BUG_ON(dtor);
/*
* Always checks flags, a caller might be expecting debug
* support which isn't available.
* Always checks flags, a caller might be expecting debug support which
* isn't available.
*/
if (flags & ~CREATE_MASK)
BUG();
/* Check that size is in terms of words. This is needed to avoid
/*
* Check that size is in terms of words. This is needed to avoid
* unaligned accesses for some archs when redzoning is used, and makes
* sure any on-slab bufctl's are also correctly aligned.
*/
@@ -1934,12 +1944,14 @@ kmem_cache_create (const char *name, size_t size, size_t align,
size &= ~(BYTES_PER_WORD - 1);
}
/* calculate out the final buffer alignment: */
/* calculate the final buffer alignment: */
/* 1) arch recommendation: can be overridden for debug */
if (flags & SLAB_HWCACHE_ALIGN) {
/* Default alignment: as specified by the arch code.
* Except if an object is really small, then squeeze multiple
* objects into one cacheline.
/*
* Default alignment: as specified by the arch code. Except if
* an object is really small, then squeeze multiple objects into
* one cacheline.
*/
ralign = cache_line_size();
while (size <= ralign / 2)
@@ -1959,7 +1971,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
if (ralign > BYTES_PER_WORD)
flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
}
/* 4) Store it. Note that the debug code below can reduce
/*
* 4) Store it. Note that the debug code below can reduce
* the alignment to BYTES_PER_WORD.
*/
align = ralign;
@@ -2058,7 +2071,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
/* cache setup completed, link it into the list */
list_add(&cachep->next, &cache_chain);
oops:
if (!cachep && (flags & SLAB_PANIC))
panic("kmem_cache_create(): failed to create slab `%s'\n",
name);
@@ -2109,7 +2122,6 @@ static void smp_call_function_all_cpus(void (*func)(void *arg), void *arg)
{
check_irq_on();
preempt_disable();
local_irq_disable();
func(arg);
local_irq_enable();
@@ -2120,12 +2132,12 @@ static void smp_call_function_all_cpus(void (*func)(void *arg), void *arg)
preempt_enable();
}
static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
int force, int node);
static void drain_array_locked(struct kmem_cache *cachep,
struct array_cache *ac, int force, int node);
static void do_drain(void *arg)
{
struct kmem_cache *cachep = (struct kmem_cache *) arg;
struct kmem_cache *cachep = arg;
struct array_cache *ac;
int node = numa_node_id();
@@ -2273,16 +2285,15 @@ int kmem_cache_destroy(struct kmem_cache *cachep)
/* NUMA: free the list3 structures */
for_each_online_node(i) {
if ((l3 = cachep->nodelists[i])) {
l3 = cachep->nodelists[i];
if (l3) {
kfree(l3->shared);
free_alien_cache(l3->alien);
kfree(l3);
}
}
kmem_cache_free(&cache_cache, cachep);
unlock_cpu_hotplug();
return 0;
}
EXPORT_SYMBOL(kmem_cache_destroy);
@@ -2305,7 +2316,6 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
slabp->inuse = 0;
slabp->colouroff = colour_off;
slabp->s_mem = objp + colour_off;
return slabp;
}
@@ -2333,9 +2343,9 @@ static void cache_init_objs(struct kmem_cache *cachep,
*dbg_redzone2(cachep, objp) = RED_INACTIVE;
}
/*
* Constructors are not allowed to allocate memory from
* the same cache which they are a constructor for.
* Otherwise, deadlock. They must also be threaded.
* Constructors are not allowed to allocate memory from the same
* cache which they are a constructor for. Otherwise, deadlock.
* They must also be threaded.
*/
if (cachep->ctor && !(cachep->flags & SLAB_POISON))
cachep->ctor(objp + obj_offset(cachep), cachep,
@@ -2349,8 +2359,8 @@ static void cache_init_objs(struct kmem_cache *cachep,
slab_error(cachep, "constructor overwrote the"
" start of an object");
}
if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)
&& cachep->flags & SLAB_POISON)
if ((cachep->buffer_size % PAGE_SIZE) == 0 &&
OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
kernel_map_pages(virt_to_page(objp),
cachep->buffer_size / PAGE_SIZE, 0);
#else
@@ -2365,16 +2375,14 @@ static void cache_init_objs(struct kmem_cache *cachep,
static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
{
if (flags & SLAB_DMA) {
if (!(cachep->gfpflags & GFP_DMA))
BUG();
} else {
if (cachep->gfpflags & GFP_DMA)
BUG();
}
if (flags & SLAB_DMA)
BUG_ON(!(cachep->gfpflags & GFP_DMA));
else
BUG_ON(cachep->gfpflags & GFP_DMA);
}
static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, int nodeid)
static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
int nodeid)
{
void *objp = index_to_obj(cachep, slabp, slabp->free);
kmem_bufctl_t next;
@@ -2390,8 +2398,8 @@ static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, int nod
return objp;
}
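slab_get_obj and slab_put_obj maintain a freelist threaded through an array of indices: slab_bufctl(slabp)[i] names the next free object after object i, and slabp->free is the head. A userspace sketch of that structure (mini_slab and friends are illustrative):

#define MY_BUFCTL_END	((unsigned int)~0U)
#define MY_OBJS		8

struct mini_slab {
	unsigned int free;		/* head of the freelist */
	unsigned int inuse;
	unsigned int bufctl[MY_OBJS];	/* next-free links */
};

static int mini_get_obj(struct mini_slab *s)
{
	unsigned int idx = s->free;

	if (idx == MY_BUFCTL_END)
		return -1;		/* slab is full */
	s->free = s->bufctl[idx];	/* unlink the head */
	s->inuse++;
	return (int)idx;
}

static void mini_put_obj(struct mini_slab *s, unsigned int idx)
{
	s->bufctl[idx] = s->free;	/* push back onto the list */
	s->free = idx;
	s->inuse--;
}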
static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, void *objp,
int nodeid)
static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
void *objp, int nodeid)
{
unsigned int objnr = obj_to_index(cachep, slabp, objp);
@@ -2401,7 +2409,7 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, void *ob
if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
printk(KERN_ERR "slab: double free detected in cache "
"'%s', objp %p\n", cachep->name, objp);
"'%s', objp %p\n", cachep->name, objp);
BUG();
}
#endif
@@ -2410,7 +2418,8 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, void *ob
slabp->inuse--;
}
static void set_slab_attr(struct kmem_cache *cachep, struct slab *slabp, void *objp)
static void set_slab_attr(struct kmem_cache *cachep, struct slab *slabp,
void *objp)
{
int i;
struct page *page;
@@ -2438,8 +2447,9 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
unsigned long ctor_flags;
struct kmem_list3 *l3;
/* Be lazy and only check for valid flags here,
* keeping it out of the critical path in kmem_cache_alloc().
/*
* Be lazy and only check for valid flags here, keeping it out of the
* critical path in kmem_cache_alloc().
*/
if (flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW))
BUG();
@@ -2480,14 +2490,17 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
*/
kmem_flagcheck(cachep, flags);
/* Get mem for the objs.
* Attempt to allocate a physical page from 'nodeid',
/*
* Get mem for the objs. Attempt to allocate a physical page from
* 'nodeid'.
*/
if (!(objp = kmem_getpages(cachep, flags, nodeid)))
objp = kmem_getpages(cachep, flags, nodeid);
if (!objp)
goto failed;
/* Get slab management. */
if (!(slabp = alloc_slabmgmt(cachep, objp, offset, local_flags)))
slabp = alloc_slabmgmt(cachep, objp, offset, local_flags);
if (!slabp)
goto opps1;
slabp->nodeid = nodeid;
@@ -2506,9 +2519,9 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
l3->free_objects += cachep->num;
spin_unlock(&l3->list_lock);
return 1;
opps1:
kmem_freepages(cachep, objp);
failed:
if (local_flags & __GFP_WAIT)
local_irq_disable();
return 0;
@@ -2551,8 +2564,8 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
page = virt_to_page(objp);
if (page_get_cache(page) != cachep) {
printk(KERN_ERR
"mismatch in kmem_cache_free: expected cache %p, got %p\n",
printk(KERN_ERR "mismatch in kmem_cache_free: expected "
"cache %p, got %p\n",
page_get_cache(page), cachep);
printk(KERN_ERR "%p is %s.\n", cachep, cachep->name);
printk(KERN_ERR "%p is %s.\n", page_get_cache(page),
@@ -2562,13 +2575,12 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
slabp = page_get_slab(page);
if (cachep->flags & SLAB_RED_ZONE) {
if (*dbg_redzone1(cachep, objp) != RED_ACTIVE
|| *dbg_redzone2(cachep, objp) != RED_ACTIVE) {
slab_error(cachep,
"double free, or memory outside"
" object was overwritten");
printk(KERN_ERR
"%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
if (*dbg_redzone1(cachep, objp) != RED_ACTIVE ||
*dbg_redzone2(cachep, objp) != RED_ACTIVE) {
slab_error(cachep, "double free, or memory outside"
" object was overwritten");
printk(KERN_ERR "%p: redzone 1:0x%lx, "
"redzone 2:0x%lx.\n",
objp, *dbg_redzone1(cachep, objp),
*dbg_redzone2(cachep, objp));
}
@@ -2584,9 +2596,10 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
if (cachep->flags & SLAB_DEBUG_INITIAL) {
/* Need to call the slab's constructor so the
* caller can perform a verify of its state (debugging).
* Called without the cache-lock held.
/*
* Need to call the slab's constructor so the caller can
* perform a verify of its state (debugging). Called without
* the cache-lock held.
*/
cachep->ctor(objp + obj_offset(cachep),
cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY);
@@ -2599,7 +2612,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
}
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
store_stackinfo(cachep, objp, (unsigned long)caller);
kernel_map_pages(virt_to_page(objp),
cachep->buffer_size / PAGE_SIZE, 0);
@@ -2625,14 +2638,14 @@ static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
goto bad;
}
if (entries != cachep->num - slabp->inuse) {
bad:
printk(KERN_ERR
"slab: Internal list corruption detected in cache '%s'(%d), slabp %p(%d). Hexdump:\n",
cachep->name, cachep->num, slabp, slabp->inuse);
bad:
printk(KERN_ERR "slab: Internal list corruption detected in "
"cache '%s'(%d), slabp %p(%d). Hexdump:\n",
cachep->name, cachep->num, slabp, slabp->inuse);
for (i = 0;
i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t);
i++) {
if ((i % 16) == 0)
if (i % 16 == 0)
printk("\n%03x:", i);
printk(" %02x", ((unsigned char *)slabp)[i]);
}
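The dump loop above prints sixteen bytes per line behind a hex offset; the same pattern as a self-contained userspace helper:

#include <stdio.h>

static void hexdump(const unsigned char *buf, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++) {
		if (i % 16 == 0)
			printf("\n%03x:", i);
		printf(" %02x", buf[i]);
	}
	printf("\n");
}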
@@ -2654,12 +2667,13 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
check_irq_off();
ac = cpu_cache_get(cachep);
retry:
batchcount = ac->batchcount;
if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
/* if there was little recent activity on this
* cache, then perform only a partial refill.
* Otherwise we could generate refill bouncing.
/*
* If there was little recent activity on this cache, then
* perform only a partial refill. Otherwise we could generate
* refill bouncing.
*/
batchcount = BATCHREFILL_LIMIT;
}
@@ -2715,29 +2729,29 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
list_add(&slabp->list, &l3->slabs_partial);
}
must_grow:
l3->free_objects -= ac->avail;
alloc_done:
spin_unlock(&l3->list_lock);
if (unlikely(!ac->avail)) {
int x;
x = cache_grow(cachep, flags, numa_node_id());
// cache_grow can reenable interrupts, then ac could change.
/* cache_grow can reenable interrupts, then ac could change. */
ac = cpu_cache_get(cachep);
if (!x && ac->avail == 0) // no objects in sight? abort
if (!x && ac->avail == 0) /* no objects in sight? abort */
return NULL;
if (!ac->avail) // objects refilled by interrupt?
if (!ac->avail) /* objects refilled by interrupt? */
goto retry;
}
ac->touched = 1;
return ac->entry[--ac->avail];
}
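That final return is the whole fast path: the per-cpu array is a stack, so an allocation pops the most recently freed, and therefore cache-warm, object. A hedged sketch of both directions (my_ac, ac_pop and ac_push are illustrative):

struct my_ac {
	unsigned int avail;		/* live entries in entry[] */
	unsigned int limit;		/* capacity */
	void *entry[64];
};

static void *ac_pop(struct my_ac *ac)
{
	if (!ac->avail)
		return NULL;		/* empty: caller must refill */
	return ac->entry[--ac->avail];	/* LIFO: newest first */
}

static int ac_push(struct my_ac *ac, void *objp)
{
	if (ac->avail == ac->limit)
		return 0;		/* full: caller must flush a batch */
	ac->entry[ac->avail++] = objp;
	return 1;
}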
static inline void
cache_alloc_debugcheck_before(struct kmem_cache *cachep, gfp_t flags)
static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
gfp_t flags)
{
might_sleep_if(flags & __GFP_WAIT);
#if DEBUG
@@ -2746,8 +2760,8 @@ cache_alloc_debugcheck_before(struct kmem_cache *cachep, gfp_t flags)
}
#if DEBUG
static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, gfp_t flags,
void *objp, void *caller)
static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
gfp_t flags, void *objp, void *caller)
{
if (!objp)
return objp;
@@ -2767,15 +2781,14 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, gfp_t flags
*dbg_userword(cachep, objp) = caller;
if (cachep->flags & SLAB_RED_ZONE) {
if (*dbg_redzone1(cachep, objp) != RED_INACTIVE
|| *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
slab_error(cachep,
"double free, or memory outside"
" object was overwritten");
if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
slab_error(cachep, "double free, or memory outside"
" object was overwritten");
printk(KERN_ERR
"%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
objp, *dbg_redzone1(cachep, objp),
*dbg_redzone2(cachep, objp));
"%p: redzone 1:0x%lx, redzone 2:0x%lx\n",
objp, *dbg_redzone1(cachep, objp),
*dbg_redzone2(cachep, objp));
}
*dbg_redzone1(cachep, objp) = RED_ACTIVE;
*dbg_redzone2(cachep, objp) = RED_ACTIVE;
@@ -2822,8 +2835,8 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
return objp;
}
static __always_inline void *
__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
gfp_t flags, void *caller)
{
unsigned long save_flags;
void *objp;
@@ -2843,7 +2856,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
/*
* A interface to enable slab creation on nodeid
*/
static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
int nodeid)
{
struct list_head *entry;
struct slab *slabp;
@@ -2854,7 +2868,7 @@ static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int node
l3 = cachep->nodelists[nodeid];
BUG_ON(!l3);
retry:
check_irq_off();
spin_lock(&l3->list_lock);
entry = l3->slabs_partial.next;
@@ -2881,16 +2895,15 @@ static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int node
/* move slabp to correct slabp list: */
list_del(&slabp->list);
if (slabp->free == BUFCTL_END) {
if (slabp->free == BUFCTL_END)
list_add(&slabp->list, &l3->slabs_full);
} else {
else
list_add(&slabp->list, &l3->slabs_partial);
}
spin_unlock(&l3->list_lock);
goto done;
must_grow:
spin_unlock(&l3->list_lock);
x = cache_grow(cachep, flags, nodeid);
@@ -2898,7 +2911,7 @@ static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int node
return NULL;
goto retry;
done:
return obj;
}
#endif
@@ -2971,7 +2984,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
}
free_block(cachep, ac->entry, batchcount, node);
free_done:
#if STATS
{
int i = 0;
@@ -2992,16 +3005,12 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
#endif
spin_unlock(&l3->list_lock);
ac->avail -= batchcount;
memmove(ac->entry, &(ac->entry[batchcount]),
sizeof(void *) * ac->avail);
memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
}
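cache_flusharray frees a batch from the front of the array, i.e. the oldest and coldest entries, then slides the survivors down with the memmove above; the regions overlap, which is why it is memmove and not memcpy. A sketch of the compaction (free_one_obj is a hypothetical stand-in for free_block):

#include <string.h>

static void free_one_obj(void *objp)
{
	(void)objp;	/* would hand objp back to its slab */
}

static void ac_flush_batch(void **entry, unsigned int *avail,
			   unsigned int batchcount)
{
	unsigned int i;

	for (i = 0; i < batchcount; i++)
		free_one_obj(entry[i]);	/* oldest entries first */
	*avail -= batchcount;
	memmove(entry, &entry[batchcount], sizeof(void *) * *avail);
}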
/*
* __cache_free
* Release an obj back to its cache. If the obj has a constructed
* state, it must be in this state _before_ it is released.
*
* Called with disabled ints.
* Release an obj back to its cache. If the obj has a constructed state, it must
* be in this state _before_ it is released. Called with disabled ints.
*/
static inline void __cache_free(struct kmem_cache *cachep, void *objp)
{
@@ -3020,9 +3029,9 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
if (unlikely(slabp->nodeid != numa_node_id())) {
struct array_cache *alien = NULL;
int nodeid = slabp->nodeid;
struct kmem_list3 *l3 =
cachep->nodelists[numa_node_id()];
struct kmem_list3 *l3;
l3 = cachep->nodelists[numa_node_id()];
STATS_INC_NODEFREES(cachep);
if (l3->alien && l3->alien[nodeid]) {
alien = l3->alien[nodeid];
@@ -3106,7 +3115,7 @@ int fastcall kmem_ptr_validate(struct kmem_cache *cachep, void *ptr)
if (unlikely(page_get_cache(page) != cachep))
goto out;
return 1;
out:
return 0;
}
@@ -3132,7 +3141,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
local_irq_save(save_flags);
if (nodeid == -1 || nodeid == numa_node_id() ||
!cachep->nodelists[nodeid])
ptr = ____cache_alloc(cachep, flags);
else
ptr = __cache_alloc_node(cachep, flags, nodeid);
@@ -3249,7 +3258,7 @@ void *__alloc_percpu(size_t size)
/* Catch derefs w/o wrappers */
return (void *)(~(unsigned long)pdata);
unwind_oom:
while (--i >= 0) {
if (!cpu_possible(i))
continue;
@@ -3352,18 +3361,20 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
struct array_cache *nc = NULL, *new;
struct array_cache **new_alien = NULL;
#ifdef CONFIG_NUMA
if (!(new_alien = alloc_alien_cache(node, cachep->limit)))
new_alien = alloc_alien_cache(node, cachep->limit);
if (!new_alien)
goto fail;
#endif
if (!(new = alloc_arraycache(node, (cachep->shared *
cachep->batchcount),
0xbaadf00d)))
new = alloc_arraycache(node, cachep->shared*cachep->batchcount,
0xbaadf00d);
if (!new)
goto fail;
if ((l3 = cachep->nodelists[node])) {
l3 = cachep->nodelists[node];
if (l3) {
spin_lock_irq(&l3->list_lock);
if ((nc = cachep->nodelists[node]->shared))
nc = cachep->nodelists[node]->shared;
if (nc)
free_block(cachep, nc->entry, nc->avail, node);
l3->shared = new;
@@ -3372,27 +3383,27 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
new_alien = NULL;
}
l3->free_limit = (1 + nr_cpus_node(node)) *
cachep->batchcount + cachep->num;
spin_unlock_irq(&l3->list_lock);
kfree(nc);
free_alien_cache(new_alien);
continue;
}
if (!(l3 = kmalloc_node(sizeof(struct kmem_list3),
GFP_KERNEL, node)))
l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
if (!l3)
goto fail;
kmem_list3_init(l3);
l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
((unsigned long)cachep) % REAPTIMEOUT_LIST3;
l3->shared = new;
l3->alien = new_alien;
l3->free_limit = (1 + nr_cpus_node(node)) *
cachep->batchcount + cachep->num;
cachep->nodelists[node] = l3;
}
return err;
fail:
err = -ENOMEM;
return err;
}
@@ -3404,7 +3415,7 @@ struct ccupdate_struct {
static void do_ccupdate_local(void *info)
{
struct ccupdate_struct *new = (struct ccupdate_struct *)info;
struct ccupdate_struct *new = info;
struct array_cache *old;
check_irq_off();
@@ -3414,16 +3425,16 @@ static void do_ccupdate_local(void *info)
new->new[smp_processor_id()] = old;
}
static int do_tune_cpucache(struct kmem_cache *cachep, int limit, int batchcount,
int shared)
static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
int batchcount, int shared)
{
struct ccupdate_struct new;
int i, err;
memset(&new.new, 0, sizeof(new.new));
for_each_online_cpu(i) {
new.new[i] =
alloc_arraycache(cpu_to_node(i), limit, batchcount);
new.new[i] = alloc_arraycache(cpu_to_node(i), limit,
batchcount);
if (!new.new[i]) {
for (i--; i >= 0; i--)
kfree(new.new[i]);
@@ -3465,10 +3476,11 @@ static void enable_cpucache(struct kmem_cache *cachep)
int err;
int limit, shared;
/* The head array serves three purposes:
/*
* The head array serves three purposes:
* - create a LIFO ordering, i.e. return objects that are cache-warm
* - reduce the number of spinlock operations.
* - reduce the number of linked list operations on the slab and
* bufctl chains: array operations are cheaper.
* The numbers are guessed, we should auto-tune as described by
* Bonwick.
@@ -3484,7 +3496,8 @@ static void enable_cpucache(struct kmem_cache *cachep)
else
limit = 120;
/* Cpu bound tasks (e.g. network routing) can exhibit cpu bound
/*
* CPU bound tasks (e.g. network routing) can exhibit cpu bound
* allocation behaviour: Most allocs on one cpu, most free operations
* on another cpu. For these cases, an efficient object passing between
* cpus is necessary. This is provided by a shared array. The array
@@ -3499,9 +3512,9 @@ static void enable_cpucache(struct kmem_cache *cachep)
#endif
#if DEBUG
/* With debugging enabled, large batchcount lead to excessively
* long periods with disabled local interrupts. Limit the
* batchcount
/*
* With debugging enabled, large batchcount lead to excessively long
* periods with disabled local interrupts. Limit the batchcount
*/
if (limit > 32)
limit = 32;
@@ -3512,8 +3525,8 @@ static void enable_cpucache(struct kmem_cache *cachep)
cachep->name, -err);
}
static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
int force, int node)
static void drain_array_locked(struct kmem_cache *cachep,
struct array_cache *ac, int force, int node)
{
int tofree;
@@ -3522,9 +3535,8 @@ static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac
ac->touched = 0;
} else if (ac->avail) {
tofree = force ? ac->avail : (ac->limit + 4) / 5;
if (tofree > ac->avail) {
if (tofree > ac->avail)
tofree = (ac->avail + 1) / 2;
}
free_block(cachep, ac->entry, tofree, node);
ac->avail -= tofree;
memmove(ac->entry, &(ac->entry[tofree]),
@@ -3541,8 +3553,8 @@ static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac
* - clear the per-cpu caches for this CPU.
* - return freeable pages to the main free memory pool.
*
* If we cannot acquire the cache chain mutex then just give up - we'll
* try again on the next iteration.
* If we cannot acquire the cache chain mutex then just give up - we'll try
* again on the next iteration.
*/
static void cache_reap(void *unused)
{
@@ -3590,9 +3602,8 @@ static void cache_reap(void *unused)
goto next_unlock;
}
tofree =
(l3->free_limit + 5 * searchp->num -
1) / (5 * searchp->num);
tofree = (l3->free_limit + 5 * searchp->num - 1) /
(5 * searchp->num);
do {
p = l3->slabs_free.next;
if (p == &(l3->slabs_free))
@@ -3603,9 +3614,9 @@ static void cache_reap(void *unused)
list_del(&slabp->list);
STATS_INC_REAPED(searchp);
/* Safe to drop the lock. The slab is no longer
* linked to the cache.
* searchp cannot disappear, we hold
/*
* Safe to drop the lock. The slab is no longer linked
* to the cache. searchp cannot disappear, we hold
* cache_chain_lock
*/
l3->free_objects -= searchp->num;
@@ -3613,15 +3624,15 @@ static void cache_reap(void *unused)
slab_destroy(searchp, slabp);
spin_lock_irq(&l3->list_lock);
} while (--tofree > 0);
next_unlock:
spin_unlock_irq(&l3->list_lock);
next:
cond_resched();
}
check_irq_on();
mutex_unlock(&cache_chain_mutex);
next_reap_node();
/* Setup the next iteration */
/* Set up the next iteration */
schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
}
@@ -3671,8 +3682,8 @@ static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
struct kmem_cache *cachep = p;
++*pos;
return cachep->next.next == &cache_chain ? NULL
: list_entry(cachep->next.next, struct kmem_cache, next);
return cachep->next.next == &cache_chain ?
NULL : list_entry(cachep->next.next, struct kmem_cache, next);
}
static void s_stop(struct seq_file *m, void *p)
@@ -3761,7 +3772,9 @@ static int s_show(struct seq_file *m, void *p)
unsigned long node_frees = cachep->node_frees;
seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \
%4lu %4lu %4lu %4lu", allocs, high, grown, reaped, errors, max_freeable, node_allocs, node_frees);
%4lu %4lu %4lu %4lu", allocs, high, grown,
reaped, errors, max_freeable, node_allocs,
node_frees);
}
/* cpu stats */
{
@@ -3833,13 +3846,12 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
mutex_lock(&cache_chain_mutex);
res = -EINVAL;
list_for_each(p, &cache_chain) {
struct kmem_cache *cachep = list_entry(p, struct kmem_cache,
next);
struct kmem_cache *cachep;
cachep = list_entry(p, struct kmem_cache, next);
if (!strcmp(cachep->name, kbuf)) {
if (limit < 1 ||
batchcount < 1 ||
batchcount > limit || shared < 0) {
if (limit < 1 || batchcount < 1 ||
batchcount > limit || shared < 0) {
res = 0;
} else {
res = do_tune_cpucache(cachep, limit,