Commit 343e0d7a authored by Pekka Enberg, committed by Linus Torvalds

[PATCH] slab: replace kmem_cache_t with struct kmem_cache

Replace uses of kmem_cache_t with proper struct kmem_cache in mm/slab.c.
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 9a2dba4b
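The conversion below is purely mechanical: every prototype, declaration, and cast that spelled the kmem_cache_t typedef now names struct kmem_cache directly, with no change in behaviour. As a minimal before/after sketch (mirroring the obj_size() hunk in the diff), the pattern is:

/* Before: the typedef hides the underlying struct type. */
static int obj_size(kmem_cache_t *cachep)
{
	return cachep->obj_size;
}

/* After: the struct type is spelled out explicitly. */
static int obj_size(struct kmem_cache *cachep)
{
	return cachep->obj_size;
}

One hunk in kmem_cache_create() also drops a now-redundant (kmem_cache_t *) cast on the return value of kmem_cache_alloc(), which is safe because that function returns void * and C converts it implicitly.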
@@ -55,7 +55,7 @@
 *
 * SMP synchronization:
 * constructors and destructors are called without any locking.
-* Several members in kmem_cache_t and struct slab never change, they
+* Several members in struct kmem_cache and struct slab never change, they
 * are accessed without any locking.
 * The per-cpu arrays are never accessed from the wrong cpu, no locking,
 * and local interrupts are disabled so slab code is preempt-safe.
@@ -244,7 +244,7 @@ struct slab {
 */
 struct slab_rcu {
 struct rcu_head head;
-kmem_cache_t *cachep;
+struct kmem_cache *cachep;
 void *addr;
 };
@@ -363,7 +363,7 @@ static void kmem_list3_init(struct kmem_list3 *parent)
 } while (0)
 /*
-* kmem_cache_t
+* struct kmem_cache
 *
 * manages a cache.
 */
@@ -391,15 +391,15 @@ struct kmem_cache {
 size_t colour; /* cache colouring range */
 unsigned int colour_off; /* colour offset */
 unsigned int colour_next; /* cache colouring */
-kmem_cache_t *slabp_cache;
+struct kmem_cache *slabp_cache;
 unsigned int slab_size;
 unsigned int dflags; /* dynamic flags */
 /* constructor func */
-void (*ctor) (void *, kmem_cache_t *, unsigned long);
+void (*ctor) (void *, struct kmem_cache *, unsigned long);
 /* de-constructor func */
-void (*dtor) (void *, kmem_cache_t *, unsigned long);
+void (*dtor) (void *, struct kmem_cache *, unsigned long);
 /* 4) cache creation/removal */
 const char *name;
@@ -509,23 +509,23 @@ struct kmem_cache {
 * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
 */
-static int obj_offset(kmem_cache_t *cachep)
+static int obj_offset(struct kmem_cache *cachep)
 {
 return cachep->obj_offset;
 }
-static int obj_size(kmem_cache_t *cachep)
+static int obj_size(struct kmem_cache *cachep)
 {
 return cachep->obj_size;
 }
-static unsigned long *dbg_redzone1(kmem_cache_t *cachep, void *objp)
+static unsigned long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
 {
 BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
 return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD);
 }
-static unsigned long *dbg_redzone2(kmem_cache_t *cachep, void *objp)
+static unsigned long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
 {
 BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
 if (cachep->flags & SLAB_STORE_USER)
@@ -534,7 +534,7 @@ static unsigned long *dbg_redzone2(kmem_cache_t *cachep, void *objp)
 return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD);
 }
-static void **dbg_userword(kmem_cache_t *cachep, void *objp)
+static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 {
 BUG_ON(!(cachep->flags & SLAB_STORE_USER));
 return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
@@ -636,16 +636,16 @@ static struct arraycache_init initarray_generic =
 { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 /* internal cache of cache description objs */
-static kmem_cache_t cache_cache = {
+static struct kmem_cache cache_cache = {
 .batchcount = 1,
 .limit = BOOT_CPUCACHE_ENTRIES,
 .shared = 1,
-.buffer_size = sizeof(kmem_cache_t),
+.buffer_size = sizeof(struct kmem_cache),
 .flags = SLAB_NO_REAP,
 .spinlock = SPIN_LOCK_UNLOCKED,
 .name = "kmem_cache",
 #if DEBUG
-.obj_size = sizeof(kmem_cache_t),
+.obj_size = sizeof(struct kmem_cache),
 #endif
 };
@@ -674,17 +674,17 @@ static enum {
 static DEFINE_PER_CPU(struct work_struct, reap_work);
-static void free_block(kmem_cache_t *cachep, void **objpp, int len, int node);
-static void enable_cpucache(kmem_cache_t *cachep);
+static void free_block(struct kmem_cache *cachep, void **objpp, int len, int node);
+static void enable_cpucache(struct kmem_cache *cachep);
 static void cache_reap(void *unused);
-static int __node_shrink(kmem_cache_t *cachep, int node);
-static inline struct array_cache *cpu_cache_get(kmem_cache_t *cachep)
+static int __node_shrink(struct kmem_cache *cachep, int node);
+static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
 return cachep->array[smp_processor_id()];
 }
-static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags)
+static inline struct kmem_cache *__find_general_cachep(size_t size, gfp_t gfpflags)
 {
 struct cache_sizes *csizep = malloc_sizes;
@@ -708,7 +708,7 @@ static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags)
 return csizep->cs_cachep;
 }
-kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
+struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
 {
 return __find_general_cachep(size, gfpflags);
 }
@@ -781,7 +781,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
 #define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)
-static void __slab_error(const char *function, kmem_cache_t *cachep, char *msg)
+static void __slab_error(const char *function, struct kmem_cache *cachep, char *msg)
 {
 printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
 function, cachep->name, msg);
@@ -828,7 +828,7 @@ static struct array_cache *alloc_arraycache(int node, int entries,
 }
 #ifdef CONFIG_NUMA
-static void *__cache_alloc_node(kmem_cache_t *, gfp_t, int);
+static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
 static struct array_cache **alloc_alien_cache(int node, int limit)
 {
@@ -870,7 +870,7 @@ static void free_alien_cache(struct array_cache **ac_ptr)
 kfree(ac_ptr);
 }
-static void __drain_alien_cache(kmem_cache_t *cachep,
+static void __drain_alien_cache(struct kmem_cache *cachep,
 struct array_cache *ac, int node)
 {
 struct kmem_list3 *rl3 = cachep->nodelists[node];
@@ -883,7 +883,7 @@ static void __drain_alien_cache(kmem_cache_t *cachep,
 }
 }
-static void drain_alien_cache(kmem_cache_t *cachep, struct kmem_list3 *l3)
+static void drain_alien_cache(struct kmem_cache *cachep, struct kmem_list3 *l3)
 {
 int i = 0;
 struct array_cache *ac;
@@ -908,7 +908,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 unsigned long action, void *hcpu)
 {
 long cpu = (long)hcpu;
-kmem_cache_t *cachep;
+struct kmem_cache *cachep;
 struct kmem_list3 *l3 = NULL;
 int node = cpu_to_node(cpu);
 int memsize = sizeof(struct kmem_list3);
@@ -1046,7 +1046,7 @@ static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 };
 /*
 * swap the static kmem_list3 with kmalloced memory
 */
-static void init_list(kmem_cache_t *cachep, struct kmem_list3 *list, int nodeid)
+static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list, int nodeid)
 {
 struct kmem_list3 *ptr;
@@ -1086,14 +1086,14 @@ void __init kmem_cache_init(void)
 /* Bootstrap is tricky, because several objects are allocated
 * from caches that do not exist yet:
-* 1) initialize the cache_cache cache: it contains the kmem_cache_t
+* 1) initialize the cache_cache cache: it contains the struct kmem_cache
 * structures of all caches, except cache_cache itself: cache_cache
 * is statically allocated.
 * Initially an __init data area is used for the head array and the
 * kmem_list3 structures, it's replaced with a kmalloc allocated
 * array at the end of the bootstrap.
 * 2) Create the first kmalloc cache.
-* The kmem_cache_t for the new cache is allocated normally.
+* The struct kmem_cache for the new cache is allocated normally.
 * An __init data area is used for the head array.
 * 3) Create the remaining kmalloc caches, with minimally sized
 * head arrays.
@@ -1224,7 +1224,7 @@ void __init kmem_cache_init(void)
 /* 6) resize the head arrays to their final sizes */
 {
-kmem_cache_t *cachep;
+struct kmem_cache *cachep;
 mutex_lock(&cache_chain_mutex);
 list_for_each_entry(cachep, &cache_chain, next)
 enable_cpucache(cachep);
@@ -1267,7 +1267,7 @@ __initcall(cpucache_init);
 * did not request dmaable memory, we might get it, but that
 * would be relatively rare and ignorable.
 */
-static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid)
+static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 struct page *page;
 void *addr;
@@ -1293,7 +1293,7 @@ static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 /*
 * Interface to system's page release.
 */
-static void kmem_freepages(kmem_cache_t *cachep, void *addr)
+static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 {
 unsigned long i = (1 << cachep->gfporder);
 struct page *page = virt_to_page(addr);
@@ -1315,7 +1315,7 @@ static void kmem_freepages(kmem_cache_t *cachep, void *addr)
 static void kmem_rcu_free(struct rcu_head *head)
 {
 struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
-kmem_cache_t *cachep = slab_rcu->cachep;
+struct kmem_cache *cachep = slab_rcu->cachep;
 kmem_freepages(cachep, slab_rcu->addr);
 if (OFF_SLAB(cachep))
@@ -1325,7 +1325,7 @@ static void kmem_rcu_free(struct rcu_head *head)
 #if DEBUG
 #ifdef CONFIG_DEBUG_PAGEALLOC
-static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
+static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
 unsigned long caller)
 {
 int size = obj_size(cachep);
@@ -1358,7 +1358,7 @@ static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
 }
 #endif
-static void poison_obj(kmem_cache_t *cachep, void *addr, unsigned char val)
+static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
 {
 int size = obj_size(cachep);
 addr = &((char *)addr)[obj_offset(cachep)];
@@ -1380,7 +1380,7 @@ static void dump_line(char *data, int offset, int limit)
 #if DEBUG
-static void print_objinfo(kmem_cache_t *cachep, void *objp, int lines)
+static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
 {
 int i, size;
 char *realobj;
@@ -1409,7 +1409,7 @@ static void print_objinfo(kmem_cache_t *cachep, void *objp, int lines)
 }
 }
-static void check_poison_obj(kmem_cache_t *cachep, void *objp)
+static void check_poison_obj(struct kmem_cache *cachep, void *objp)
 {
 char *realobj;
 int size, i;
@@ -1476,7 +1476,7 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
 * slab_destroy_objs - call the registered destructor for each object in
 * a slab that is to be destroyed.
 */
-static void slab_destroy_objs(kmem_cache_t *cachep, struct slab *slabp)
+static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
 {
 int i;
 for (i = 0; i < cachep->num; i++) {
@@ -1508,7 +1508,7 @@ static void slab_destroy_objs(kmem_cache_t *cachep, struct slab *slabp)
 }
 }
 #else
-static void slab_destroy_objs(kmem_cache_t *cachep, struct slab *slabp)
+static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
 {
 if (cachep->dtor) {
 int i;
@@ -1525,7 +1525,7 @@ static void slab_destroy_objs(kmem_cache_t *cachep, struct slab *slabp)
 * Before calling the slab must have been unlinked from the cache.
 * The cache-lock is not held/needed.
 */
-static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp)
+static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 {
 void *addr = slabp->s_mem - slabp->colouroff;
@@ -1546,7 +1546,7 @@ static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp)
 /* For setting up all the kmem_list3s for cache whose buffer_size is same
 as size of kmem_list3. */
-static void set_up_list3s(kmem_cache_t *cachep, int index)
+static void set_up_list3s(struct kmem_cache *cachep, int index)
 {
 int node;
@@ -1566,7 +1566,7 @@ static void set_up_list3s(kmem_cache_t *cachep, int index)
 * high order pages for slabs. When the gfp() functions are more friendly
 * towards high-order requests, this should be changed.
 */
-static inline size_t calculate_slab_order(kmem_cache_t *cachep, size_t size,
+static inline size_t calculate_slab_order(struct kmem_cache *cachep, size_t size,
 size_t align, gfp_t flags)
 {
 size_t left_over = 0;
@@ -1638,13 +1638,13 @@ static inline size_t calculate_slab_order(kmem_cache_t *cachep, size_t size,
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 */
-kmem_cache_t *
+struct kmem_cache *
 kmem_cache_create (const char *name, size_t size, size_t align,
-unsigned long flags, void (*ctor)(void*, kmem_cache_t *, unsigned long),
-void (*dtor)(void*, kmem_cache_t *, unsigned long))
+unsigned long flags, void (*ctor)(void*, struct kmem_cache *, unsigned long),
+void (*dtor)(void*, struct kmem_cache *, unsigned long))
 {
 size_t left_over, slab_size, ralign;
-kmem_cache_t *cachep = NULL;
+struct kmem_cache *cachep = NULL;
 struct list_head *p;
 /*
@@ -1662,7 +1662,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 mutex_lock(&cache_chain_mutex);
 list_for_each(p, &cache_chain) {
-kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
+struct kmem_cache *pc = list_entry(p, struct kmem_cache, next);
 mm_segment_t old_fs = get_fs();
 char tmp;
 int res;
@@ -1762,10 +1762,10 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 align = ralign;
 /* Get cache's description obj. */
-cachep = (kmem_cache_t *) kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
+cachep = kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
 if (!cachep)
 goto oops;
-memset(cachep, 0, sizeof(kmem_cache_t));
+memset(cachep, 0, sizeof(struct kmem_cache));
 #if DEBUG
 cachep->obj_size = size;
@@ -1941,7 +1941,7 @@ static void check_irq_on(void)
 BUG_ON(irqs_disabled());
 }
-static void check_spinlock_acquired(kmem_cache_t *cachep)
+static void check_spinlock_acquired(struct kmem_cache *cachep)
 {
 #ifdef CONFIG_SMP
 check_irq_off();
@@ -1949,7 +1949,7 @@ static void check_spinlock_acquired(kmem_cache_t *cachep)
 #endif
 }
-static void check_spinlock_acquired_node(kmem_cache_t *cachep, int node)
+static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
 {
 #ifdef CONFIG_SMP
 check_irq_off();
@@ -1982,12 +1982,12 @@ static void smp_call_function_all_cpus(void (*func)(void *arg), void *arg)
 preempt_enable();
 }
-static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac,
+static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
 int force, int node);
 static void do_drain(void *arg)
 {
-kmem_cache_t *cachep = (kmem_cache_t *) arg;
+struct kmem_cache *cachep = (struct kmem_cache *) arg;
 struct array_cache *ac;
 int node = numa_node_id();
@@ -1999,7 +1999,7 @@ static void do_drain(void *arg)
 ac->avail = 0;
 }
-static void drain_cpu_caches(kmem_cache_t *cachep)
+static void drain_cpu_caches(struct kmem_cache *cachep)
 {
 struct kmem_list3 *l3;
 int node;
@@ -2020,7 +2020,7 @@ static void drain_cpu_caches(kmem_cache_t *cachep)
 spin_unlock_irq(&cachep->spinlock);
 }
-static int __node_shrink(kmem_cache_t *cachep, int node)
+static int __node_shrink(struct kmem_cache *cachep, int node)
 {
 struct slab *slabp;
 struct kmem_list3 *l3 = cachep->nodelists[node];
@@ -2049,7 +2049,7 @@ static int __node_shrink(kmem_cache_t *cachep, int node)
 return ret;
 }
-static int __cache_shrink(kmem_cache_t *cachep)
+static int __cache_shrink(struct kmem_cache *cachep)
 {
 int ret = 0, i = 0;
 struct kmem_list3 *l3;
@@ -2075,7 +2075,7 @@ static int __cache_shrink(kmem_cache_t *cachep)
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 */
-int kmem_cache_shrink(kmem_cache_t *cachep)
+int kmem_cache_shrink(struct kmem_cache *cachep)
 {
 if (!cachep || in_interrupt())
 BUG();
@@ -2088,7 +2088,7 @@ EXPORT_SYMBOL(kmem_cache_shrink);
 * kmem_cache_destroy - delete a cache
 * @cachep: the cache to destroy
 *
-* Remove a kmem_cache_t object from the slab cache.
+* Remove a struct kmem_cache object from the slab cache.
 * Returns 0 on success.
 *
 * It is expected this function will be called by a module when it is
@@ -2101,7 +2101,7 @@ EXPORT_SYMBOL(kmem_cache_shrink);
 * The caller must guarantee that noone will allocate memory from the cache
 * during the kmem_cache_destroy().
 */
-int kmem_cache_destroy(kmem_cache_t *cachep)
+int kmem_cache_destroy(struct kmem_cache *cachep)
 {
 int i;
 struct kmem_list3 *l3;
@@ -2152,7 +2152,7 @@ int kmem_cache_destroy(kmem_cache_t *cachep)
 EXPORT_SYMBOL(kmem_cache_destroy);
 /* Get the memory for a slab management obj. */
-static struct slab *alloc_slabmgmt(kmem_cache_t *cachep, void *objp,
+static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 int colour_off, gfp_t local_flags)
 {
 struct slab *slabp;
@@ -2178,7 +2178,7 @@ static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
 return (kmem_bufctl_t *) (slabp + 1);
 }
-static void cache_init_objs(kmem_cache_t *cachep,
+static void cache_init_objs(struct kmem_cache *cachep,
 struct slab *slabp, unsigned long ctor_flags)
 {
 int i;
@@ -2227,7 +2227,7 @@ static void cache_init_objs(kmem_cache_t *cachep,
 slabp->free = 0;
 }
-static void kmem_flagcheck(kmem_cache_t *cachep, gfp_t flags)
+static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
 {
 if (flags & SLAB_DMA) {
 if (!(cachep->gfpflags & GFP_DMA))
@@ -2238,7 +2238,7 @@ static void kmem_flagcheck(kmem_cache_t *cachep, gfp_t flags)
 }
 }
-static void *slab_get_obj(kmem_cache_t *cachep, struct slab *slabp, int nodeid)
+static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, int nodeid)
 {
 void *objp = slabp->s_mem + (slabp->free * cachep->buffer_size);
 kmem_bufctl_t next;
@@ -2254,7 +2254,7 @@ static void *slab_get_obj(kmem_cache_t *cachep, struct slab *slabp, int nodeid)
 return objp;
 }
-static void slab_put_obj(kmem_cache_t *cachep, struct slab *slabp, void *objp,
+static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, void *objp,
 int nodeid)
 {
 unsigned int objnr = (unsigned)(objp-slabp->s_mem) / cachep->buffer_size;
@@ -2274,7 +2274,7 @@ static void slab_put_obj(kmem_cache_t *cachep, struct slab *slabp, void *objp,
 slabp->inuse--;
 }
-static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp)
+static void set_slab_attr(struct kmem_cache *cachep, struct slab *slabp, void *objp)
 {
 int i;
 struct page *page;
@@ -2293,7 +2293,7 @@ static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp)
 * Grow (by 1) the number of slabs within a cache. This is called by
 * kmem_cache_alloc() when there are no active objs left in a cache.
 */
-static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
+static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 struct slab *slabp;
 void *objp;
@@ -2404,7 +2404,7 @@ static void kfree_debugcheck(const void *objp)
 }
 }
-static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
+static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 void *caller)
 {
 struct page *page;
@@ -2478,7 +2478,7 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
 return objp;
 }
-static void check_slabp(kmem_cache_t *cachep, struct slab *slabp)
+static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
 {
 kmem_bufctl_t i;
 int entries = 0;
@@ -2511,7 +2511,7 @@ static void check_slabp(kmem_cache_t *cachep, struct slab *slabp)
 #define check_slabp(x,y) do { } while(0)
 #endif
-static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
+static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 {
 int batchcount;
 struct kmem_list3 *l3;
@@ -2602,7 +2602,7 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
 }
 static inline void
-cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags)
+cache_alloc_debugcheck_before(struct kmem_cache *cachep, gfp_t flags)
 {
 might_sleep_if(flags & __GFP_WAIT);
 #if DEBUG
@@ -2611,7 +2611,7 @@ cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags)
 }
 #if DEBUG
-static void *cache_alloc_debugcheck_after(kmem_cache_t *cachep, gfp_t flags,
+static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, gfp_t flags,
 void *objp, void *caller)
 {
 if (!objp)
@@ -2660,7 +2660,7 @@ static void *cache_alloc_debugcheck_after(kmem_cache_t *cachep, gfp_t flags,
 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
 #endif
-static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
+static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 void *objp;
 struct array_cache *ac;
@@ -2687,7 +2687,7 @@ static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 return objp;
 }
-static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
+static inline void *__cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 unsigned long save_flags;
 void *objp;
@@ -2707,7 +2707,7 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 /*
 * A interface to enable slab creation on nodeid
 */
-static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
+static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 struct list_head *entry;
 struct slab *slabp;
@@ -2769,7 +2769,7 @@ static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 /*
 * Caller needs to acquire correct kmem_list's list_lock
 */
-static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects,
+static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
 int node)
 {
 int i;
@@ -2807,7 +2807,7 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects,
 }
 }
-static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
+static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 {
 int batchcount;
 struct kmem_list3 *l3;
@@ -2866,7 +2866,7 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
 *
 * Called with disabled ints.
 */
-static inline void __cache_free(kmem_cache_t *cachep, void *objp)
+static inline void __cache_free(struct kmem_cache *cachep, void *objp)
 {
 struct array_cache *ac = cpu_cache_get(cachep);
@@ -2925,7 +2925,7 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
 * Allocate an object from this cache. The flags are only relevant
 * if the cache has no available objects.
 */
-void *kmem_cache_alloc(kmem_cache_t *cachep, gfp_t flags)
+void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 return __cache_alloc(cachep, flags);
 }
@@ -2945,7 +2945,7 @@ EXPORT_SYMBOL(kmem_cache_alloc);
 *
 * Currently only used for dentry validation.
 */
-int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
+int fastcall kmem_ptr_validate(struct kmem_cache *cachep, void *ptr)
 {
 unsigned long addr = (unsigned long)ptr;
 unsigned long min_addr = PAGE_OFFSET;
@@ -2986,7 +2986,7 @@ int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
 * New and improved: it will now make sure that the object gets
 * put on the correct node list so that there is no false sharing.
 */
-void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
+void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 unsigned long save_flags;
 void *ptr;
@@ -3010,7 +3010,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
 void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
-kmem_cache_t *cachep;
+struct kmem_cache *cachep;
 cachep = kmem_find_general_cachep(size, flags);
 if (unlikely(cachep == NULL))
@@ -3043,7 +3043,7 @@ EXPORT_SYMBOL(kmalloc_node);
 */
 void *__kmalloc(size_t size, gfp_t flags)
 {
-kmem_cache_t *cachep;
+struct kmem_cache *cachep;
 /* If you want to save a few bytes .text space: replace
 * __ with kmem_.
@@ -3114,7 +3114,7 @@ EXPORT_SYMBOL(__alloc_percpu);
 * Free an object which was previously allocated from this
 * cache.
 */
-void kmem_cache_free(kmem_cache_t *cachep, void *objp)
+void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 {
 unsigned long flags;
@@ -3135,7 +3135,7 @@ EXPORT_SYMBOL(kmem_cache_free);
 */
 void kfree(const void *objp)
 {
-kmem_cache_t *c;
+struct kmem_cache *c;
 unsigned long flags;
 if (unlikely(!objp))
@@ -3172,13 +3172,13 @@ void free_percpu(const void *objp)
 EXPORT_SYMBOL(free_percpu);
 #endif
-unsigned int kmem_cache_size(kmem_cache_t *cachep)
+unsigned int kmem_cache_size(struct kmem_cache *cachep)
 {
 return obj_size(cachep);
 }
 EXPORT_SYMBOL(kmem_cache_size);
-const char *kmem_cache_name(kmem_cache_t *cachep)
+const char *kmem_cache_name(struct kmem_cache *cachep)
 {
 return cachep->name;
 }
@@ -3187,7 +3187,7 @@ EXPORT_SYMBOL_GPL(kmem_cache_name);
 /*
 * This initializes kmem_list3 for all nodes.
 */
-static int alloc_kmemlist(kmem_cache_t *cachep)
+static int alloc_kmemlist(struct kmem_cache *cachep)
 {
 int node;
 struct kmem_list3 *l3;
@@ -3243,7 +3243,7 @@ static int alloc_kmemlist(kmem_cache_t *cachep)
 }
 struct ccupdate_struct {
-kmem_cache_t *cachep;
+struct kmem_cache *cachep;
 struct array_cache *new[NR_CPUS];
 };
@@ -3259,7 +3259,7 @@ static void do_ccupdate_local(void *info)
 new->new[smp_processor_id()] = old;
 }
-static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
+static int do_tune_cpucache(struct kmem_cache *cachep, int limit, int batchcount,
 int shared)
 {
 struct ccupdate_struct new;
@@ -3305,7 +3305,7 @@ static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
 return 0;
 }
-static void enable_cpucache(kmem_cache_t *cachep)
+static void enable_cpucache(struct kmem_cache *cachep)
 {
 int err;
 int limit, shared;
@@ -3357,7 +3357,7 @@ static void enable_cpucache(kmem_cache_t *cachep)
 cachep->name, -err);
 }
-static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac,
+static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
 int force, int node)
 {
 int tofree;
@@ -3402,12 +3402,12 @@ static void cache_reap(void *unused)
 }
 list_for_each(walk, &cache_chain) {
-kmem_cache_t *searchp;
+struct kmem_cache *searchp;
 struct list_head *p;
 int tofree;
 struct slab *slabp;
-searchp = list_entry(walk, kmem_cache_t, next);
+searchp = list_entry(walk, struct kmem_cache, next);
 if (searchp->flags & SLAB_NO_REAP)
 goto next;
@@ -3510,15 +3510,15 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 if (p == &cache_chain)
 return NULL;
 }
-return list_entry(p, kmem_cache_t, next);
+return list_entry(p, struct kmem_cache, next);
 }
 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
 {
-kmem_cache_t *cachep = p;
+struct kmem_cache *cachep = p;
 ++*pos;
 return cachep->next.next == &cache_chain ? NULL
-: list_entry(cachep->next.next, kmem_cache_t, next);
+: list_entry(cachep->next.next, struct kmem_cache, next);
 }
 static void s_stop(struct seq_file *m, void *p)
@@ -3528,7 +3528,7 @@ static void s_stop(struct seq_file *m, void *p)
 static int s_show(struct seq_file *m, void *p)
 {
-kmem_cache_t *cachep = p;
+struct kmem_cache *cachep = p;
 struct list_head *q;
 struct slab *slabp;
 unsigned long active_objs;
@@ -3678,7 +3678,8 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
 mutex_lock(&cache_chain_mutex);
 res = -EINVAL;
 list_for_each(p, &cache_chain) {
-kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next);
+struct kmem_cache *cachep = list_entry(p, struct kmem_cache,
+next);
 if (!strcmp(cachep->name, kbuf)) {
 if (limit < 1 ||
...