Commit ff12059e authored by Christoph Lameter, committed by Pekka Enberg

SLUB: this_cpu: Remove slub kmem_cache fields

Remove the fields in struct kmem_cache_cpu that were used to cache data from
struct kmem_cache back when the two structures lived in different cachelines.
The cacheline that holds the per cpu array pointer now also holds these
values, so there is nothing left to cache. This cuts the size of struct
kmem_cache_cpu almost in half.
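
For reference, a sketch of the resulting structure (reconstructed from the
slub_def.h hunk below, not an authoritative listing):

        struct kmem_cache_cpu {
                void **freelist;        /* Pointer to first free per cpu object */
                struct page *page;      /* The slab from which we are allocating */
                int node;               /* The node of the page (or -1 for debug) */
        #ifdef CONFIG_SLUB_STATS
                unsigned stat[NR_SLUB_STAT_ITEMS];
        #endif
        };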

The get_freepointer() and set_freepointer() functions, which used to be
intended only for the slow path, are now also useful on the hot path, since
the offset and size fields can be read without touching an additional
cacheline. The result is consistent use of these helpers for reading and
writing object freepointers throughout SLUB.
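
Both helpers are trivial accessors of the word at s->offset inside an object.
get_freepointer() appears verbatim in the diff below; set_freepointer() is
sketched here as its presumed mirror image (its body is not part of this
patch's hunks):

        static inline void *get_freepointer(struct kmem_cache *s, void *object)
        {
                return *(void **)(object + s->offset);
        }

        /* presumed counterpart: the store now used by slab_free()/__slab_free() */
        static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
        {
                *(void **)(object + s->offset) = fp;
        }

Reading s->offset directly is what makes these usable on the hot path: per the
commit message, the field now shares a cacheline with the per cpu array
pointer that the fast paths dereference anyway.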

Also, all possible kmem_cache_cpu structures are now set up when a cache is
created, so there is no need to initialize them when a processor or node
comes online.
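
An editorial note on why no explicit initialization remains necessary: the
static DEFINE_PER_CPU() area used at boot (kmalloc_percpu, visible in the
alloc_kmem_cache_cpus() hunks below) and memory returned by alloc_percpu()
are both zero-filled, so every possible cpu already starts out in the state
that init_kmem_cache_cpu() used to establish by hand. Illustratively (this
initializer is hypothetical, not code from the patch):

        struct kmem_cache_cpu zeroed = {
                .freelist = NULL,       /* no per cpu objects yet */
                .page = NULL,           /* no slab page attached */
                .node = 0,
                /* .stat[] is likewise all zeroes under CONFIG_SLUB_STATS */
        };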
Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
parent 756dee75
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -38,8 +38,6 @@ struct kmem_cache_cpu {
 	void **freelist;	/* Pointer to first free per cpu object */
 	struct page *page;	/* The slab from which we are allocating */
 	int node;		/* The node of the page (or -1 for debug) */
-	unsigned int offset;	/* Freepointer offset (in word units) */
-	unsigned int objsize;	/* Size of an object (from kmem_cache) */
 #ifdef CONFIG_SLUB_STATS
 	unsigned stat[NR_SLUB_STAT_ITEMS];
 #endif
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -260,13 +260,6 @@ static inline int check_valid_pointer(struct kmem_cache *s,
 	return 1;
 }
 
-/*
- * Slow version of get and set free pointer.
- *
- * This version requires touching the cache lines of kmem_cache which
- * we avoid to do in the fast alloc free paths. There we obtain the offset
- * from the page struct.
- */
 static inline void *get_freepointer(struct kmem_cache *s, void *object)
 {
 	return *(void **)(object + s->offset);
@@ -1473,10 +1466,10 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 
 		/* Retrieve object from cpu_freelist */
 		object = c->freelist;
-		c->freelist = c->freelist[c->offset];
+		c->freelist = get_freepointer(s, c->freelist);
 
 		/* And put onto the regular freelist */
-		object[c->offset] = page->freelist;
+		set_freepointer(s, object, page->freelist);
 		page->freelist = object;
 		page->inuse--;
 	}
@@ -1635,7 +1628,7 @@ load_freelist:
 	if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
 		goto debug;
 
-	c->freelist = object[c->offset];
+	c->freelist = get_freepointer(s, object);
 	c->page->inuse = c->page->objects;
 	c->page->freelist = NULL;
 	c->node = page_to_nid(c->page);
@@ -1681,7 +1674,7 @@ debug:
 		goto another_slab;
 
 	c->page->inuse++;
-	c->page->freelist = object[c->offset];
+	c->page->freelist = get_freepointer(s, object);
 	c->node = -1;
 	goto unlock_out;
 }
@@ -1702,7 +1695,6 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	void **object;
 	struct kmem_cache_cpu *c;
 	unsigned long flags;
-	unsigned long objsize;
 
 	gfpflags &= gfp_allowed_mask;
@@ -1715,22 +1707,21 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	local_irq_save(flags);
 	c = __this_cpu_ptr(s->cpu_slab);
 	object = c->freelist;
-	objsize = c->objsize;
 	if (unlikely(!object || !node_match(c, node)))
 
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
 	else {
-		c->freelist = object[c->offset];
+		c->freelist = get_freepointer(s, object);
 		stat(c, ALLOC_FASTPATH);
 	}
 	local_irq_restore(flags);
 
 	if (unlikely(gfpflags & __GFP_ZERO) && object)
-		memset(object, 0, objsize);
+		memset(object, 0, s->objsize);
 
-	kmemcheck_slab_alloc(s, gfpflags, object, c->objsize);
-	kmemleak_alloc_recursive(object, objsize, 1, s->flags, gfpflags);
+	kmemcheck_slab_alloc(s, gfpflags, object, s->objsize);
+	kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, gfpflags);
 
 	return object;
 }
@@ -1785,7 +1776,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
  * handling required then we can return immediately.
  */
 static void __slab_free(struct kmem_cache *s, struct page *page,
-			void *x, unsigned long addr, unsigned int offset)
+			void *x, unsigned long addr)
 {
 	void *prior;
 	void **object = (void *)x;
@@ -1799,7 +1790,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		goto debug;
 
 checks_ok:
-	prior = object[offset] = page->freelist;
+	prior = page->freelist;
+	set_freepointer(s, object, prior);
 	page->freelist = object;
 	page->inuse--;
@@ -1864,16 +1856,16 @@ static __always_inline void slab_free(struct kmem_cache *s,
 	kmemleak_free_recursive(x, s->flags);
 	local_irq_save(flags);
 	c = __this_cpu_ptr(s->cpu_slab);
-	kmemcheck_slab_free(s, object, c->objsize);
-	debug_check_no_locks_freed(object, c->objsize);
+	kmemcheck_slab_free(s, object, s->objsize);
+	debug_check_no_locks_freed(object, s->objsize);
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
-		debug_check_no_obj_freed(object, c->objsize);
+		debug_check_no_obj_freed(object, s->objsize);
 	if (likely(page == c->page && c->node >= 0)) {
-		object[c->offset] = c->freelist;
+		set_freepointer(s, object, c->freelist);
 		c->freelist = object;
 		stat(c, FREE_FASTPATH);
 	} else
-		__slab_free(s, page, x, addr, c->offset);
+		__slab_free(s, page, x, addr);
 	local_irq_restore(flags);
 }
@@ -2060,19 +2052,6 @@ static unsigned long calculate_alignment(unsigned long flags,
 	return ALIGN(align, sizeof(void *));
 }
 
-static void init_kmem_cache_cpu(struct kmem_cache *s,
-			struct kmem_cache_cpu *c)
-{
-	c->page = NULL;
-	c->freelist = NULL;
-	c->node = 0;
-	c->offset = s->offset / sizeof(void *);
-	c->objsize = s->objsize;
-#ifdef CONFIG_SLUB_STATS
-	memset(c->stat, 0, NR_SLUB_STAT_ITEMS * sizeof(unsigned));
-#endif
-}
-
 static void
 init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 {
@@ -2090,8 +2069,6 @@ static DEFINE_PER_CPU(struct kmem_cache_cpu, kmalloc_percpu[SLUB_PAGE_SHIFT]);
 
 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
 {
-	int cpu;
-
 	if (s < kmalloc_caches + KMALLOC_CACHES && s >= kmalloc_caches)
 		/*
 		 * Boot time creation of the kmalloc array. Use static per cpu data
@@ -2104,8 +2081,6 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
 	if (!s->cpu_slab)
 		return 0;
 
-	for_each_possible_cpu(cpu)
-		init_kmem_cache_cpu(s, per_cpu_ptr(s->cpu_slab, cpu));
-
 	return 1;
 }
@@ -2391,6 +2366,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 
 	if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA))
 		return 1;
+
 	free_kmem_cache_nodes(s);
 error:
 	if (flags & SLAB_PANIC)
@@ -3247,22 +3223,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 
 	down_write(&slub_lock);
 	s = find_mergeable(size, align, flags, name, ctor);
 	if (s) {
-		int cpu;
-
 		s->refcount++;
 		/*
 		 * Adjust the object sizes so that we clear
 		 * the complete object on kzalloc.
 		 */
 		s->objsize = max(s->objsize, (int)size);
-
-		/*
-		 * And then we need to update the object size in the
-		 * per cpu structures
-		 */
-		for_each_online_cpu(cpu)
-			per_cpu_ptr(s->cpu_slab, cpu)->objsize = s->objsize;
-
 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
 		up_write(&slub_lock);
@@ -3316,14 +3282,6 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
 	unsigned long flags;
 
 	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		down_read(&slub_lock);
-		list_for_each_entry(s, &slab_caches, list)
-			init_kmem_cache_cpu(s, per_cpu_ptr(s->cpu_slab, cpu));
-		up_read(&slub_lock);
-		break;
-
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
 	case CPU_DEAD: