Commit 77c5e2d0 authored by Christoph Lameter, committed by Linus Torvalds

slub: fix object tracking

Object tracking did not record the right caller for several call chains. Fix this
up by adding a new parameter to slab_alloc and slab_free that passes the caller
address down explicitly.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b49af68f
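
The core of the fix, illustrated: previously a set_tracking() macro expanded
__builtin_return_address(0) at its use site inside the allocator internals, so
for chains such as __kmalloc() -> kmem_cache_alloc() -> slab_alloc() the
recorded address pointed into __kmalloc() rather than at the original kmalloc()
user. After this change every exported entry point captures its own return
address and hands it down as an explicit parameter (and the __always_inline
hint on slab_alloc, needed by the old scheme, is dropped). Below is a minimal
user-space sketch of the same pattern; the names record_track, core_alloc,
api_alloc and api_zalloc are hypothetical stand-ins, not kernel code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for set_track(): just reports the caller address. */
static void record_track(const char *op, void *addr)
{
	printf("%s from %p\n", op, addr);
}

/* Core path, like the reworked slab_alloc(): the caller address
 * arrives as an explicit parameter instead of being sampled here. */
static void *core_alloc(size_t size, void *addr)
{
	record_track("alloc", addr);
	return malloc(size);
}

/* Exported entry point, like kmem_cache_alloc(): captures its own
 * return address, i.e. the address of the real API user. */
void *api_alloc(size_t size)
{
	return core_alloc(size, __builtin_return_address(0));
}

/* Convenience wrapper, like kmem_cache_zalloc(): by calling the core
 * directly with its own return address it stays accurate instead of
 * recording a frame inside api_alloc(). */
void *api_zalloc(size_t size)
{
	void *p = core_alloc(size, __builtin_return_address(0));

	if (p)
		memset(p, 0, size);
	return p;
}

int main(void)
{
	free(api_alloc(32));	/* prints main()'s call site */
	free(api_zalloc(32));	/* likewise, not a frame inside the API */
	return 0;
}
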
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -297,9 +297,6 @@ static void set_track(struct kmem_cache *s, void *object,
 		memset(p, 0, sizeof(struct track));
 }
 
-#define set_tracking(__s, __o, __a) set_track(__s, __o, __a, \
-			__builtin_return_address(0))
-
 static void init_tracking(struct kmem_cache *s, void *object)
 {
 	if (s->flags & SLAB_STORE_USER) {
@@ -1163,8 +1160,8 @@ static void flush_all(struct kmem_cache *s)
  * Fastpath is not possible if we need to get a new slab or have
  * debugging enabled (which means all slabs are marked with PageError)
  */
-static __always_inline void *slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, int node)
+static void *slab_alloc(struct kmem_cache *s,
+		gfp_t gfpflags, int node, void *addr)
 {
 	struct page *page;
 	void **object;
@@ -1238,20 +1235,20 @@ debug:
 	if (!alloc_object_checks(s, page, object))
 		goto another_slab;
 	if (s->flags & SLAB_STORE_USER)
-		set_tracking(s, object, TRACK_ALLOC);
+		set_track(s, object, TRACK_ALLOC, addr);
 	goto have_object;
 }
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1);
+	return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return slab_alloc(s, gfpflags, node);
+	return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
@@ -1262,7 +1259,8 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
  *
  * No special cachelines need to be read
  */
-static void slab_free(struct kmem_cache *s, struct page *page, void *x)
+static void slab_free(struct kmem_cache *s, struct page *page,
+					void *x, void *addr)
 {
 	void *prior;
 	void **object = (void *)x;
@@ -1314,20 +1312,20 @@ slab_empty:
 	return;
 
 debug:
-	if (free_object_checks(s, page, x))
-		goto checks_ok;
+	if (!free_object_checks(s, page, x))
 		goto out_unlock;
+	if (s->flags & SLAB_STORE_USER)
+		set_track(s, x, TRACK_FREE, addr);
+	goto checks_ok;
 }
 
 void kmem_cache_free(struct kmem_cache *s, void *x)
 {
-	struct page * page;
+	struct page *page;
 
 	page = virt_to_head_page(x);
 
-	if (unlikely(PageError(page) && (s->flags & SLAB_STORE_USER)))
-		set_tracking(s, x, TRACK_FREE);
-	slab_free(s, page, x);
+	slab_free(s, page, x, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kmem_cache_free);
@@ -2018,7 +2016,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	struct kmem_cache *s = get_slab(size, flags);
 
 	if (s)
-		return kmem_cache_alloc(s, flags);
+		return slab_alloc(s, flags, -1, __builtin_return_address(0));
 	return NULL;
 }
 EXPORT_SYMBOL(__kmalloc);
@@ -2029,7 +2027,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	struct kmem_cache *s = get_slab(size, flags);
 
 	if (s)
-		return kmem_cache_alloc_node(s, flags, node);
+		return slab_alloc(s, flags, node, __builtin_return_address(0));
 	return NULL;
 }
 EXPORT_SYMBOL(__kmalloc_node);
@@ -2075,12 +2073,9 @@ void kfree(const void *x)
 		return;
 
 	page = virt_to_head_page(x);
 	s = page->slab;
 
-	if (unlikely(PageError(page) && (s->flags & SLAB_STORE_USER)))
-		set_tracking(s, (void *)x, TRACK_FREE);
-	slab_free(s, page, (void *)x);
+	slab_free(s, page, (void *)x, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kfree);
@@ -2289,7 +2284,7 @@ void *kmem_cache_zalloc(struct kmem_cache *s, gfp_t flags)
 {
 	void *x;
 
-	x = kmem_cache_alloc(s, flags);
+	x = slab_alloc(s, flags, -1, __builtin_return_address(0));
 	if (x)
 		memset(x, 0, s->objsize);
 	return x;
@@ -2497,34 +2492,22 @@ static void resiliency_test(void) {};
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 {
 	struct kmem_cache *s = get_slab(size, gfpflags);
-	void *object;
 
 	if (!s)
 		return NULL;
 
-	object = kmem_cache_alloc(s, gfpflags);
-
-	if (object && (s->flags & SLAB_STORE_USER))
-		set_track(s, object, TRACK_ALLOC, caller);
-
-	return object;
+	return slab_alloc(s, gfpflags, -1, caller);
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
						int node, void *caller)
 {
 	struct kmem_cache *s = get_slab(size, gfpflags);
-	void *object;
 
 	if (!s)
 		return NULL;
 
-	object = kmem_cache_alloc_node(s, gfpflags, node);
-
-	if (object && (s->flags & SLAB_STORE_USER))
-		set_track(s, object, TRACK_ALLOC, caller);
-
-	return object;
+	return slab_alloc(s, gfpflags, node, caller);
 }
 
 #ifdef CONFIG_SYSFS
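
For context (not part of this diff): the _track_caller entry points are
normally reached through a wrapper macro in include/linux/slab.h, so the
address forwarded as caller is that of the macro's user. A hedged sketch of
that macro's shape, rather than a verbatim quote of the header of this era:

#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, __builtin_return_address(0))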