Commit 8b98c169 authored by Christoph Hellwig, committed by Linus Torvalds

[PATCH] leak tracking for kmalloc_node

We have variants of kmalloc and kmem_cache_alloc that leave leak tracking to
the caller.  This is used for subsystem-specific allocators like skb_alloc.

To make skb_alloc node-aware we need similar routines for the node-aware slab
allocator, which this patch adds.

Note that the code is rather ugly, but it mirrors the non-node-aware code 1:1.

[akpm@osdl.org: add module export]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 881e4aab
...@@ -236,7 +236,25 @@ found: ...@@ -236,7 +236,25 @@ found:
} }
return __kmalloc_node(size, flags, node); return __kmalloc_node(size, flags, node);
} }
/*
* kmalloc_node_track_caller is a special version of kmalloc_node that
* records the calling function of the routine calling it for slab leak
* tracking instead of just the calling function (confusing, eh?).
* It's useful when the call to kmalloc_node comes from a widely-used
* standard allocator where we care about the real place the memory
* allocation request comes from.
*/
#ifndef CONFIG_DEBUG_SLAB
#define kmalloc_node_track_caller(size, flags, node) \
__kmalloc_node(size, flags, node)
#else #else
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
#define kmalloc_node_track_caller(size, flags, node) \
__kmalloc_node_track_caller(size, flags, node, \
__builtin_return_address(0))
#endif
#else /* CONFIG_NUMA */
static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int node) static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int node)
{ {
return kmem_cache_alloc(cachep, flags); return kmem_cache_alloc(cachep, flags);
...@@ -245,6 +263,9 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node) ...@@ -245,6 +263,9 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{ {
return kmalloc(size, flags); return kmalloc(size, flags);
} }
#define kmalloc_node_track_caller(size, flags, node) \
kmalloc_track_caller(size, flags)
#endif #endif
extern int FASTCALL(kmem_cache_reap(int)); extern int FASTCALL(kmem_cache_reap(int));
...@@ -283,6 +304,8 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags) ...@@ -283,6 +304,8 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
#define kzalloc(s, f) __kzalloc(s, f) #define kzalloc(s, f) __kzalloc(s, f)
#define kmalloc_track_caller kmalloc #define kmalloc_track_caller kmalloc
#define kmalloc_node_track_caller kmalloc_node
#endif /* CONFIG_SLOB */ #endif /* CONFIG_SLOB */
/* System wide caches */ /* System wide caches */
......
...@@ -1015,7 +1015,7 @@ static inline void *alternate_node_alloc(struct kmem_cache *cachep, ...@@ -1015,7 +1015,7 @@ static inline void *alternate_node_alloc(struct kmem_cache *cachep,
return NULL; return NULL;
} }
static inline void *__cache_alloc_node(struct kmem_cache *cachep, static inline void *____cache_alloc_node(struct kmem_cache *cachep,
gfp_t flags, int nodeid) gfp_t flags, int nodeid)
{ {
return NULL; return NULL;
...@@ -1023,7 +1023,7 @@ static inline void *__cache_alloc_node(struct kmem_cache *cachep, ...@@ -1023,7 +1023,7 @@ static inline void *__cache_alloc_node(struct kmem_cache *cachep,
#else /* CONFIG_NUMA */ #else /* CONFIG_NUMA */
static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int); static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t); static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
static struct array_cache **alloc_alien_cache(int node, int limit) static struct array_cache **alloc_alien_cache(int node, int limit)
...@@ -3130,10 +3130,10 @@ static __always_inline void *__cache_alloc(struct kmem_cache *cachep, ...@@ -3130,10 +3130,10 @@ static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
objp = ____cache_alloc(cachep, flags); objp = ____cache_alloc(cachep, flags);
/* /*
* We may just have run out of memory on the local node. * We may just have run out of memory on the local node.
* __cache_alloc_node() knows how to locate memory on other nodes * ____cache_alloc_node() knows how to locate memory on other nodes
*/ */
if (NUMA_BUILD && !objp) if (NUMA_BUILD && !objp)
objp = __cache_alloc_node(cachep, flags, numa_node_id()); objp = ____cache_alloc_node(cachep, flags, numa_node_id());
local_irq_restore(save_flags); local_irq_restore(save_flags);
objp = cache_alloc_debugcheck_after(cachep, flags, objp, objp = cache_alloc_debugcheck_after(cachep, flags, objp,
caller); caller);
...@@ -3160,7 +3160,7 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags) ...@@ -3160,7 +3160,7 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
else if (current->mempolicy) else if (current->mempolicy)
nid_alloc = slab_node(current->mempolicy); nid_alloc = slab_node(current->mempolicy);
if (nid_alloc != nid_here) if (nid_alloc != nid_here)
return __cache_alloc_node(cachep, flags, nid_alloc); return ____cache_alloc_node(cachep, flags, nid_alloc);
return NULL; return NULL;
} }
...@@ -3183,7 +3183,7 @@ void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) ...@@ -3183,7 +3183,7 @@ void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
if (zone_idx(*z) <= ZONE_NORMAL && if (zone_idx(*z) <= ZONE_NORMAL &&
cpuset_zone_allowed(*z, flags) && cpuset_zone_allowed(*z, flags) &&
cache->nodelists[nid]) cache->nodelists[nid])
obj = __cache_alloc_node(cache, obj = ____cache_alloc_node(cache,
flags | __GFP_THISNODE, nid); flags | __GFP_THISNODE, nid);
} }
return obj; return obj;
...@@ -3192,7 +3192,7 @@ void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) ...@@ -3192,7 +3192,7 @@ void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
/* /*
* A interface to enable slab creation on nodeid * A interface to enable slab creation on nodeid
*/ */
static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
int nodeid) int nodeid)
{ {
struct list_head *entry; struct list_head *entry;
...@@ -3465,7 +3465,9 @@ out: ...@@ -3465,7 +3465,9 @@ out:
* New and improved: it will now make sure that the object gets * New and improved: it will now make sure that the object gets
* put on the correct node list so that there is no false sharing. * put on the correct node list so that there is no false sharing.
*/ */
void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) static __always_inline void *
__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
int nodeid, void *caller)
{ {
unsigned long save_flags; unsigned long save_flags;
void *ptr; void *ptr;
...@@ -3477,17 +3479,23 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) ...@@ -3477,17 +3479,23 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
!cachep->nodelists[nodeid]) !cachep->nodelists[nodeid])
ptr = ____cache_alloc(cachep, flags); ptr = ____cache_alloc(cachep, flags);
else else
ptr = __cache_alloc_node(cachep, flags, nodeid); ptr = ____cache_alloc_node(cachep, flags, nodeid);
local_irq_restore(save_flags); local_irq_restore(save_flags);
ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
__builtin_return_address(0));
return ptr; return ptr;
} }
/*
 * Public node-aware allocation entry point.  Delegates to the common
 * __cache_alloc_node() helper, passing __builtin_return_address(0) so
 * that (under slab debugging) the allocation is attributed to the
 * function that called kmem_cache_alloc_node() itself.
 *
 * NOTE(review): __builtin_return_address(0) must be evaluated in this
 * frame to record the direct caller — do not restructure this wrapper.
 */
void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
return __cache_alloc_node(cachep, flags, nodeid,
__builtin_return_address(0));
}
EXPORT_SYMBOL(kmem_cache_alloc_node); EXPORT_SYMBOL(kmem_cache_alloc_node);
void *__kmalloc_node(size_t size, gfp_t flags, int node) static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
{ {
struct kmem_cache *cachep; struct kmem_cache *cachep;
...@@ -3496,8 +3504,29 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node) ...@@ -3496,8 +3504,29 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
return NULL; return NULL;
return kmem_cache_alloc_node(cachep, flags, node); return kmem_cache_alloc_node(cachep, flags, node);
} }
#ifdef CONFIG_DEBUG_SLAB
/*
 * CONFIG_DEBUG_SLAB variant of __kmalloc_node: forwards to
 * __do_kmalloc_node() with __builtin_return_address(0) as the caller,
 * so slab leak tracking records the function that invoked
 * __kmalloc_node() directly.
 */
void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
return __do_kmalloc_node(size, flags, node,
__builtin_return_address(0));
}
EXPORT_SYMBOL(__kmalloc_node); EXPORT_SYMBOL(__kmalloc_node);
#endif
/*
 * Backend for kmalloc_node_track_caller(): the @caller return address
 * is supplied by the invoking macro (the widely-used wrapper's own
 * caller), and is passed through unchanged to __do_kmalloc_node() so
 * leak tracking points at the real origin of the request.
 */
void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
int node, void *caller)
{
return __do_kmalloc_node(size, flags, node, caller);
}
EXPORT_SYMBOL(__kmalloc_node_track_caller);
#else
/*
 * Non-debug variant of __kmalloc_node: no leak tracking is performed,
 * so NULL is passed as the caller cookie to __do_kmalloc_node().
 */
void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
return __do_kmalloc_node(size, flags, node, NULL);
}
EXPORT_SYMBOL(__kmalloc_node);
#endif /* CONFIG_DEBUG_SLAB */
#endif /* CONFIG_NUMA */
/** /**
* __do_kmalloc - allocate memory * __do_kmalloc - allocate memory
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment