Commit 36555751 authored by Eduard - Gabriel Munteanu, committed by Pekka Enberg

kmemtrace: SLAB hooks.

This adds hooks for the SLAB allocator to allow tracing with kmemtrace.
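The hooks come in pairs: each traced allocator gains a _notrace variant that performs the allocation without emitting an event, and the event-marking helpers compile down to empty inlines when CONFIG_KMEMTRACE is off, so the untraced fast path costs nothing. A rough userspace sketch of that on/off pattern (made-up names, not code from this patch):

#include <stdio.h>
#include <stdlib.h>

#define __always_inline inline __attribute__((always_inline))

/* The untraced allocation path; the traced wrapper calls this so the
 * event fires exactly once per allocation. */
static void *my_alloc_notrace(size_t size)
{
	return malloc(size);
}

#ifdef MYTRACE
static void mark_alloc(void *ret, size_t size)
{
	fprintf(stderr, "alloc: ptr=%p size=%zu\n", ret, size);
}
#else
/* Tracing compiled out: an empty inline, so the caller pays nothing. */
static __always_inline void mark_alloc(void *ret, size_t size)
{
	(void)ret;
	(void)size;
}
#endif

static __always_inline void *my_alloc(size_t size)
{
	void *ret = my_alloc_notrace(size);

	mark_alloc(ret, size);
	return ret;
}

int main(void)
{
	free(my_alloc(32));
	return 0;
}

Building with gcc -DMYTRACE prints an event per allocation; without the define, the hook vanishes at compile time.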

We also convert some inline functions to __always_inline to make sure
_RET_IP_, which expands to __builtin_return_address(0), always works
as expected.
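A small userspace sketch (made-up names, not code from this patch) shows why forced inlining matters here: if the wrapper were emitted out of line, __builtin_return_address(0) in the underlying allocator would record an address inside the wrapper rather than the real call site.

#include <stdio.h>
#include <stdlib.h>

#define __always_inline inline __attribute__((always_inline))

/* Kept out of line, like __kmalloc(): it reports who called it. */
static __attribute__((noinline)) void *do_alloc(size_t size)
{
	/* With the wrapper inlined into its caller, this is the address
	 * just past the user's call, i.e. the true call site. */
	printf("call site: %p\n", __builtin_return_address(0));
	return malloc(size);
}

/* Were this plain "static inline", the compiler could emit an
 * out-of-line copy, and do_alloc() would then attribute every
 * allocation to that copy instead of to the real caller. */
static __always_inline void *alloc_wrapper(size_t size)
{
	return do_alloc(size);
}

int main(void)
{
	free(alloc_wrapper(16));
	return 0;
}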
Signed-off-by: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
parent aa46a7e0
include/linux/slab_def.h

@@ -14,6 +14,7 @@
 #include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
 #include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
 #include <linux/compiler.h>
+#include <linux/kmemtrace.h>
 
 /* Size description struct for general caches. */
 struct cache_sizes {
@@ -28,8 +29,26 @@ extern struct cache_sizes malloc_sizes[];
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
-static inline void *kmalloc(size_t size, gfp_t flags)
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
+extern size_t slab_buffer_size(struct kmem_cache *cachep);
+#else
+static __always_inline void *
+kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+{
+	return kmem_cache_alloc(cachep, flags);
+}
+static inline size_t slab_buffer_size(struct kmem_cache *cachep)
+{
+	return 0;
+}
+#endif
+
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
+	struct kmem_cache *cachep;
+	void *ret;
+
 	if (__builtin_constant_p(size)) {
 		int i = 0;
@@ -50,10 +69,17 @@ static inline void *kmalloc(size_t size, gfp_t flags)
 found:
 #ifdef CONFIG_ZONE_DMA
 		if (flags & GFP_DMA)
-			return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
-						flags);
+			cachep = malloc_sizes[i].cs_dmacachep;
+		else
 #endif
-		return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags);
+			cachep = malloc_sizes[i].cs_cachep;
+
+		ret = kmem_cache_alloc_notrace(cachep, flags);
+
+		kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
+				     size, slab_buffer_size(cachep), flags);
+
+		return ret;
 	}
 	return __kmalloc(size, flags);
 }
@@ -62,8 +88,25 @@ found:
 extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+					   gfp_t flags,
+					   int nodeid);
+#else
+static __always_inline void *
+kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+			      gfp_t flags,
+			      int nodeid)
+{
+	return kmem_cache_alloc_node(cachep, flags, nodeid);
+}
+#endif
+
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
+	struct kmem_cache *cachep;
+	void *ret;
+
 	if (__builtin_constant_p(size)) {
 		int i = 0;
@@ -84,11 +127,18 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 found:
 #ifdef CONFIG_ZONE_DMA
 		if (flags & GFP_DMA)
-			return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep,
-						     flags, node);
+			cachep = malloc_sizes[i].cs_dmacachep;
+		else
 #endif
-		return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep,
-					     flags, node);
+			cachep = malloc_sizes[i].cs_cachep;
+
+		ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_,
+					  ret, size, slab_buffer_size(cachep),
+					  flags, node);
+
+		return ret;
 	}
 	return __kmalloc_node(size, flags, node);
 }
mm/slab.c

@@ -112,6 +112,7 @@
 #include <linux/rtmutex.h>
 #include <linux/reciprocal_div.h>
 #include <linux/debugobjects.h>
+#include <linux/kmemtrace.h>
 
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
@@ -568,6 +569,14 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+size_t slab_buffer_size(struct kmem_cache *cachep)
+{
+	return cachep->buffer_size;
+}
+EXPORT_SYMBOL(slab_buffer_size);
+#endif
+
 /*
  * Do not go above this order unless 0 objects fit into the slab.
  */
@@ -3613,10 +3622,23 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
  */
 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
-	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+	void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+			     obj_size(cachep), cachep->buffer_size, flags);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+{
+	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 /**
  * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
  * @cachep: the cache we're checking against
@@ -3661,23 +3683,47 @@ out:
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
-	return __cache_alloc_node(cachep, flags, nodeid,
-					__builtin_return_address(0));
+	void *ret = __cache_alloc_node(cachep, flags, nodeid,
+				       __builtin_return_address(0));
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+				  obj_size(cachep), cachep->buffer_size,
+				  flags, nodeid);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+				    gfp_t flags,
+				    int nodeid)
+{
+	return __cache_alloc_node(cachep, flags, nodeid,
+				  __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 {
 	struct kmem_cache *cachep;
+	void *ret;
 
 	cachep = kmem_find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	return kmem_cache_alloc_node(cachep, flags, node);
+	ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+				  (unsigned long) caller, ret,
+				  size, cachep->buffer_size, flags, node);
+
+	return ret;
 }
 
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	return __do_kmalloc_node(size, flags, node,
@@ -3710,6 +3756,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 					  void *caller)
 {
 	struct kmem_cache *cachep;
+	void *ret;
 
 	/* If you want to save a few bytes .text space: replace
 	 * __ with kmem_.
@@ -3719,11 +3766,17 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	cachep = __find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	return __cache_alloc(cachep, flags, caller);
+	ret = __cache_alloc(cachep, flags, caller);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
+			     (unsigned long) caller, ret,
+			     size, cachep->buffer_size, flags);
+
+	return ret;
 }
 
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	return __do_kmalloc(size, flags, __builtin_return_address(0));
@@ -3762,6 +3815,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	debug_check_no_obj_freed(objp, obj_size(cachep));
 	__cache_free(cachep, objp);
 	local_irq_restore(flags);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kmem_cache_free);
@@ -3788,6 +3843,8 @@ void kfree(const void *objp)
 	debug_check_no_obj_freed(objp, obj_size(c));
 	__cache_free(c, (void *)objp);
 	local_irq_restore(flags);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kfree);