kmemtrace: SLUB hooks.
This adds hooks for the SLUB allocator, to allow tracing with kmemtrace.

Signed-off-by: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
parent 3eae2cb24a
commit 5b882be4e0

2 changed files with 109 additions and 9 deletions
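For reference, the entry points invoked by these hooks come from <linux/kmemtrace.h>, which the first hunk below includes. A sketch of the prototypes as inferred from the call sites in this diff (parameter names here are illustrative, not copied from the header; consult <linux/kmemtrace.h> for the authoritative declarations):

	/* Hedged sketch: reconstructed from the calls in the diff below. */
	void kmemtrace_mark_alloc(enum kmemtrace_type_id type_id,
				  unsigned long call_site,	/* _THIS_IP_ of the caller */
				  const void *ptr,		/* object that was returned */
				  size_t bytes_req,		/* size the caller asked for */
				  size_t bytes_alloc,		/* size actually handed out */
				  gfp_t gfp_flags);

	void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
				       unsigned long call_site,
				       const void *ptr,
				       size_t bytes_req,
				       size_t bytes_alloc,
				       gfp_t gfp_flags,
				       int node);		/* NUMA node requested */

Note the bytes_req/bytes_alloc split: kmalloc_large() reports the caller's size against PAGE_SIZE << order, and the slab paths report it against s->size, so the trace can expose per-allocation internal fragmentation.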
include/linux/slub_def.h:

@@ -10,6 +10,7 @@
 #include <linux/gfp.h>
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
+#include <linux/kmemtrace.h>
 
 enum stat_item {
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
@@ -204,13 +205,31 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
+#else
+static __always_inline void *
+kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+	return kmem_cache_alloc(s, gfpflags);
+}
+#endif
+
 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 {
-	return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size));
+	unsigned int order = get_order(size);
+	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
+			     size, PAGE_SIZE << order, flags);
+
+	return ret;
 }
 
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
+	void *ret;
+
 	if (__builtin_constant_p(size)) {
 		if (size > PAGE_SIZE)
 			return kmalloc_large(size, flags);
@@ -221,7 +240,13 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 		if (!s)
 			return ZERO_SIZE_PTR;
 
-		return kmem_cache_alloc(s, flags);
+		ret = kmem_cache_alloc_notrace(s, flags);
+
+		kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
+				     _THIS_IP_, ret,
+				     size, s->size, flags);
+
+		return ret;
 	}
 	return __kmalloc(size, flags);
 }
@@ -231,8 +256,24 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+					   gfp_t gfpflags,
+					   int node);
+#else
+static __always_inline void *
+kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+			      gfp_t gfpflags,
+			      int node)
+{
+	return kmem_cache_alloc_node(s, gfpflags, node);
+}
+#endif
+
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
+	void *ret;
+
 	if (__builtin_constant_p(size) &&
 		size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);
@@ -240,7 +281,13 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 		if (!s)
 			return ZERO_SIZE_PTR;
 
-		return kmem_cache_alloc_node(s, flags, node);
+		ret = kmem_cache_alloc_node_notrace(s, flags, node);
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _THIS_IP_, ret,
+					  size, s->size, flags, node);
+
+		return ret;
 	}
 	return __kmalloc_node(size, flags, node);
 }
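The _notrace variants are the heart of the patch: kmalloc() and kmalloc_node() are inlined into their callers, so they can record the true call site and requested size themselves, and they must therefore bypass the event that the mm/slub.c half of this commit emits inside kmem_cache_alloc()/kmem_cache_alloc_node(), or every constant-size allocation would be logged twice. When CONFIG_KMEMTRACE is off, the wrappers collapse back to the plain allocators. A minimal standalone sketch of that pattern (all names here, my_cache_alloc, my_malloc, trace_alloc, CONFIG_TRACING, are hypothetical stand-ins, not kernel API):

	/* Standalone sketch of the "_notrace fallback" pattern above.
	 * Hypothetical names throughout; compile with or without
	 * -DCONFIG_TRACING to see both configurations.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	#ifdef CONFIG_TRACING
	static void trace_alloc(const char *site, void *ptr, size_t size)
	{
		fprintf(stderr, "alloc @%s: ptr=%p size=%zu\n", site, ptr, size);
	}
	#else
	static void trace_alloc(const char *site, void *ptr, size_t size)
	{
		/* Tracing disabled: the hook is a no-op, like the stub
		 * kmemtrace_mark_alloc() when !CONFIG_KMEMTRACE. */
		(void)site; (void)ptr; (void)size;
	}
	#endif

	/* Out-of-line allocator: emits its own event (when enabled),
	 * playing the role of kmem_cache_alloc(). */
	static void *my_cache_alloc(size_t size)
	{
		void *ret = malloc(size);

		trace_alloc("my_cache_alloc", ret, size);
		return ret;
	}

	#ifdef CONFIG_TRACING
	/* Tracing on: variant that skips the inner event so an inlined
	 * caller can report the allocation once, with its own context. */
	static void *my_cache_alloc_notrace(size_t size)
	{
		return malloc(size);
	}
	#else
	/* Tracing off: collapse to the plain allocator, exactly like
	 * kmem_cache_alloc_notrace() falling back to kmem_cache_alloc(). */
	static inline void *my_cache_alloc_notrace(size_t size)
	{
		return my_cache_alloc(size);
	}
	#endif

	/* Inlined fast path, mirroring kmalloc(): it knows the requested
	 * size at the call site, so it emits the event itself. */
	static inline void *my_malloc(size_t size)
	{
		void *ret = my_cache_alloc_notrace(size);

		trace_alloc("my_malloc", ret, size);
		return ret;
	}

	int main(void)
	{
		void *p = my_malloc(64);		/* fast path: one event, from the caller */
		void *q = my_cache_alloc(32);		/* slow path: one event, from inside */

		free(q);
		free(p);
		return 0;
	}

Built with -DCONFIG_TRACING, each allocation is logged exactly once; built without it, trace_alloc() compiles to nothing and both paths behave identically, which is the same property the #ifdef CONFIG_KMEMTRACE blocks in the diff preserve.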