Let's use the generic slab_start/next/stop iterators for showing memcg
caches' info. In contrast to the current implementation, this will work
even if all the memcg caches' info doesn't fit into a seq buffer (a
page), plus it simply looks neater.

Actually, the main reason I do this isn't mere cleanup. I'm going to
zap the memcg_slab_caches list, because I find it useless provided we
have the slab_caches list, and this patch is a step in this direction.

It should be noted that before this patch an attempt to read
memory.kmem.slabinfo of a cgroup that doesn't have a kmem limit set
resulted in -EIO, while after this patch it will silently show nothing
except the header, but I don't think this will frustrate anyone.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
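
For reference, the wiring this enables on the memcg side looks roughly
like the following (a sketch only; the actual seq_operations structure
and its registration live in mm/memcontrol.c, and the name below is
hypothetical). Only the .show callback differs from /proc/slabinfo:

	static const struct seq_operations memcg_slab_ops = {
		.start	= slab_start,		/* takes slab_mutex */
		.next	= slab_next,		/* walks the global slab_caches list */
		.stop	= slab_stop,		/* drops slab_mutex */
		.show	= memcg_slab_show,	/* prints only this memcg's caches */
	};
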
/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB)

#define SLAB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
		SLAB_CACHE_DMA | SLAB_NOTRACK)

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 * (Could be removed. This was introduced to pacify the merge skeptics.)
 */
static int slab_nomerge;

static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = 1;
	return 1;
}

#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
#endif

__setup("slab_nomerge", setup_slab_nomerge);

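/*
 * Example (illustrative): booting with "slab_nomerge" (or "slub_nomerge"
 * when SLUB is the allocator) keeps every cache separate, which makes
 * /proc/slabinfo attribution and slab-corruption debugging easier at
 * the cost of a little memory.
 */
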
/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size)
{
	struct kmem_cache *s = NULL;

	if (!name || in_interrupt() || size < sizeof(void *) ||
		size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	list_for_each_entry(s, &slab_caches, list) {
		char tmp;
		int res;

		/*
		 * This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no-one else reuses the vmalloc
		 * area of the module.  Print a warning.
		 */
		res = probe_kernel_address(s->name, tmp);
		if (res) {
			pr_err("Slab cache with size %d has lost its name\n",
			       s->object_size);
			continue;
		}
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, size_t size)
{
	return 0;
}
#endif

#ifdef CONFIG_MEMCG_KMEM
static int memcg_alloc_cache_params(struct mem_cgroup *memcg,
		struct kmem_cache *s, struct kmem_cache *root_cache)
{
	size_t size;

	if (!memcg_kmem_enabled())
		return 0;

	if (!memcg) {
		size = offsetof(struct memcg_cache_params, memcg_caches);
		size += memcg_limited_groups_array_size * sizeof(void *);
	} else
		size = sizeof(struct memcg_cache_params);

	s->memcg_params = kzalloc(size, GFP_KERNEL);
	if (!s->memcg_params)
		return -ENOMEM;

	if (memcg) {
		s->memcg_params->memcg = memcg;
		s->memcg_params->root_cache = root_cache;
	} else
		s->memcg_params->is_root_cache = true;

	return 0;
}

static void memcg_free_cache_params(struct kmem_cache *s)
{
	kfree(s->memcg_params);
}

static int memcg_update_cache_params(struct kmem_cache *s, int num_memcgs)
{
	int size;
	struct memcg_cache_params *new_params, *cur_params;

	BUG_ON(!is_root_cache(s));

	size = offsetof(struct memcg_cache_params, memcg_caches);
	size += num_memcgs * sizeof(void *);

	new_params = kzalloc(size, GFP_KERNEL);
	if (!new_params)
		return -ENOMEM;

	cur_params = s->memcg_params;
	memcpy(new_params->memcg_caches, cur_params->memcg_caches,
	       memcg_limited_groups_array_size * sizeof(void *));

	new_params->is_root_cache = true;

	rcu_assign_pointer(s->memcg_params, new_params);
	if (cur_params)
		kfree_rcu(cur_params, rcu_head);

	return 0;
}

int memcg_update_all_caches(int num_memcgs)
{
	struct kmem_cache *s;
	int ret = 0;
	mutex_lock(&slab_mutex);

	list_for_each_entry(s, &slab_caches, list) {
		if (!is_root_cache(s))
			continue;

		ret = memcg_update_cache_params(s, num_memcgs);
		/*
		 * Instead of freeing the memory, we'll just leave the caches
		 * up to this point in an updated state.
		 */
		if (ret)
			goto out;
	}

	memcg_update_array_size(num_memcgs);
out:
	mutex_unlock(&slab_mutex);
	return ret;
}
#else
static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg,
		struct kmem_cache *s, struct kmem_cache *root_cache)
{
	return 0;
}

static inline void memcg_free_cache_params(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG_KMEM */

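/*
 * Sketch of the lookup these params enable (illustrative; the helpers
 * are declared elsewhere, e.g. in mm/slab.h): a root cache's
 * memcg_caches array is indexed by memcg_cache_id(), so a charged
 * allocation finds its per-memcg clone roughly as
 *
 *	struct kmem_cache *c = cache_from_memcg_idx(s, memcg_cache_id(memcg));
 *
 * which is the same indexing memcg_accumulate_slabinfo() uses below.
 */
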
/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s)
{
	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
		return 1;

	if (!is_root_cache(s))
		return 1;

	if (s->ctor)
		return 1;

	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
	if (s->refcount < 0)
		return 1;

	return 0;
}

struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *))
{
	struct kmem_cache *s;

	if (slab_nomerge || (flags & SLAB_NEVER_MERGE))
		return NULL;

	if (ctor)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);
	flags = kmem_cache_flags(size, flags, name, NULL);

	list_for_each_entry_reverse(s, &slab_caches, list) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;
		/*
		 * Check if alignment is compatible.
		 * Courtesy of Adrian Drzewiecki
		 */
		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		if (IS_ENABLED(CONFIG_SLAB) && align &&
			(align > s->align || s->align % align))
			continue;

		return s;
	}
	return NULL;
}

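/*
 * Illustrative merge (hypothetical cache name): a cache created as
 *
 *	kmem_cache_create("foo", 192, 0, 0, NULL);
 *
 * has no ctor and none of the SLAB_NEVER_MERGE flags, so
 * find_mergeable() may hand back the existing kmalloc-192 cache and
 * "foo" becomes an alias of it (see __kmem_cache_alias) rather than a
 * brand new cache.
 */
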
/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned long ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	return ALIGN(align, sizeof(void *));
}

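/*
 * Worked example (assuming a 64-byte cache line): for a 20-byte object
 * with SLAB_HWCACHE_ALIGN, ralign starts at 64 and is halved while the
 * object still fits in half of it: 20 <= 32 gives ralign = 32, then
 * 20 <= 16 fails, so objects end up 32-byte aligned instead of wasting
 * a whole cache line each.
 */
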
static struct kmem_cache *
do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
		     unsigned long flags, void (*ctor)(void *),
		     struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	struct kmem_cache *s;
	int err;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	s->name = name;
	s->object_size = object_size;
	s->size = size;
	s->align = align;
	s->ctor = ctor;

	err = memcg_alloc_cache_params(memcg, s, root_cache);
	if (err)
		goto out_free_cache;

	err = __kmem_cache_create(s, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
out:
	if (err)
		return ERR_PTR(err);
	return s;

out_free_cache:
	memcg_free_cache_params(s);
	kfree(s);
	goto out;
}

/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 */
struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
		  unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s;
	char *cache_name;
	int err;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err) {
		s = NULL;	/* suppress uninit var warning */
		goto out_unlock;
	}

	/*
	 * Some allocators will constrain the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */
	flags &= CACHE_CREATE_MASK;

	s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	s = do_kmem_cache_create(cache_name, size, size,
				 calculate_alignment(flags, align, size),
				 flags, ctor, NULL, NULL);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();

	if (err) {
		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d\n",
				name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}
EXPORT_SYMBOL(kmem_cache_create);

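/*
 * Usage sketch (illustrative only, hypothetical names; not part of
 * slab_common.c): a typical module-owned cache for fixed-size objects.
 */
struct foo {
	int a;
	struct list_head list;
};

static struct kmem_cache *foo_cachep;

static int __init foo_init(void)
{
	foo_cachep = kmem_cache_create("foo", sizeof(struct foo),
				       0, SLAB_HWCACHE_ALIGN, NULL);
	return foo_cachep ? 0 : -ENOMEM;
}
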
#ifdef CONFIG_MEMCG_KMEM
/*
 * memcg_create_kmem_cache - Create a cache for a memory cgroup.
 * @memcg: The memory cgroup the new cache is for.
 * @root_cache: The parent of the new cache.
 * @memcg_name: The name of the memory cgroup (used for naming the new cache).
 *
 * This function attempts to create a kmem cache that will serve allocation
 * requests going from @memcg to @root_cache. The new cache inherits properties
 * from its parent.
 */
struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
					   struct kmem_cache *root_cache,
					   const char *memcg_name)
{
	struct kmem_cache *s = NULL;
	char *cache_name;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	cache_name = kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
			       memcg_cache_id(memcg), memcg_name);
	if (!cache_name)
		goto out_unlock;

	s = do_kmem_cache_create(cache_name, root_cache->object_size,
				 root_cache->size, root_cache->align,
				 root_cache->flags, root_cache->ctor,
				 memcg, root_cache);
	if (IS_ERR(s)) {
		kfree(cache_name);
		s = NULL;
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();

	return s;
}

static int memcg_cleanup_cache_params(struct kmem_cache *s)
{
	int rc;

	if (!s->memcg_params ||
	    !s->memcg_params->is_root_cache)
		return 0;

	mutex_unlock(&slab_mutex);
	rc = __memcg_cleanup_cache_params(s);
	mutex_lock(&slab_mutex);

	return rc;
}
#else
static int memcg_cleanup_cache_params(struct kmem_cache *s)
{
	return 0;
}
#endif /* CONFIG_MEMCG_KMEM */

void slab_kmem_cache_release(struct kmem_cache *s)
{
	kfree(s->name);
	kmem_cache_free(kmem_cache, s);
}

void kmem_cache_destroy(struct kmem_cache *s)
{
	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	s->refcount--;
	if (s->refcount)
		goto out_unlock;

	if (memcg_cleanup_cache_params(s) != 0)
		goto out_unlock;

	if (__kmem_cache_shutdown(s) != 0) {
		printk(KERN_ERR "kmem_cache_destroy %s: "
		       "Slab cache still has objects\n", s->name);
		dump_stack();
		goto out_unlock;
	}

	list_del(&s->list);

	mutex_unlock(&slab_mutex);
	if (s->flags & SLAB_DESTROY_BY_RCU)
		rcu_barrier();

	memcg_free_cache_params(s);
#ifdef SLAB_SUPPORTS_SYSFS
	sysfs_slab_remove(s);
#else
	slab_kmem_cache_release(s);
#endif
	goto out;

out_unlock:
	mutex_unlock(&slab_mutex);
out:
	put_online_mems();
	put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);

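/*
 * Matching teardown for the creation sketch above (hypothetical names):
 * modules must destroy their caches on unload, or the DEBUG_VM sanity
 * check earlier in this file will later find a cache whose name string
 * has gone away.
 */
static void __exit foo_exit(void)
{
	kmem_cache_destroy(foo_cachep);
}
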
/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	int ret;

	get_online_cpus();
	get_online_mems();
	ret = __kmem_cache_shrink(cachep);
	put_online_mems();
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL(kmem_cache_shrink);

int slab_is_available(void)
{
	return slab_state >= UP;
}

#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
		unsigned long flags)
{
	int err;

	s->name = name;
	s->size = s->object_size = size;
	s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
	err = __kmem_cache_create(s, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
					name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}

struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
				unsigned long flags)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags);
	list_add(&s->list, &slab_caches);
	s->refcount = 1;
	return s;
}

struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_caches);

#ifdef CONFIG_ZONE_DMA
struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_dma_caches);
#endif

/*
 * Conversion table for small slab sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have
 * non-power-of-two cache sizes there. The size of larger slabs can be
 * determined using fls.
 */
static s8 size_index[24] = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
	6,	/* 56 */
	6,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	7,	/* 104 */
	7,	/* 112 */
	7,	/* 120 */
	7,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static inline int size_index_elem(size_t bytes)
{
	return (bytes - 1) / 8;
}

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	int index;

	if (unlikely(size > KMALLOC_MAX_SIZE)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	if (size <= 192) {
		if (!size)
			return ZERO_SIZE_PTR;

		index = size_index[size_index_elem(size)];
	} else
		index = fls(size - 1);

#ifdef CONFIG_ZONE_DMA
	if (unlikely((flags & GFP_DMA)))
		return kmalloc_dma_caches[index];

#endif
	return kmalloc_caches[index];
}

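/*
 * Worked example: kmalloc(100, GFP_KERNEL) takes the table path,
 * size_index_elem(100) == (100 - 1) / 8 == 12 and size_index[12] == 7,
 * so it is served from kmalloc_caches[7], the 128-byte cache. A
 * 300-byte request takes the fls() path: fls(299) == 9, selecting the
 * 512-byte cache.
 */
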
/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(unsigned long flags)
{
	int i;

	/*
	 * Patch up the size_index table if we have strange large alignment
	 * requirements for the kmalloc array. This is only the case for
	 * MIPS it seems. The standard arches will not generate any code here.
	 *
	 * Largest permitted alignment is 256 bytes due to the way we
	 * handle the index determination for the smaller caches.
	 *
	 * Make sure that nothing crazy happens if someone starts tinkering
	 * around with ARCH_KMALLOC_MINALIGN
	 */
	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(size_index))
			break;
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte size cache is not used if the alignment
		 * is 64 bytes.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			size_index[size_index_elem(i)] = 7;

	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[size_index_elem(i)] = 8;
	}
	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
		if (!kmalloc_caches[i]) {
			kmalloc_caches[i] = create_kmalloc_cache(NULL,
							1 << i, flags);
		}

		/*
		 * Caches that are not of a power-of-two size.
		 * These have to be created immediately after the
		 * earlier power of two caches
		 */
		if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
			kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);

		if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
			kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
	}

	/* Kmalloc array is now usable */
	slab_state = UP;

	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[i];
		char *n;

		if (s) {
			n = kasprintf(GFP_NOWAIT, "kmalloc-%d", kmalloc_size(i));

			BUG_ON(!n);
			s->name = n;
		}
	}

#ifdef CONFIG_ZONE_DMA
	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[i];

		if (s) {
			int size = kmalloc_size(i);
			char *n = kasprintf(GFP_NOWAIT,
				 "dma-kmalloc-%d", size);

			BUG_ON(!n);
			kmalloc_dma_caches[i] = create_kmalloc_cache(n,
				size, SLAB_CACHE_DMA | flags);
		}
	}
#endif
}
#endif /* !CONFIG_SLOB */

/*
 * To avoid unnecessary overhead, we pass through large allocation requests
 * directly to the page allocator. We use __GFP_COMP, because we will need to
 * know the allocation order to free the pages properly in kfree.
 */
void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret;
	struct page *page;

	flags |= __GFP_COMP;
	page = alloc_kmem_pages(flags, order);
	ret = page ? page_address(page) : NULL;
	kmemleak_alloc(ret, size, 1, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order);

#ifdef CONFIG_TRACING
void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = kmalloc_order(size, flags, order);
	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);
#endif

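/*
 * Worked example: with 4 KiB pages, a 64 KiB allocation arrives here
 * with order = 4 (16 pages). Because of __GFP_COMP the 16 pages form a
 * compound page, so kfree() can later recover the order from the head
 * page and free the whole block.
 */
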
#ifdef CONFIG_SLABINFO

#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
#else
#define SLABINFO_RIGHTS S_IRUSR
#endif

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
		 "<objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}

void *slab_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_caches, *pos);
}

void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

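/*
 * Call-order sketch: for each read() on the seq file, seq_file invokes
 * start() (taking slab_mutex), then show()/next() until the output
 * buffer fills, then stop() (dropping the mutex). If the cache list
 * doesn't fit in one buffer, the next read() resumes from *pos, which
 * is what lets memory.kmem.slabinfo grow past a single page, unlike
 * the fixed snapshot it replaces.
 */
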
static void
memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
{
	struct kmem_cache *c;
	struct slabinfo sinfo;
	int i;

	if (!is_root_cache(s))
		return;

	for_each_memcg_cache_index(i) {
		c = cache_from_memcg_idx(s, i);
		if (!c)
			continue;

		memset(&sinfo, 0, sizeof(sinfo));
		get_slabinfo(c, &sinfo);

		info->active_slabs += sinfo.active_slabs;
		info->num_slabs += sinfo.num_slabs;
		info->shared_avail += sinfo.shared_avail;
		info->active_objs += sinfo.active_objs;
		info->num_objs += sinfo.num_objs;
	}
}

static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	memcg_accumulate_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
}

static int slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	if (is_root_cache(s))
		cache_show(s, m);
	return 0;
}

#ifdef CONFIG_MEMCG_KMEM
int memcg_slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	if (!is_root_cache(s) && s->memcg_params->memcg == memcg)
		cache_show(s, m);
	return 0;
}
#endif

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = slab_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open		= slabinfo_open,
	.read		= seq_read,
	.write          = slabinfo_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
						&proc_slabinfo_operations);
	return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */

static __always_inline void *__do_krealloc(const void *p, size_t new_size,
					   gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	if (p)
		ks = ksize(p);

	if (ks >= new_size)
		return (void *)p;

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}

/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want to free the buffer immediately
 * like, for example, with RCU.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	return __do_krealloc(p, new_size, flags);

}
EXPORT_SYMBOL(__krealloc);

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.  If @p is %NULL, krealloc()
 * behaves exactly like kmalloc().  If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && p != ret)
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);

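/*
 * Usage sketch (illustrative, hypothetical helper; not part of
 * slab_common.c): growing a buffer with krealloc(). On failure the
 * original allocation is untouched, so keep the old pointer until the
 * call succeeds.
 */
static int example_grow(int **bufp, size_t new_n)
{
	int *tmp = krealloc(*bufp, new_n * sizeof(**bufp), GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;		/* *bufp is still valid here */
	*bufp = tmp;			/* old contents preserved */
	return 0;
}
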
/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);
	memset(mem, 0, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kzfree);

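/*
 * Typical kzfree() use (illustrative, hypothetical helper): scrub key
 * material before returning it to the allocator. Note the whole
 * ksize() region is zeroed, not just the length originally requested.
 */
static void example_destroy_key(u8 *key)
{
	kzfree(key);		/* zeroes then frees; NULL is a no-op */
}
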
/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);