 41a212859a
			
		
	
	
	41a212859a
	
	
	
		
			
A debugobjects warning was observed during netfilter exit:
    ------------[ cut here ]------------
    WARNING: CPU: 6 PID: 4178 at lib/debugobjects.c:260 debug_print_object+0x8d/0xb0()
    ODEBUG: free active (active state 0) object type: timer_list hint: delayed_work_timer_fn+0x0/0x20
    Modules linked in:
    CPU: 6 PID: 4178 Comm: kworker/u16:2 Tainted: G        W 3.11.0-next-20130906-sasha #3984
    Workqueue: netns cleanup_net
    Call Trace:
      dump_stack+0x52/0x87
      warn_slowpath_common+0x8c/0xc0
      warn_slowpath_fmt+0x46/0x50
      debug_print_object+0x8d/0xb0
      __debug_check_no_obj_freed+0xa5/0x220
      debug_check_no_obj_freed+0x15/0x20
      kmem_cache_free+0x197/0x340
      kmem_cache_destroy+0x86/0xe0
      nf_conntrack_cleanup_net_list+0x131/0x170
      nf_conntrack_pernet_exit+0x5d/0x70
      ops_exit_list+0x5e/0x70
      cleanup_net+0xfb/0x1c0
      process_one_work+0x338/0x550
      worker_thread+0x215/0x350
      kthread+0xe7/0xf0
      ret_from_fork+0x7c/0xb0
A similar warning was later observed during dcookie cleanup:
    WARNING: CPU: 12 PID: 9725 at lib/debugobjects.c:260 debug_print_object+0x8c/0xb0()
    ODEBUG: free active (active state 0) object type: timer_list hint: delayed_work_timer_fn+0x0/0x20
    Modules linked in:
    CPU: 12 PID: 9725 Comm: trinity-c141 Not tainted 3.15.0-rc2-next-20140423-sasha-00018-gc4ff6c4 #408
    Call Trace:
      dump_stack (lib/dump_stack.c:52)
      warn_slowpath_common (kernel/panic.c:430)
      warn_slowpath_fmt (kernel/panic.c:445)
      debug_print_object (lib/debugobjects.c:262)
      __debug_check_no_obj_freed (lib/debugobjects.c:697)
      debug_check_no_obj_freed (lib/debugobjects.c:726)
      kmem_cache_free (mm/slub.c:2689 mm/slub.c:2717)
      kmem_cache_destroy (mm/slab_common.c:363)
      dcookie_unregister (fs/dcookies.c:302 fs/dcookies.c:343)
      event_buffer_release (arch/x86/oprofile/../../../drivers/oprofile/event_buffer.c:153)
      __fput (fs/file_table.c:217)
      ____fput (fs/file_table.c:253)
      task_work_run (kernel/task_work.c:125 (discriminator 1))
      do_notify_resume (include/linux/tracehook.h:196 arch/x86/kernel/signal.c:751)
      int_signal (arch/x86/kernel/entry_64.S:807)
Sysfs has a release mechanism.  Use that to release the kmem_cache
structure if CONFIG_SYSFS is enabled.
Only slub is changed - slab currently only supports /proc/slabinfo and
not /sys/kernel/slab/*.  We talked about adding that and someone was
working on it.
[akpm@linux-foundation.org: fix CONFIG_SYSFS=n build]
[akpm@linux-foundation.org: fix CONFIG_SYSFS=n build even more]
Signed-off-by: Christoph Lameter <cl@linux.com>
Reported-by: Sasha Levin <sasha.levin@oracle.com>
Tested-by: Sasha Levin <sasha.levin@oracle.com>
Acked-by: Greg KH <greg@kroah.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Bart Van Assche <bvanassche@acm.org>
Cc: Al Viro <viro@ZenIV.linux.org.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
		
	
			
		
			
				
	
	
		
			113 lines
		
	
	
	
		
			3.8 KiB
			
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			113 lines
		
	
	
	
		
			3.8 KiB
			
		
	
	
	
		
			C
		
	
	
	
	
	
| #ifndef _LINUX_SLUB_DEF_H
 | |
| #define _LINUX_SLUB_DEF_H
 | |
| 
 | |
| /*
 | |
|  * SLUB : A Slab allocator without object queues.
 | |
|  *
 | |
|  * (C) 2007 SGI, Christoph Lameter
 | |
|  */
 | |
| #include <linux/kobject.h>
 | |
| 
 | |
/*
 * Per-cpu event counters for the SLUB allocator.  Each enumerator names
 * one fast-path/slow-path event; the values index the stat[] array in
 * struct kmem_cache_cpu (only compiled in under CONFIG_SLUB_STATS).
 * Do not reorder: the positions are exported via /sys/kernel/slab.
 */
enum stat_item {
	ALLOC_FASTPATH,		/* Object allocated straight from the cpu slab */
	ALLOC_SLOWPATH,		/* Allocation required taking a new cpu slab */
	FREE_FASTPATH,		/* Object freed back to the cpu slab */
	FREE_SLOWPATH,		/* Free went somewhere other than the cpu slab */
	FREE_FROZEN,		/* Free hit a frozen slab */
	FREE_ADD_PARTIAL,	/* Free moved the slab onto the partial list */
	FREE_REMOVE_PARTIAL,	/* Free removed the slab's last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab taken from the node partial list */
	ALLOC_SLAB,		/* Cpu slab obtained from the page allocator */
	ALLOC_REFILL,		/* Cpu slab refilled from the slab freelist */
	ALLOC_NODE_MISMATCH,	/* Cpu slab switched due to node mismatch */
	FREE_SLAB,		/* Slab returned to the page allocator */
	CPUSLAB_FLUSH,		/* Cpu slab abandoned */
	DEACTIVATE_FULL,	/* Cpu slab deactivated while full */
	DEACTIVATE_EMPTY,	/* Cpu slab deactivated while empty */
	DEACTIVATE_TO_HEAD,	/* Deactivated slab placed at head of partials */
	DEACTIVATE_TO_TAIL,	/* Deactivated slab placed at tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Deactivated slab held remotely freed objects */
	DEACTIVATE_BYPASS,	/* Deactivation happened implicitly */
	ORDER_FALLBACK,		/* Had to fall back to a smaller slab order */
	CMPXCHG_DOUBLE_CPU_FAIL,/* this_cpu_cmpxchg_double failed */
	CMPXCHG_DOUBLE_FAIL,	/* cmpxchg double comparison failed */
	CPU_PARTIAL_ALLOC,	/* Allocation served from a cpu partial slab */
	CPU_PARTIAL_FREE,	/* Free refilled the cpu partial list */
	CPU_PARTIAL_NODE,	/* Cpu partial list refilled from node partials */
	CPU_PARTIAL_DRAIN,	/* Cpu partial list drained to node partials */
	NR_SLUB_STAT_ITEMS	/* Count of counters above; must stay last */
};
 | |
| 
 | |
/*
 * Per-cpu allocator state.  One instance exists per cpu per cache
 * (referenced through kmem_cache.cpu_slab).  Layout is performance- and
 * ABI-sensitive; do not reorder members.
 */
struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct page *page;	/* The slab from which we are allocating */
	struct page *partial;	/* Partially allocated frozen slabs */
#ifdef CONFIG_SLUB_STATS
	/* Event counters, indexed by enum stat_item */
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
 | |
| 
 | |
/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned long x;	/* packed (order, object count) pair */
};
 | |
| 
 | |
/*
 * Slab cache management.
 *
 * One instance describes one slab cache.  Member order is ABI- and
 * cacheline-sensitive; do not reorder.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;	/* per-cpu fast-path state */
	/* Used for retriving partial slabs etc */
	unsigned long flags;
	unsigned long min_partial;	/* floor of slabs kept on node partial lists */
	int size;		/* The size of an object including meta data */
	int object_size;	/* The size of an object without meta data */
	int offset;		/* Free pointer offset. */
	int cpu_partial;	/* Number of per cpu partial objects to keep around */
	struct kmem_cache_order_objects oo;	/* preferred order/object count */

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;	/* highest order/objects to try */
	struct kmem_cache_order_objects min;	/* smallest usable order/objects */
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);	/* optional per-object constructor */
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	int reserved;		/* Reserved bytes at the end of slabs */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
#endif
#ifdef CONFIG_MEMCG_KMEM
	struct memcg_cache_params *memcg_params;	/* memcg ownership info */
	int max_attr_size; /* for propagation, maximum size of a stored attr */
#ifdef CONFIG_SYSFS
	struct kset *memcg_kset;	/* sysfs kset for per-memcg child caches */
#endif
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	int remote_node_defrag_ratio;
#endif
	/* per-node partial-list state; entries beyond nr_node_ids unused */
	struct kmem_cache_node *node[MAX_NUMNODES];
};
 | |
| 
 | |
#ifdef CONFIG_SYSFS
/* Caches are exposed under /sys/kernel/slab; removal goes through sysfs. */
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_remove(struct kmem_cache *);
#else
/* No sysfs: removal is a no-op stub so callers need no #ifdefs. */
static inline void sysfs_slab_remove(struct kmem_cache *s)
{
}
#endif
 | |
| 
 | |
| #endif /* _LINUX_SLUB_DEF_H */
 |