slub: fix a memory leak in get_partial_node()
In the case below:

  1. acquire slab for cpu partial list
  2. free object to it by remote cpu
  3. page->freelist = t

a memory leak occurs.

Change acquire_slab() not to zap the freelist when it works for the cpu
partial list. I think this is a sufficient solution for fixing the memory
leak.

Below is the output of 'slabinfo -r kmalloc-256' after running
'./perf stat -r 30 hackbench 50 process 4000 > /dev/null'.

***Vanilla***
Sizes (bytes)     Slabs              Debug                Memory
------------------------------------------------------------------------
Object : 256      Total  : 468       Sanity Checks : Off  Total: 3833856
SlabObj: 256      Full   : 111       Redzoning     : Off  Used : 2004992
SlabSiz: 8192     Partial: 302       Poisoning     : Off  Loss : 1828864
Loss   : 0        CpuSlab: 55        Tracking      : Off  Lalig: 0
Align  : 8        Objects: 32        Tracing       : Off  Lpadd: 0

***Patched***
Sizes (bytes)     Slabs              Debug                Memory
------------------------------------------------------------------------
Object : 256      Total  : 300       Sanity Checks : Off  Total: 2457600
SlabObj: 256      Full   : 204       Redzoning     : Off  Used : 2348800
SlabSiz: 8192     Partial: 33        Poisoning     : Off  Loss : 108800
Loss   : 0        CpuSlab: 63        Tracking      : Off  Lalig: 0
Align  : 8        Objects: 32        Tracing       : Off  Lpadd: 0

The Total and Loss numbers show the impact of this patch.

Cc: <stable@vger.kernel.org>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
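To make the scenario concrete, below is a minimal, self-contained sketch of
the leak (illustration only, not kernel code; toy_object, toy_page and the
main() driver are hypothetical stand-ins for the SLUB structures and call
sites, with locking and cmpxchg omitted): acquire_slab() used to zap
page->freelist after taking the snapshot t, a remote free could then put an
object on the now-empty freelist, and get_partial_node() overwrote it with
the stale t, leaking the remotely freed object.

/*
 * Illustration only: hypothetical simplification of the leak, not SLUB code.
 */
#include <stdio.h>

struct toy_object {
	struct toy_object *next;	/* free pointer stored in the object */
};

struct toy_page {
	struct toy_object *freelist;	/* free objects in this slab page */
};

int main(void)
{
	struct toy_object a = { .next = NULL }, b;
	struct toy_page page = { .freelist = &a };

	/* 1. acquire slab for cpu partial list: snapshot and zap freelist */
	struct toy_object *t = page.freelist;	/* t points at a */
	page.freelist = NULL;			/* old acquire_slab() zapped it */

	/* 2. a remote cpu frees object b to the page in the meantime */
	b.next = page.freelist;
	page.freelist = &b;

	/* 3. get_partial_node() restores the stale snapshot; b is dropped */
	page.freelist = t;

	/* only a is reachable from the freelist now: b has leaked */
	for (struct toy_object *o = page.freelist; o; o = o->next)
		printf("reachable free object: %p\n", (void *)o);
	printf("object b (%p) is not on the freelist\n", (void *)&b);
	return 0;
}

With the patch, acquire_slab() zaps the freelist only when the slab becomes
the cpu slab (mode != 0); for the cpu partial case it keeps the freelist in
page->freelist, so the racy page->freelist = t store in get_partial_node()
can simply be removed.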
commit 02d7633fa5
parent 4053497d6a

1 changed file with 6 additions and 3 deletions
mm/slub.c:

@@ -1514,15 +1514,19 @@ static inline void *acquire_slab(struct kmem_cache *s,
 		freelist = page->freelist;
 		counters = page->counters;
 		new.counters = counters;
-		if (mode)
+		if (mode) {
 			new.inuse = page->objects;
+			new.freelist = NULL;
+		} else {
+			new.freelist = freelist;
+		}
 
 		VM_BUG_ON(new.frozen);
 		new.frozen = 1;
 
 	} while (!__cmpxchg_double_slab(s, page,
 			freelist, counters,
-			NULL, new.counters,
+			new.freelist, new.counters,
 			"lock and freeze"));
 
 	remove_partial(n, page);
@@ -1564,7 +1568,6 @@ static void *get_partial_node(struct kmem_cache *s,
 			object = t;
 			available =  page->objects - page->inuse;
 		} else {
-			page->freelist = t;
 			available = put_cpu_partial(s, page, 0);
 			stat(s, CPU_PARTIAL_NODE);
 		}