x86, hotplug: Serialize CPU hotplug to avoid bringup concurrency issues
When testing cpu hotplug code on 32-bit we kept hitting the "CPU%d: Stuck ??" message due to multiple cores concurrently accessing the cpu_callin_mask, among others. These codepaths are not protected from concurrent access because there is no sane reason to make already complex code unnecessarily more complex — we hit the issue only when insanely switching cores off- and online — so serialize hotplugging cores at the sysfs level and be done with it. [ v2.1: fix !HOTPLUG_CPU build ] Cc: <stable@kernel.org> Signed-off-by: Borislav Petkov <borislav.petkov@amd.com> LKML-Reference: <20100819181029.GC17171@aftab> Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
This commit is contained in:
		
					parent
					
						
							
								8848a91068
							
						
					
				
			
			
				commit
				
					
						d7c53c9e82
					
				
			
		
					 2 changed files with 24 additions and 0 deletions
				
			
		| 
						 | 
					@ -245,6 +245,11 @@ config ARCH_HWEIGHT_CFLAGS
 | 
				
			||||||
 | 
					
 | 
				
			||||||
config KTIME_SCALAR
 | 
					config KTIME_SCALAR
 | 
				
			||||||
	def_bool X86_32
 | 
						def_bool X86_32
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					config ARCH_CPU_PROBE_RELEASE
 | 
				
			||||||
 | 
						def_bool y
 | 
				
			||||||
 | 
						depends on HOTPLUG_CPU
 | 
				
			||||||
 | 
					
 | 
				
			||||||
source "init/Kconfig"
 | 
					source "init/Kconfig"
 | 
				
			||||||
source "kernel/Kconfig.freezer"
 | 
					source "kernel/Kconfig.freezer"
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -90,6 +90,25 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 };
 | 
				
			||||||
static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
 | 
					static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
 | 
				
			||||||
#define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
 | 
					#define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
 | 
				
			||||||
#define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
 | 
					#define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					/*
 | 
				
			||||||
 | 
					 * We need this for trampoline_base protection from concurrent accesses when
 | 
				
			||||||
 | 
					 * off- and onlining cores wildly.
 | 
				
			||||||
 | 
					 */
 | 
				
			||||||
 | 
					static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					void cpu_hotplug_driver_lock()
 | 
				
			||||||
 | 
					{
 | 
				
			||||||
 | 
					        mutex_lock(&x86_cpu_hotplug_driver_mutex);
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					void cpu_hotplug_driver_unlock()
 | 
				
			||||||
 | 
					{
 | 
				
			||||||
 | 
					        mutex_unlock(&x86_cpu_hotplug_driver_mutex);
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
 | 
				
			||||||
 | 
					ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
 | 
				
			||||||
#else
 | 
					#else
 | 
				
			||||||
static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
 | 
					static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
 | 
				
			||||||
#define get_idle_for_cpu(x)      (idle_thread_array[(x)])
 | 
					#define get_idle_for_cpu(x)      (idle_thread_array[(x)])
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
		Loading…
	
	Add table
		Add a link
		
	
		Reference in a new issue