x86: reduce stack usage in init_intel_cacheinfo
Impact: reduce stack usage.

init_intel_cacheinfo() does not use the cpumask so define a subset of
struct _cpuid4_info (_cpuid4_info_regs) that can be used instead.

Signed-off-by: Mike Travis <travis@sgi.com>
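For context on the numbers involved: the cpumask dominates the structure, and before this patch a full struct _cpuid4_info lived on the stack of init_intel_cacheinfo(). A minimal user-space sketch of the saving, assuming NR_CPUS=4096 (a common distro setting, not stated in the patch) and using simplified stand-ins for the kernel's bitmap macros and leaf unions:

```c
#include <stdio.h>

/* Simplified stand-ins for the kernel's definitions; NR_CPUS=4096
 * is an assumption for illustration. */
#define NR_CPUS 4096
#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

/* Shape of the full struct: the shared_cpu_map dominates its size. */
struct cpuid4_info_like {
	unsigned int eax, ebx, ecx;	/* stand-ins for the leaf unions */
	unsigned long size;
	unsigned long can_disable;
	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};

/* Shape of the new _cpuid4_info_regs: registers only, no cpumask. */
struct cpuid4_info_regs_like {
	unsigned int eax, ebx, ecx;
	unsigned long size;
	unsigned long can_disable;
};

int main(void)
{
	/* With NR_CPUS=4096 the bitmap alone is 512 bytes on a 64-bit
	 * build, all of which previously sat on the caller's stack. */
	printf("with cpumask:   %zu bytes\n", sizeof(struct cpuid4_info_like));
	printf("registers only: %zu bytes\n", sizeof(struct cpuid4_info_regs_like));
	return 0;
}
```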
commit f9b90566cd
parent a1c33bbeb7

1 changed file with 44 additions and 19 deletions
arch/x86/kernel/cpu/intel_cacheinfo.c

@@ -132,7 +132,16 @@ struct _cpuid4_info {
 	union _cpuid4_leaf_ecx ecx;
 	unsigned long size;
 	unsigned long can_disable;
-	cpumask_t shared_cpu_map;	/* future?: only cpus/node is needed */
+	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
 };
 
+/* subset of above _cpuid4_info w/o shared_cpu_map */
+struct _cpuid4_info_regs {
+	union _cpuid4_leaf_eax eax;
+	union _cpuid4_leaf_ebx ebx;
+	union _cpuid4_leaf_ecx ecx;
+	unsigned long size;
+	unsigned long can_disable;
+};
+
 #ifdef CONFIG_PCI
@@ -263,7 +272,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 }
 
 static void __cpuinit
-amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
+amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
 {
 	if (index < 3)
 		return;
@@ -271,7 +280,8 @@ amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
 }
 
 static int
-__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+__cpuinit cpuid4_cache_lookup_regs(int index,
+				   struct _cpuid4_info_regs *this_leaf)
 {
 	union _cpuid4_leaf_eax 	eax;
 	union _cpuid4_leaf_ebx 	ebx;
@@ -299,6 +309,15 @@ __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
 	return 0;
 }
 
+static int
+__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+{
+	struct _cpuid4_info_regs *leaf_regs =
+		(struct _cpuid4_info_regs *)this_leaf;
+
+	return cpuid4_cache_lookup_regs(index, leaf_regs);
+}
+
 static int __cpuinit find_num_cache_leaves(void)
 {
 	unsigned int		eax, ebx, ecx, edx;
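A note on the wrapper added above: the cast from struct _cpuid4_info * to struct _cpuid4_info_regs * works only because _cpuid4_info_regs repeats the leading members of _cpuid4_info in the same order, making it a layout-compatible prefix of the full structure. A small stand-alone sketch of the idiom, with hypothetical regs/full types rather than the kernel's:

```c
#include <stdio.h>

/* Prefix struct: the members a writer actually fills in. */
struct regs {
	unsigned int eax, ebx, ecx;
};

/* Full struct: same leading members in the same order, plus extra
 * state. The patch keeps the two declarations in sync by hand. */
struct full {
	unsigned int eax, ebx, ecx;
	unsigned long extra[8];
};

/* Writer that only knows about the prefix. */
static void fill_regs(struct regs *r)
{
	r->eax = 1; r->ebx = 2; r->ecx = 3;
}

int main(void)
{
	struct full f = { 0 };

	/* View the full struct through its prefix, as the
	 * cpuid4_cache_lookup() wrapper does with its cast. */
	fill_regs((struct regs *)&f);
	printf("%u %u %u\n", f.eax, f.ebx, f.ecx);
	return 0;
}
```

The trade-off is that nothing enforces the match; the "subset of above" comment in the patch is the only guard against the declarations drifting apart.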
@@ -338,11 +357,10 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 		 * parameters cpuid leaf to find the cache details
 		 */
 		for (i = 0; i < num_cache_leaves; i++) {
-			struct _cpuid4_info this_leaf;
-
+			struct _cpuid4_info_regs this_leaf;
 			int retval;
 
-			retval = cpuid4_cache_lookup(i, &this_leaf);
+			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
 			if (retval >= 0) {
 				switch(this_leaf.eax.split.level) {
 				    case 1:
@@ -491,17 +509,20 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
 
 	if (num_threads_sharing == 1)
-		cpu_set(cpu, this_leaf->shared_cpu_map);
+		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
 	else {
 		index_msb = get_count_order(num_threads_sharing);
 
 		for_each_online_cpu(i) {
 			if (cpu_data(i).apicid >> index_msb ==
 			    c->apicid >> index_msb) {
-				cpu_set(i, this_leaf->shared_cpu_map);
+				cpumask_set_cpu(i,
+					to_cpumask(this_leaf->shared_cpu_map));
 				if (i != cpu && per_cpu(cpuid4_info, i))  {
-					sibling_leaf = CPUID4_INFO_IDX(i, index);
-					cpu_set(cpu, sibling_leaf->shared_cpu_map);
+					sibling_leaf =
+						CPUID4_INFO_IDX(i, index);
+					cpumask_set_cpu(cpu, to_cpumask(
+						sibling_leaf->shared_cpu_map));
 				}
 			}
 		}
@@ -513,9 +534,10 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
 	int sibling;
 
 	this_leaf = CPUID4_INFO_IDX(cpu, index);
-	for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
+	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
 		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
-		cpu_clear(cpu, sibling_leaf->shared_cpu_map);
+		cpumask_clear_cpu(cpu,
+				  to_cpumask(sibling_leaf->shared_cpu_map));
 	}
 }
 #else
@@ -620,8 +642,9 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
 	int n = 0;
 
 	if (len > 1) {
-		cpumask_t *mask = &this_leaf->shared_cpu_map;
+		const struct cpumask *mask;
 
+		mask = to_cpumask(this_leaf->shared_cpu_map);
 		n = type?
 			cpulist_scnprintf(buf, len-2, mask) :
 			cpumask_scnprintf(buf, len-2, mask);
@@ -684,7 +707,8 @@ static struct pci_dev *get_k8_northbridge(int node)
 
 static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
 {
-	int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+	const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
+	int node = cpu_to_node(cpumask_first(mask));
 	struct pci_dev *dev = NULL;
 	ssize_t ret = 0;
 	int i;
@@ -718,7 +742,8 @@ static ssize_t
 store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
 		    size_t count)
 {
-	int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+	const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
+	int node = cpu_to_node(cpumask_first(mask));
 	struct pci_dev *dev = NULL;
 	unsigned int ret, index, val;
 
@@ -863,7 +888,7 @@ err_out:
 	return -ENOMEM;
 }
 
-static cpumask_t cache_dev_map = CPU_MASK_NONE;
+static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
 
 /* Add/Remove cache interface for CPU device */
 static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
@@ -903,7 +928,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 		}
 		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
 	}
-	cpu_set(cpu, cache_dev_map);
+	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
 
 	kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
 	return 0;
@@ -916,9 +941,9 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
 
 	if (per_cpu(cpuid4_info, cpu) == NULL)
 		return;
-	if (!cpu_isset(cpu, cache_dev_map))
+	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
 		return;
-	cpu_clear(cpu, cache_dev_map);
+	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
 
 	for (i = 0; i < num_cache_leaves; i++)
 		kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
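The conversion hunks all follow one pattern: storage is declared as a bare bitmap with DECLARE_BITMAP(), which allocates exactly NR_CPUS bits, and every access goes through to_cpumask() to obtain the struct cpumask * that the cpumask_* helpers expect. A user-space sketch of that pattern, with simplified stand-ins for the kernel's cpumask type and helpers:

```c
#include <stdio.h>

/* Simplified stand-ins for the kernel's definitions. */
#define NR_CPUS 64
#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

/* Stand-in for struct cpumask: a wrapper around a bitmap. */
struct cpumask { unsigned long bits[BITS_TO_LONGS(NR_CPUS)]; };

/* Stand-in for the kernel's to_cpumask(): reinterpret a bare bitmap
 * as a struct cpumask so the typed helpers can operate on it. */
#define to_cpumask(bitmap) ((struct cpumask *)(bitmap))

static void cpumask_set_cpu(int cpu, struct cpumask *m)
{
	m->bits[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
}

static int cpumask_test_cpu(int cpu, const struct cpumask *m)
{
	return (m->bits[cpu / BITS_PER_LONG] >> (cpu % BITS_PER_LONG)) & 1;
}

/* Storage is a plain bitmap, as with cache_dev_map in the patch... */
static DECLARE_BITMAP(cache_dev_map, NR_CPUS);

int main(void)
{
	/* ...but every access goes through the typed cpumask view. */
	cpumask_set_cpu(3, to_cpumask(cache_dev_map));
	printf("cpu3 set: %d\n", cpumask_test_cpu(3, to_cpumask(cache_dev_map)));
	return 0;
}
```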