netfilter: {ip,ip6,arp}_tables: dont block bottom half more than necessary
We currently disable BH for the whole duration of get_counters(). On machines with a lot of cpus and large tables, this might be too long.

We can disable preemption during the whole function, and disable BH only while fetching counters for the current cpu.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
commit 24b36f0193
parent 7df0884ce1

3 changed files with 18 additions and 12 deletions
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -710,7 +710,7 @@ static void get_counters(const struct xt_table_info *t,
 	struct arpt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu;
+	unsigned int curcpu = get_cpu();
 
 	/* Instead of clearing (by a previous call to memset())
 	 * the counters and using adds, we set the counters
@@ -720,14 +720,16 @@ static void get_counters(const struct xt_table_info *t,
 	 * if new softirq were to run and call ipt_do_table
 	 */
 	local_bh_disable();
-	curcpu = smp_processor_id();
-
 	i = 0;
 	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
 		SET_COUNTER(counters[i], iter->counters.bcnt,
 			    iter->counters.pcnt);
 		++i;
 	}
+	local_bh_enable();
+	/* Processing counters from other cpus, we can let bottom half enabled,
+	 * (preemption is disabled)
+	 */
 
 	for_each_possible_cpu(cpu) {
 		if (cpu == curcpu)
@@ -741,7 +743,7 @@ static void get_counters(const struct xt_table_info *t,
 		}
 		xt_info_wrunlock(cpu);
 	}
-	local_bh_enable();
+	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -884,7 +884,7 @@ get_counters(const struct xt_table_info *t,
 	struct ipt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu;
+	unsigned int curcpu = get_cpu();
 
 	/* Instead of clearing (by a previous call to memset())
 	 * the counters and using adds, we set the counters
@@ -894,14 +894,16 @@ get_counters(const struct xt_table_info *t,
 	 * if new softirq were to run and call ipt_do_table
 	 */
 	local_bh_disable();
-	curcpu = smp_processor_id();
-
 	i = 0;
 	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
 		SET_COUNTER(counters[i], iter->counters.bcnt,
 			    iter->counters.pcnt);
 		++i;
 	}
+	local_bh_enable();
+	/* Processing counters from other cpus, we can let bottom half enabled,
+	 * (preemption is disabled)
+	 */
 
 	for_each_possible_cpu(cpu) {
 		if (cpu == curcpu)
@@ -915,7 +917,7 @@ get_counters(const struct xt_table_info *t,
 		}
 		xt_info_wrunlock(cpu);
 	}
-	local_bh_enable();
+	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -897,7 +897,7 @@ get_counters(const struct xt_table_info *t,
 	struct ip6t_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu;
+	unsigned int curcpu = get_cpu();
 
 	/* Instead of clearing (by a previous call to memset())
 	 * the counters and using adds, we set the counters
@@ -907,14 +907,16 @@ get_counters(const struct xt_table_info *t,
 	 * if new softirq were to run and call ipt_do_table
 	 */
 	local_bh_disable();
-	curcpu = smp_processor_id();
-
 	i = 0;
 	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
 		SET_COUNTER(counters[i], iter->counters.bcnt,
 			    iter->counters.pcnt);
 		++i;
 	}
+	local_bh_enable();
+	/* Processing counters from other cpus, we can let bottom half enabled,
+	 * (preemption is disabled)
+	 */
 
 	for_each_possible_cpu(cpu) {
 		if (cpu == curcpu)
@@ -928,7 +930,7 @@ get_counters(const struct xt_table_info *t,
 		}
 		xt_info_wrunlock(cpu);
 	}
-	local_bh_enable();
+	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
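All three files receive the identical treatment, so after the patch each get_counters() has the same shape. Below is a condensed sketch of the resulting function (ip_tables variant). The per-cpu accumulation loop sits in the unchanged context between the second and third hunks and does not appear above; it is filled in here from the surrounding kernel source of that era, so read it as a reconstruction rather than part of the diff. Comments were added to mark where each protection begins and ends.

static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ipt_entry *iter;
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu = get_cpu();	/* disables preemption: curcpu
						 * stays valid until put_cpu() */

	/* The local cpu's counters can be modified by ipt_do_table()
	 * running in softirq context, so bottom halves are blocked, but
	 * only around the local snapshot.
	 */
	local_bh_disable();
	i = 0;
	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
		SET_COUNTER(counters[i], iter->counters.bcnt,
			    iter->counters.pcnt);
		++i;
	}
	local_bh_enable();

	/* Other cpus' counters are serialized by xt_info_wrlock(), so
	 * bottom halves may stay enabled for the rest of the walk.
	 */
	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		xt_info_wrlock(cpu);
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			ADD_COUNTER(counters[i], iter->counters.bcnt,
				    iter->counters.pcnt);
			++i;
		}
		xt_info_wrunlock(cpu);
	}
	put_cpu();	/* re-enables preemption */
}

The net effect: on a machine with many cpus and a large table, softirqs are held off only for the single-cpu snapshot instead of the whole walk over every possible cpu, while the get_cpu()/put_cpu() pair still pins the task so that t->entries[curcpu] keeps referring to the correct per-cpu copy throughout.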