ipv4: Kill ip_rt_frag_needed().
There is zero point to this function.

Its only real substance is to perform an extremely outdated BSD4.2 ICMP check, which we can safely remove. If you really have an MTU-limited link being routed by a BSD4.2 derived system, here's a nickel, go buy yourself a real router.

The other actions of ip_rt_frag_needed(), checking and conditionally updating the peer, are done by the per-protocol handlers of the ICMP event. TCP, UDP, et al. have a handler which will receive this event and transmit it back into the associated route via dst_ops->update_pmtu().

This simplification is important, because it eliminates the one place where we do not have a proper route context in which to make an inetpeer lookup.

Signed-off-by: David S. Miller <davem@davemloft.net>
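As a rough illustration of the flow the message describes, here is a minimal user-space sketch of the dispatch the change relies on: a per-protocol ICMP handler, which already holds the route for the affected connection, pushes the reported MTU into that route through an update_pmtu callback. The toy_* structures and functions are hypothetical stand-ins, not the kernel's real dst_ops/dst_entry API.

/*
 * Schematic model of the path described in the commit message: a protocol's
 * ICMP error handler receives the "fragmentation needed" event and feeds the
 * reported MTU into its route via an update_pmtu callback. Names and layouts
 * here are invented for the sketch; only the shape of the dispatch matches.
 */
#include <stdio.h>

struct toy_dst_entry;

struct toy_dst_ops {
	/* stand-in for the kernel's dst_ops->update_pmtu() hook */
	void (*update_pmtu)(struct toy_dst_entry *dst, unsigned int mtu);
};

struct toy_dst_entry {
	const struct toy_dst_ops *ops;
	unsigned int pmtu;		/* cached path MTU for this route */
};

static void toy_update_pmtu(struct toy_dst_entry *dst, unsigned int mtu)
{
	if (mtu < dst->pmtu) {		/* only ever shrink the path MTU */
		dst->pmtu = mtu;
		printf("route PMTU lowered to %u\n", mtu);
	}
}

static const struct toy_dst_ops toy_ops = { .update_pmtu = toy_update_pmtu };

/*
 * Stand-in for a protocol error handler (e.g. TCP's ICMP handler): it already
 * has the route for the affected socket in hand, so no blind peer lookup of
 * the kind ip_rt_frag_needed() performed is needed.
 */
static void toy_proto_icmp_frag_needed(struct toy_dst_entry *route_dst,
				       unsigned int reported_mtu)
{
	route_dst->ops->update_pmtu(route_dst, reported_mtu);
}

int main(void)
{
	struct toy_dst_entry dst = { .ops = &toy_ops, .pmtu = 1500 };

	toy_proto_icmp_frag_needed(&dst, 1400);	/* ICMP reported next-hop MTU 1400 */
	return 0;
}

The real kernel callback also deals with expiry and locking; the sketch only shows that the handler already has route context, which is what lets the blind inetpeer lookup in ip_rt_frag_needed() go away.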
commit 46517008e1
parent 97bab73f98

4 changed files with 1 addition and 70 deletions
@@ -215,8 +215,6 @@ static inline int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 s
 	return ip_route_input_common(skb, dst, src, tos, devin, true);
 }
 
-extern unsigned short	ip_rt_frag_needed(struct net *net, const struct iphdr *iph,
-					  unsigned short new_mtu, struct net_device *dev);
 extern void		ip_rt_send_redirect(struct sk_buff *skb);
 
 extern unsigned int		inet_addr_type(struct net *net, __be32 addr);
@@ -673,9 +673,7 @@ static void icmp_unreach(struct sk_buff *skb)
 				LIMIT_NETDEBUG(KERN_INFO pr_fmt("%pI4: fragmentation needed and DF set\n"),
 					       &iph->daddr);
 			} else {
-				info = ip_rt_frag_needed(net, iph,
-							 ntohs(icmph->un.frag.mtu),
-							 skb->dev);
+				info = ntohs(icmph->un.frag.mtu);
 				if (!info)
 					goto out;
 			}
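For reference, a self-contained sketch of what the simplified branch above now does: read the Next-Hop MTU field of an ICMP "fragmentation needed" message (RFC 1191) and convert it to host byte order, mirroring the info = ntohs(icmph->un.frag.mtu) assignment in the hunk. The frag_needed_hdr struct is a cut-down stand-in for the kernel's struct icmphdr, defined here only so the example compiles on its own.

/*
 * Minimal user-space sketch: extract the next-hop MTU from an ICMP
 * Destination Unreachable / Fragmentation Needed header. The struct below
 * is a simplified stand-in, not the kernel's struct icmphdr.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>		/* htons(), ntohs() */

struct frag_needed_hdr {
	uint8_t  type;		/* 3: destination unreachable */
	uint8_t  code;		/* 4: fragmentation needed and DF set */
	uint16_t checksum;
	uint16_t unused;
	uint16_t mtu;		/* next-hop MTU, network byte order */
};

int main(void)
{
	/* Pretend this arrived off the wire reporting a next-hop MTU of 1400. */
	struct frag_needed_hdr icmph = {
		.type = 3, .code = 4, .mtu = htons(1400),
	};
	uint16_t info = ntohs(icmph.mtu);

	if (!info)
		printf("router reported no MTU; the caller must estimate one\n");
	else
		printf("next-hop MTU: %u\n", info);
	return 0;
}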
@@ -1664,67 +1664,6 @@ out:	kfree_skb(skb);
 	return 0;
 }
 
-/*
- *	The last two values are not from the RFC but
- *	are needed for AMPRnet AX.25 paths.
- */
-
-static const unsigned short mtu_plateau[] =
-{32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };
-
-static inline unsigned short guess_mtu(unsigned short old_mtu)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
-		if (old_mtu > mtu_plateau[i])
-			return mtu_plateau[i];
-	return 68;
-}
-
-unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph,
-				 unsigned short new_mtu,
-				 struct net_device *dev)
-{
-	unsigned short old_mtu = ntohs(iph->tot_len);
-	unsigned short est_mtu = 0;
-	struct inet_peer *peer;
-
-	peer = inet_getpeer_v4(net->ipv4.peers, iph->daddr, 1);
-	if (peer) {
-		unsigned short mtu = new_mtu;
-
-		if (new_mtu < 68 || new_mtu >= old_mtu) {
-			/* BSD 4.2 derived systems incorrectly adjust
-			 * tot_len by the IP header length, and report
-			 * a zero MTU in the ICMP message.
-			 */
-			if (mtu == 0 &&
-			    old_mtu >= 68 + (iph->ihl << 2))
-				old_mtu -= iph->ihl << 2;
-			mtu = guess_mtu(old_mtu);
-		}
-
-		if (mtu < ip_rt_min_pmtu)
-			mtu = ip_rt_min_pmtu;
-		if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
-			unsigned long pmtu_expires;
-
-			pmtu_expires = jiffies + ip_rt_mtu_expires;
-			if (!pmtu_expires)
-				pmtu_expires = 1UL;
-
-			est_mtu = mtu;
-			peer->pmtu_learned = mtu;
-			peer->pmtu_expires = pmtu_expires;
-			atomic_inc(&__rt_peer_genid);
-		}
-
-		inet_putpeer(peer);
-	}
-	return est_mtu ? : new_mtu;
-}
-
 static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer)
 {
 	unsigned long expires = ACCESS_ONCE(peer->pmtu_expires);
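The plateau heuristic deleted above was only needed for the BSD4.2 workaround. For reference, the same guess_mtu() logic as a standalone program (ARRAY_SIZE is defined locally; nothing kernel-specific is assumed): it walks the RFC 1191 plateau table, extended with the two AMPRnet AX.25 values noted in the removed comment, and returns the largest plateau strictly below the length of the datagram that was too big.

/*
 * Standalone reproduction of the removed guess_mtu() heuristic: pick the
 * next-smaller MTU plateau when the ICMP message carried no usable MTU.
 */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const unsigned short mtu_plateau[] =
{ 32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };

static unsigned short guess_mtu(unsigned short old_mtu)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
		if (old_mtu > mtu_plateau[i])
			return mtu_plateau[i];
	return 68;		/* minimum IPv4 MTU */
}

int main(void)
{
	/* e.g. a 1500-byte datagram bounced with a zero MTU: try 1492 next */
	printf("guess_mtu(1500) = %u\n", guess_mtu(1500));
	return 0;
}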
@@ -81,10 +81,6 @@ void rxrpc_UDP_error_report(struct sock *sk)
 			_net("I/F MTU %u", mtu);
 		}
 
-		/* ip_rt_frag_needed() may have eaten the info */
-		if (mtu == 0)
-			mtu = ntohs(icmp_hdr(skb)->un.frag.mtu);
-
 		if (mtu == 0) {
 			/* they didn't give us a size, estimate one */
 			if (mtu > 1500) {