This patch adds all the necessary data structures and support functions to implement TFO server side. It also documents a number of flags for the sysctl_tcp_fastopen knob, and adds a few Linux extension MIBs.

In addition, it includes the following:

1. A new TCP_FASTOPEN socket option an application must call to supply a max backlog allowed in order to enable TFO on its listener (see the usage sketch after this message).

2. A number of key data structures:
   "fastopen_rsk" in tcp_sock - for a big socket to access its request_sock for retransmission and ack processing purposes. It is non-NULL iff the 3WHS has not completed.
   "fastopenq" in request_sock_queue - points to a per Fast Open listener data structure "fastopen_queue" to keep track of qlen (# of outstanding Fast Open requests) and max_qlen, among other things.
   "listener" in tcp_request_sock - points to the original listener for book-keeping purposes, i.e., to maintain qlen against max_qlen as part of a defense against IP spoofing attacks.

3. Various data structures and functions, many in tcp_fastopen.c, to support server side Fast Open cookie operations, including /proc/sys/net/ipv4/tcp_fastopen_key to allow manual rekeying.

Signed-off-by: H.K. Jerry Chu <hkchu@google.com>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Neal Cardwell <ncardwell@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
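
For illustration only (not part of this patch): a minimal userspace sketch of item 1 above, in which a server enables TFO on its listener by supplying a max backlog via the new TCP_FASTOPEN socket option. The qlen value of 5 and the helper name are arbitrary.

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Sketch only: enable TFO on a listening socket. qlen caps the number
 * of outstanding Fast Open requests (the "max backlog" above).
 */
int tfo_listener(unsigned short port)
{
	struct sockaddr_in addr;
	int qlen = 5;			/* arbitrary example value */
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(port);

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen)) < 0 ||
	    listen(fd, 128) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
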
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <net/inetpeer.h>
#include <net/tcp.h>

int sysctl_tcp_fastopen __read_mostly;

struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;

static DEFINE_SPINLOCK(tcp_fastopen_ctx_lock);

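/* For reference only (sketch, not compiled): the context managed by this
 * file. The real definition is added to net/tcp.h by this patch; the
 * layout below is reconstructed from how the fields are used here.
 */
#if 0
struct tcp_fastopen_context {
	struct crypto_cipher	*tfm;	/* AES block cipher handle */
	__u8			key[TCP_FASTOPEN_KEY_LENGTH];	/* 16-byte secret */
	struct rcu_head		rcu;	/* deferred free after an RCU grace period */
};
#endif
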
static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
	struct tcp_fastopen_context *ctx =
	    container_of(head, struct tcp_fastopen_context, rcu);
	crypto_free_cipher(ctx->tfm);
	kfree(ctx);
}

int tcp_fastopen_reset_cipher(void *key, unsigned int len)
{
	int err;
	struct tcp_fastopen_context *ctx, *octx;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->tfm = crypto_alloc_cipher("aes", 0, 0);

	if (IS_ERR(ctx->tfm)) {
		err = PTR_ERR(ctx->tfm);
error:		kfree(ctx);
		pr_err("TCP: TFO aes cipher alloc error: %d\n", err);
		return err;
	}
	err = crypto_cipher_setkey(ctx->tfm, key, len);
	if (err) {
		pr_err("TCP: TFO cipher key error: %d\n", err);
		crypto_free_cipher(ctx->tfm);
		goto error;
	}
	memcpy(ctx->key, key, len);

	spin_lock(&tcp_fastopen_ctx_lock);

	octx = rcu_dereference_protected(tcp_fastopen_ctx,
				lockdep_is_held(&tcp_fastopen_ctx_lock));
	rcu_assign_pointer(tcp_fastopen_ctx, ctx);
	spin_unlock(&tcp_fastopen_ctx_lock);

	if (octx)
		call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
	return err;
}

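/* Illustrative sketch, not part of this file: a caller (for instance the
 * tcp_fastopen_key sysctl handler added elsewhere in this patch) would
 * install a specific 16-byte secret roughly like this. The function name
 * and the key bytes below are arbitrary placeholders.
 */
static inline int tcp_fastopen_example_set_key(void)
{
	__u8 key[TCP_FASTOPEN_KEY_LENGTH] = {
		0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
		0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff,
	};

	/* Replaces the current context; readers keep using the old one
	 * until call_rcu() frees it after a grace period.
	 */
	return tcp_fastopen_reset_cipher(key, sizeof(key));
}
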
/* Computes the fastopen cookie for the peer.
 * The peer address is 128 bits long (padded with zeros for IPv4).
 *
 * The caller must check foc->len to determine if a valid cookie
 * has been generated successfully.
 */
void tcp_fastopen_cookie_gen(__be32 addr, struct tcp_fastopen_cookie *foc)
{
	__be32 peer_addr[4] = { addr, 0, 0, 0 };
	struct tcp_fastopen_context *ctx;

	rcu_read_lock();
	ctx = rcu_dereference(tcp_fastopen_ctx);
	if (ctx) {
		crypto_cipher_encrypt_one(ctx->tfm,
					  foc->val,
					  (__u8 *)peer_addr);
		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
	}
	rcu_read_unlock();
}

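/* Illustrative sketch, not part of this file: server-side SYN processing
 * (added by later patches in this series) can validate a client-supplied
 * cookie by regenerating the expected value for the peer's IPv4 address
 * and comparing. The function name and structure here are hypothetical.
 */
static inline bool tcp_fastopen_example_cookie_valid(__be32 peer_ip,
				const struct tcp_fastopen_cookie *rcvd)
{
	struct tcp_fastopen_cookie expected = { .len = 0 };

	tcp_fastopen_cookie_gen(peer_ip, &expected);

	/* No key installed, or the client's cookie has the wrong length. */
	if (expected.len != TCP_FASTOPEN_COOKIE_SIZE ||
	    rcvd->len != TCP_FASTOPEN_COOKIE_SIZE)
		return false;

	return memcmp(expected.val, rcvd->val, TCP_FASTOPEN_COOKIE_SIZE) == 0;
}
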
static int __init tcp_fastopen_init(void)
{
	__u8 key[TCP_FASTOPEN_KEY_LENGTH];

	get_random_bytes(key, sizeof(key));
	tcp_fastopen_reset_cipher(key, sizeof(key));
	return 0;
}

late_initcall(tcp_fastopen_init);