#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <net/inetpeer.h>
#include <net/tcp.h>

void tcp_fastopen_init_key_once(struct net *net)
{
	u8 key[TCP_FASTOPEN_KEY_LENGTH];
	struct tcp_fastopen_context *ctxt;

	rcu_read_lock();
	ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
	if (ctxt) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	/* tcp_fastopen_reset_cipher publishes the new context
	 * atomically, so we allow this race happening here.
	 *
	 * All call sites of tcp_fastopen_cookie_gen also check
	 * for a valid cookie, so this is an acceptable risk.
	 */
	get_random_bytes(key, sizeof(key));
	tcp_fastopen_reset_cipher(net, key, sizeof(key));
}

static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
	struct tcp_fastopen_context *ctx =
		container_of(head, struct tcp_fastopen_context, rcu);

	crypto_free_cipher(ctx->tfm);
	kfree(ctx);
}

void tcp_fastopen_ctx_destroy(struct net *net)
{
	struct tcp_fastopen_context *ctxt;

	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);

	ctxt = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
	rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, NULL);
	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);

	/* Free the old context only after RCU readers are done with it. */
	if (ctxt)
		call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
}

int tcp_fastopen_reset_cipher(struct net *net, void *key, unsigned int len)
{
	int err;
	struct tcp_fastopen_context *ctx, *octx;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->tfm = crypto_alloc_cipher("aes", 0, 0);

	if (IS_ERR(ctx->tfm)) {
		err = PTR_ERR(ctx->tfm);
error:		kfree(ctx);
		pr_err("TCP: TFO aes cipher alloc error: %d\n", err);
		return err;
	}
	err = crypto_cipher_setkey(ctx->tfm, key, len);
	if (err) {
		pr_err("TCP: TFO cipher key error: %d\n", err);
		crypto_free_cipher(ctx->tfm);
		goto error;
	}
	memcpy(ctx->key, key, len);

	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);

	octx = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
	rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, ctx);
	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);

	/* RCU-delay freeing of the old context. */
	if (octx)
		call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
	return err;
}
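
/* Usage note (illustrative, not part of this file): administrators
 * normally install the server key via the net.ipv4.tcp_fastopen_key
 * sysctl rather than calling this directly, e.g. (example key only):
 *
 *	sysctl -w net.ipv4.tcp_fastopen_key=a1a2a3a4-b1b2b3b4-c1c2c3c4-d1d2d3d4
 *
 * which reaches tcp_fastopen_reset_cipher() through the sysctl handler.
 */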

static bool __tcp_fastopen_cookie_gen(struct net *net, const void *path,
				      struct tcp_fastopen_cookie *foc)
{
	struct tcp_fastopen_context *ctx;
	bool ok = false;

	rcu_read_lock();
	ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
	if (ctx) {
		crypto_cipher_encrypt_one(ctx->tfm, foc->val, path);
		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
		ok = true;
	}
	rcu_read_unlock();
	return ok;
}

/* Generate the fastopen cookie by doing aes128 encryption on both
 * the source and destination addresses. Pad 0s for IPv4 or IPv4-mapped-IPv6
 * addresses. For the longer IPv6 addresses use CBC-MAC.
 *
 * XXX (TFO) - refactor when TCP_FASTOPEN_COOKIE_SIZE != AES_BLOCK_SIZE.
 */
static bool tcp_fastopen_cookie_gen(struct net *net,
				    struct request_sock *req,
				    struct sk_buff *syn,
				    struct tcp_fastopen_cookie *foc)
{
	if (req->rsk_ops->family == AF_INET) {
		const struct iphdr *iph = ip_hdr(syn);

		__be32 path[4] = { iph->saddr, iph->daddr, 0, 0 };
		return __tcp_fastopen_cookie_gen(net, path, foc);
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (req->rsk_ops->family == AF_INET6) {
		const struct ipv6hdr *ip6h = ipv6_hdr(syn);
		struct tcp_fastopen_cookie tmp;

		if (__tcp_fastopen_cookie_gen(net, &ip6h->saddr, &tmp)) {
			struct in6_addr *buf = &tmp.addr;
			int i;

			for (i = 0; i < 4; i++)
				buf->s6_addr32[i] ^= ip6h->daddr.s6_addr32[i];
			return __tcp_fastopen_cookie_gen(net, buf, foc);
		}
	}
#endif
	return false;
}
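
/* Worked example of the scheme above: for IPv6 the cookie is a two-block
 * CBC-MAC under the TFO key K,
 *
 *	tmp    = AES_K(saddr)
 *	cookie = AES_K(tmp ^ daddr)
 *
 * while for IPv4 the single 16-byte block { saddr, daddr, 0, 0 } is
 * encrypted directly.
 */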

/* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
 * queue this additional data / FIN.
 */
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
		return;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return;

	skb_dst_drop(skb);
	/* segs_in has been initialized to 1 in tcp_create_openreq_child().
	 * Hence, reset segs_in to 0 before calling tcp_segs_in()
	 * to avoid double counting. Also, tcp_segs_in() expects
	 * skb->len to include the tcp_hdrlen. Hence, it should
	 * be called before __skb_pull().
	 */
	tp->segs_in = 0;
	tcp_segs_in(tp, skb);
	__skb_pull(skb, tcp_hdrlen(skb));
	sk_forced_mem_schedule(sk, skb->truesize);
	skb_set_owner_r(skb, sk);

	TCP_SKB_CB(skb)->seq++;
	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;

	tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	tp->syn_data_acked = 1;

	/* u64_stats_update_begin(&tp->syncp) not needed here,
	 * as we certainly are not changing upper 32bit value (0)
	 */
	tp->bytes_received = skb->len;

	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
		tcp_fin(sk);
}
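
/* Worked example: a SYN with seq = S carrying 100 bytes of payload has
 * end_seq = S + 101 (the SYN flag consumes one sequence number). After
 * the SYN flag and header are stripped above, the queued skb covers
 * [S + 1, S + 101) and tp->rcv_nxt lands on S + 101.
 */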

static struct sock *tcp_fastopen_create_child(struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req)
{
	struct tcp_sock *tp;
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	struct sock *child;
	bool own_req;

	req->num_retrans = 0;
	req->num_timeout = 0;
	req->sk = NULL;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 NULL, &own_req);
	if (!child)
		return NULL;

	spin_lock(&queue->fastopenq.lock);
	queue->fastopenq.qlen++;
	spin_unlock(&queue->fastopenq.lock);

	/* Initialize the child socket. Have to fix some values to take
	 * into account the child is a Fast Open socket and is created
	 * only out of the bits carried in the SYN packet.
	 */
	tp = tcp_sk(child);

	tp->fastopen_rsk = req;
	tcp_rsk(req)->tfo_listener = true;

	/* RFC1323: The window in SYN & SYN/ACK segments is never
	 * scaled. So correct it appropriately.
	 */
	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
	tp->max_window = tp->snd_wnd;

	/* Activate the retrans timer so that SYNACK can be retransmitted.
	 * The request socket is not added to the ehash
	 * because it's been added to the accept queue directly.
	 */
	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);

	refcount_set(&req->rsk_refcnt, 2);

	/* Now finish processing the fastopen child socket. */
	tcp_init_transfer(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB);

	tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;

	tcp_fastopen_add_skb(child, skb);

	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
	tp->rcv_wup = tp->rcv_nxt;
	/* tcp_conn_request() is sending the SYNACK,
	 * and queues the child into listener accept queue.
	 */
	return child;
}

static bool tcp_fastopen_queue_check(struct sock *sk)
{
	struct fastopen_queue *fastopenq;

	/* Make sure the listener has enabled fastopen, and we don't
	 * exceed the max # of pending TFO requests allowed before trying
	 * to validate the cookie, in order to avoid burning CPU cycles
	 * unnecessarily.
	 *
	 * XXX (TFO) - The implication of checking the max_qlen before
	 * processing a cookie request is that clients can't differentiate
	 * between qlen overflow causing Fast Open to be disabled
	 * temporarily vs a server not supporting Fast Open at all.
	 */
	fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
	if (fastopenq->max_qlen == 0)
		return false;

	if (fastopenq->qlen >= fastopenq->max_qlen) {
		struct request_sock *req1;

		spin_lock(&fastopenq->lock);
		req1 = fastopenq->rskq_rst_head;
		if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
			spin_unlock(&fastopenq->lock);
			return false;
		}
		fastopenq->rskq_rst_head = req1->dl_next;
		fastopenq->qlen--;
		spin_unlock(&fastopenq->lock);
		reqsk_put(req1);
	}
	return true;
}

/* Returns the child socket if we should perform Fast Open on the SYN,
 * otherwise NULL. The cookie (foc) may be updated and returned to the
 * client in the SYN-ACK later, e.g. for a Fast Open cookie request
 * (foc->len == 0).
 */
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc)
{
	bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
	int tcp_fastopen = sock_net(sk)->ipv4.sysctl_tcp_fastopen;
	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
	struct sock *child;

	if (foc->len == 0) /* Client requests a cookie */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);

	if (!((tcp_fastopen & TFO_SERVER_ENABLE) &&
	      (syn_data || foc->len >= 0) &&
	      tcp_fastopen_queue_check(sk))) {
		foc->len = -1;
		return NULL;
	}

	if (syn_data && (tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD))
		goto fastopen;

	if (foc->len >= 0 &&  /* Client presents or requests a cookie */
	    tcp_fastopen_cookie_gen(sock_net(sk), req, skb, &valid_foc) &&
	    foc->len == TCP_FASTOPEN_COOKIE_SIZE &&
	    foc->len == valid_foc.len &&
	    !memcmp(foc->val, valid_foc.val, foc->len)) {
		/* Cookie is valid. Create a (full) child socket to accept
		 * the data in SYN before returning a SYN-ACK to ack the
		 * data. If we fail to create the socket, fall back and
		 * ack the ISN only but include the same cookie.
		 *
		 * Note: Data-less SYN with valid cookie is allowed to send
		 * data in SYN_RECV state.
		 */
fastopen:
		child = tcp_fastopen_create_child(sk, skb, req);
		if (child) {
			foc->len = -1;
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPFASTOPENPASSIVE);
			return child;
		}
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
	} else if (foc->len > 0) /* Client presents an invalid cookie */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);

	valid_foc.exp = foc->exp;
	*foc = valid_foc;
	return NULL;
}
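
/* Usage note (illustrative, not part of this file): a listener opts in to
 * this passive path by sizing its TFO queue before listen(), e.g.:
 *
 *	int qlen = 16;
 *	setsockopt(fd, SOL_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
 *
 * and the TFO_SERVER_ENABLE bit (0x2) must be set in the
 * net.ipv4.tcp_fastopen sysctl; otherwise the checks above bail out.
 */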

bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
			       struct tcp_fastopen_cookie *cookie)
{
	unsigned long last_syn_loss = 0;
	int syn_loss = 0;

	tcp_fastopen_cache_get(sk, mss, cookie, &syn_loss, &last_syn_loss);

	/* Recurring FO SYN losses: no cookie or data in SYN */
	if (syn_loss > 1 &&
	    time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
		cookie->len = -1;
		return false;
	}

	/* Firewall blackhole issue check */
	if (tcp_fastopen_active_should_disable(sk)) {
		cookie->len = -1;
		return false;
	}

	if (sock_net(sk)->ipv4.sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE) {
		cookie->len = -1;
		return true;
	}
	return cookie->len > 0;
}
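
/* Worked example: after syn_loss = 2 recurring Fast Open SYN losses, the
 * check above keeps the cookie disabled for 60s << 2 = 240s from the
 * last loss before Fast Open is attempted again.
 */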

/* This function checks if we want to defer sending SYN until the first
 * write(). We defer under the following conditions:
 * 1. fastopen_connect sockopt is set
 * 2. we have a valid cookie
 * Return value: return true if we want to defer until application writes data
 *               return false if we want to send out SYN immediately
 */
bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
{
	struct tcp_fastopen_cookie cookie = { .len = 0 };
	struct tcp_sock *tp = tcp_sk(sk);
	u16 mss;

	if (tp->fastopen_connect && !tp->fastopen_req) {
		if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
			inet_sk(sk)->defer_connect = 1;
			return true;
		}

		/* Alloc fastopen_req in order for FO option to be included
		 * in SYN
		 */
		tp->fastopen_req = kzalloc(sizeof(*tp->fastopen_req),
					   sk->sk_allocation);
		if (tp->fastopen_req)
			tp->fastopen_req->cookie = cookie;
		else
			*err = -ENOBUFS;
	}
	return false;
}
EXPORT_SYMBOL(tcp_fastopen_defer_connect);
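
/* Usage note (illustrative, not part of this file): this deferred-SYN path
 * is driven by the TCP_FASTOPEN_CONNECT socket option, e.g.:
 *
 *	int one = 1;
 *	setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT, &one, sizeof(one));
 *	connect(fd, ...);	// returns without sending a SYN
 *	send(fd, buf, len, 0);	// SYN (+ data if a cookie is cached) goes out
 *
 * The older MSG_FASTOPEN flag to sendto() achieves the same without a
 * preceding connect().
 */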

/* The following code block is to deal with middlebox issues with TFO:
 * Middlebox firewall issues can potentially cause server's data being
 * blackholed after a successful 3WHS using TFO.
 * The proposed solution is to disable active TFO globally under the
 * following circumstances:
 * 1. client side TFO socket receives out of order FIN
 * 2. client side TFO socket receives out of order RST
 * We disable active side TFO globally for 1hr at first. Then if it
 * happens again, we disable it for 2h, then 4h, 8h, ...
 * And we reset the timeout back to 1hr when we see a successful active
 * TFO connection with data exchanges.
 */

/* Disable active TFO and record current jiffies and
 * tfo_active_disable_times
 */
void tcp_fastopen_active_disable(struct sock *sk)
{
	struct net *net = sock_net(sk);

	atomic_inc(&net->ipv4.tfo_active_disable_times);
	net->ipv4.tfo_active_disable_stamp = jiffies;
	NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE);
}

/* Calculate timeout for tfo active disable
 * Return true if we are still in the active TFO disable period
 * Return false if timeout already expired and we should use active TFO
 */
bool tcp_fastopen_active_should_disable(struct sock *sk)
{
	unsigned int tfo_bh_timeout = sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout;
	int tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
	unsigned long timeout;
	int multiplier;

	if (!tfo_da_times)
		return false;

	/* Limit timeout to max: 2^6 * initial timeout */
	multiplier = 1 << min(tfo_da_times - 1, 6);
	timeout = multiplier * tfo_bh_timeout * HZ;
	if (time_before(jiffies, sock_net(sk)->ipv4.tfo_active_disable_stamp + timeout))
		return true;

	/* Mark check bit so we can check for successful active TFO
	 * condition and reset tfo_active_disable_times
	 */
	tcp_sk(sk)->syn_fastopen_ch = 1;
	return false;
}
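
/* Worked example of the backoff above: with the initial 1h period from
 * sysctl_tcp_fastopen_blackhole_timeout, successive blackhole events
 * disable active TFO for 1h, 2h, 4h, ..., and min(tfo_da_times - 1, 6)
 * caps the period at 2^6 = 64h until the counter is reset.
 */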

/* Disable active TFO if FIN is the only packet in the ofo queue
 * and no data is received.
 * Also check if we can reset tfo_active_disable_times if data is
 * received successfully on a marked active TFO socket opened on
 * a non-loopback interface
 */
void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst;
	struct sk_buff *skb;

	if (!tp->syn_fastopen)
		return;

	if (!tp->data_segs_in) {
		skb = skb_rb_first(&tp->out_of_order_queue);
		if (skb && !skb_rb_next(skb)) {
			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
				tcp_fastopen_active_disable(sk);
				return;
			}
		}
	} else if (tp->syn_fastopen_ch &&
		   atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times)) {
		dst = sk_dst_get(sk);
		if (!(dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK)))
			atomic_set(&sock_net(sk)->ipv4.tfo_active_disable_times, 0);
		dst_release(dst);
	}
}