/* net/ipv4/tcp_fastopen.c - TCP Fast Open (TFO, RFC 7413) support. */
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <net/inetpeer.h>
#include <net/tcp.h>

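/* Bitmap of the TFO_* mode flags defined in <net/tcp.h>; the default
 * enables Fast Open on the client side only.
 */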
int sysctl_tcp_fastopen __read_mostly = TFO_CLIENT_ENABLE;

struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;

static DEFINE_SPINLOCK(tcp_fastopen_ctx_lock);

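/* Generate and install a random key on first use.  The @publish argument
 * lets the tcp_fastopen_key sysctl handler consume the one-shot without
 * publishing the random key, so an admin-configured key is not clobbered
 * later.  TCP_FASTOPEN_KEY_LENGTH is 16 bytes, i.e. one AES-128 key.
 */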
void tcp_fastopen_init_key_once(bool publish)
{
        static u8 key[TCP_FASTOPEN_KEY_LENGTH];

        /* tcp_fastopen_reset_cipher publishes the new context
         * atomically, so we allow this race to happen here.
         *
         * All call sites of tcp_fastopen_cookie_gen also check
         * for a valid cookie, so this is an acceptable risk.
         */
        if (net_get_random_once(key, sizeof(key)) && publish)
                tcp_fastopen_reset_cipher(key, sizeof(key));
}

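/* RCU callback: free a retired cipher context once all readers that may
 * still hold a reference via rcu_dereference() have drained.
 */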
static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
        struct tcp_fastopen_context *ctx =
                container_of(head, struct tcp_fastopen_context, rcu);

        crypto_free_cipher(ctx->tfm);
        kfree(ctx);
}

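/* Allocate a fresh AES cipher context for @key, publish it with
 * rcu_assign_pointer() under tcp_fastopen_ctx_lock, and free the old
 * context (if any) after a grace period.
 */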
int tcp_fastopen_reset_cipher(void *key, unsigned int len)
{
        int err;
        struct tcp_fastopen_context *ctx, *octx;

        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
        ctx->tfm = crypto_alloc_cipher("aes", 0, 0);

        if (IS_ERR(ctx->tfm)) {
                err = PTR_ERR(ctx->tfm);
error:
                kfree(ctx);
                pr_err("TCP: TFO aes cipher alloc error: %d\n", err);
                return err;
        }
        err = crypto_cipher_setkey(ctx->tfm, key, len);
        if (err) {
                pr_err("TCP: TFO cipher key error: %d\n", err);
                crypto_free_cipher(ctx->tfm);
                goto error;
        }
        memcpy(ctx->key, key, len);

        spin_lock(&tcp_fastopen_ctx_lock);

        octx = rcu_dereference_protected(tcp_fastopen_ctx,
                                lockdep_is_held(&tcp_fastopen_ctx_lock));
        rcu_assign_pointer(tcp_fastopen_ctx, ctx);
        spin_unlock(&tcp_fastopen_ctx_lock);

        if (octx)
                call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
        return err;
}

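/* Encrypt one 16-byte block of connection identity (@path) with the
 * current key to produce the cookie.  Fails only when no key has been
 * installed yet.
 */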
static bool __tcp_fastopen_cookie_gen(const void *path,
                                      struct tcp_fastopen_cookie *foc)
{
        struct tcp_fastopen_context *ctx;
        bool ok = false;

        rcu_read_lock();
        ctx = rcu_dereference(tcp_fastopen_ctx);
        if (ctx) {
                crypto_cipher_encrypt_one(ctx->tfm, foc->val, path);
                foc->len = TCP_FASTOPEN_COOKIE_SIZE;
                ok = true;
        }
        rcu_read_unlock();
        return ok;
}

/* Generate the fastopen cookie by encrypting the source and destination
 * addresses with AES-128.  Pad with zeros for IPv4 or IPv4-mapped-IPv6
 * addresses.  For the longer IPv6 addresses use CBC-MAC.
 *
 * XXX (TFO) - refactor when TCP_FASTOPEN_COOKIE_SIZE != AES_BLOCK_SIZE.
 */
static bool tcp_fastopen_cookie_gen(struct request_sock *req,
                                    struct sk_buff *syn,
                                    struct tcp_fastopen_cookie *foc)
{
        if (req->rsk_ops->family == AF_INET) {
                const struct iphdr *iph = ip_hdr(syn);

                __be32 path[4] = { iph->saddr, iph->daddr, 0, 0 };
                return __tcp_fastopen_cookie_gen(path, foc);
        }

#if IS_ENABLED(CONFIG_IPV6)
        if (req->rsk_ops->family == AF_INET6) {
                const struct ipv6hdr *ip6h = ipv6_hdr(syn);
                struct tcp_fastopen_cookie tmp;

                if (__tcp_fastopen_cookie_gen(&ip6h->saddr, &tmp)) {
                        struct in6_addr *buf = &tmp.addr;
                        int i;

                        for (i = 0; i < 4; i++)
                                buf->s6_addr32[i] ^= ip6h->daddr.s6_addr32[i];
                        return __tcp_fastopen_cookie_gen(buf, foc);
                }
        }
#endif
        return false;
}
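
/* The IPv6 branch above is a two-block CBC-MAC with an implicit zero IV:
 * cookie = AES_K(AES_K(saddr) XOR daddr).
 */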

/* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
 * queue this additional data / FIN.
 */
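/* A pure SYN (end_seq == rcv_nxt, i.e. no payload and no FIN) needs no
 * queuing.  The skb is cloned because the caller still owns the original;
 * seq is then advanced past the SYN so the queued clone covers only the
 * payload (and a FIN, if present).
 */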
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
                return;

        skb = skb_clone(skb, GFP_ATOMIC);
        if (!skb)
                return;

        skb_dst_drop(skb);
        /* segs_in has been initialized to 1 in tcp_create_openreq_child().
         * Hence, reset segs_in to 0 before calling tcp_segs_in()
         * to avoid double counting.  Also, tcp_segs_in() expects
         * skb->len to include the tcp_hdrlen.  Hence, it should
         * be called before __skb_pull().
         */
        tp->segs_in = 0;
        tcp_segs_in(tp, skb);
        __skb_pull(skb, tcp_hdrlen(skb));
        sk_forced_mem_schedule(sk, skb->truesize);
        skb_set_owner_r(skb, sk);

        TCP_SKB_CB(skb)->seq++;
        TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;

        tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
        __skb_queue_tail(&sk->sk_receive_queue, skb);
        tp->syn_data_acked = 1;

        /* u64_stats_update_begin(&tp->syncp) not needed here,
         * as we certainly are not changing the upper 32-bit value (0).
         */
        tp->bytes_received = skb->len;

        if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
                tcp_fin(sk);
}

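/* Build a full child socket from a valid TFO SYN.  tcp_conn_request()
 * later sends the SYN-ACK and queues the child directly into the
 * listener's accept queue; the request socket never sits in the ehash
 * table.
 */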
static struct sock *tcp_fastopen_create_child(struct sock *sk,
                                              struct sk_buff *skb,
                                              struct request_sock *req)
{
        struct tcp_sock *tp;
        struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
        struct sock *child;
        bool own_req;

        req->num_retrans = 0;
        req->num_timeout = 0;
        req->sk = NULL;

        child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
                                                         NULL, &own_req);
        if (!child)
                return NULL;

        spin_lock(&queue->fastopenq.lock);
        queue->fastopenq.qlen++;
        spin_unlock(&queue->fastopenq.lock);

        /* Initialize the child socket.  Have to fix some values to take
         * into account the child is a Fast Open socket and is created
         * only out of the bits carried in the SYN packet.
         */
        tp = tcp_sk(child);

        tp->fastopen_rsk = req;
        tcp_rsk(req)->tfo_listener = true;

        /* RFC 1323: The window in SYN & SYN/ACK segments is never
         * scaled.  So correct it appropriately.
         */
        tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
        tp->max_window = tp->snd_wnd;

        /* Activate the retrans timer so that SYNACK can be retransmitted.
         * The request socket is not added to the ehash
         * because it's been added to the accept queue directly.
         */
        inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
                                  TCP_TIMEOUT_INIT, TCP_RTO_MAX);

        refcount_set(&req->rsk_refcnt, 2);

        /* Now finish processing the fastopen child socket. */
        inet_csk(child)->icsk_af_ops->rebuild_header(child);
        tcp_init_congestion_control(child);
        tcp_mtup_init(child);
        tcp_init_metrics(child);
        tcp_call_bpf(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB);
        tcp_init_buffer_space(child);

        tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;

        tcp_fastopen_add_skb(child, skb);

        tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
        tp->rcv_wup = tp->rcv_nxt;
        /* tcp_conn_request() is sending the SYNACK,
         * and queues the child into listener accept queue.
         */
        return child;
}

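/* fastopenq.max_qlen is what the listener configured with
 * setsockopt(TCP_FASTOPEN).  A minimal userspace sketch (names and error
 * handling are illustrative only):
 *
 *      int qlen = 16;  // max pending (not-yet-accepted) TFO requests
 *      setsockopt(lfd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
 *      listen(lfd, backlog);
 */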
static bool tcp_fastopen_queue_check(struct sock *sk)
{
        struct fastopen_queue *fastopenq;

        /* Make sure the listener has enabled fastopen, and we don't
         * exceed the max # of pending TFO requests allowed before trying
         * to validate the cookie, in order to avoid burning CPU cycles
         * unnecessarily.
         *
         * XXX (TFO) - The implication of checking the max_qlen before
         * processing a cookie request is that clients can't differentiate
         * between qlen overflow causing Fast Open to be disabled
         * temporarily vs a server not supporting Fast Open at all.
         */
        fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
        if (fastopenq->max_qlen == 0)
                return false;

        if (fastopenq->qlen >= fastopenq->max_qlen) {
                struct request_sock *req1;

                spin_lock(&fastopenq->lock);
                req1 = fastopenq->rskq_rst_head;
                if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
                        __NET_INC_STATS(sock_net(sk),
                                        LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
                        spin_unlock(&fastopenq->lock);
                        return false;
                }
                fastopenq->rskq_rst_head = req1->dl_next;
                fastopenq->qlen--;
                spin_unlock(&fastopenq->lock);
                reqsk_put(req1);
        }
        return true;
}

/* Returns the child socket if we should perform Fast Open on the SYN,
 * NULL otherwise.  The cookie (foc) may be updated and returned to the
 * client in the SYN-ACK later, e.g. for a Fast Open cookie request
 * (foc->len == 0).
 */
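/* Outcomes: a valid cookie (or SYN data with TFO_SERVER_COOKIE_NOT_REQD)
 * yields a child socket.  On a cookie request or mismatch, *foc is
 * replaced with a freshly generated cookie to echo in the SYN-ACK and
 * NULL is returned; if TFO is off or the queue check fails, foc->len is
 * set to -1 so no cookie option is sent at all.
 */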
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
                              struct request_sock *req,
                              struct tcp_fastopen_cookie *foc)
{
        struct tcp_fastopen_cookie valid_foc = { .len = -1 };
        bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
        struct sock *child;

        if (foc->len == 0) /* Client requests a cookie */
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);

        if (!((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) &&
              (syn_data || foc->len >= 0) &&
              tcp_fastopen_queue_check(sk))) {
                foc->len = -1;
                return NULL;
        }

        if (syn_data && (sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD))
                goto fastopen;

        if (foc->len >= 0 &&  /* Client presents or requests a cookie */
            tcp_fastopen_cookie_gen(req, skb, &valid_foc) &&
            foc->len == TCP_FASTOPEN_COOKIE_SIZE &&
            foc->len == valid_foc.len &&
            !memcmp(foc->val, valid_foc.val, foc->len)) {
                /* Cookie is valid.  Create a (full) child socket to accept
                 * the data in SYN before returning a SYN-ACK to ack the
                 * data.  If we fail to create the socket, fall back and
                 * ack the ISN only but include the same cookie.
                 *
                 * Note: Data-less SYN with valid cookie is allowed to send
                 * data in SYN_RECV state.
                 */
fastopen:
                child = tcp_fastopen_create_child(sk, skb, req);
                if (child) {
                        foc->len = -1;
                        NET_INC_STATS(sock_net(sk),
                                      LINUX_MIB_TCPFASTOPENPASSIVE);
                        return child;
                }
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
        } else if (foc->len > 0) /* Client presents an invalid cookie */
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);

        valid_foc.exp = foc->exp;
        *foc = valid_foc;
        return NULL;
}

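/* Client side: consult the metrics cache for a cookie for this
 * destination and apply the exponential SYN-loss and blackhole back-offs.
 * Returning true means data may be sent in the SYN (with the cookie, or
 * without one in TFO_CLIENT_NO_COOKIE mode).
 */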
bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
                               struct tcp_fastopen_cookie *cookie)
{
        unsigned long last_syn_loss = 0;
        int syn_loss = 0;

        tcp_fastopen_cache_get(sk, mss, cookie, &syn_loss, &last_syn_loss);

        /* Recurring FO SYN losses: no cookie or data in SYN */
        if (syn_loss > 1 &&
            time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
                cookie->len = -1;
                return false;
        }

        /* Firewall blackhole issue check */
        if (tcp_fastopen_active_should_disable(sk)) {
                cookie->len = -1;
                return false;
        }

        if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE) {
                cookie->len = -1;
                return true;
        }
        return cookie->len > 0;
}

/* This function checks if we want to defer sending SYN until the first
 * write().  We defer under the following conditions:
 * 1. fastopen_connect sockopt is set
 * 2. we have a valid cookie
 * Return true if we want to defer until the application writes data;
 * false if we want to send out the SYN immediately.
 */
bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
{
        struct tcp_fastopen_cookie cookie = { .len = 0 };
        struct tcp_sock *tp = tcp_sk(sk);
        u16 mss;

        if (tp->fastopen_connect && !tp->fastopen_req) {
                if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
                        inet_sk(sk)->defer_connect = 1;
                        return true;
                }

                /* Alloc fastopen_req in order for FO option to be included
                 * in SYN
                 */
                tp->fastopen_req = kzalloc(sizeof(*tp->fastopen_req),
                                           sk->sk_allocation);
                if (tp->fastopen_req)
                        tp->fastopen_req->cookie = cookie;
                else
                        *err = -ENOBUFS;
        }
        return false;
}
EXPORT_SYMBOL(tcp_fastopen_defer_connect);
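
/* Userspace sketch of the deferred-connect path above (illustrative only;
 * error handling omitted):
 *
 *      int on = 1;
 *      setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT, &on, sizeof(on));
 *      connect(fd, (struct sockaddr *)&dst, sizeof(dst)); // may complete
 *                                                         // at once
 *      write(fd, buf, len);    // SYN goes out here, with data if a
 *                              // cookie is cached
 *
 * The older alternative is a single sendto(fd, buf, len, MSG_FASTOPEN,
 * &dst, ...) without a prior connect().
 */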

/*
 * The following code block is to deal with middlebox issues with TFO:
 * Middlebox firewall issues can potentially cause the server's data to be
 * blackholed after a successful 3WHS using TFO.
 * The proposed solution is to disable active TFO globally under the
 * following circumstances:
 * 1. client side TFO socket receives out-of-order FIN
 * 2. client side TFO socket receives out-of-order RST
 * We disable active side TFO globally for 1 hour at first.  Then if it
 * happens again, we disable it for 2 hours, then 4, 8, ...
 * And we reset the timeout back to 1 hour when we see a successful active
 * TFO connection with data exchanged.
 */

/* Default to 1 hour */
unsigned int sysctl_tcp_fastopen_blackhole_timeout __read_mostly = 60 * 60;
static atomic_t tfo_active_disable_times __read_mostly = ATOMIC_INIT(0);
static unsigned long tfo_active_disable_stamp __read_mostly;

/* Disable active TFO: bump tfo_active_disable_times and record the
 * current jiffies as the disable timestamp.
 */
void tcp_fastopen_active_disable(struct sock *sk)
{
        atomic_inc(&tfo_active_disable_times);
        tfo_active_disable_stamp = jiffies;
        NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENBLACKHOLE);
}

/* Reset tfo_active_disable_times to 0 */
void tcp_fastopen_active_timeout_reset(void)
{
        atomic_set(&tfo_active_disable_times, 0);
}

/* Calculate the timeout for the active-TFO disable period.
 * Return true if we are still inside the disable period;
 * false if the timeout has expired and active TFO may be used again.
 */
bool tcp_fastopen_active_should_disable(struct sock *sk)
{
        int tfo_da_times = atomic_read(&tfo_active_disable_times);
        int multiplier;
        unsigned long timeout;

        if (!tfo_da_times)
                return false;

        /* Limit the timeout to at most 2^6 * the initial timeout */
        multiplier = 1 << min(tfo_da_times - 1, 6);
        timeout = multiplier * sysctl_tcp_fastopen_blackhole_timeout * HZ;
        if (time_before(jiffies, tfo_active_disable_stamp + timeout))
                return true;

        /* Mark check bit so we can check for successful active TFO
         * condition and reset tfo_active_disable_times
         */
        tcp_sk(sk)->syn_fastopen_ch = 1;
        return false;
}
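
/* Worked example: with the default 1-hour timeout, the third disable
 * event gives multiplier = 1 << 2 = 4, i.e. a 4-hour blackout; from the
 * seventh event onward the cap holds it at 2^6 = 64 hours.
 */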

/* Disable active TFO if FIN is the only packet in the ofo queue
 * and no data has been received.
 * Also check if we can reset tfo_active_disable_times: data received
 * successfully on a marked active TFO socket opened on a non-loopback
 * interface allows the reset.
 */
void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct rb_node *p;
        struct sk_buff *skb;
        struct dst_entry *dst;

        if (!tp->syn_fastopen)
                return;

        if (!tp->data_segs_in) {
                p = rb_first(&tp->out_of_order_queue);
                if (p && !rb_next(p)) {
                        skb = rb_entry(p, struct sk_buff, rbnode);
                        if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
                                tcp_fastopen_active_disable(sk);
                                return;
                        }
                }
        } else if (tp->syn_fastopen_ch &&
                   atomic_read(&tfo_active_disable_times)) {
                dst = sk_dst_get(sk);
                if (!(dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK)))
                        tcp_fastopen_active_timeout_reset();
                dst_release(dst);
        }
}