net/ipv4/tcp_minisocks.c

/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>

int sysctl_tcp_abort_on_overflow __read_mostly;

static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}
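
/*
 * Editor's note (illustrative sketch, not part of the kernel build):
 * tcp_in_window() above accepts a segment if it overlaps the receive
 * window [s_win, e_win], using wrap-safe 32-bit sequence comparison.
 * The compiled-out userspace sketch below re-derives the kernel's
 * before()/after() from signed 32-bit subtraction and checks a few
 * hypothetical values, including one across sequence wraparound.
 */
#if 0
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool seq_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
static bool seq_after(uint32_t a, uint32_t b)  { return seq_before(b, a); }

static bool in_window(uint32_t seq, uint32_t end_seq,
		      uint32_t s_win, uint32_t e_win)
{
	if (seq == s_win)
		return true;
	if (seq_after(end_seq, s_win) && seq_before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}

int main(void)
{
	/* zero-length segment exactly at the left edge: acceptable */
	assert(in_window(1000, 1000, 1000, 2000));
	/* data overlapping the window: acceptable */
	assert(in_window(1500, 1600, 1000, 2000));
	/* old duplicate entirely left of the window: rejected */
	assert(!in_window(500, 900, 1000, 2000));
	/* the comparison survives sequence-number wraparound */
	assert(in_window(0xfffffff0u, 16, 0xffffffe0u, 64));
	return 0;
}
#endif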

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note: we do not put the bucket;
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop the skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
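
/*
 * Editor's note (illustrative sketch, not part of the kernel build):
 * the helper above defers to tcp_oow_rate_limited(), which throttles
 * replies to out-of-window segments to one per configured interval
 * (sysctl_tcp_invalid_ratelimit, 500 ms by default in kernels of this
 * era) so that a spoofed segment cannot provoke an ACK loop. A
 * hypothetical userspace sketch of that "one reply per interval" idea:
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

/* Return true ("rate-limited, suppress the reply") unless enough time
 * has elapsed since the last reply; on success, record the reply time.
 */
static bool oow_rate_limited(uint64_t now_ms, uint64_t *last_reply_ms,
			     uint64_t min_interval_ms)
{
	if (*last_reply_ms && now_ms - *last_reply_ms < min_interval_ms)
		return true;		/* too soon: suppress the ACK */
	*last_reply_ms = now_ms;	/* allowed: stamp and reply */
	return false;
}
#endif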

/*
 * * The main purpose of the TIME-WAIT state is to close the connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING
 *   retransmitting FIN (and, probably, a tail of data) and one or more
 *   of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   It is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to allow the loss of one
 *   (or more) segments sent by the peer and of our ACKs. This time may be
 *   calculated from the RTO.
 * * When a TIME-WAIT socket receives a RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with these semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. Strictly speaking, that means we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;
	struct inet_timewait_death_row *tcp_death_row = &sock_net((struct sock *)tw)->ipv4.tcp_death_row;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrives after a half-duplex
		 * close, reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		if (tcp_death_row->sysctl_tw_recycle &&
		    tcptw->tw_ts_recent_stamp &&
		    tcp_tw_remember_stamp(tw))
			inet_twsk_reschedule(tw, tw->tw_timeout);
		else
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 * Now the real TIME-WAIT state.
	 *
	 * RFC 1122:
	 * "When a connection is [...] on TIME-WAIT state [...]
	 * [a TCP] MAY accept a new SYN from the remote TCP to
	 * reopen the connection directly, if it:
	 *
	 * (1)  assigns its initial sequence number for the new
	 *      connection to be larger than the largest sequence
	 *      number it used on the previous connection incarnation,
	 *      and
	 *
	 * (2)  returns to TIME-WAIT state if the SYN turns out
	 *      to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* An in-window segment can only be a reset or a bare ACK. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.
	 *
	 * All such segments are ACKed immediately.
	 *
	 * The only exception is a new SYN. We accept it if it is not an
	 * old duplicate and we are not in danger of being killed by
	 * delayed old duplicates. The RFC check (that it carries a newer
	 * sequence number) works at rates below 40 Mbit/sec. However, if
	 * PAWS works, it is reliable and, even more, we may relax the
	 * silly seq-space cutoff.
	 *
	 * RED-PEN: we violate the main RFC requirement here: if this SYN
	 * turns out to be an old duplicate (i.e. we receive a RST in
	 * reply to our SYN-ACK), we must return the socket to time-wait
	 * state. It is not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACK-less SYN, it may be either an old duplicate
		 * or a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);
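
/*
 * Editor's note (illustrative sketch, not part of the kernel build):
 * two details of the TIME-WAIT SYN-accept path above are easy to get
 * wrong, so here is a compiled-out userspace sketch of both. First,
 * "timestamp moved forward" is a signed 32-bit comparison, so it
 * survives tsval wraparound. Second, the new ISN is placed 65535 + 2
 * beyond tw_snd_nxt so it exceeds anything the old incarnation could
 * have advertised, and 0 is skipped because a zero tcp_tw_isn means
 * "no TIME-WAIT ISN". All test values are hypothetical.
 */
#if 0
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool tsval_advanced(uint32_t ts_recent, uint32_t rcv_tsval)
{
	return (int32_t)(ts_recent - rcv_tsval) < 0;
}

static uint32_t tw_new_isn(uint32_t tw_snd_nxt)
{
	uint32_t isn = tw_snd_nxt + 65535 + 2;

	if (isn == 0)	/* 0 is the "no TW ISN" sentinel */
		isn++;
	return isn;
}

int main(void)
{
	assert(tsval_advanced(100, 101));		/* moved forward */
	assert(!tsval_advanced(101, 100));		/* stale */
	assert(tsval_advanced(0xffffffffu, 3));		/* across wrap */
	assert(tw_new_isn(0xffffffffu - 65536u) != 0);	/* sentinel skipped */
	return 0;
}
#endif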

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	struct inet_timewait_sock *tw;
	bool recycle_ok = false;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (tcp_death_row->sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
		recycle_ok = tcp_remember_stamp(sk);

	tw = inet_twsk_alloc(sk, tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
		struct inet_sock *inet = inet_sk(sk);

		tw->tw_transparent	= inet->transparent;
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset	= tp->tsoffset;
		tcptw->tw_last_oow_ack_time = 0;

#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so that the timewait ACK-generating code has the key.
		 */
		do {
			struct tcp_md5sig_key *key;
			tcptw->tw_md5_key = NULL;
			key = tp->af_specific->md5_lookup(sk, sk);
			if (key) {
				tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
				if (tcptw->tw_md5_key && !tcp_alloc_md5sig_pool())
					BUG();
			}
		} while (0);
#endif

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (recycle_ok) {
			tw->tw_timeout = rto;
		} else {
			tw->tw_timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;
		}

		inet_twsk_schedule(tw, timeo);
		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
		inet_twsk_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
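
/*
 * Editor's note (illustrative sketch, not part of the kernel build):
 * the timeout chosen above, (rto << 2) - (rto >> 1), is integer
 * shorthand for 3.5 * RTO: long enough for the peer sitting in
 * LAST-ACK or CLOSING to lose a FIN retransmission or two and still
 * reach us. A compiled-out check of the arithmetic:
 */
#if 0
#include <assert.h>
#include <stdint.h>

static uint32_t tw_timeout(uint32_t rto)
{
	return (rto << 2) - (rto >> 1);	/* 4*rto - rto/2 = 3.5*rto */
}

int main(void)
{
	assert(tw_timeout(200) == 700);		/* e.g. 200 ms RTO -> 700 ms */
	assert(tw_timeout(1000) == 3500);
	return 0;
}
#endif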

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_timewait_sock *twsk = tcp_twsk(sk);

	if (twsk->tw_md5_key)
		kfree_rcu(twsk->tw_md5_key, rcu);
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

/* Warning: This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(full_space,
		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
		&req->rsk_rcv_wnd,
		&req->rsk_window_clamp,
		ireq->wscale_ok,
		&rcv_wscale,
		dst_metric(dst, RTAX_INITRWND));
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);
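
/*
 * Editor's note (illustrative sketch, not part of the kernel build):
 * the clamp logic above uses the GNU "x ? : y" extension (x if
 * non-zero, else y) to prefer the listener's window_clamp over the
 * route metric, then caps the result when the application pinned the
 * receive buffer with SO_RCVBUF. A hypothetical pure-function sketch
 * of the same decision:
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static uint32_t choose_window_clamp(uint32_t tp_clamp, uint32_t route_window,
				    bool rcvbuf_locked, int full_space)
{
	uint32_t clamp = tp_clamp ? tp_clamp : route_window;

	/* A user-enforced, smaller receive buffer always wins. */
	if (rcvbuf_locked && (clamp > (uint32_t)full_space || clamp == 0))
		clamp = (uint32_t)full_space;
	return clamp;
}
#endif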

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && try_module_get(ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice has been made yet, assign the current system
	 * default CA.
	 */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !try_module_get(icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);
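
/*
 * Editor's note (illustrative sketch, not part of the kernel build):
 * the function above picks a congestion-control algorithm with a clear
 * precedence: one pinned on the route (RTAX_CC_ALGO) wins if its
 * module refcount can be taken; otherwise a CA chosen earlier via
 * setsockopt() is kept, again only if its module can be pinned;
 * otherwise the system default is assigned. A hypothetical sketch of
 * that precedence, with booleans standing in for the kernel checks:
 */
#if 0
#include <stdbool.h>

enum ca_choice { CA_FROM_DST, CA_FROM_SETSOCKOPT, CA_SYSTEM_DEFAULT };

static enum ca_choice pick_ca(bool dst_has_key, bool dst_module_ok,
			      bool set_by_sockopt, bool sockopt_module_ok)
{
	if (dst_has_key && dst_module_ok)
		return CA_FROM_DST;
	if (set_by_sockopt && sockopt_module_ok)
		return CA_FROM_SETSOCKOPT;
	return CA_SYSTEM_DEFAULT;
}
#endif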

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid a lot of memory writes here. The tp of the
 * listening socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

	if (newsk) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp = tcp_sk(newsk);

		/* Now setup tcp_sock */
		newtp->pred_flags = 0;

		newtp->rcv_wup = newtp->copied_seq =
		newtp->rcv_nxt = treq->rcv_isn + 1;
		newtp->segs_in = 1;

		newtp->snd_sml = newtp->snd_una =
		newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;

		tcp_prequeue_init(newtp);
		INIT_LIST_HEAD(&newtp->tsq_node);

		tcp_init_wl(newtp, treq->rcv_isn);

		newtp->srtt_us = 0;
		newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
		minmax_reset(&newtp->rtt_min, tcp_time_stamp, ~0U);
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
		newtp->tlp_high_seq = 0;
		newtp->lsndtime = treq->snt_synack.stamp_jiffies;
		newsk->sk_txhash = treq->txhash;
		newtp->last_oow_ack_time = 0;
		newtp->total_retrans = req->num_retrans;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = TCP_INIT_CWND;
		newtp->snd_cwnd_cnt = 0;

		/* There's a bubble in the pipe until at least the first ACK. */
		newtp->app_limited = ~0U;

		tcp_init_xmit_timers(newsk);
		newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.num_sacks = 0;

		newtp->urg_data = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				tcp_enable_fack(newtp);
		}
		newtp->window_clamp = req->rsk_window_clamp;
		newtp->rcv_ssthresh = req->rsk_rcv_wnd;
		newtp->rcv_wnd = req->rsk_rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
				  newtp->rx_opt.snd_wscale);
		newtp->max_window = newtp->snd_wnd;

		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = get_seconds();
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
		newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
		newtp->md5sig_info = NULL;	/*XXX*/
		if (newtp->af_specific->md5_lookup(sk, newsk))
			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
		if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		tcp_ecn_openreq_child(newtp, req);
		newtp->fastopen_rsk = NULL;
		newtp->syn_data_acked = 0;
		newtp->rack.mstamp.v64 = 0;
		newtp->rack.advanced = 0;

		__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);
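
/*
 * Editor's note (illustrative sketch, not part of the kernel build):
 * two lines above deserve a worked example. With wscale negotiated,
 * the peer's advertised send window is ntohs(th->window) << snd_wscale;
 * without it, both scales are zero and window_clamp is capped at the
 * unscaled maximum of 65535. A compiled-out userspace check with
 * hypothetical values (RFC 7323 limits the shift to 14):
 */
#if 0
#include <assert.h>
#include <stdint.h>

static uint32_t scaled_snd_wnd(uint16_t raw_window, uint8_t snd_wscale)
{
	return (uint32_t)raw_window << snd_wscale;
}

int main(void)
{
	assert(scaled_snd_wnd(65535, 0) == 65535);	 /* no scaling */
	assert(scaled_snd_wnd(512, 7) == 65536);	 /* 512 << 7 */
	assert(scaled_snd_wnd(65535, 14) == 1073725440); /* max shift */
	return 0;
}
#endif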

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current implementation contains a special check for
 * ACK validation inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store the true stamp, but it is not
			 * required; it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for a pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (incorrectly! it was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 * describe the SYN-RECV state. All the description
		 * is wrong; we cannot trust it and should
		 * rely only on common sense and implementation
		 * experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting the SYNACK,
		 * similar to the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&

		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
				       TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}

	/* Further reproduces the section "SEGMENT ARRIVES"
	 * for state SYN-RECEIVED of RFC793.
	 * It is broken, however: it misbehaves only when SYNs are crossed.
	 *
	 * You would think that SYN crossing is impossible here, since
	 * we should have a SYN_SENT socket (from connect()) on our end,
	 * but this is not true if the crossed SYNs were sent to both
	 * ends by a malicious third party. We must defend against this,
	 * and to do that we first verify the ACK (as per RFC793, page
	 * 36) and reset if it is invalid. Is this a true full defense?
	 * To convince ourselves, let us consider a way in which the ACK
	 * test can still pass in this 'malicious crossed SYNs' case.
	 * The malicious sender sends identical SYNs (and thus identical
	 * sequence numbers) to both A and B:
	 *
	 *	A: gets SYN, seq=7
	 *	B: gets SYN, seq=7
	 *
	 * By our good fortune, both A and B select the same initial
	 * send sequence number of seven :-)
	 *
	 *	A: sends SYN|ACK, seq=7, ack_seq=8
	 *	B: sends SYN|ACK, seq=7, ack_seq=8
	 *
	 * So we are now A eating this SYN|ACK; the ACK test passes. So
	 * does the sequence test; the SYN is truncated, and thus we
	 * consider it a bare ACK.
	 *
	 * If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop
	 * this bare ACK. Otherwise, we create an established connection.
	 * Both ends (listening sockets) accept the new incoming
	 * connection and try to talk to each other. 8-)
	 *
	 * Note: This case is both harmless and rare. The probability is
	 * about the same as us discovering intelligent life on another
	 * planet tomorrow.
	 *
	 * But generally, we should (RFC lies!) accept an ACK
	 * from a SYNACK both here and in tcp_rcv_state_process().
	 * tcp_rcv_state_process() does not; hence, we do not either.
	 *
	 * Note that the case is absolutely generic:
	 * we cannot optimize anything here without
	 * violating the protocol. All the checks must be made
	 * before the attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by the listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too-early or too-late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		 * at tcp_rsk(req)->rcv_isn + 1.
		 */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, the ACK is valid; create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
	 * ESTABLISHED STATE. If it gets dropped after the
	 * socket is created, expect trouble.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary, to
		 * avoid becoming vulnerable to outside attacks aimed at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk);
	}
	if (!fastopen) {
		inet_csk_reqsk_queue_drop(sk, req);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);
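
/*
 * Editor's note (illustrative sketch, not part of the kernel build):
 * tcp_check_req() above detects a pure retransmitted SYN by masking
 * the flag word down to RST|SYN|ACK and requiring the result to be
 * exactly SYN, with the sequence number equal to the original rcv_isn
 * and PAWS not objecting. A hypothetical userspace sketch using plain
 * TCP flag bits instead of the kernel's big-endian flag-word macros:
 */
#if 0
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define FLAG_SYN 0x02u
#define FLAG_RST 0x04u
#define FLAG_ACK 0x10u

static bool is_retransmitted_syn(uint8_t flags, uint32_t seq,
				 uint32_t rcv_isn, bool paws_reject)
{
	uint8_t flg = flags & (FLAG_RST | FLAG_SYN | FLAG_ACK);

	return seq == rcv_isn && flg == FLAG_SYN && !paws_reject;
}

int main(void)
{
	assert(is_retransmitted_syn(FLAG_SYN, 7, 7, false));
	assert(!is_retransmitted_syn(FLAG_SYN | FLAG_ACK, 7, 7, false));
	assert(!is_retransmitted_syn(FLAG_SYN, 8, 7, false));
	return 0;
}
#endif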

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just short-circuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where, after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table and the lock on the
		 * listening socket does not protect us any more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);
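
/*
 * Editor's note (illustrative sketch, not part of the kernel build):
 * the pattern in tcp_child_process() above ("process now if the socket
 * is not owned by a user context, otherwise park the packet on the
 * backlog for the owner to drain on release") is generic enough to
 * sketch in miniature. Types and names below are hypothetical:
 */
#if 0
#include <stdbool.h>

struct toy_sock {
	bool owned_by_user;	/* a user context holds the socket lock */
	int  backlog_len;	/* packets parked for later processing */
};

static void toy_receive(struct toy_sock *sk, int pkt,
			void (*process)(struct toy_sock *, int))
{
	if (!sk->owned_by_user)
		process(sk, pkt);	/* fast path: handle in softirq */
	else
		sk->backlog_len++;	/* user owns the sock: defer */
}
#endif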