/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>

int sysctl_tcp_syncookies __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_syncookies);

int sysctl_tcp_abort_on_overflow __read_mostly;

struct inet_timewait_death_row tcp_death_row = {
	.sysctl_max_tw_buckets = NR_FILE * 2,
	.period		= TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
	.death_lock	= __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock),
	.hashinfo	= &tcp_hashinfo,
	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
					    (unsigned long)&tcp_death_row),
	.twkill_work	= __WORK_INITIALIZER(tcp_death_row.twkill_work,
					     inet_twdr_twkill_work),
	/* Short-time timewait calendar */
	.twcal_hand	= -1,
	.twcal_timer	= TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
					    (unsigned long)&tcp_death_row),
};

EXPORT_SYMBOL_GPL(tcp_death_row);
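/* A worked example of the acceptance check below: with a receive window
 * of [s_win, e_win) = [100, 200), a bare ACK with seq == end_seq == 100
 * is in window (first test), a data segment occupying [90, 150) overlaps
 * the window and is in window (second test), and a zero-length segment
 * sitting exactly on the right edge, [200, 200), is also treated as in
 * window (third test).
 */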
static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return 1;
	if (after(end_seq, s_win) && before(seq, e_win))
		return 1;
	return (seq == e_win && seq == end_seq);
}

/*
 * * The main purpose of the TIME-WAIT state is to close a connection
 *   gracefully when one of the ends sits in LAST-ACK or CLOSING,
 *   retransmitting its FIN (and, probably, a tail of data) and one or
 *   more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   That is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to allow the loss of one
 *   (or more) segments sent by the peer and of our ACKs. This time may be
 *   calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   has finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with these semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some cleverer way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. It means that, strictly speaking, we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	u8 *hash_location;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	int paws_reject = 0;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}
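	/* PAWS in a nutshell: tcp_paws_reject() treats the segment as an
	 * old duplicate when its timestamp (rcv_tsval) is older than the
	 * last timestamp accepted from this peer (tw_ts_recent), e.g. a
	 * tsval of 1000 arriving after we have already seen 1005. This is
	 * only a rough sketch; see tcp_paws_reject()/tcp_paws_check() for
	 * the exact rule, including the RST special case.
	 */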

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return TCP_TW_ACK;

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			goto kill_with_rst;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrives after the half-duplex
		 * close, reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
kill_with_rst:
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			return TCP_TW_RST;
		}

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		/* I am ashamed, but I failed to make it more elegant.
		 * Yes, it is a direct reference to IP, which is impossible
		 * to generalize to IPv6.  Given that IPv6 does not understand
		 * recycling in any case, it is not a big problem in
		 * practice.  --ANK */
		if (tw->tw_family == AF_INET &&
		    tcp_death_row.sysctl_tw_recycle && tcptw->tw_ts_recent_stamp &&
		    tcp_v4_tw_remember_stamp(tw))
			inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout,
					   TCP_TIMEWAIT_LEN);
		else
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
					   TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 * Now the real TIME-WAIT state.
	 *
	 * RFC 1122:
	 * "When a connection is [...] in TIME-WAIT state [...]
	 * [a TCP] MAY accept a new SYN from the remote TCP to
	 * reopen the connection directly, if it:
	 *
	 * (1)  assigns its initial sequence number for the new
	 *      connection to be larger than the largest sequence
	 *      number it used on the previous connection incarnation,
	 *      and
	 *
	 * (2)  returns to TIME-WAIT state if the SYN turns out
	 *      to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* An in-window segment can only be a reset or a bare ACK. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule(tw, &tcp_death_row);
				inet_twsk_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
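		/* An in-window segment that we do not kill on restarts the
		 * TIME-WAIT period from scratch (TCP_TIMEWAIT_LEN, nominally
		 * 60 seconds).
		 */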
		inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
				   TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.

	   All such segments are ACKed immediately.

	   The only exception is a new SYN. We accept it, if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check (that the SYN carries
	   a newer sequence number) works at rates <40Mbit/sec.
	   However, if PAWS works, it is reliable, and moreover we may
	   even relax the silly seq space cutoff.

	   RED-PEN: we violate the main RFC requirement here: if this SYN
	   turns out to be an old duplicate (i.e. we receive an RST in
	   reply to our SYN-ACK), we must return the socket to time-wait
	   state. That is not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
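		/* Pick an ISN for the new incarnation that is safely above
		 * anything the old one could have used: tw_snd_nxt advanced
		 * by a full 64K window, plus 2 (presumably to also step past
		 * the sequence numbers consumed by SYN and FIN). A result of
		 * 0 is bumped below, since the caller appears to treat a
		 * zero TCP_SKB_CB(skb)->when as "no ISN supplied".
		 */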
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->when = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN, it may be both an old duplicate
		 * and a new good SYN with a random sequence number <rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
					   TCP_TIMEWAIT_LEN);

		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	struct inet_timewait_sock *tw = NULL;
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	int recycle_ok = 0;

	if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
		recycle_ok = icsk->icsk_af_ops->remember_stamp(sk);

	if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
		tw = inet_twsk_alloc(sk, state);

	if (tw != NULL) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
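		/* i.e. rto = 3.5 * icsk_rto: e.g. an icsk_rto of 200ms gives
		 * 800ms - 100ms = 700ms. This is the floor applied to timeo
		 * below, so the bucket outlives the peer's retransmissions.
		 */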

		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);
			struct inet6_timewait_sock *tw6;

			tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
			tw6 = inet6_twsk((struct sock *)tw);
			ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr);
			ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr);
			tw->tw_ipv6only = np->ipv6only;
		}
#endif

#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			struct tcp_md5sig_key *key;
			memset(tcptw->tw_md5_key, 0, sizeof(tcptw->tw_md5_key));
			tcptw->tw_md5_keylen = 0;
			key = tp->af_specific->md5_lookup(sk, sk);
			if (key != NULL) {
				memcpy(&tcptw->tw_md5_key, key->key, key->keylen);
				tcptw->tw_md5_keylen = key->keylen;
				if (tcp_alloc_md5sig_pool(sk) == NULL)
					BUG();
			}
		} while (0);
#endif

		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (recycle_ok) {
			tw->tw_timeout = rto;
		} else {
			tw->tw_timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;
		}

		inet_twsk_schedule(tw, &tcp_death_row, timeo,
				   TCP_TIMEWAIT_LEN);
		inet_twsk_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		LIMIT_NETDEBUG(KERN_INFO "TCP: time wait bucket table overflow\n");
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_timewait_sock *twsk = tcp_twsk(sk);
	if (twsk->tw_md5_keylen)
		tcp_free_md5sig_pool();
#endif
}

EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
					 struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. tp of the listening
 * socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);

	if (newsk != NULL) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp = tcp_sk(newsk);
		struct tcp_sock *oldtp = tcp_sk(sk);
		struct tcp_cookie_values *oldcvp = oldtp->cookie_values;

		/* TCP Cookie Transactions require space for the cookie pair,
		 * as it differs for each connection.  There is no need to
		 * copy any s_data_payload stored at the original socket.
		 * Failure will prevent resuming the connection.
		 *
		 * Presumed copied, in order of appearance:
		 *	cookie_in_always, cookie_out_never
		 */
		if (oldcvp != NULL) {
			struct tcp_cookie_values *newcvp =
				kzalloc(sizeof(*newtp->cookie_values),
					GFP_ATOMIC);

			if (newcvp != NULL) {
				kref_init(&newcvp->kref);
				newcvp->cookie_desired =
						oldcvp->cookie_desired;
				newtp->cookie_values = newcvp;
			} else {
				/* Not Yet Implemented */
				newtp->cookie_values = NULL;
			}
		}

		/* Now setup tcp_sock */
		newtp->pred_flags = 0;

		newtp->rcv_wup = newtp->copied_seq =
		newtp->rcv_nxt = treq->rcv_isn + 1;

		newtp->snd_sml = newtp->snd_una =
		newtp->snd_nxt = newtp->snd_up =
			treq->snt_isn + 1 + tcp_s_data_size(oldtp);

		tcp_prequeue_init(newtp);

		tcp_init_wl(newtp, treq->rcv_isn);

		newtp->srtt = 0;
		newtp->mdev = TCP_TIMEOUT_INIT;
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = 2;
		newtp->snd_cwnd_cnt = 0;
		newtp->bytes_acked = 0;

		newtp->frto_counter = 0;
		newtp->frto_highmark = 0;

		newicsk->icsk_ca_ops = &tcp_init_congestion_ops;

		tcp_set_ca_state(newsk, TCP_CA_Open);
		tcp_init_xmit_timers(newsk);
		skb_queue_head_init(&newtp->out_of_order_queue);
		newtp->write_seq = newtp->pushed_seq =
			treq->snt_isn + 1 + tcp_s_data_size(oldtp);

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.num_sacks = 0;

		newtp->urg_data = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				tcp_enable_fack(newtp);
		}
		newtp->window_clamp = req->window_clamp;
		newtp->rcv_ssthresh = req->rcv_wnd;
		newtp->rcv_wnd = req->rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
				  newtp->rx_opt.snd_wscale);
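		/* Example of the computation above: an advertised window
		 * field of 4096 with snd_wscale == 7 yields an effective
		 * send window of 4096 << 7 = 524288 bytes.
		 */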
		newtp->max_window = newtp->snd_wnd;

		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = get_seconds();
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
#ifdef CONFIG_TCP_MD5SIG
		newtp->md5sig_info = NULL;	/*XXX*/
		if (newtp->af_specific->md5_lookup(sk, newsk))
			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
		if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		TCP_ECN_openreq_child(newtp, req);

		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}

/*
 * Process an incoming packet for SYN_RECV sockets represented
 * as a request_sock.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   struct request_sock **prev)
{
	struct tcp_options_received tmp_opt;
	u8 *hash_location;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	int paws_reject = 0;
	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			/* We do not store the true stamp, but it is not
			 * required; it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}
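	/* Example of the estimate above: with req->retrans == 2, the stamp
	 * is back-dated by (TCP_TIMEOUT_INIT/HZ) << 2 seconds, roughly
	 * matching when the timestamp was actually received given the
	 * exponential SYN-ACK retransmission backoff.
	 */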

	/* Check for a pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (incorrectly! it was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 * describe the SYN-RECV state.  All the description
		 * is wrong, we cannot trust it and should rely only
		 * on common sense and implementation experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 */
		req->rsk_ops->rtx_syn_ack(sk, req, NULL);
		return NULL;
	}

	/* Further reproduces section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however: it fails only when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party.  We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid.  Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   The malicious sender sends identical SYNs (and thus identical
	   sequence numbers) to both A and B:

	   A: gets SYN, seq=7
	   B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

	   A: sends SYN|ACK, seq=7, ack_seq=8
	   B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, and the ACK test passes.
	   So does the sequence test, the SYN is truncated, and thus we
	   consider it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept is set, we silently
	   drop this bare ACK.  Otherwise, we create an established
	   connection.  Both ends (listening sockets) accept the new
	   incoming connection and try to talk to each other. 8-)

	   Note: This case is both harmless and rare.  The probability is
	   about the same as us discovering intelligent life on another
	   planet tomorrow.

	   But generally, we should (the RFC lies!) accept an ACK from a
	   SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not too.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating protocol.  All the checks must be made
	   before an attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: a reset will be sent by the listening socket.
	 */
	if ((flg & TCP_FLAG_ACK) &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1 + tcp_s_data_size(tcp_sk(sk))))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too-early or too-late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* The ACK sequence was verified above, just make sure ACK is
	 * set.  If ACK is not set, silently drop the packet.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACKs. */
	if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, the ACK is valid, create the big socket and feed this
	 * segment to it.  It will repeat all the tests.  THIS SEGMENT
	 * MUST MOVE THE SOCKET TO ESTABLISHED STATE.  If it gets dropped
	 * after the socket is created, expect trouble.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL)
		goto listen_overflow;

	inet_csk_reqsk_queue_unlink(sk, req, prev);
	inet_csk_reqsk_queue_removed(sk, req);

	inet_csk_reqsk_queue_add(sk, req, child);
	return child;

listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
	if (!(flg & TCP_FLAG_RST))
		req->rsk_ops->send_reset(sk, skb);

	inet_csk_reqsk_queue_drop(sk, req, prev);
	return NULL;
}

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just short-circuit this and continue with
 * the new socket.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb, tcp_hdr(skb),
					    skb->len);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent, 0);
	} else {
		/* Alas, it is possible again, because we do lookup
		 * in the main socket hash table and the lock on the
		 * listening socket no longer protects us.
		 */
		__sk_add_backlog(child, skb);
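		/* The skb sits on the child's backlog and will be replayed
		 * by release_sock() once the current owner drops the
		 * socket lock.
		 */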
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}

EXPORT_SYMBOL(tcp_check_req);
EXPORT_SYMBOL(tcp_child_process);
EXPORT_SYMBOL(tcp_create_openreq_child);
EXPORT_SYMBOL(tcp_timewait_state_process);