/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */
20
21 #include <linux/mm.h>
22 #include <linux/module.h>
23 #include <linux/slab.h>
24 #include <linux/sysctl.h>
25 #include <linux/workqueue.h>
26 #include <net/tcp.h>
27 #include <net/inet_common.h>
28 #include <net/xfrm.h>
29
30 int sysctl_tcp_syncookies __read_mostly = 1;
31 EXPORT_SYMBOL(sysctl_tcp_syncookies);
32
33 int sysctl_tcp_abort_on_overflow __read_mostly;
34
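/* tcp_death_row tracks every TIME-WAIT socket in the system.  Two timers
 * drive expiry: tw_timer (inet_twdr_hangman) walks the slow wheel for
 * sockets waiting the full TCP_TIMEWAIT_LEN, while twcal_timer
 * (inet_twdr_twcal_tick) services the short-timeout calendar used when
 * tcp_tw_recycle shortens the wait to a few RTOs.
 */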
struct inet_timewait_death_row tcp_death_row = {
	.sysctl_max_tw_buckets = NR_FILE * 2,
	.period		= TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
	.death_lock	= __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock),
	.hashinfo	= &tcp_hashinfo,
	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
					    (unsigned long)&tcp_death_row),
	.twkill_work	= __WORK_INITIALIZER(tcp_death_row.twkill_work,
					     inet_twdr_twkill_work),
	/* Short-time timewait calendar */

	.twcal_hand	= -1,
	.twcal_timer	= TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
					    (unsigned long)&tcp_death_row),
};
EXPORT_SYMBOL_GPL(tcp_death_row);

/* VJ's idea. Save the last timestamp seen from this destination and hold
 * it for at least the normal timewait interval, so it can be used for
 * duplicate segment detection in subsequent connections before they enter
 * the synchronized state.
 */

static int tcp_remember_stamp(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_peer *peer;
	bool release_it;

	peer = icsk->icsk_af_ops->get_peer(sk, &release_it);
	if (peer) {
		if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
		    ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
		     peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
			peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
			peer->tcp_ts = tp->rx_opt.ts_recent;
		}
		if (release_it)
			inet_putpeer(peer);
		return 1;
	}

	return 0;
}

static int tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct sock *sk = (struct sock *) tw;
	struct inet_peer *peer;

	peer = twsk_getpeer(sk);
	if (peer) {
		const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

		if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
		    ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
		     peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
			peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
			peer->tcp_ts = tcptw->tw_ts_recent;
		}
		inet_putpeer(peer);
		return 1;
	}
	return 0;
}

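/* RFC 793 segment acceptability test: does [seq, end_seq) overlap the
 * receive window [s_win, e_win)?  Zero-length segments (bare ACKs) are
 * acceptable when they sit exactly on a window edge.
 *
 * Example: with rcv_nxt = 100 and rcv_wnd = 50 the window is [100, 150);
 * a segment with seq = 90, end_seq = 120 overlaps and is accepted, a bare
 * ACK with seq = end_seq = 100 is accepted by the first test, and a
 * one-byte segment with seq = 150 falls outside and is rejected.
 */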
static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return 1;
	if (after(end_seq, s_win) && before(seq, e_win))
		return 1;
	return seq == e_win && seq == end_seq;
}

/*
 * * The main purpose of the TIME-WAIT state is to close the connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING
 *   retransmitting FIN (and, probably, a tail of data) and one or more of
 *   our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   It is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout enough to allow for the loss of one
 *   (or more) segments sent by the peer and of our ACKs. This time may be
 *   calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please, read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. Strictly speaking, this means we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 */
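/* The return value tells the caller how to dispose of the segment:
 * TCP_TW_SUCCESS - segment consumed or dropped, nothing to transmit,
 * TCP_TW_RST     - answer with a reset,
 * TCP_TW_ACK     - answer with an ACK (out-of-window data, or the final
 *                  FIN moving the FIN-WAIT-2 substate to TIME-WAIT),
 * TCP_TW_SYN     - acceptable new SYN; reprocess it against the listener
 *                  to reopen the connection (RFC 1122).
 */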
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	int paws_reject = 0;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return TCP_TW_ACK;

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			goto kill_with_rst;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
kill_with_rst:
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			return TCP_TW_RST;
		}

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		if (tcp_death_row.sysctl_tw_recycle &&
		    tcptw->tw_ts_recent_stamp &&
		    tcp_tw_remember_stamp(tw))
			inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout,
					   TCP_TIMEWAIT_LEN);
		else
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
					   TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 *	Now real TIME-WAIT state.
	 *
	 *	RFC 1122:
	 *	"When a connection is [...] on TIME-WAIT state [...]
	 *	[a TCP] MAY accept a new SYN from the remote TCP to
	 *	reopen the connection directly, if it:
	 *
	 *	(1)  assigns its initial sequence number for the new
	 *	connection to be larger than the largest sequence
	 *	number it used on the previous connection incarnation,
	 *	and
	 *
	 *	(2)  returns to TIME-WAIT state if the SYN turns out
	 *	to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* An in-window segment can only be a reset or a bare ACK. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule(tw, &tcp_death_row);
				inet_twsk_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
				   TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.
	 *
	 * All such segments are ACKed immediately.
	 *
	 * The only exception is a new SYN. We accept it, if it is
	 * not an old duplicate and we are not in danger of being killed
	 * by delayed old duplicates. The RFC check - that it carries a
	 * newer sequence number - works at rates < 40 Mbit/sec.
	 * However, if PAWS works, it is reliable AND, even more,
	 * we may even relax the silly sequence-space cutoff.
	 *
	 * RED-PEN: we violate the main RFC requirement: if this SYN turns
	 * out to be an old duplicate (i.e. we receive an RST in reply to
	 * our SYN-ACK), we must return the socket to time-wait state. It
	 * is not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
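		/* RFC 1122 reopening: the new ISN must be larger than the
		 * largest sequence number used on the previous incarnation,
		 * so start past tw_snd_nxt with a maximal-window margin.
		 * TCP_SKB_CB(skb)->when == 0 means "no ISN override" to the
		 * listener, so skip 0.
		 */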
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->when = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACK-less SYN, it may be either an old duplicate
		 * or a new, valid SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
					   TCP_TIMEWAIT_LEN);

		/* Send ACK. Note: we do not put the bucket,
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
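/* With tcp_tw_recycle enabled and a remembered peer timestamp
 * (recycle_ok below), the wait is shortened from TCP_TIMEWAIT_LEN to
 * roughly 3.5 * RTO, which is what the rto expression computes:
 * (rto << 2) - (rto >> 1) == 4 * rto - rto / 2.
 */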
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	struct inet_timewait_sock *tw = NULL;
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	int recycle_ok = 0;

	if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
		recycle_ok = tcp_remember_stamp(sk);

	if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
		tw = inet_twsk_alloc(sk, state);

	if (tw != NULL) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);

		tw->tw_transparent	= inet_sk(sk)->transparent;
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);
			struct inet6_timewait_sock *tw6;

			tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
			tw6 = inet6_twsk((struct sock *)tw);
			ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr);
			ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr);
			tw->tw_tclass = np->tclass;
			tw->tw_ipv6only = np->ipv6only;
		}
#endif

#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			struct tcp_md5sig_key *key;
			memset(tcptw->tw_md5_key, 0, sizeof(tcptw->tw_md5_key));
			tcptw->tw_md5_keylen = 0;
			key = tp->af_specific->md5_lookup(sk, sk);
			if (key != NULL) {
				memcpy(&tcptw->tw_md5_key, key->key, key->keylen);
				tcptw->tw_md5_keylen = key->keylen;
				if (tcp_alloc_md5sig_pool(sk) == NULL)
					BUG();
			}
		} while (0);
#endif

		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (recycle_ok) {
			tw->tw_timeout = rto;
		} else {
			tw->tw_timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;
		}

		inet_twsk_schedule(tw, &tcp_death_row, timeo,
				   TCP_TIMEWAIT_LEN);
		inet_twsk_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_timewait_sock *twsk = tcp_twsk(sk);
	if (twsk->tw_md5_keylen)
		tcp_free_md5sig_pool();
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
					 struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. tp of the listening
 * socket already contains all the necessary default parameters.
 */
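/* Note: the child is cloned with inet_csk_clone_lock(), so it comes back
 * bh-locked and with its reference count already raised; the caller owns
 * both the lock and the reference.
 */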
struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

	if (newsk != NULL) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp = tcp_sk(newsk);
		struct tcp_sock *oldtp = tcp_sk(sk);
		struct tcp_cookie_values *oldcvp = oldtp->cookie_values;

		/* TCP Cookie Transactions require space for the cookie pair,
		 * as it differs for each connection.  There is no need to
		 * copy any s_data_payload stored at the original socket.
		 * Failure will prevent resuming the connection.
		 *
		 * Presumed copied, in order of appearance:
		 *	cookie_in_always, cookie_out_never
		 */
		if (oldcvp != NULL) {
			struct tcp_cookie_values *newcvp =
				kzalloc(sizeof(*newtp->cookie_values),
					GFP_ATOMIC);

			if (newcvp != NULL) {
				kref_init(&newcvp->kref);
				newcvp->cookie_desired =
						oldcvp->cookie_desired;
				newtp->cookie_values = newcvp;
			} else {
				/* Not Yet Implemented */
				newtp->cookie_values = NULL;
			}
		}

		/* Now setup tcp_sock */
		newtp->pred_flags = 0;

		newtp->rcv_wup = newtp->copied_seq =
			newtp->rcv_nxt = treq->rcv_isn + 1;

		newtp->snd_sml = newtp->snd_una =
			newtp->snd_nxt = newtp->snd_up =
			treq->snt_isn + 1 + tcp_s_data_size(oldtp);

		tcp_prequeue_init(newtp);

		tcp_init_wl(newtp, treq->rcv_isn);

		newtp->srtt = 0;
		newtp->mdev = TCP_TIMEOUT_INIT;
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = TCP_INIT_CWND;
		newtp->snd_cwnd_cnt = 0;
		newtp->bytes_acked = 0;

		newtp->frto_counter = 0;
		newtp->frto_highmark = 0;

		newicsk->icsk_ca_ops = &tcp_init_congestion_ops;

		tcp_set_ca_state(newsk, TCP_CA_Open);
		tcp_init_xmit_timers(newsk);
		skb_queue_head_init(&newtp->out_of_order_queue);
		newtp->write_seq = newtp->pushed_seq =
			treq->snt_isn + 1 + tcp_s_data_size(oldtp);

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.num_sacks = 0;

		newtp->urg_data = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				tcp_enable_fack(newtp);
		}
		newtp->window_clamp = req->window_clamp;
		newtp->rcv_ssthresh = req->rcv_wnd;
		newtp->rcv_wnd = req->rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
				  newtp->rx_opt.snd_wscale);
		newtp->max_window = newtp->snd_wnd;

		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = get_seconds();
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
#ifdef CONFIG_TCP_MD5SIG
		newtp->md5sig_info = NULL;	/*XXX*/
		if (newtp->af_specific->md5_lookup(sk, newsk))
			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
		if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		TCP_ECN_openreq_child(newtp, req);

		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented
 * as a request_sock.
 */
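/* Returns NULL when the segment was consumed (dropped, ACKed, or answered
 * by a retransmitted SYN-ACK), the listener sk itself when the segment
 * carries an unacceptable ACK (the listener will then send the reset),
 * or the newly created child socket on a valid final ACK.
 */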

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   struct request_sock **prev)
{
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	int paws_reject = 0;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			/* We do not store the true timestamp, but it is not
			 * required; it can be estimated (approximately) from
			 * other data.
			 */
			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC 793 draws this case in figures 6 and 8 (incorrectly -
		 * it was fixed in RFC 1122), but the formal protocol
		 * description says NOTHING. To be more exact, it says that
		 * we should send an ACK, because this segment (at least, if
		 * it has no data) is out of window.
		 *
		 * CONCLUSION: RFC 793 (even with RFC 1122) DOES NOT
		 * describe the SYN-RECV state. All the description
		 * is wrong; we cannot trust it and should rely only
		 * on common sense and implementation experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC 793, fixed by RFC 1122.
		 */
		req->rsk_ops->rtx_syn_ack(sk, req, NULL);
		return NULL;
	}

	/* Further reproduces the section "SEGMENT ARRIVES"
	 * for state SYN-RECEIVED of RFC 793.
	 * It is broken, however; it fails only in the case of
	 * crossed SYNs.
	 *
	 * You would think that SYN crossing is impossible here, since
	 * we should have a SYN_SENT socket (from connect()) on our end,
	 * but this is not true if the crossed SYNs were sent to both
	 * ends by a malicious third party.  We must defend against this,
	 * and to do that we first verify the ACK (as per RFC 793, page
	 * 36) and reset if it is invalid.  Is this a true full defense?
	 * To convince ourselves, let us consider a way in which the ACK
	 * test can still pass in this 'malicious crossed SYNs' case.
	 * The malicious sender sends identical SYNs (and thus identical
	 * sequence numbers) to both A and B:
	 *
	 *	A: gets SYN, seq=7
	 *	B: gets SYN, seq=7
	 *
	 * By our good fortune, both A and B select the same initial
	 * send sequence number of seven :-)
	 *
	 *	A: sends SYN|ACK, seq=7, ack_seq=8
	 *	B: sends SYN|ACK, seq=7, ack_seq=8
	 *
	 * So we are now A eating this SYN|ACK, and the ACK test passes.  So
	 * does the sequence test, the SYN is truncated, and thus we consider
	 * it a bare ACK.
	 *
	 * If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	 * bare ACK.  Otherwise, we create an established connection.  Both
	 * ends (listening sockets) accept the new incoming connection and try
	 * to talk to each other. 8-)
	 *
	 * Note: This case is both harmless and rare.  The possibility is
	 * about the same as us discovering intelligent life on another
	 * planet tomorrow.
	 *
	 * But generally, we should (the RFC lies!) accept an ACK
	 * from a SYN-ACK both here and in tcp_rcv_state_process().
	 * tcp_rcv_state_process() does not; hence, neither do we.
	 *
	 * Note that the case is absolutely generic:
	 * we cannot optimize anything here without
	 * violating protocol.  All the checks must be made
	 * before an attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by listening socket
	 */
	if ((flg & TCP_FLAG_ACK) &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1 + tcp_s_data_size(tcp_sk(sk))))
		return sk;

	/* Also, it would not be such a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too-early or too-late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}
	if (tmp_opt.saw_tstamp && tmp_opt.rcv_tsecr)
		tcp_rsk(req)->snt_synack = tmp_opt.rcv_tsecr;
	else if (req->retrans) /* don't take RTT sample if retrans && ~TS */
		tcp_rsk(req)->snt_synack = 0;

	/* OK, the ACK is valid, so create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
	 * ESTABLISHED STATE. If it is dropped after the
	 * socket is created, expect trouble.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL)
		goto listen_overflow;

	inet_csk_reqsk_queue_unlink(sk, req, prev);
	inet_csk_reqsk_queue_removed(sk, req);

	inet_csk_reqsk_queue_add(sk, req, child);
	return child;

listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
	if (!(flg & TCP_FLAG_RST))
		req->rsk_ops->send_reset(sk, skb);

	inet_csk_reqsk_queue_drop(sk, req, prev);
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just short-circuit this and continue with
 * the new socket.
 */

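/* Called with the child bh-locked and with a reference held on it;
 * both are released before returning.  Returns the verdict of
 * tcp_rcv_state_process(), or 0 when the segment was backlogged.
 */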
int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb, tcp_hdr(skb),
					    skb->len);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent, 0);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table and the lock on the
		 * listening socket no longer protects us.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);