1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Implementation of the Transmission Control Protocol(TCP).
7 *
8 * IPv4 specific functions
9 *
10 *
11 * code split from:
12 * linux/ipv4/tcp.c
13 * linux/ipv4/tcp_input.c
14 * linux/ipv4/tcp_output.c
15 *
16 * See tcp.c for author information
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 */
23
24 /*
25 * Changes:
26 * David S. Miller : New socket lookup architecture.
27 * This code is dedicated to John Dyson.
28 * David S. Miller : Change semantics of established hash,
29 * half is devoted to TIME_WAIT sockets
30 * and the rest go in the other half.
31 * Andi Kleen : Add support for syncookies and fixed
32 * some bugs: ip options weren't passed to
33 * the TCP layer, missed a check for an
34 * ACK bit.
35 * Andi Kleen : Implemented fast path mtu discovery.
36 * Fixed many serious bugs in the
37 * request_sock handling and moved
38 * most of it into the af independent code.
39 * Added tail drop and some other bugfixes.
40 * Added new listen semantics.
41 * Mike McLagan : Routing by source
42 * Juan Jose Ciarlante: ip_dynaddr bits
43 * Andi Kleen: various fixes.
44 * Vitaly E. Lavrov : Transparent proxy revived after year
45 * coma.
46 * Andi Kleen : Fix new listen.
47 * Andi Kleen : Fix accept error reporting.
48 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
49 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
50 * a single port at the same time.
51 */
52
53 #define pr_fmt(fmt) "TCP: " fmt
54
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
65
66 #include <net/net_namespace.h>
67 #include <net/icmp.h>
68 #include <net/inet_hashtables.h>
69 #include <net/tcp.h>
70 #include <net/transp_v6.h>
71 #include <net/ipv6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
74 #include <net/xfrm.h>
75 #include <net/netdma.h>
76 #include <net/secure_seq.h>
77 #include <net/tcp_memcontrol.h>
78
79 #include <linux/inet.h>
80 #include <linux/ipv6.h>
81 #include <linux/stddef.h>
82 #include <linux/proc_fs.h>
83 #include <linux/seq_file.h>
84
85 #include <linux/crypto.h>
86 #include <linux/scatterlist.h>
87
88 int sysctl_tcp_tw_reuse __read_mostly;
89 int sysctl_tcp_low_latency __read_mostly;
90 EXPORT_SYMBOL(sysctl_tcp_low_latency);
91
92
93 #ifdef CONFIG_TCP_MD5SIG
94 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
95 __be32 daddr, __be32 saddr, const struct tcphdr *th);
96 #endif
97
98 struct inet_hashinfo tcp_hashinfo;
99 EXPORT_SYMBOL(tcp_hashinfo);
100
101 static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
102 {
103 return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
104 ip_hdr(skb)->saddr,
105 tcp_hdr(skb)->dest,
106 tcp_hdr(skb)->source);
107 }
108
109 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
110 {
111 const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
112 struct tcp_sock *tp = tcp_sk(sk);
113
114 /* With PAWS, it is safe from the viewpoint
115 of data integrity. Even without PAWS it is safe provided sequence
116 spaces do not overlap i.e. at data rates <= 80Mbit/sec.
117
118 Actually, the idea is close to VJ's: the timestamp cache is held
119 not per host but per port pair, and the TW bucket is used as the
120 state holder.
121
122 If the TW bucket has already been destroyed, we fall back to VJ's
123 scheme and use the initial timestamp retrieved from the peer table.
124 */
125 if (tcptw->tw_ts_recent_stamp &&
126 (twp == NULL || (sysctl_tcp_tw_reuse &&
127 get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
128 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
129 if (tp->write_seq == 0)
130 tp->write_seq = 1;
131 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
132 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
133 sock_hold(sktw);
134 return 1;
135 }
136
137 return 0;
138 }
139 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
140
141 /* This will initiate an outgoing connection. */
142 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
143 {
144 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
145 struct inet_sock *inet = inet_sk(sk);
146 struct tcp_sock *tp = tcp_sk(sk);
147 __be16 orig_sport, orig_dport;
148 __be32 daddr, nexthop;
149 struct flowi4 *fl4;
150 struct rtable *rt;
151 int err;
152 struct ip_options_rcu *inet_opt;
153
154 if (addr_len < sizeof(struct sockaddr_in))
155 return -EINVAL;
156
157 if (usin->sin_family != AF_INET)
158 return -EAFNOSUPPORT;
159
160 nexthop = daddr = usin->sin_addr.s_addr;
161 inet_opt = rcu_dereference_protected(inet->inet_opt,
162 sock_owned_by_user(sk));
163 if (inet_opt && inet_opt->opt.srr) {
164 if (!daddr)
165 return -EINVAL;
166 nexthop = inet_opt->opt.faddr;
167 }
168
169 orig_sport = inet->inet_sport;
170 orig_dport = usin->sin_port;
171 fl4 = &inet->cork.fl.u.ip4;
172 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
173 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
174 IPPROTO_TCP,
175 orig_sport, orig_dport, sk, true);
176 if (IS_ERR(rt)) {
177 err = PTR_ERR(rt);
178 if (err == -ENETUNREACH)
179 IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
180 return err;
181 }
182
183 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
184 ip_rt_put(rt);
185 return -ENETUNREACH;
186 }
187
188 if (!inet_opt || !inet_opt->opt.srr)
189 daddr = fl4->daddr;
190
191 if (!inet->inet_saddr)
192 inet->inet_saddr = fl4->saddr;
193 inet->inet_rcv_saddr = inet->inet_saddr;
194
195 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
196 /* Reset inherited state */
197 tp->rx_opt.ts_recent = 0;
198 tp->rx_opt.ts_recent_stamp = 0;
199 if (likely(!tp->repair))
200 tp->write_seq = 0;
201 }
202
203 if (tcp_death_row.sysctl_tw_recycle &&
204 !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
205 tcp_fetch_timewait_stamp(sk, &rt->dst);
206
207 inet->inet_dport = usin->sin_port;
208 inet->inet_daddr = daddr;
209
210 inet_csk(sk)->icsk_ext_hdr_len = 0;
211 if (inet_opt)
212 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
213
214 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
215
216 /* Socket identity is still unknown (sport may be zero).
217 * However we set the state to SYN-SENT and, without releasing the
218 * socket lock, select a source port, enter ourselves into the hash
219 * tables and complete initialization after this.
220 */
221 tcp_set_state(sk, TCP_SYN_SENT);
222 err = inet_hash_connect(&tcp_death_row, sk);
223 if (err)
224 goto failure;
225
226 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
227 inet->inet_sport, inet->inet_dport, sk);
228 if (IS_ERR(rt)) {
229 err = PTR_ERR(rt);
230 rt = NULL;
231 goto failure;
232 }
233 /* OK, now commit destination to socket. */
234 sk->sk_gso_type = SKB_GSO_TCPV4;
235 sk_setup_caps(sk, &rt->dst);
236
237 if (!tp->write_seq && likely(!tp->repair))
238 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
239 inet->inet_daddr,
240 inet->inet_sport,
241 usin->sin_port);
242
243 inet->inet_id = tp->write_seq ^ jiffies;
244
245 err = tcp_connect(sk);
246
247 rt = NULL;
248 if (err)
249 goto failure;
250
251 return 0;
252
253 failure:
254 /*
255 * This unhashes the socket and releases the local port,
256 * if necessary.
257 */
258 tcp_set_state(sk, TCP_CLOSE);
259 ip_rt_put(rt);
260 sk->sk_route_caps = 0;
261 inet->inet_dport = 0;
262 return err;
263 }
264 EXPORT_SYMBOL(tcp_v4_connect);
265
266 /*
267 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
268 * It can be called through tcp_release_cb() if socket was owned by user
269 * at the time tcp_v4_err() was called to handle ICMP message.
270 */
271 static void tcp_v4_mtu_reduced(struct sock *sk)
272 {
273 struct dst_entry *dst;
274 struct inet_sock *inet = inet_sk(sk);
275 u32 mtu = tcp_sk(sk)->mtu_info;
276
277 /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
278 * sent out by Linux are always < 576 bytes, so they should go through
279 * unfragmented).
280 */
281 if (sk->sk_state == TCP_LISTEN)
282 return;
283
284 dst = inet_csk_update_pmtu(sk, mtu);
285 if (!dst)
286 return;
287
288 /* Something is about to go wrong... Remember the soft error
289 * in case this connection is not able to recover.
290 */
291 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
292 sk->sk_err_soft = EMSGSIZE;
293
294 mtu = dst_mtu(dst);
295
296 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
297 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
298 tcp_sync_mss(sk, mtu);
299
300 /* Resend the TCP packet because it's
301 * clear that the old packet has been
302 * dropped. This is the new "fast" path mtu
303 * discovery.
304 */
305 tcp_simple_retransmit(sk);
306 } /* else let the usual retransmit timer handle it */
307 }
308
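/* ICMP_REDIRECT handling: pass the redirect to the cached dst entry so the
 * routing layer can update the next hop for this socket.
 */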
309 static void do_redirect(struct sk_buff *skb, struct sock *sk)
310 {
311 struct dst_entry *dst = __sk_dst_check(sk, 0);
312
313 if (dst)
314 dst->ops->redirect(dst, sk, skb);
315 }
316
317 /*
318 * This routine is called by the ICMP module when it gets some
319 * sort of error condition. If err < 0 then the socket should
320 * be closed and the error returned to the user. If err > 0
321 * it's just the icmp type << 8 | icmp code. After adjustment
322 * header points to the first 8 bytes of the tcp header. We need
323 * to find the appropriate port.
324 *
325 * The locking strategy used here is very "optimistic". When
326 * someone else accesses the socket the ICMP is just dropped
327 * and for some paths there is no check at all.
328 * A more general error queue to queue errors for later handling
329 * is probably better.
330 *
331 */
332
333 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
334 {
335 const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
336 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
337 struct inet_connection_sock *icsk;
338 struct tcp_sock *tp;
339 struct inet_sock *inet;
340 const int type = icmp_hdr(icmp_skb)->type;
341 const int code = icmp_hdr(icmp_skb)->code;
342 struct sock *sk;
343 struct sk_buff *skb;
344 struct request_sock *req;
345 __u32 seq;
346 __u32 remaining;
347 int err;
348 struct net *net = dev_net(icmp_skb->dev);
349
350 if (icmp_skb->len < (iph->ihl << 2) + 8) {
351 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
352 return;
353 }
354
355 sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
356 iph->saddr, th->source, inet_iif(icmp_skb));
357 if (!sk) {
358 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
359 return;
360 }
361 if (sk->sk_state == TCP_TIME_WAIT) {
362 inet_twsk_put(inet_twsk(sk));
363 return;
364 }
365
366 bh_lock_sock(sk);
367 /* If too many ICMPs get dropped on busy
368 * servers this needs to be solved differently.
369 * We do take care of the PMTU discovery (RFC 1191) special case:
370 * we can receive locally generated ICMP messages while the socket is held.
371 */
372 if (sock_owned_by_user(sk)) {
373 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
374 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
375 }
376 if (sk->sk_state == TCP_CLOSE)
377 goto out;
378
379 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
380 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
381 goto out;
382 }
383
384 icsk = inet_csk(sk);
385 tp = tcp_sk(sk);
386 req = tp->fastopen_rsk;
387 seq = ntohl(th->seq);
388 if (sk->sk_state != TCP_LISTEN &&
389 !between(seq, tp->snd_una, tp->snd_nxt) &&
390 (req == NULL || seq != tcp_rsk(req)->snt_isn)) {
391 /* For a Fast Open socket, allow seq to be snt_isn. */
392 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
393 goto out;
394 }
395
396 switch (type) {
397 case ICMP_REDIRECT:
398 do_redirect(icmp_skb, sk);
399 goto out;
400 case ICMP_SOURCE_QUENCH:
401 /* Just silently ignore these. */
402 goto out;
403 case ICMP_PARAMETERPROB:
404 err = EPROTO;
405 break;
406 case ICMP_DEST_UNREACH:
407 if (code > NR_ICMP_UNREACH)
408 goto out;
409
410 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
411 tp->mtu_info = info;
412 if (!sock_owned_by_user(sk)) {
413 tcp_v4_mtu_reduced(sk);
414 } else {
415 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
416 sock_hold(sk);
417 }
418 goto out;
419 }
420
421 err = icmp_err_convert[code].errno;
422 /* check if icmp_skb allows revert of backoff
423 * (see draft-zimmermann-tcp-lcd) */
424 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
425 break;
426 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
427 !icsk->icsk_backoff)
428 break;
429
430 /* XXX (TFO) - revisit the following logic for TFO */
431
432 if (sock_owned_by_user(sk))
433 break;
434
435 icsk->icsk_backoff--;
436 inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
437 TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
438 tcp_bound_rto(sk);
439
440 skb = tcp_write_queue_head(sk);
441 BUG_ON(!skb);
442
443 remaining = icsk->icsk_rto - min(icsk->icsk_rto,
444 tcp_time_stamp - TCP_SKB_CB(skb)->when);
445
446 if (remaining) {
447 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
448 remaining, TCP_RTO_MAX);
449 } else {
450 /* RTO revert clocked out retransmission.
451 * Will retransmit now */
452 tcp_retransmit_timer(sk);
453 }
454
455 break;
456 case ICMP_TIME_EXCEEDED:
457 err = EHOSTUNREACH;
458 break;
459 default:
460 goto out;
461 }
462
463 /* XXX (TFO) - if it's a TFO socket and has been accepted, rather
464 * than following the TCP_SYN_RECV case and closing the socket,
465 * we ignore the ICMP error and keep trying like a fully established
466 * socket. Is this the right thing to do?
467 */
468 if (req && req->sk == NULL)
469 goto out;
470
471 switch (sk->sk_state) {
472 struct request_sock *req, **prev;
473 case TCP_LISTEN:
474 if (sock_owned_by_user(sk))
475 goto out;
476
477 req = inet_csk_search_req(sk, &prev, th->dest,
478 iph->daddr, iph->saddr);
479 if (!req)
480 goto out;
481
482 /* ICMPs are not backlogged, hence we cannot get
483 an established socket here.
484 */
485 WARN_ON(req->sk);
486
487 if (seq != tcp_rsk(req)->snt_isn) {
488 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
489 goto out;
490 }
491
492 /*
493 * Still in SYN_RECV, just remove it silently.
494 * There is no good way to pass the error to the newly
495 * created socket, and POSIX does not want network
496 * errors returned from accept().
497 */
498 inet_csk_reqsk_queue_drop(sk, req, prev);
499 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
500 goto out;
501
502 case TCP_SYN_SENT:
503 case TCP_SYN_RECV: /* Cannot happen normally.
504 It can, e.g., if SYNs crossed
505 or with Fast Open.
506 */
507 if (!sock_owned_by_user(sk)) {
508 sk->sk_err = err;
509
510 sk->sk_error_report(sk);
511
512 tcp_done(sk);
513 } else {
514 sk->sk_err_soft = err;
515 }
516 goto out;
517 }
518
519 /* If we've already connected we will keep trying
520 * until we time out, or the user gives up.
521 *
522 * RFC 1122 4.2.3.9 allows only PROTO_UNREACH and PORT_UNREACH to be
523 * considered hard errors (well, FRAG_FAILED too,
524 * but it is obsoleted by PMTU discovery).
525 *
526 * Note that in the modern internet, where routing is unreliable
527 * and broken firewalls sit in every dark corner sending random
528 * errors ordered by their masters, even these two messages finally lose
529 * their original meaning (even Linux sends invalid PORT_UNREACHs).
530 *
531 * Now we are in compliance with RFCs.
532 * --ANK (980905)
533 */
534
535 inet = inet_sk(sk);
536 if (!sock_owned_by_user(sk) && inet->recverr) {
537 sk->sk_err = err;
538 sk->sk_error_report(sk);
539 } else { /* Only an error on timeout */
540 sk->sk_err_soft = err;
541 }
542
543 out:
544 bh_unlock_sock(sk);
545 sock_put(sk);
546 }
547
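/* Fill in the TCP checksum field: leave a partial checksum for the device to
 * finish when CHECKSUM_PARTIAL is set, otherwise compute the full checksum
 * over header and payload in software.
 */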
548 static void __tcp_v4_send_check(struct sk_buff *skb,
549 __be32 saddr, __be32 daddr)
550 {
551 struct tcphdr *th = tcp_hdr(skb);
552
553 if (skb->ip_summed == CHECKSUM_PARTIAL) {
554 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
555 skb->csum_start = skb_transport_header(skb) - skb->head;
556 skb->csum_offset = offsetof(struct tcphdr, check);
557 } else {
558 th->check = tcp_v4_check(skb->len, saddr, daddr,
559 csum_partial(th,
560 th->doff << 2,
561 skb->csum));
562 }
563 }
564
565 /* This routine computes an IPv4 TCP checksum. */
566 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
567 {
568 const struct inet_sock *inet = inet_sk(sk);
569
570 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
571 }
572 EXPORT_SYMBOL(tcp_v4_send_check);
573
574 int tcp_v4_gso_send_check(struct sk_buff *skb)
575 {
576 const struct iphdr *iph;
577 struct tcphdr *th;
578
579 if (!pskb_may_pull(skb, sizeof(*th)))
580 return -EINVAL;
581
582 iph = ip_hdr(skb);
583 th = tcp_hdr(skb);
584
585 th->check = 0;
586 skb->ip_summed = CHECKSUM_PARTIAL;
587 __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
588 return 0;
589 }
590
591 /*
592 * This routine will send an RST to the other tcp.
593 *
594 * Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
595 * for the reset?
596 * Answer: if a packet caused the RST, it is not for a socket
597 * existing in our system; if it did match a socket,
598 * it is just a duplicate segment or a bug in the other side's TCP.
599 * So we build the reply based only on the parameters
600 * that arrived with the segment.
601 * Exception: precedence violation. We do not implement it in any case.
602 */
603
604 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
605 {
606 const struct tcphdr *th = tcp_hdr(skb);
607 struct {
608 struct tcphdr th;
609 #ifdef CONFIG_TCP_MD5SIG
610 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
611 #endif
612 } rep;
613 struct ip_reply_arg arg;
614 #ifdef CONFIG_TCP_MD5SIG
615 struct tcp_md5sig_key *key;
616 const __u8 *hash_location = NULL;
617 unsigned char newhash[16];
618 int genhash;
619 struct sock *sk1 = NULL;
620 #endif
621 struct net *net;
622
623 /* Never send a reset in response to a reset. */
624 if (th->rst)
625 return;
626
627 if (skb_rtable(skb)->rt_type != RTN_LOCAL)
628 return;
629
630 /* Swap the send and the receive. */
631 memset(&rep, 0, sizeof(rep));
632 rep.th.dest = th->source;
633 rep.th.source = th->dest;
634 rep.th.doff = sizeof(struct tcphdr) / 4;
635 rep.th.rst = 1;
636
637 if (th->ack) {
638 rep.th.seq = th->ack_seq;
639 } else {
640 rep.th.ack = 1;
641 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
642 skb->len - (th->doff << 2));
643 }
644
645 memset(&arg, 0, sizeof(arg));
646 arg.iov[0].iov_base = (unsigned char *)&rep;
647 arg.iov[0].iov_len = sizeof(rep.th);
648
649 #ifdef CONFIG_TCP_MD5SIG
650 hash_location = tcp_parse_md5sig_option(th);
651 if (!sk && hash_location) {
652 /*
653 * active side is lost. Try to find listening socket through
654 * source port, and then find md5 key through listening socket.
655 * we do not lose security here:
656 * the incoming packet is checked against the md5 hash of the found key,
657 * and no RST is generated if the md5 hash doesn't match.
658 */
659 sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
660 &tcp_hashinfo, ip_hdr(skb)->saddr,
661 th->source, ip_hdr(skb)->daddr,
662 ntohs(th->source), inet_iif(skb));
663 /* don't send rst if it can't find key */
664 if (!sk1)
665 return;
666 rcu_read_lock();
667 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
668 &ip_hdr(skb)->saddr, AF_INET);
669 if (!key)
670 goto release_sk1;
671
672 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
673 if (genhash || memcmp(hash_location, newhash, 16) != 0)
674 goto release_sk1;
675 } else {
676 key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
677 &ip_hdr(skb)->saddr,
678 AF_INET) : NULL;
679 }
680
681 if (key) {
682 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
683 (TCPOPT_NOP << 16) |
684 (TCPOPT_MD5SIG << 8) |
685 TCPOLEN_MD5SIG);
686 /* Update length and the length the header thinks exists */
687 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
688 rep.th.doff = arg.iov[0].iov_len / 4;
689
690 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
691 key, ip_hdr(skb)->saddr,
692 ip_hdr(skb)->daddr, &rep.th);
693 }
694 #endif
695 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
696 ip_hdr(skb)->saddr, /* XXX */
697 arg.iov[0].iov_len, IPPROTO_TCP, 0);
698 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
699 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
700 /* When the socket is gone, all binding information is lost and
701 * routing might fail in this case. No choice here: if we chose to force
702 * the input interface, we would misroute in the case of an asymmetric route.
703 */
704 if (sk)
705 arg.bound_dev_if = sk->sk_bound_dev_if;
706
707 net = dev_net(skb_dst(skb)->dev);
708 arg.tos = ip_hdr(skb)->tos;
709 ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
710 ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
711
712 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
713 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
714
715 #ifdef CONFIG_TCP_MD5SIG
716 release_sk1:
717 if (sk1) {
718 rcu_read_unlock();
719 sock_put(sk1);
720 }
721 #endif
722 }
723
724 /* The code below, which sends ACKs in the SYN-RECV and TIME-WAIT states
725 outside of socket context, is certainly ugly. What can I do?
726 */
727
728 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
729 u32 win, u32 tsval, u32 tsecr, int oif,
730 struct tcp_md5sig_key *key,
731 int reply_flags, u8 tos)
732 {
733 const struct tcphdr *th = tcp_hdr(skb);
734 struct {
735 struct tcphdr th;
736 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
737 #ifdef CONFIG_TCP_MD5SIG
738 + (TCPOLEN_MD5SIG_ALIGNED >> 2)
739 #endif
740 ];
741 } rep;
742 struct ip_reply_arg arg;
743 struct net *net = dev_net(skb_dst(skb)->dev);
744
745 memset(&rep.th, 0, sizeof(struct tcphdr));
746 memset(&arg, 0, sizeof(arg));
747
748 arg.iov[0].iov_base = (unsigned char *)&rep;
749 arg.iov[0].iov_len = sizeof(rep.th);
750 if (tsecr) {
751 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
752 (TCPOPT_TIMESTAMP << 8) |
753 TCPOLEN_TIMESTAMP);
754 rep.opt[1] = htonl(tsval);
755 rep.opt[2] = htonl(tsecr);
756 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
757 }
758
759 /* Swap the send and the receive. */
760 rep.th.dest = th->source;
761 rep.th.source = th->dest;
762 rep.th.doff = arg.iov[0].iov_len / 4;
763 rep.th.seq = htonl(seq);
764 rep.th.ack_seq = htonl(ack);
765 rep.th.ack = 1;
766 rep.th.window = htons(win);
767
768 #ifdef CONFIG_TCP_MD5SIG
769 if (key) {
770 int offset = (tsecr) ? 3 : 0;
771
772 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
773 (TCPOPT_NOP << 16) |
774 (TCPOPT_MD5SIG << 8) |
775 TCPOLEN_MD5SIG);
776 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
777 rep.th.doff = arg.iov[0].iov_len/4;
778
779 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
780 key, ip_hdr(skb)->saddr,
781 ip_hdr(skb)->daddr, &rep.th);
782 }
783 #endif
784 arg.flags = reply_flags;
785 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
786 ip_hdr(skb)->saddr, /* XXX */
787 arg.iov[0].iov_len, IPPROTO_TCP, 0);
788 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
789 if (oif)
790 arg.bound_dev_if = oif;
791 arg.tos = tos;
792 ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
793 ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
794
795 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
796 }
797
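/* Send an ACK on behalf of a TIME-WAIT socket, using the sequence, window and
 * timestamp state preserved in the timewait bucket, then drop the timewait
 * reference.
 */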
798 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
799 {
800 struct inet_timewait_sock *tw = inet_twsk(sk);
801 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
802
803 tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
804 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
805 tcp_time_stamp + tcptw->tw_ts_offset,
806 tcptw->tw_ts_recent,
807 tw->tw_bound_dev_if,
808 tcp_twsk_md5_key(tcptw),
809 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
810 tw->tw_tos
811 );
812
813 inet_twsk_put(tw);
814 }
815
816 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
817 struct request_sock *req)
818 {
819 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
820 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
821 */
822 tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
823 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
824 tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
825 tcp_time_stamp,
826 req->ts_recent,
827 0,
828 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
829 AF_INET),
830 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
831 ip_hdr(skb)->tos);
832 }
833
834 /*
835 * Send a SYN-ACK after having received a SYN.
836 * This still operates on a request_sock only, not on a big
837 * socket.
838 */
839 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
840 struct request_sock *req,
841 struct request_values *rvp,
842 u16 queue_mapping,
843 bool nocache)
844 {
845 const struct inet_request_sock *ireq = inet_rsk(req);
846 struct flowi4 fl4;
847 int err = -1;
848 struct sk_buff * skb;
849
850 /* First, grab a route. */
851 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
852 return -1;
853
854 skb = tcp_make_synack(sk, dst, req, rvp, NULL);
855
856 if (skb) {
857 __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
858
859 skb_set_queue_mapping(skb, queue_mapping);
860 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
861 ireq->rmt_addr,
862 ireq->opt);
863 err = net_xmit_eval(err);
864 if (!tcp_rsk(req)->snt_synack && !err)
865 tcp_rsk(req)->snt_synack = tcp_time_stamp;
866 }
867
868 return err;
869 }
870
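/* Retransmit the SYN-ACK for a pending connection request and, if it was
 * sent, account it as a retransmitted segment.
 */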
871 static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
872 struct request_values *rvp)
873 {
874 int res = tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);
875
876 if (!res)
877 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
878 return res;
879 }
880
881 /*
882 * IPv4 request_sock destructor.
883 */
884 static void tcp_v4_reqsk_destructor(struct request_sock *req)
885 {
886 kfree(inet_rsk(req)->opt);
887 }
888
889 /*
890 * Return true if a syncookie should be sent
891 */
892 bool tcp_syn_flood_action(struct sock *sk,
893 const struct sk_buff *skb,
894 const char *proto)
895 {
896 const char *msg = "Dropping request";
897 bool want_cookie = false;
898 struct listen_sock *lopt;
899
900
901
902 #ifdef CONFIG_SYN_COOKIES
903 if (sysctl_tcp_syncookies) {
904 msg = "Sending cookies";
905 want_cookie = true;
906 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
907 } else
908 #endif
909 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
910
911 lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
912 if (!lopt->synflood_warned) {
913 lopt->synflood_warned = 1;
914 pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
915 proto, ntohs(tcp_hdr(skb)->dest), msg);
916 }
917 return want_cookie;
918 }
919 EXPORT_SYMBOL(tcp_syn_flood_action);
920
921 /*
922 * Save and compile IPv4 options into the request_sock if needed.
923 */
924 static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
925 {
926 const struct ip_options *opt = &(IPCB(skb)->opt);
927 struct ip_options_rcu *dopt = NULL;
928
929 if (opt && opt->optlen) {
930 int opt_size = sizeof(*dopt) + opt->optlen;
931
932 dopt = kmalloc(opt_size, GFP_ATOMIC);
933 if (dopt) {
934 if (ip_options_echo(&dopt->opt, skb)) {
935 kfree(dopt);
936 dopt = NULL;
937 }
938 }
939 }
940 return dopt;
941 }
942
943 #ifdef CONFIG_TCP_MD5SIG
944 /*
945 * RFC2385 MD5 checksumming requires a mapping of
946 * IP address->MD5 Key.
947 * We need to maintain these in the sk structure.
948 */
949
950 /* Find the Key structure for an address. */
951 struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
952 const union tcp_md5_addr *addr,
953 int family)
954 {
955 struct tcp_sock *tp = tcp_sk(sk);
956 struct tcp_md5sig_key *key;
957 struct hlist_node *pos;
958 unsigned int size = sizeof(struct in_addr);
959 struct tcp_md5sig_info *md5sig;
960
961 /* caller either holds rcu_read_lock() or socket lock */
962 md5sig = rcu_dereference_check(tp->md5sig_info,
963 sock_owned_by_user(sk) ||
964 lockdep_is_held(&sk->sk_lock.slock));
965 if (!md5sig)
966 return NULL;
967 #if IS_ENABLED(CONFIG_IPV6)
968 if (family == AF_INET6)
969 size = sizeof(struct in6_addr);
970 #endif
971 hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) {
972 if (key->family != family)
973 continue;
974 if (!memcmp(&key->addr, addr, size))
975 return key;
976 }
977 return NULL;
978 }
979 EXPORT_SYMBOL(tcp_md5_do_lookup);
980
981 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
982 struct sock *addr_sk)
983 {
984 union tcp_md5_addr *addr;
985
986 addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
987 return tcp_md5_do_lookup(sk, addr, AF_INET);
988 }
989 EXPORT_SYMBOL(tcp_v4_md5_lookup);
990
991 static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
992 struct request_sock *req)
993 {
994 union tcp_md5_addr *addr;
995
996 addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
997 return tcp_md5_do_lookup(sk, addr, AF_INET);
998 }
999
1000 /* This can be called on a newly created socket, from other files */
1001 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1002 int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
1003 {
1004 /* Add Key to the list */
1005 struct tcp_md5sig_key *key;
1006 struct tcp_sock *tp = tcp_sk(sk);
1007 struct tcp_md5sig_info *md5sig;
1008
1009 key = tcp_md5_do_lookup(sk, addr, family);
1010 if (key) {
1011 /* Pre-existing entry - just update that one. */
1012 memcpy(key->key, newkey, newkeylen);
1013 key->keylen = newkeylen;
1014 return 0;
1015 }
1016
1017 md5sig = rcu_dereference_protected(tp->md5sig_info,
1018 sock_owned_by_user(sk));
1019 if (!md5sig) {
1020 md5sig = kmalloc(sizeof(*md5sig), gfp);
1021 if (!md5sig)
1022 return -ENOMEM;
1023
1024 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1025 INIT_HLIST_HEAD(&md5sig->head);
1026 rcu_assign_pointer(tp->md5sig_info, md5sig);
1027 }
1028
1029 key = sock_kmalloc(sk, sizeof(*key), gfp);
1030 if (!key)
1031 return -ENOMEM;
1032 if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
1033 sock_kfree_s(sk, key, sizeof(*key));
1034 return -ENOMEM;
1035 }
1036
1037 memcpy(key->key, newkey, newkeylen);
1038 key->keylen = newkeylen;
1039 key->family = family;
1040 memcpy(&key->addr, addr,
1041 (family == AF_INET6) ? sizeof(struct in6_addr) :
1042 sizeof(struct in_addr));
1043 hlist_add_head_rcu(&key->node, &md5sig->head);
1044 return 0;
1045 }
1046 EXPORT_SYMBOL(tcp_md5_do_add);
1047
1048 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
1049 {
1050 struct tcp_sock *tp = tcp_sk(sk);
1051 struct tcp_md5sig_key *key;
1052 struct tcp_md5sig_info *md5sig;
1053
1054 key = tcp_md5_do_lookup(sk, addr, family);
1055 if (!key)
1056 return -ENOENT;
1057 hlist_del_rcu(&key->node);
1058 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1059 kfree_rcu(key, rcu);
1060 md5sig = rcu_dereference_protected(tp->md5sig_info,
1061 sock_owned_by_user(sk));
1062 if (hlist_empty(&md5sig->head))
1063 tcp_free_md5sig_pool();
1064 return 0;
1065 }
1066 EXPORT_SYMBOL(tcp_md5_do_del);
1067
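/* Remove every MD5 key configured on the socket and drop the reference this
 * socket held on the shared MD5 signature pool.
 */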
1068 static void tcp_clear_md5_list(struct sock *sk)
1069 {
1070 struct tcp_sock *tp = tcp_sk(sk);
1071 struct tcp_md5sig_key *key;
1072 struct hlist_node *pos, *n;
1073 struct tcp_md5sig_info *md5sig;
1074
1075 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1076
1077 if (!hlist_empty(&md5sig->head))
1078 tcp_free_md5sig_pool();
1079 hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) {
1080 hlist_del_rcu(&key->node);
1081 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1082 kfree_rcu(key, rcu);
1083 }
1084 }
1085
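/* Parse a TCP_MD5SIG socket option request from user space and add, replace
 * or delete the MD5 key for the given IPv4 peer address.
 */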
1086 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1087 int optlen)
1088 {
1089 struct tcp_md5sig cmd;
1090 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1091
1092 if (optlen < sizeof(cmd))
1093 return -EINVAL;
1094
1095 if (copy_from_user(&cmd, optval, sizeof(cmd)))
1096 return -EFAULT;
1097
1098 if (sin->sin_family != AF_INET)
1099 return -EINVAL;
1100
1101 if (!cmd.tcpm_key || !cmd.tcpm_keylen)
1102 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1103 AF_INET);
1104
1105 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1106 return -EINVAL;
1107
1108 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1109 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1110 GFP_KERNEL);
1111 }
1112
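/* Mix the TCP pseudo-header (addresses, zero-padded protocol number and
 * segment length) into the MD5 hash state, as RFC 2385 requires.
 */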
1113 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1114 __be32 daddr, __be32 saddr, int nbytes)
1115 {
1116 struct tcp4_pseudohdr *bp;
1117 struct scatterlist sg;
1118
1119 bp = &hp->md5_blk.ip4;
1120
1121 /*
1122 * 1. the TCP pseudo-header (in the order: source IP address,
1123 * destination IP address, zero-padded protocol number, and
1124 * segment length)
1125 */
1126 bp->saddr = saddr;
1127 bp->daddr = daddr;
1128 bp->pad = 0;
1129 bp->protocol = IPPROTO_TCP;
1130 bp->len = cpu_to_be16(nbytes);
1131
1132 sg_init_one(&sg, bp, sizeof(*bp));
1133 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1134 }
1135
1136 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1137 __be32 daddr, __be32 saddr, const struct tcphdr *th)
1138 {
1139 struct tcp_md5sig_pool *hp;
1140 struct hash_desc *desc;
1141
1142 hp = tcp_get_md5sig_pool();
1143 if (!hp)
1144 goto clear_hash_noput;
1145 desc = &hp->md5_desc;
1146
1147 if (crypto_hash_init(desc))
1148 goto clear_hash;
1149 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1150 goto clear_hash;
1151 if (tcp_md5_hash_header(hp, th))
1152 goto clear_hash;
1153 if (tcp_md5_hash_key(hp, key))
1154 goto clear_hash;
1155 if (crypto_hash_final(desc, md5_hash))
1156 goto clear_hash;
1157
1158 tcp_put_md5sig_pool();
1159 return 0;
1160
1161 clear_hash:
1162 tcp_put_md5sig_pool();
1163 clear_hash_noput:
1164 memset(md5_hash, 0, 16);
1165 return 1;
1166 }
1167
1168 int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1169 const struct sock *sk, const struct request_sock *req,
1170 const struct sk_buff *skb)
1171 {
1172 struct tcp_md5sig_pool *hp;
1173 struct hash_desc *desc;
1174 const struct tcphdr *th = tcp_hdr(skb);
1175 __be32 saddr, daddr;
1176
1177 if (sk) {
1178 saddr = inet_sk(sk)->inet_saddr;
1179 daddr = inet_sk(sk)->inet_daddr;
1180 } else if (req) {
1181 saddr = inet_rsk(req)->loc_addr;
1182 daddr = inet_rsk(req)->rmt_addr;
1183 } else {
1184 const struct iphdr *iph = ip_hdr(skb);
1185 saddr = iph->saddr;
1186 daddr = iph->daddr;
1187 }
1188
1189 hp = tcp_get_md5sig_pool();
1190 if (!hp)
1191 goto clear_hash_noput;
1192 desc = &hp->md5_desc;
1193
1194 if (crypto_hash_init(desc))
1195 goto clear_hash;
1196
1197 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1198 goto clear_hash;
1199 if (tcp_md5_hash_header(hp, th))
1200 goto clear_hash;
1201 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1202 goto clear_hash;
1203 if (tcp_md5_hash_key(hp, key))
1204 goto clear_hash;
1205 if (crypto_hash_final(desc, md5_hash))
1206 goto clear_hash;
1207
1208 tcp_put_md5sig_pool();
1209 return 0;
1210
1211 clear_hash:
1212 tcp_put_md5sig_pool();
1213 clear_hash_noput:
1214 memset(md5_hash, 0, 16);
1215 return 1;
1216 }
1217 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1218
1219 static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
1220 {
1221 /*
1222 * This gets called for each TCP segment that arrives
1223 * so we want to be efficient.
1224 * We have 3 drop cases:
1225 * o No MD5 hash and one expected.
1226 * o MD5 hash and we're not expecting one.
1227 * o MD5 hash and it's wrong.
1228 */
1229 const __u8 *hash_location = NULL;
1230 struct tcp_md5sig_key *hash_expected;
1231 const struct iphdr *iph = ip_hdr(skb);
1232 const struct tcphdr *th = tcp_hdr(skb);
1233 int genhash;
1234 unsigned char newhash[16];
1235
1236 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1237 AF_INET);
1238 hash_location = tcp_parse_md5sig_option(th);
1239
1240 /* We've parsed the options - do we have a hash? */
1241 if (!hash_expected && !hash_location)
1242 return false;
1243
1244 if (hash_expected && !hash_location) {
1245 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1246 return true;
1247 }
1248
1249 if (!hash_expected && hash_location) {
1250 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1251 return true;
1252 }
1253
1254 /* Okay, so this is hash_expected and hash_location -
1255 * so we need to calculate the checksum.
1256 */
1257 genhash = tcp_v4_md5_hash_skb(newhash,
1258 hash_expected,
1259 NULL, NULL, skb);
1260
1261 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1262 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1263 &iph->saddr, ntohs(th->source),
1264 &iph->daddr, ntohs(th->dest),
1265 genhash ? " tcp_v4_calc_md5_hash failed"
1266 : "");
1267 return true;
1268 }
1269 return false;
1270 }
1271
1272 #endif
1273
1274 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1275 .family = PF_INET,
1276 .obj_size = sizeof(struct tcp_request_sock),
1277 .rtx_syn_ack = tcp_v4_rtx_synack,
1278 .send_ack = tcp_v4_reqsk_send_ack,
1279 .destructor = tcp_v4_reqsk_destructor,
1280 .send_reset = tcp_v4_send_reset,
1281 .syn_ack_timeout = tcp_syn_ack_timeout,
1282 };
1283
1284 #ifdef CONFIG_TCP_MD5SIG
1285 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1286 .md5_lookup = tcp_v4_reqsk_md5_lookup,
1287 .calc_md5_hash = tcp_v4_md5_hash_skb,
1288 };
1289 #endif
1290
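/* Decide how to handle a Fast Open request: returns true if the data carried
 * in the SYN may be accepted right away, false otherwise. When a cookie has
 * to be generated or corrected, it is placed in *valid_foc so it can be
 * echoed in the SYN-ACK.
 */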
1291 static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
1292 struct request_sock *req,
1293 struct tcp_fastopen_cookie *foc,
1294 struct tcp_fastopen_cookie *valid_foc)
1295 {
1296 bool skip_cookie = false;
1297 struct fastopen_queue *fastopenq;
1298
1299 if (likely(!fastopen_cookie_present(foc))) {
1300 /* See include/net/tcp.h for the meaning of these knobs */
1301 if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
1302 ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
1303 (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1)))
1304 skip_cookie = true; /* no cookie to validate */
1305 else
1306 return false;
1307 }
1308 fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
1309 /* A FO option is present; bump the counter. */
1310 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);
1311
1312 /* Make sure the listener has enabled fastopen, and we don't
1313 * exceed the max # of pending TFO requests allowed before trying
1314 * to validate the cookie, in order to avoid burning CPU cycles
1315 * unnecessarily.
1316 *
1317 * XXX (TFO) - The implication of checking the max_qlen before
1318 * processing a cookie request is that clients can't differentiate
1319 * between qlen overflow causing Fast Open to be disabled
1320 * temporarily vs a server not supporting Fast Open at all.
1321 */
1322 if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 ||
1323 fastopenq == NULL || fastopenq->max_qlen == 0)
1324 return false;
1325
1326 if (fastopenq->qlen >= fastopenq->max_qlen) {
1327 struct request_sock *req1;
1328 spin_lock(&fastopenq->lock);
1329 req1 = fastopenq->rskq_rst_head;
1330 if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
1331 spin_unlock(&fastopenq->lock);
1332 NET_INC_STATS_BH(sock_net(sk),
1333 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
1334 /* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL*/
1335 foc->len = -1;
1336 return false;
1337 }
1338 fastopenq->rskq_rst_head = req1->dl_next;
1339 fastopenq->qlen--;
1340 spin_unlock(&fastopenq->lock);
1341 reqsk_free(req1);
1342 }
1343 if (skip_cookie) {
1344 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1345 return true;
1346 }
1347 if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
1348 if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
1349 tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
1350 if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
1351 memcmp(&foc->val[0], &valid_foc->val[0],
1352 TCP_FASTOPEN_COOKIE_SIZE) != 0)
1353 return false;
1354 valid_foc->len = -1;
1355 }
1356 /* Acknowledge the data received from the peer. */
1357 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1358 return true;
1359 } else if (foc->len == 0) { /* Client requesting a cookie */
1360 tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
1361 NET_INC_STATS_BH(sock_net(sk),
1362 LINUX_MIB_TCPFASTOPENCOOKIEREQD);
1363 } else {
1364 /* Client sent a cookie with wrong size. Treat it
1365 * the same as invalid and return a valid one.
1366 */
1367 tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
1368 }
1369 return false;
1370 }
1371
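/* Complete a validated Fast Open request: create the child socket, send the
 * SYN-ACK, add the child directly to the accept queue and queue any data that
 * arrived in the SYN payload.
 */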
1372 static int tcp_v4_conn_req_fastopen(struct sock *sk,
1373 struct sk_buff *skb,
1374 struct sk_buff *skb_synack,
1375 struct request_sock *req,
1376 struct request_values *rvp)
1377 {
1378 struct tcp_sock *tp = tcp_sk(sk);
1379 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
1380 const struct inet_request_sock *ireq = inet_rsk(req);
1381 struct sock *child;
1382 int err;
1383
1384 req->num_retrans = 0;
1385 req->num_timeout = 0;
1386 req->sk = NULL;
1387
1388 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
1389 if (child == NULL) {
1390 NET_INC_STATS_BH(sock_net(sk),
1391 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
1392 kfree_skb(skb_synack);
1393 return -1;
1394 }
1395 err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
1396 ireq->rmt_addr, ireq->opt);
1397 err = net_xmit_eval(err);
1398 if (!err)
1399 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1400 /* XXX (TFO) - is it ok to ignore error and continue? */
1401
1402 spin_lock(&queue->fastopenq->lock);
1403 queue->fastopenq->qlen++;
1404 spin_unlock(&queue->fastopenq->lock);
1405
1406 /* Initialize the child socket. Have to fix some values to take
1407 * into account the child is a Fast Open socket and is created
1408 * only out of the bits carried in the SYN packet.
1409 */
1410 tp = tcp_sk(child);
1411
1412 tp->fastopen_rsk = req;
1413 /* Do a hold on the listener sk so that if the listener is being
1414 * closed, the child that has been accepted can live on and still
1415 * access listen_lock.
1416 */
1417 sock_hold(sk);
1418 tcp_rsk(req)->listener = sk;
1419
1420 /* RFC1323: The window in SYN & SYN/ACK segments is never
1421 * scaled. So correct it appropriately.
1422 */
1423 tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
1424
1425 /* Activate the retrans timer so that SYNACK can be retransmitted.
1426 * The request socket is not added to the SYN table of the parent
1427 * because it's been added to the accept queue directly.
1428 */
1429 inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
1430 TCP_TIMEOUT_INIT, TCP_RTO_MAX);
1431
1432 /* Add the child socket directly into the accept queue */
1433 inet_csk_reqsk_queue_add(sk, req, child);
1434
1435 /* Now finish processing the fastopen child socket. */
1436 inet_csk(child)->icsk_af_ops->rebuild_header(child);
1437 tcp_init_congestion_control(child);
1438 tcp_mtup_init(child);
1439 tcp_init_buffer_space(child);
1440 tcp_init_metrics(child);
1441
1442 /* Queue the data carried in the SYN packet. We need to first
1443 * bump skb's refcnt because the caller will attempt to free it.
1444 *
1445 * XXX (TFO) - we honor a zero-payload TFO request for now.
1446 * (Any reason not to?)
1447 */
1448 if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
1449 /* Don't queue the skb if there is no payload in SYN.
1450 * XXX (TFO) - How about SYN+FIN?
1451 */
1452 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1453 } else {
1454 skb = skb_get(skb);
1455 skb_dst_drop(skb);
1456 __skb_pull(skb, tcp_hdr(skb)->doff * 4);
1457 skb_set_owner_r(skb, child);
1458 __skb_queue_tail(&child->sk_receive_queue, skb);
1459 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1460 tp->syn_data_acked = 1;
1461 }
1462 sk->sk_data_ready(sk, 0);
1463 bh_unlock_sock(child);
1464 sock_put(child);
1465 WARN_ON(req->sk == NULL);
1466 return 0;
1467 }
1468
1469 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1470 {
1471 struct tcp_extend_values tmp_ext;
1472 struct tcp_options_received tmp_opt;
1473 const u8 *hash_location;
1474 struct request_sock *req;
1475 struct inet_request_sock *ireq;
1476 struct tcp_sock *tp = tcp_sk(sk);
1477 struct dst_entry *dst = NULL;
1478 __be32 saddr = ip_hdr(skb)->saddr;
1479 __be32 daddr = ip_hdr(skb)->daddr;
1480 __u32 isn = TCP_SKB_CB(skb)->when;
1481 bool want_cookie = false;
1482 struct flowi4 fl4;
1483 struct tcp_fastopen_cookie foc = { .len = -1 };
1484 struct tcp_fastopen_cookie valid_foc = { .len = -1 };
1485 struct sk_buff *skb_synack;
1486 int do_fastopen;
1487
1488 /* Never answer SYNs sent to broadcast or multicast */
1489 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1490 goto drop;
1491
1492 /* TW buckets are converted to open requests without
1493 * limitations: they conserve resources and the peer is
1494 * evidently a real one.
1495 */
1496 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1497 want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
1498 if (!want_cookie)
1499 goto drop;
1500 }
1501
1502 /* Accept backlog is full. If we have already queued enough
1503 * of warm entries in syn queue, drop request. It is better than
1504 * clogging syn queue with openreqs with exponentially increasing
1505 * timeout.
1506 */
1507 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
1508 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1509 goto drop;
1510 }
1511
1512 req = inet_reqsk_alloc(&tcp_request_sock_ops);
1513 if (!req)
1514 goto drop;
1515
1516 #ifdef CONFIG_TCP_MD5SIG
1517 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1518 #endif
1519
1520 tcp_clear_options(&tmp_opt);
1521 tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1522 tmp_opt.user_mss = tp->rx_opt.user_mss;
1523 tcp_parse_options(skb, &tmp_opt, &hash_location, 0,
1524 want_cookie ? NULL : &foc);
1525
1526 if (tmp_opt.cookie_plus > 0 &&
1527 tmp_opt.saw_tstamp &&
1528 !tp->rx_opt.cookie_out_never &&
1529 (sysctl_tcp_cookie_size > 0 ||
1530 (tp->cookie_values != NULL &&
1531 tp->cookie_values->cookie_desired > 0))) {
1532 u8 *c;
1533 u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1534 int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1535
1536 if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1537 goto drop_and_release;
1538
1539 /* Secret recipe starts with IP addresses */
1540 *mess++ ^= (__force u32)daddr;
1541 *mess++ ^= (__force u32)saddr;
1542
1543 /* plus variable length Initiator Cookie */
1544 c = (u8 *)mess;
1545 while (l-- > 0)
1546 *c++ ^= *hash_location++;
1547
1548 want_cookie = false; /* not our kind of cookie */
1549 tmp_ext.cookie_out_never = 0; /* false */
1550 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1551 } else if (!tp->rx_opt.cookie_in_always) {
1552 /* redundant indications, but ensure initialization. */
1553 tmp_ext.cookie_out_never = 1; /* true */
1554 tmp_ext.cookie_plus = 0;
1555 } else {
1556 goto drop_and_release;
1557 }
1558 tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1559
1560 if (want_cookie && !tmp_opt.saw_tstamp)
1561 tcp_clear_options(&tmp_opt);
1562
1563 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1564 tcp_openreq_init(req, &tmp_opt, skb);
1565
1566 ireq = inet_rsk(req);
1567 ireq->loc_addr = daddr;
1568 ireq->rmt_addr = saddr;
1569 ireq->no_srccheck = inet_sk(sk)->transparent;
1570 ireq->opt = tcp_v4_save_options(skb);
1571
1572 if (security_inet_conn_request(sk, skb, req))
1573 goto drop_and_free;
1574
1575 if (!want_cookie || tmp_opt.tstamp_ok)
1576 TCP_ECN_create_request(req, skb, sock_net(sk));
1577
1578 if (want_cookie) {
1579 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1580 req->cookie_ts = tmp_opt.tstamp_ok;
1581 } else if (!isn) {
1582 /* VJ's idea. We save last timestamp seen
1583 * from the destination in peer table, when entering
1584 * state TIME-WAIT, and check against it before
1585 * accepting new connection request.
1586 *
1587 * If "isn" is not zero, this request hit alive
1588 * timewait bucket, so that all the necessary checks
1589 * are made in the function processing timewait state.
1590 */
1591 if (tmp_opt.saw_tstamp &&
1592 tcp_death_row.sysctl_tw_recycle &&
1593 (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
1594 fl4.daddr == saddr) {
1595 if (!tcp_peer_is_proven(req, dst, true)) {
1596 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1597 goto drop_and_release;
1598 }
1599 }
1600 /* Kill the following clause, if you dislike this way. */
1601 else if (!sysctl_tcp_syncookies &&
1602 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1603 (sysctl_max_syn_backlog >> 2)) &&
1604 !tcp_peer_is_proven(req, dst, false)) {
1605 /* Without syncookies, the last quarter of
1606 * the backlog is filled only with destinations
1607 * proven to be alive.
1608 * It means that we continue to communicate
1609 * with destinations already remembered
1610 * at the moment of the synflood.
1611 */
1612 LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
1613 &saddr, ntohs(tcp_hdr(skb)->source));
1614 goto drop_and_release;
1615 }
1616
1617 isn = tcp_v4_init_sequence(skb);
1618 }
1619 tcp_rsk(req)->snt_isn = isn;
1620
1621 if (dst == NULL) {
1622 dst = inet_csk_route_req(sk, &fl4, req);
1623 if (dst == NULL)
1624 goto drop_and_free;
1625 }
1626 do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);
1627
1628 /* We don't call tcp_v4_send_synack() directly because we need
1629 * to make sure a child socket can be created successfully before
1630 * sending back synack!
1631 *
1632 * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
1633 * (or better yet, call tcp_send_synack() in the child context
1634 * directly, but will have to fix bunch of other code first)
1635 * after syn_recv_sock() except one will need to first fix the
1636 * latter to remove its dependency on the current implementation
1637 * of tcp_v4_send_synack()->tcp_select_initial_window().
1638 */
1639 skb_synack = tcp_make_synack(sk, dst, req,
1640 (struct request_values *)&tmp_ext,
1641 fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
1642
1643 if (skb_synack) {
1644 __tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr);
1645 skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
1646 } else
1647 goto drop_and_free;
1648
1649 if (likely(!do_fastopen)) {
1650 int err;
1651 err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
1652 ireq->rmt_addr, ireq->opt);
1653 err = net_xmit_eval(err);
1654 if (err || want_cookie)
1655 goto drop_and_free;
1656
1657 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1658 tcp_rsk(req)->listener = NULL;
1659 /* Add the request_sock to the SYN table */
1660 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1661 if (fastopen_cookie_present(&foc) && foc.len != 0)
1662 NET_INC_STATS_BH(sock_net(sk),
1663 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
1664 } else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req,
1665 (struct request_values *)&tmp_ext))
1666 goto drop_and_free;
1667
1668 return 0;
1669
1670 drop_and_release:
1671 dst_release(dst);
1672 drop_and_free:
1673 reqsk_free(req);
1674 drop:
1675 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1676 return 0;
1677 }
1678 EXPORT_SYMBOL(tcp_v4_conn_request);
1679
1680
1681 /*
1682 * The three way handshake has completed - we got a valid synack -
1683 * now create the new socket.
1684 */
1685 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1686 struct request_sock *req,
1687 struct dst_entry *dst)
1688 {
1689 struct inet_request_sock *ireq;
1690 struct inet_sock *newinet;
1691 struct tcp_sock *newtp;
1692 struct sock *newsk;
1693 #ifdef CONFIG_TCP_MD5SIG
1694 struct tcp_md5sig_key *key;
1695 #endif
1696 struct ip_options_rcu *inet_opt;
1697
1698 if (sk_acceptq_is_full(sk))
1699 goto exit_overflow;
1700
1701 newsk = tcp_create_openreq_child(sk, req, skb);
1702 if (!newsk)
1703 goto exit_nonewsk;
1704
1705 newsk->sk_gso_type = SKB_GSO_TCPV4;
1706 inet_sk_rx_dst_set(newsk, skb);
1707
1708 newtp = tcp_sk(newsk);
1709 newinet = inet_sk(newsk);
1710 ireq = inet_rsk(req);
1711 newinet->inet_daddr = ireq->rmt_addr;
1712 newinet->inet_rcv_saddr = ireq->loc_addr;
1713 newinet->inet_saddr = ireq->loc_addr;
1714 inet_opt = ireq->opt;
1715 rcu_assign_pointer(newinet->inet_opt, inet_opt);
1716 ireq->opt = NULL;
1717 newinet->mc_index = inet_iif(skb);
1718 newinet->mc_ttl = ip_hdr(skb)->ttl;
1719 newinet->rcv_tos = ip_hdr(skb)->tos;
1720 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1721 if (inet_opt)
1722 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1723 newinet->inet_id = newtp->write_seq ^ jiffies;
1724
1725 if (!dst) {
1726 dst = inet_csk_route_child_sock(sk, newsk, req);
1727 if (!dst)
1728 goto put_and_exit;
1729 } else {
1730 /* syncookie case : see end of cookie_v4_check() */
1731 }
1732 sk_setup_caps(newsk, dst);
1733
1734 tcp_mtup_init(newsk);
1735 tcp_sync_mss(newsk, dst_mtu(dst));
1736 newtp->advmss = dst_metric_advmss(dst);
1737 if (tcp_sk(sk)->rx_opt.user_mss &&
1738 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1739 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1740
1741 tcp_initialize_rcv_mss(newsk);
1742 tcp_synack_rtt_meas(newsk, req);
1743 newtp->total_retrans = req->num_retrans;
1744
1745 #ifdef CONFIG_TCP_MD5SIG
1746 /* Copy over the MD5 key from the original socket */
1747 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1748 AF_INET);
1749 if (key != NULL) {
1750 /*
1751 * We're using one, so create a matching key
1752 * on the newsk structure. If we fail to get
1753 * memory, then we end up not copying the key
1754 * across. Shucks.
1755 */
1756 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1757 AF_INET, key->key, key->keylen, GFP_ATOMIC);
1758 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1759 }
1760 #endif
1761
1762 if (__inet_inherit_port(sk, newsk) < 0)
1763 goto put_and_exit;
1764 __inet_hash_nolisten(newsk, NULL);
1765
1766 return newsk;
1767
1768 exit_overflow:
1769 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1770 exit_nonewsk:
1771 dst_release(dst);
1772 exit:
1773 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1774 return NULL;
1775 put_and_exit:
1776 inet_csk_prepare_forced_close(newsk);
1777 tcp_done(newsk);
1778 goto exit;
1779 }
1780 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1781
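/* For a segment arriving on a listening socket, find the matching pending
 * request, an already established child, or (with syncookies) a
 * cookie-validated connection; returns the socket to process the segment on,
 * or NULL to drop it.
 */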
1782 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1783 {
1784 struct tcphdr *th = tcp_hdr(skb);
1785 const struct iphdr *iph = ip_hdr(skb);
1786 struct sock *nsk;
1787 struct request_sock **prev;
1788 /* Find possible connection requests. */
1789 struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1790 iph->saddr, iph->daddr);
1791 if (req)
1792 return tcp_check_req(sk, skb, req, prev, false);
1793
1794 nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1795 th->source, iph->daddr, th->dest, inet_iif(skb));
1796
1797 if (nsk) {
1798 if (nsk->sk_state != TCP_TIME_WAIT) {
1799 bh_lock_sock(nsk);
1800 return nsk;
1801 }
1802 inet_twsk_put(inet_twsk(nsk));
1803 return NULL;
1804 }
1805
1806 #ifdef CONFIG_SYN_COOKIES
1807 if (!th->syn)
1808 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1809 #endif
1810 return sk;
1811 }
1812
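/* Verify or set up the checksum of an incoming segment: returns zero when the
 * checksum is already known good or will be checked later, non-zero when the
 * packet is corrupted.
 */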
1813 static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
1814 {
1815 const struct iphdr *iph = ip_hdr(skb);
1816
1817 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1818 if (!tcp_v4_check(skb->len, iph->saddr,
1819 iph->daddr, skb->csum)) {
1820 skb->ip_summed = CHECKSUM_UNNECESSARY;
1821 return 0;
1822 }
1823 }
1824
1825 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
1826 skb->len, IPPROTO_TCP, 0);
1827
1828 if (skb->len <= 76) {
1829 return __skb_checksum_complete(skb);
1830 }
1831 return 0;
1832 }
1833
1834
1835 /* The socket must have its spinlock held when we get
1836 * here.
1837 *
1838 * We have a potential double-lock case here, so even when
1839 * doing backlog processing we use the BH locking scheme.
1840 * This is because we cannot sleep with the original spinlock
1841 * held.
1842 */
1843 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1844 {
1845 struct sock *rsk;
1846 #ifdef CONFIG_TCP_MD5SIG
1847 /*
1848 * We really want to reject the packet as early as possible
1849 * if:
1850 * o We're expecting an MD5'd packet and there is no MD5 tcp option
1851 * o There is an MD5 option and we're not expecting one
1852 */
1853 if (tcp_v4_inbound_md5_hash(sk, skb))
1854 goto discard;
1855 #endif
1856
1857 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1858 struct dst_entry *dst = sk->sk_rx_dst;
1859
1860 sock_rps_save_rxhash(sk, skb);
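		/* Drop the cached input route if it was learned on a different
		 * interface or is no longer valid, so that a fresh one can be
		 * cached later.
		 */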
1861 if (dst) {
1862 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1863 dst->ops->check(dst, 0) == NULL) {
1864 dst_release(dst);
1865 sk->sk_rx_dst = NULL;
1866 }
1867 }
1868 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
1869 rsk = sk;
1870 goto reset;
1871 }
1872 return 0;
1873 }
1874
1875 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1876 goto csum_err;
1877
1878 if (sk->sk_state == TCP_LISTEN) {
1879 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1880 if (!nsk)
1881 goto discard;
1882
1883 if (nsk != sk) {
1884 sock_rps_save_rxhash(nsk, skb);
1885 if (tcp_child_process(sk, nsk, skb)) {
1886 rsk = nsk;
1887 goto reset;
1888 }
1889 return 0;
1890 }
1891 } else
1892 sock_rps_save_rxhash(sk, skb);
1893
1894 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1895 rsk = sk;
1896 goto reset;
1897 }
1898 return 0;
1899
1900 reset:
1901 tcp_v4_send_reset(rsk, skb);
1902 discard:
1903 kfree_skb(skb);
1904 /* Be careful here. If this function gets more complicated and
1905 * gcc suffers from register pressure on the x86, sk (in %ebx)
1906 * might be destroyed here. This current version compiles correctly,
1907 * but you have been warned.
1908 */
1909 return 0;
1910
1911 csum_err:
1912 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1913 goto discard;
1914 }
1915 EXPORT_SYMBOL(tcp_v4_do_rcv);
1916
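/* Early demux: look up the established socket for this segment while it is
 * still in the IP receive path, attach it to the skb (skb->sk), and reuse the
 * socket's cached input route where possible, so tcp_v4_rcv() can skip the
 * full lookup later.
 */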
1917 void tcp_v4_early_demux(struct sk_buff *skb)
1918 {
1919 const struct iphdr *iph;
1920 const struct tcphdr *th;
1921 struct sock *sk;
1922
1923 if (skb->pkt_type != PACKET_HOST)
1924 return;
1925
1926 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1927 return;
1928
1929 iph = ip_hdr(skb);
1930 th = tcp_hdr(skb);
1931
1932 if (th->doff < sizeof(struct tcphdr) / 4)
1933 return;
1934
1935 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1936 iph->saddr, th->source,
1937 iph->daddr, ntohs(th->dest),
1938 skb->skb_iif);
1939 if (sk) {
1940 skb->sk = sk;
1941 skb->destructor = sock_edemux;
1942 if (sk->sk_state != TCP_TIME_WAIT) {
1943 struct dst_entry *dst = sk->sk_rx_dst;
1944
1945 if (dst)
1946 dst = dst_check(dst, 0);
1947 if (dst &&
1948 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1949 skb_dst_set_noref(skb, dst);
1950 }
1951 }
1952 }
1953
1954 /*
1955 * From tcp_input.c
1956 */
1957
1958 int tcp_v4_rcv(struct sk_buff *skb)
1959 {
1960 const struct iphdr *iph;
1961 const struct tcphdr *th;
1962 struct sock *sk;
1963 int ret;
1964 struct net *net = dev_net(skb->dev);
1965
1966 if (skb->pkt_type != PACKET_HOST)
1967 goto discard_it;
1968
1969 /* Count it even if it's bad */
1970 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1971
1972 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1973 goto discard_it;
1974
1975 th = tcp_hdr(skb);
1976
1977 if (th->doff < sizeof(struct tcphdr) / 4)
1978 goto bad_packet;
1979 if (!pskb_may_pull(skb, th->doff * 4))
1980 goto discard_it;
1981
1982 /* An explanation is required here, I think.
1983 * Packet length and doff are validated by header prediction,
1984 	 * provided the th->doff == 0 case has been eliminated above.
1985 * So, we defer the checks. */
1986 if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
1987 goto bad_packet;
1988
1989 th = tcp_hdr(skb);
1990 iph = ip_hdr(skb);
1991 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1992 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1993 skb->len - th->doff * 4);
1994 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1995 TCP_SKB_CB(skb)->when = 0;
1996 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1997 TCP_SKB_CB(skb)->sacked = 0;
1998
1999 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
2000 if (!sk)
2001 goto no_tcp_socket;
2002
2003 process:
2004 if (sk->sk_state == TCP_TIME_WAIT)
2005 goto do_time_wait;
2006
2007 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
2008 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
2009 goto discard_and_relse;
2010 }
2011
2012 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
2013 goto discard_and_relse;
2014 nf_reset(skb);
2015
2016 if (sk_filter(sk, skb))
2017 goto discard_and_relse;
2018
2019 skb->dev = NULL;
2020
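	/* If the socket is not owned by user context, process the segment now
	 * (possibly via the prequeue); otherwise queue it on the backlog,
	 * bounded by rcvbuf + sndbuf, to be drained when the owner releases
	 * the socket.
	 */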
2021 bh_lock_sock_nested(sk);
2022 ret = 0;
2023 if (!sock_owned_by_user(sk)) {
2024 #ifdef CONFIG_NET_DMA
2025 struct tcp_sock *tp = tcp_sk(sk);
2026 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
2027 tp->ucopy.dma_chan = net_dma_find_channel();
2028 if (tp->ucopy.dma_chan)
2029 ret = tcp_v4_do_rcv(sk, skb);
2030 else
2031 #endif
2032 {
2033 if (!tcp_prequeue(sk, skb))
2034 ret = tcp_v4_do_rcv(sk, skb);
2035 }
2036 } else if (unlikely(sk_add_backlog(sk, skb,
2037 sk->sk_rcvbuf + sk->sk_sndbuf))) {
2038 bh_unlock_sock(sk);
2039 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
2040 goto discard_and_relse;
2041 }
2042 bh_unlock_sock(sk);
2043
2044 sock_put(sk);
2045
2046 return ret;
2047
2048 no_tcp_socket:
2049 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
2050 goto discard_it;
2051
2052 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
2053 bad_packet:
2054 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
2055 } else {
2056 tcp_v4_send_reset(NULL, skb);
2057 }
2058
2059 discard_it:
2060 /* Discard frame. */
2061 kfree_skb(skb);
2062 return 0;
2063
2064 discard_and_relse:
2065 sock_put(sk);
2066 goto discard_it;
2067
2068 do_time_wait:
2069 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
2070 inet_twsk_put(inet_twsk(sk));
2071 goto discard_it;
2072 }
2073
2074 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
2075 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
2076 inet_twsk_put(inet_twsk(sk));
2077 goto discard_it;
2078 }
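	/* tcp_timewait_state_process() decides whether this segment restarts
	 * the connection (TCP_TW_SYN), should be answered with an ACK or a
	 * reset, or has been fully consumed.
	 */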
2079 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
2080 case TCP_TW_SYN: {
2081 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
2082 &tcp_hashinfo,
2083 iph->saddr, th->source,
2084 iph->daddr, th->dest,
2085 inet_iif(skb));
2086 if (sk2) {
2087 inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
2088 inet_twsk_put(inet_twsk(sk));
2089 sk = sk2;
2090 goto process;
2091 }
2092 /* Fall through to ACK */
2093 }
2094 case TCP_TW_ACK:
2095 tcp_v4_timewait_ack(sk, skb);
2096 break;
2097 case TCP_TW_RST:
2098 goto no_tcp_socket;
2099 case TCP_TW_SUCCESS:;
2100 }
2101 goto discard_it;
2102 }
2103
2104 static struct timewait_sock_ops tcp_timewait_sock_ops = {
2105 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
2106 .twsk_unique = tcp_twsk_unique,
2107 .twsk_destructor= tcp_twsk_destructor,
2108 };
2109
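/* Cache the skb's input route and incoming interface on the socket so later
 * packets on this connection can reuse them (see the established fast path in
 * tcp_v4_do_rcv() and tcp_v4_early_demux()).
 */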
2110 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
2111 {
2112 struct dst_entry *dst = skb_dst(skb);
2113
2114 dst_hold(dst);
2115 sk->sk_rx_dst = dst;
2116 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
2117 }
2118 EXPORT_SYMBOL(inet_sk_rx_dst_set);
2119
2120 const struct inet_connection_sock_af_ops ipv4_specific = {
2121 .queue_xmit = ip_queue_xmit,
2122 .send_check = tcp_v4_send_check,
2123 .rebuild_header = inet_sk_rebuild_header,
2124 .sk_rx_dst_set = inet_sk_rx_dst_set,
2125 .conn_request = tcp_v4_conn_request,
2126 .syn_recv_sock = tcp_v4_syn_recv_sock,
2127 .net_header_len = sizeof(struct iphdr),
2128 .setsockopt = ip_setsockopt,
2129 .getsockopt = ip_getsockopt,
2130 .addr2sockaddr = inet_csk_addr2sockaddr,
2131 .sockaddr_len = sizeof(struct sockaddr_in),
2132 .bind_conflict = inet_csk_bind_conflict,
2133 #ifdef CONFIG_COMPAT
2134 .compat_setsockopt = compat_ip_setsockopt,
2135 .compat_getsockopt = compat_ip_getsockopt,
2136 #endif
2137 };
2138 EXPORT_SYMBOL(ipv4_specific);
2139
2140 #ifdef CONFIG_TCP_MD5SIG
2141 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
2142 .md5_lookup = tcp_v4_md5_lookup,
2143 .calc_md5_hash = tcp_v4_md5_hash_skb,
2144 .md5_parse = tcp_v4_parse_md5_keys,
2145 };
2146 #endif
2147
2148 /* NOTE: A lot of things are set to zero explicitly by the call to
2149  * sk_alloc(), so they need not be initialized here.
2150 */
2151 static int tcp_v4_init_sock(struct sock *sk)
2152 {
2153 struct inet_connection_sock *icsk = inet_csk(sk);
2154
2155 tcp_init_sock(sk);
2156
2157 icsk->icsk_af_ops = &ipv4_specific;
2158
2159 #ifdef CONFIG_TCP_MD5SIG
2160 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
2161 #endif
2162
2163 return 0;
2164 }
2165
2166 void tcp_v4_destroy_sock(struct sock *sk)
2167 {
2168 struct tcp_sock *tp = tcp_sk(sk);
2169
2170 tcp_clear_xmit_timers(sk);
2171
2172 tcp_cleanup_congestion_control(sk);
2173
2174 	/* Clean up the write buffer. */
2175 tcp_write_queue_purge(sk);
2176
2177 /* Cleans up our, hopefully empty, out_of_order_queue. */
2178 __skb_queue_purge(&tp->out_of_order_queue);
2179
2180 #ifdef CONFIG_TCP_MD5SIG
2181 /* Clean up the MD5 key list, if any */
2182 if (tp->md5sig_info) {
2183 tcp_clear_md5_list(sk);
2184 kfree_rcu(tp->md5sig_info, rcu);
2185 tp->md5sig_info = NULL;
2186 }
2187 #endif
2188
2189 #ifdef CONFIG_NET_DMA
2190 /* Cleans up our sk_async_wait_queue */
2191 __skb_queue_purge(&sk->sk_async_wait_queue);
2192 #endif
2193
2194 	/* Clean up the prequeue; it should already be empty. */
2195 __skb_queue_purge(&tp->ucopy.prequeue);
2196
2197 /* Clean up a referenced TCP bind bucket. */
2198 if (inet_csk(sk)->icsk_bind_hash)
2199 inet_put_port(sk);
2200
2201 /* TCP Cookie Transactions */
2202 if (tp->cookie_values != NULL) {
2203 kref_put(&tp->cookie_values->kref,
2204 tcp_cookie_values_release);
2205 tp->cookie_values = NULL;
2206 }
2207 BUG_ON(tp->fastopen_rsk != NULL);
2208
2209 	/* If the socket was aborted during a connect, free any pending Fast Open request. */
2210 tcp_free_fastopen_req(tp);
2211
2212 sk_sockets_allocated_dec(sk);
2213 sock_release_memcg(sk);
2214 }
2215 EXPORT_SYMBOL(tcp_v4_destroy_sock);
2216
2217 #ifdef CONFIG_PROC_FS
2218 /* Proc filesystem TCP sock list dumping. */
2219
2220 static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
2221 {
2222 return hlist_nulls_empty(head) ? NULL :
2223 list_entry(head->first, struct inet_timewait_sock, tw_node);
2224 }
2225
2226 static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
2227 {
2228 return !is_a_nulls(tw->tw_node.next) ?
2229 hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
2230 }
2231
2232 /*
2233  * Get the next listener socket following cur. If cur is NULL, get the first
2234  * socket starting from the bucket given in st->bucket; when st->bucket is zero the
2235 * very first socket in the hash table is returned.
2236 */
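/* The walk alternates between listening sockets in the listen hash and, for
 * each listening socket, the request_socks in its SYN table; st->state
 * (TCP_SEQ_STATE_LISTENING vs TCP_SEQ_STATE_OPENREQ) records which of the two
 * we are currently traversing.
 */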
2237 static void *listening_get_next(struct seq_file *seq, void *cur)
2238 {
2239 struct inet_connection_sock *icsk;
2240 struct hlist_nulls_node *node;
2241 struct sock *sk = cur;
2242 struct inet_listen_hashbucket *ilb;
2243 struct tcp_iter_state *st = seq->private;
2244 struct net *net = seq_file_net(seq);
2245
2246 if (!sk) {
2247 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2248 spin_lock_bh(&ilb->lock);
2249 sk = sk_nulls_head(&ilb->head);
2250 st->offset = 0;
2251 goto get_sk;
2252 }
2253 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2254 ++st->num;
2255 ++st->offset;
2256
2257 if (st->state == TCP_SEQ_STATE_OPENREQ) {
2258 struct request_sock *req = cur;
2259
2260 icsk = inet_csk(st->syn_wait_sk);
2261 req = req->dl_next;
2262 while (1) {
2263 while (req) {
2264 if (req->rsk_ops->family == st->family) {
2265 cur = req;
2266 goto out;
2267 }
2268 req = req->dl_next;
2269 }
2270 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
2271 break;
2272 get_req:
2273 req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
2274 }
2275 sk = sk_nulls_next(st->syn_wait_sk);
2276 st->state = TCP_SEQ_STATE_LISTENING;
2277 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2278 } else {
2279 icsk = inet_csk(sk);
2280 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2281 if (reqsk_queue_len(&icsk->icsk_accept_queue))
2282 goto start_req;
2283 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2284 sk = sk_nulls_next(sk);
2285 }
2286 get_sk:
2287 sk_nulls_for_each_from(sk, node) {
2288 if (!net_eq(sock_net(sk), net))
2289 continue;
2290 if (sk->sk_family == st->family) {
2291 cur = sk;
2292 goto out;
2293 }
2294 icsk = inet_csk(sk);
2295 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2296 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
2297 start_req:
2298 st->uid = sock_i_uid(sk);
2299 st->syn_wait_sk = sk;
2300 st->state = TCP_SEQ_STATE_OPENREQ;
2301 st->sbucket = 0;
2302 goto get_req;
2303 }
2304 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2305 }
2306 spin_unlock_bh(&ilb->lock);
2307 st->offset = 0;
2308 if (++st->bucket < INET_LHTABLE_SIZE) {
2309 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2310 spin_lock_bh(&ilb->lock);
2311 sk = sk_nulls_head(&ilb->head);
2312 goto get_sk;
2313 }
2314 cur = NULL;
2315 out:
2316 return cur;
2317 }
2318
2319 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2320 {
2321 struct tcp_iter_state *st = seq->private;
2322 void *rc;
2323
2324 st->bucket = 0;
2325 st->offset = 0;
2326 rc = listening_get_next(seq, NULL);
2327
2328 while (rc && *pos) {
2329 rc = listening_get_next(seq, rc);
2330 --*pos;
2331 }
2332 return rc;
2333 }
2334
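/* A bucket counts as empty only when it holds neither established sockets
 * nor TIME_WAIT sockets; both chains hang off the same ehash bucket index.
 */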
2335 static inline bool empty_bucket(struct tcp_iter_state *st)
2336 {
2337 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
2338 hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
2339 }
2340
2341 /*
2342 * Get first established socket starting from bucket given in st->bucket.
2343 * If st->bucket is zero, the very first socket in the hash is returned.
2344 */
2345 static void *established_get_first(struct seq_file *seq)
2346 {
2347 struct tcp_iter_state *st = seq->private;
2348 struct net *net = seq_file_net(seq);
2349 void *rc = NULL;
2350
2351 st->offset = 0;
2352 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2353 struct sock *sk;
2354 struct hlist_nulls_node *node;
2355 struct inet_timewait_sock *tw;
2356 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2357
2358 /* Lockless fast path for the common case of empty buckets */
2359 if (empty_bucket(st))
2360 continue;
2361
2362 spin_lock_bh(lock);
2363 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2364 if (sk->sk_family != st->family ||
2365 !net_eq(sock_net(sk), net)) {
2366 continue;
2367 }
2368 rc = sk;
2369 goto out;
2370 }
2371 st->state = TCP_SEQ_STATE_TIME_WAIT;
2372 inet_twsk_for_each(tw, node,
2373 &tcp_hashinfo.ehash[st->bucket].twchain) {
2374 if (tw->tw_family != st->family ||
2375 !net_eq(twsk_net(tw), net)) {
2376 continue;
2377 }
2378 rc = tw;
2379 goto out;
2380 }
2381 spin_unlock_bh(lock);
2382 st->state = TCP_SEQ_STATE_ESTABLISHED;
2383 }
2384 out:
2385 return rc;
2386 }
2387
2388 static void *established_get_next(struct seq_file *seq, void *cur)
2389 {
2390 struct sock *sk = cur;
2391 struct inet_timewait_sock *tw;
2392 struct hlist_nulls_node *node;
2393 struct tcp_iter_state *st = seq->private;
2394 struct net *net = seq_file_net(seq);
2395
2396 ++st->num;
2397 ++st->offset;
2398
2399 if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2400 tw = cur;
2401 tw = tw_next(tw);
2402 get_tw:
2403 while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
2404 tw = tw_next(tw);
2405 }
2406 if (tw) {
2407 cur = tw;
2408 goto out;
2409 }
2410 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2411 st->state = TCP_SEQ_STATE_ESTABLISHED;
2412
2413 		/* Look for the next non-empty bucket */
2414 st->offset = 0;
2415 while (++st->bucket <= tcp_hashinfo.ehash_mask &&
2416 empty_bucket(st))
2417 ;
2418 if (st->bucket > tcp_hashinfo.ehash_mask)
2419 return NULL;
2420
2421 spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2422 sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
2423 } else
2424 sk = sk_nulls_next(sk);
2425
2426 sk_nulls_for_each_from(sk, node) {
2427 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2428 goto found;
2429 }
2430
2431 st->state = TCP_SEQ_STATE_TIME_WAIT;
2432 tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
2433 goto get_tw;
2434 found:
2435 cur = sk;
2436 out:
2437 return cur;
2438 }
2439
2440 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2441 {
2442 struct tcp_iter_state *st = seq->private;
2443 void *rc;
2444
2445 st->bucket = 0;
2446 rc = established_get_first(seq);
2447
2448 while (rc && pos) {
2449 rc = established_get_next(seq, rc);
2450 --pos;
2451 }
2452 return rc;
2453 }
2454
2455 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2456 {
2457 void *rc;
2458 struct tcp_iter_state *st = seq->private;
2459
2460 st->state = TCP_SEQ_STATE_LISTENING;
2461 rc = listening_get_idx(seq, &pos);
2462
2463 if (!rc) {
2464 st->state = TCP_SEQ_STATE_ESTABLISHED;
2465 rc = established_get_idx(seq, pos);
2466 }
2467
2468 return rc;
2469 }
2470
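/* Resume the walk at the position saved by the previous pass
 * (st->state, st->bucket and st->offset), so consecutive reads of a large
 * /proc/net/tcp file do not have to restart from the first bucket.
 */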
2471 static void *tcp_seek_last_pos(struct seq_file *seq)
2472 {
2473 struct tcp_iter_state *st = seq->private;
2474 int offset = st->offset;
2475 int orig_num = st->num;
2476 void *rc = NULL;
2477
2478 switch (st->state) {
2479 case TCP_SEQ_STATE_OPENREQ:
2480 case TCP_SEQ_STATE_LISTENING:
2481 if (st->bucket >= INET_LHTABLE_SIZE)
2482 break;
2483 st->state = TCP_SEQ_STATE_LISTENING;
2484 rc = listening_get_next(seq, NULL);
2485 while (offset-- && rc)
2486 rc = listening_get_next(seq, rc);
2487 if (rc)
2488 break;
2489 st->bucket = 0;
2490 /* Fallthrough */
2491 case TCP_SEQ_STATE_ESTABLISHED:
2492 case TCP_SEQ_STATE_TIME_WAIT:
2493 st->state = TCP_SEQ_STATE_ESTABLISHED;
2494 if (st->bucket > tcp_hashinfo.ehash_mask)
2495 break;
2496 rc = established_get_first(seq);
2497 while (offset-- && rc)
2498 rc = established_get_next(seq, rc);
2499 }
2500
2501 st->num = orig_num;
2502
2503 return rc;
2504 }
2505
2506 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2507 {
2508 struct tcp_iter_state *st = seq->private;
2509 void *rc;
2510
2511 if (*pos && *pos == st->last_pos) {
2512 rc = tcp_seek_last_pos(seq);
2513 if (rc)
2514 goto out;
2515 }
2516
2517 st->state = TCP_SEQ_STATE_LISTENING;
2518 st->num = 0;
2519 st->bucket = 0;
2520 st->offset = 0;
2521 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2522
2523 out:
2524 st->last_pos = *pos;
2525 return rc;
2526 }
2527
2528 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2529 {
2530 struct tcp_iter_state *st = seq->private;
2531 void *rc = NULL;
2532
2533 if (v == SEQ_START_TOKEN) {
2534 rc = tcp_get_idx(seq, 0);
2535 goto out;
2536 }
2537
2538 switch (st->state) {
2539 case TCP_SEQ_STATE_OPENREQ:
2540 case TCP_SEQ_STATE_LISTENING:
2541 rc = listening_get_next(seq, v);
2542 if (!rc) {
2543 st->state = TCP_SEQ_STATE_ESTABLISHED;
2544 st->bucket = 0;
2545 st->offset = 0;
2546 rc = established_get_first(seq);
2547 }
2548 break;
2549 case TCP_SEQ_STATE_ESTABLISHED:
2550 case TCP_SEQ_STATE_TIME_WAIT:
2551 rc = established_get_next(seq, v);
2552 break;
2553 }
2554 out:
2555 ++*pos;
2556 st->last_pos = *pos;
2557 return rc;
2558 }
2559
2560 static void tcp_seq_stop(struct seq_file *seq, void *v)
2561 {
2562 struct tcp_iter_state *st = seq->private;
2563
2564 switch (st->state) {
2565 case TCP_SEQ_STATE_OPENREQ:
2566 if (v) {
2567 struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2568 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2569 }
2570 case TCP_SEQ_STATE_LISTENING:
2571 if (v != SEQ_START_TOKEN)
2572 spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2573 break;
2574 case TCP_SEQ_STATE_TIME_WAIT:
2575 case TCP_SEQ_STATE_ESTABLISHED:
2576 if (v)
2577 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2578 break;
2579 }
2580 }
2581
2582 int tcp_seq_open(struct inode *inode, struct file *file)
2583 {
2584 struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
2585 struct tcp_iter_state *s;
2586 int err;
2587
2588 err = seq_open_net(inode, file, &afinfo->seq_ops,
2589 sizeof(struct tcp_iter_state));
2590 if (err < 0)
2591 return err;
2592
2593 s = ((struct seq_file *)file->private_data)->private;
2594 s->family = afinfo->family;
2595 s->last_pos = 0;
2596 return 0;
2597 }
2598 EXPORT_SYMBOL(tcp_seq_open);
2599
2600 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2601 {
2602 int rc = 0;
2603 struct proc_dir_entry *p;
2604
2605 afinfo->seq_ops.start = tcp_seq_start;
2606 afinfo->seq_ops.next = tcp_seq_next;
2607 afinfo->seq_ops.stop = tcp_seq_stop;
2608
2609 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2610 afinfo->seq_fops, afinfo);
2611 if (!p)
2612 rc = -ENOMEM;
2613 return rc;
2614 }
2615 EXPORT_SYMBOL(tcp_proc_register);
2616
2617 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2618 {
2619 proc_net_remove(net, afinfo->name);
2620 }
2621 EXPORT_SYMBOL(tcp_proc_unregister);
2622
2623 static void get_openreq4(const struct sock *sk, const struct request_sock *req,
2624 struct seq_file *f, int i, kuid_t uid, int *len)
2625 {
2626 const struct inet_request_sock *ireq = inet_rsk(req);
2627 long delta = req->expires - jiffies;
2628
2629 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2630 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
2631 i,
2632 ireq->loc_addr,
2633 ntohs(inet_sk(sk)->inet_sport),
2634 ireq->rmt_addr,
2635 ntohs(ireq->rmt_port),
2636 TCP_SYN_RECV,
2637 0, 0, /* could print option size, but that is af dependent. */
2638 1, /* timers active (only the expire timer) */
2639 jiffies_delta_to_clock_t(delta),
2640 req->num_timeout,
2641 from_kuid_munged(seq_user_ns(f), uid),
2642 0, /* non standard timer */
2643 0, /* open_requests have no inode */
2644 atomic_read(&sk->sk_refcnt),
2645 req,
2646 len);
2647 }
2648
2649 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2650 {
2651 int timer_active;
2652 unsigned long timer_expires;
2653 const struct tcp_sock *tp = tcp_sk(sk);
2654 const struct inet_connection_sock *icsk = inet_csk(sk);
2655 const struct inet_sock *inet = inet_sk(sk);
2656 struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
2657 __be32 dest = inet->inet_daddr;
2658 __be32 src = inet->inet_rcv_saddr;
2659 __u16 destp = ntohs(inet->inet_dport);
2660 __u16 srcp = ntohs(inet->inet_sport);
2661 int rx_queue;
2662
2663 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
2664 timer_active = 1;
2665 timer_expires = icsk->icsk_timeout;
2666 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2667 timer_active = 4;
2668 timer_expires = icsk->icsk_timeout;
2669 } else if (timer_pending(&sk->sk_timer)) {
2670 timer_active = 2;
2671 timer_expires = sk->sk_timer.expires;
2672 } else {
2673 timer_active = 0;
2674 timer_expires = jiffies;
2675 }
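	/* timer_active values reported in /proc/net/tcp: 1 retransmit timer,
	 * 2 keepalive (sk_timer), 4 zero window probe, 0 none; 3 is used for
	 * TIME_WAIT sockets in get_timewait4_sock().
	 */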
2676
2677 if (sk->sk_state == TCP_LISTEN)
2678 rx_queue = sk->sk_ack_backlog;
2679 else
2680 /*
2681 		 * Because we don't lock the socket, we might find a transient negative value.
2682 */
2683 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2684
2685 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2686 "%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
2687 i, src, srcp, dest, destp, sk->sk_state,
2688 tp->write_seq - tp->snd_una,
2689 rx_queue,
2690 timer_active,
2691 jiffies_delta_to_clock_t(timer_expires - jiffies),
2692 icsk->icsk_retransmits,
2693 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2694 icsk->icsk_probes_out,
2695 sock_i_ino(sk),
2696 atomic_read(&sk->sk_refcnt), sk,
2697 jiffies_to_clock_t(icsk->icsk_rto),
2698 jiffies_to_clock_t(icsk->icsk_ack.ato),
2699 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2700 tp->snd_cwnd,
2701 sk->sk_state == TCP_LISTEN ?
2702 (fastopenq ? fastopenq->max_qlen : 0) :
2703 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh),
2704 len);
2705 }
2706
2707 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2708 struct seq_file *f, int i, int *len)
2709 {
2710 __be32 dest, src;
2711 __u16 destp, srcp;
2712 long delta = tw->tw_ttd - jiffies;
2713
2714 dest = tw->tw_daddr;
2715 src = tw->tw_rcv_saddr;
2716 destp = ntohs(tw->tw_dport);
2717 srcp = ntohs(tw->tw_sport);
2718
2719 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2720 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
2721 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2722 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2723 atomic_read(&tw->tw_refcnt), tw, len);
2724 }
2725
2726 #define TMPSZ 150
2727
2728 static int tcp4_seq_show(struct seq_file *seq, void *v)
2729 {
2730 struct tcp_iter_state *st;
2731 int len;
2732
2733 if (v == SEQ_START_TOKEN) {
2734 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2735 " sl local_address rem_address st tx_queue "
2736 "rx_queue tr tm->when retrnsmt uid timeout "
2737 "inode");
2738 goto out;
2739 }
2740 st = seq->private;
2741
2742 switch (st->state) {
2743 case TCP_SEQ_STATE_LISTENING:
2744 case TCP_SEQ_STATE_ESTABLISHED:
2745 get_tcp4_sock(v, seq, st->num, &len);
2746 break;
2747 case TCP_SEQ_STATE_OPENREQ:
2748 get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
2749 break;
2750 case TCP_SEQ_STATE_TIME_WAIT:
2751 get_timewait4_sock(v, seq, st->num, &len);
2752 break;
2753 }
2754 seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
2755 out:
2756 return 0;
2757 }
2758
2759 static const struct file_operations tcp_afinfo_seq_fops = {
2760 .owner = THIS_MODULE,
2761 .open = tcp_seq_open,
2762 .read = seq_read,
2763 .llseek = seq_lseek,
2764 .release = seq_release_net
2765 };
2766
2767 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2768 .name = "tcp",
2769 .family = AF_INET,
2770 .seq_fops = &tcp_afinfo_seq_fops,
2771 .seq_ops = {
2772 .show = tcp4_seq_show,
2773 },
2774 };
2775
2776 static int __net_init tcp4_proc_init_net(struct net *net)
2777 {
2778 return tcp_proc_register(net, &tcp4_seq_afinfo);
2779 }
2780
2781 static void __net_exit tcp4_proc_exit_net(struct net *net)
2782 {
2783 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2784 }
2785
2786 static struct pernet_operations tcp4_net_ops = {
2787 .init = tcp4_proc_init_net,
2788 .exit = tcp4_proc_exit_net,
2789 };
2790
2791 int __init tcp4_proc_init(void)
2792 {
2793 return register_pernet_subsys(&tcp4_net_ops);
2794 }
2795
2796 void tcp4_proc_exit(void)
2797 {
2798 unregister_pernet_subsys(&tcp4_net_ops);
2799 }
2800 #endif /* CONFIG_PROC_FS */
2801
2802 struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2803 {
2804 const struct iphdr *iph = skb_gro_network_header(skb);
2805 __wsum wsum;
2806 __sum16 sum;
2807
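	/* Verify the TCP checksum before aggregation: CHECKSUM_COMPLETE only
	 * needs the pseudo-header folded in, while CHECKSUM_NONE requires a
	 * full software checksum. Any mismatch sets the GRO flush flag so the
	 * segment is not merged.
	 */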
2808 switch (skb->ip_summed) {
2809 case CHECKSUM_COMPLETE:
2810 if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
2811 skb->csum)) {
2812 skb->ip_summed = CHECKSUM_UNNECESSARY;
2813 break;
2814 }
2815 flush:
2816 NAPI_GRO_CB(skb)->flush = 1;
2817 return NULL;
2818
2819 case CHECKSUM_NONE:
2820 wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
2821 skb_gro_len(skb), IPPROTO_TCP, 0);
2822 sum = csum_fold(skb_checksum(skb,
2823 skb_gro_offset(skb),
2824 skb_gro_len(skb),
2825 wsum));
2826 if (sum)
2827 goto flush;
2828
2829 skb->ip_summed = CHECKSUM_UNNECESSARY;
2830 break;
2831 }
2832
2833 return tcp_gro_receive(head, skb);
2834 }
2835
2836 int tcp4_gro_complete(struct sk_buff *skb)
2837 {
2838 const struct iphdr *iph = ip_hdr(skb);
2839 struct tcphdr *th = tcp_hdr(skb);
2840
2841 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
2842 iph->saddr, iph->daddr, 0);
2843 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2844
2845 return tcp_gro_complete(skb);
2846 }
2847
2848 struct proto tcp_prot = {
2849 .name = "TCP",
2850 .owner = THIS_MODULE,
2851 .close = tcp_close,
2852 .connect = tcp_v4_connect,
2853 .disconnect = tcp_disconnect,
2854 .accept = inet_csk_accept,
2855 .ioctl = tcp_ioctl,
2856 .init = tcp_v4_init_sock,
2857 .destroy = tcp_v4_destroy_sock,
2858 .shutdown = tcp_shutdown,
2859 .setsockopt = tcp_setsockopt,
2860 .getsockopt = tcp_getsockopt,
2861 .recvmsg = tcp_recvmsg,
2862 .sendmsg = tcp_sendmsg,
2863 .sendpage = tcp_sendpage,
2864 .backlog_rcv = tcp_v4_do_rcv,
2865 .release_cb = tcp_release_cb,
2866 .mtu_reduced = tcp_v4_mtu_reduced,
2867 .hash = inet_hash,
2868 .unhash = inet_unhash,
2869 .get_port = inet_csk_get_port,
2870 .enter_memory_pressure = tcp_enter_memory_pressure,
2871 .sockets_allocated = &tcp_sockets_allocated,
2872 .orphan_count = &tcp_orphan_count,
2873 .memory_allocated = &tcp_memory_allocated,
2874 .memory_pressure = &tcp_memory_pressure,
2875 .sysctl_wmem = sysctl_tcp_wmem,
2876 .sysctl_rmem = sysctl_tcp_rmem,
2877 .max_header = MAX_TCP_HEADER,
2878 .obj_size = sizeof(struct tcp_sock),
2879 .slab_flags = SLAB_DESTROY_BY_RCU,
2880 .twsk_prot = &tcp_timewait_sock_ops,
2881 .rsk_prot = &tcp_request_sock_ops,
2882 .h.hashinfo = &tcp_hashinfo,
2883 .no_autobind = true,
2884 #ifdef CONFIG_COMPAT
2885 .compat_setsockopt = compat_tcp_setsockopt,
2886 .compat_getsockopt = compat_tcp_getsockopt,
2887 #endif
2888 #ifdef CONFIG_MEMCG_KMEM
2889 .init_cgroup = tcp_init_cgroup,
2890 .destroy_cgroup = tcp_destroy_cgroup,
2891 .proto_cgroup = tcp_proto_cgroup,
2892 #endif
2893 };
2894 EXPORT_SYMBOL(tcp_prot);
2895
2896 static int __net_init tcp_sk_init(struct net *net)
2897 {
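	/* Default tcp_ecn to 2: accept ECN when the peer requests it, but do
	 * not request ECN on outgoing connections.
	 */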
2898 net->ipv4.sysctl_tcp_ecn = 2;
2899 return 0;
2900 }
2901
2902 static void __net_exit tcp_sk_exit(struct net *net)
2903 {
2904 }
2905
2906 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2907 {
2908 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2909 }
2910
2911 static struct pernet_operations __net_initdata tcp_sk_ops = {
2912 .init = tcp_sk_init,
2913 .exit = tcp_sk_exit,
2914 .exit_batch = tcp_sk_exit_batch,
2915 };
2916
2917 void __init tcp_v4_init(void)
2918 {
2919 inet_hashinfo_init(&tcp_hashinfo);
2920 if (register_pernet_subsys(&tcp_sk_ops))
2921 panic("Failed to create the TCP control socket.\n");
2922 }