/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>
#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
static u32 tcp_v4_init_seq(const struct sk_buff *skb)
{
	return secure_tcp_seq(ip_hdr(skb)->daddr,
			      ip_hdr(skb)->saddr,
			      tcp_hdr(skb)->dest,
			      tcp_hdr(skb)->source);
}

static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
}
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (sock_net(sk)->ipv4.sysctl_tcp_tw_reuse &&
		      get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
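/*
 * Worked example for the reuse test above (illustrative, not from the
 * original source): with sysctl_tcp_tw_reuse enabled, a TIME-WAIT port
 * pair whose last timestamp was recorded at t = 100s may be reused by a
 * new connect() at t = 102s, since get_seconds() - tw_ts_recent_stamp
 * is 2 > 1. The new write_seq starts 65535 + 2 past tw_snd_nxt, so the
 * sequence spaces of the old and new incarnations cannot overlap.
 */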
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     lockdep_sock_is_held(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and not releasing socket
	 * lock select source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(tcp_death_row, sk);
	if (err)
		goto failure;

	sk_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);
	rt = NULL;

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			tp->write_seq = secure_tcp_seq(inet->inet_saddr,
						       inet->inet_daddr,
						       inet->inet_sport,
						       usin->sin_port);
		tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
						 inet->inet_saddr,
						 inet->inet_daddr);
	}

	inet->inet_id = prandom_u32();

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto failure;

	err = tcp_connect(sk);

	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
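/*
 * For reference, the whole path above is driven by an ordinary userspace
 * connect(). A minimal sketch of the calling side (hypothetical address
 * and port, error handling elided):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *	};
 *	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *
 * For AF_INET stream sockets the connect() syscall reaches
 * tcp_v4_connect() via inet_stream_connect().
 */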
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct dst_entry *dst;
	u32 mtu;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;
	mtu = tcp_sk(sk)->mtu_info;
	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}
/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else if (abort) {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		tcp_listendrop(req->rsk_listener);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	u32 seq, snd_una;
	s32 remaining;
	u32 delta_us;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(icmp_skb), 0);
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq,
				   type == ICMP_PARAMETERPROB ||
				   type == ICMP_TIME_EXCEEDED ||
				   (type == ICMP_DEST_UNREACH &&
				    (code == ICMP_NET_UNREACH ||
				     code == ICMP_HOST_UNREACH)));

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		if (!sock_owned_by_user(sk))
			do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs send out by Linux are always <576bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		skb = tcp_rtx_queue_head(sk);
		if (WARN_ON_ONCE(!skb))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		tcp_mstamp_refresh(tp);
		delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
		remaining = icsk->icsk_rto -
			    usecs_to_jiffies(delta_us);

		if (remaining > 0) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even this two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
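/*
 * For clarity (illustrative, not part of the original source):
 * tcp_v4_check() folds the classic IPv4 pseudo-header into the TCP
 * checksum. The layout, matching struct tcp4_pseudohdr used further
 * below, is:
 *
 *	saddr (4 bytes) | daddr (4 bytes) | zero pad (1 byte) |
 *	protocol = IPPROTO_TCP (1 byte) | TCP length (2 bytes)
 *
 * In the CHECKSUM_PARTIAL branch only the pseudo-header sum is stored
 * in th->check; the NIC (or skb_checksum_help()) finishes the sum over
 * the TCP header and payload starting at csum_start/csum_offset.
 */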
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So that we build reply only basing on parameters
 *		arrived with segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key = NULL;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
	} else if (hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not loose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
					     ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb),
					     tcp_v4_sdif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			goto out;

		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto out;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk) {
		arg.bound_dev_if = sk->sk_bound_dev_if;
		if (sk_fullsock(sk))
			trace_tcp_send_reset(sk, skb);
	}

	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

	arg.tos = ip_hdr(skb)->tos;
	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
	local_bh_enable();

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}
/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(const struct sock *sk,
			    struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct net *net = sock_net(sk);
	struct ip_reply_arg arg;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	local_bh_enable();
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(sk, skb,
			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}
static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
					     tcp_sk(sk)->snd_nxt;

	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v4_send_ack(sk, skb, seq,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		rcu_read_lock();
		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    rcu_dereference(ireq->ireq_opt));
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

	return err;
}
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	const struct tcp_md5sig_info *md5sig;
	__be32 mask;
	struct tcp_md5sig_key *best_match = NULL;
	bool match;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;

	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;

		if (family == AF_INET) {
			mask = inet_make_mask(key->prefixlen);
			match = (key->addr.a4.s_addr & mask) ==
				(addr->a4.s_addr & mask);
#if IS_ENABLED(CONFIG_IPV6)
		} else if (family == AF_INET6) {
			match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
						  key->prefixlen);
#endif
		} else {
			match = false;
		}

		if (match && (!best_match ||
			      key->prefixlen > best_match->prefixlen))
			best_match = key;
	}
	return best_match;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);
static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
						      const union tcp_md5_addr *addr,
						      int family, u8 prefixlen)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size) &&
		    key->prefixlen == prefixlen)
			return key;
	}
	return NULL;
}
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{
	const union tcp_md5_addr *addr;

	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);
/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
		   gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   lockdep_sock_is_held(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	key->prefixlen = prefixlen;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
		   u8 prefixlen)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);
static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}
static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
				 char __user *optval, int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
	u8 prefixlen = 32;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 32)
			return -EINVAL;
	}

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET, prefixlen);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, prefixlen, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
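/*
 * Userspace reaches the parser above through setsockopt(). A minimal
 * sketch (hypothetical peer address and key, error handling elided):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *a = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	a->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.2", &a->sin_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * Passing tcpm_keylen == 0 deletes the key, taking the tcp_md5_do_del()
 * branch above.
 */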
static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   __be32 daddr, __be32 saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) { /* valid for establish/request sockets */
		saddr = sk->sk_rcv_saddr;
		daddr = sk->sk_daddr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);

		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
#endif
/* Called with rcu_read_lock() */
static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and its wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
	return false;
#endif
	return false;
}
static void tcp_v4_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = sock_net(sk_listener);

	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
}
static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req)
{
	return inet_csk_route_req(sk, &fl->u.ip4, req);
}
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_seq,
	.init_ts_off	=	tcp_v4_init_ts_off,
	.send_synack	=	tcp_v4_send_synack,
};
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer to SYNs send to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	sk_daddr_set(newsk, ireq->ir_rmt_addr);
	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
	newsk->sk_bound_dev_if = ireq->ir_iif;
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = rcu_dereference(ireq->ireq_opt);
	RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = prandom_u32();

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, 32, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (likely(*own_req)) {
		tcp_move_syn(newtp, req);
		ireq->ireq_opt = NULL;
	} else {
		newinet->inet_opt = NULL;
	}
	return newsk;

exit_overflow:
	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	tcp_listendrop(sk);
	return NULL;
put_and_exit:
	newinet->inet_opt = NULL;
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v4_check(sk, skb);
#endif
	return sk;
}
/* The socket must have it's spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    !dst->ops->check(dst, 0)) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb, tcp_hdr(skb));
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_cookie_check(sk, skb);

		if (!nsk)
			goto discard;
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
int tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return 0;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return 0;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return 0;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif, inet_sdif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
	return 0;
}
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;

	/* Only socket owner can try to collapse/prune rx queues
	 * to reduce memory overhead, so add a little headroom here.
	 * Few sockets backlog are possibly concurrently non empty.
	 */
	limit += 64*1024;

	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
	 * we can fix skb->truesize to its real value to avoid future drops.
	 * This is valid because skb is not yet charged to the socket.
	 * It has been noticed pure SACK packets were sometimes dropped
	 * (if cooked by drivers without copybreak feature).
	 */
	skb_condense(skb);

	if (unlikely(sk_add_backlog(sk, skb, limit))) {
		bh_unlock_sock(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcp_add_backlog);
int tcp_filter(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = (struct tcphdr *)skb->data;

	return sk_filter_trim_cap(sk, skb, th->doff * 4);
}
EXPORT_SYMBOL(tcp_filter);
static void tcp_v4_restore_cb(struct sk_buff *skb)
{
	memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
		sizeof(struct inet_skb_parm));
}
static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
			   const struct tcphdr *th)
{
	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
	 * barrier() makes sure compiler wont play fool^Waliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
		sizeof(struct inet_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;
	TCP_SKB_CB(skb)->has_rxtstamp =
			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
}
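/*
 * Worked example for the end_seq arithmetic above (illustrative, not from
 * the original source): a segment with seq = 1000, a 20-byte header
 * (doff = 5), 100 bytes of payload and the FIN bit set yields
 *
 *	end_seq = 1000 + 0 (syn) + 1 (fin) + 120 - 20 = 1101
 *
 * i.e. SYN and FIN each consume one unit of sequence space on top of the
 * payload bytes.
 */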
int tcp_v4_rcv(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	int sdif = inet_sdif(skb);
	const struct iphdr *iph;
	const struct tcphdr *th;
	bool refcounted;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */

	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		goto csum_error;

	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);
lookup:
	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
			       th->dest, sdif, &refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (tcp_checksum_complete(skb)) {
			reqsk_put(req);
			goto csum_error;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		/* We own a reference on the listener, increase it again
		 * as we might lose it too soon.
		 */
		sock_hold(sk);
		refcounted = true;
		nsk = NULL;
		if (!tcp_filter(sk, skb)) {
			th = (const struct tcphdr *)skb->data;
			iph = ip_hdr(skb);
			tcp_v4_fill_cb(skb, iph, th);
			nsk = tcp_check_req(sk, skb, req, false);
		}
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v4_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v4_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	nf_reset(skb);

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);
	tcp_v4_fill_cb(skb, iph, th);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v4_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		ret = tcp_v4_do_rcv(sk, skb);
	} else if (tcp_add_backlog(sk, skb)) {
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	if (refcounted)
		sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v4_fill_cb(skb, iph, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v4_fill_cb(skb, iph, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo, skb,
							__tcp_hdrlen(th),
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb),
							sdif);
		if (sk2) {
			inet_twsk_deschedule_put(inet_twsk(sk));
			sk = sk2;
			tcp_v4_restore_cb(skb);
			refcounted = false;
			goto process;
		}
	}
		/* to ACK */
		/* fall through */
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v4_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);
const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	trace_tcp_destroy_sock(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	tcp_cleanup_ulp(sk);

	/* Cleanup up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Check if we want to disable active TFO */
	tcp_fastopen_active_disable_ofo_check(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	skb_rbtree_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(tp->fastopen_rsk);

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);
	tcp_fastopen_destroy_cipher(sk);
	tcp_saved_syn_free(tp);

	sk_sockets_allocated_dec(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get next listener socket follow cur.  If cur is NULL, get first socket
 * starting from bucket given in st->bucket; when st->bucket is zero the
 * very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	struct inet_listen_hashbucket *ilb;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;

	if (!sk) {
get_head:
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock(&ilb->lock);
		sk = sk_nulls_head(&ilb->nulls_head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family)
			return sk;
	}
	spin_unlock(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE)
		goto get_head;
	return NULL;
}

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}

/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		spin_unlock_bh(lock);
	}
out:
	return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			return sk;
	}

	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}

static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}

int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	s->last_pos		= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);

static void get_openreq4(const struct request_sock *req,
			 struct seq_file *f, int i)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->rsk_timer.expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ireq->ir_num,
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f),
				 sock_i_uid(req->rsk_listener)),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		0,
		req);
}

static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;
	int state;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = sk_state_load(sk);
	if (state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		refcount_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		state == TCP_LISTEN ?
		    fastopenq->max_qlen :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}

static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	__be32 dest, src;
	__u16 destp, srcp;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		refcount_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait4_sock(v, seq, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq4(v, seq, st->num);
	else
		get_tcp4_sock(v, seq, st->num);
out:
	seq_pad(seq, '\n');
	return 0;
}

static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
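/*
 * The seq_file plumbing above backs /proc/net/tcp. For reference, a
 * typical read from userspace is simply:
 *
 *	cat /proc/net/tcp
 *
 * which walks the listening hash first and the established hash second,
 * one line per socket in the format produced by get_tcp4_sock(),
 * get_openreq4() and get_timewait4_sock().
 */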
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
EXPORT_SYMBOL(tcp_prot);
static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	if (net->ipv4.tcp_congestion_control)
		module_put(net->ipv4.tcp_congestion_control->owner);

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}
static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu, cnt;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

		/* Please enforce IP_DF and IPID==0 for RST and
		 * ACK sent in SYN-RECV and TIME-WAIT state.
		 */
		inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;

		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}

	net->ipv4.sysctl_tcp_ecn = 2;
	net->ipv4.sysctl_tcp_ecn_fallback = 1;

	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;

	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;

	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
	net->ipv4.sysctl_tcp_syncookies = 1;
	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
	net->ipv4.sysctl_tcp_orphan_retries = 0;
	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
	net->ipv4.sysctl_tcp_tw_reuse = 0;

	cnt = tcp_hashinfo.ehash_mask + 1;
	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2;
	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;

	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
	net->ipv4.sysctl_tcp_sack = 1;
	net->ipv4.sysctl_tcp_window_scaling = 1;
	net->ipv4.sysctl_tcp_timestamps = 1;
	net->ipv4.sysctl_tcp_early_retrans = 3;
	net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
	net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC2861 behavior.  */
	net->ipv4.sysctl_tcp_retrans_collapse = 1;
	net->ipv4.sysctl_tcp_max_reordering = 300;
	net->ipv4.sysctl_tcp_dsack = 1;
	net->ipv4.sysctl_tcp_app_win = 31;
	net->ipv4.sysctl_tcp_adv_win_scale = 1;
	net->ipv4.sysctl_tcp_frto = 2;
	net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
	/* This limits the percentage of the congestion window which we
	 * will allow a single TSO frame to consume.  Building TSO frames
	 * which are too large can cause TCP streams to be bursty.
	 */
	net->ipv4.sysctl_tcp_tso_win_divisor = 3;
	/* Default TSQ limit of four TSO segments */
	net->ipv4.sysctl_tcp_limit_output_bytes = 262144;
	/* rfc5961 challenge ack rate limiting */
	net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
	net->ipv4.sysctl_tcp_min_tso_segs = 2;
	net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
	net->ipv4.sysctl_tcp_autocorking = 1;
	net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
	net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
	net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
	if (net != &init_net) {
		memcpy(net->ipv4.sysctl_tcp_rmem,
		       init_net.ipv4.sysctl_tcp_rmem,
		       sizeof(init_net.ipv4.sysctl_tcp_rmem));
		memcpy(net->ipv4.sysctl_tcp_wmem,
		       init_net.ipv4.sysctl_tcp_wmem,
		       sizeof(init_net.ipv4.sysctl_tcp_wmem));
	}
	net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
	spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
	net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
	atomic_set(&net->ipv4.tfo_active_disable_times, 0);

	/* Reno is always built in */
	if (!net_eq(net, &init_net) &&
	    try_module_get(init_net.ipv4.tcp_congestion_control->owner))
		net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
	else
		net->ipv4.tcp_congestion_control = &tcp_reno;

	return 0;
fail:
	tcp_sk_exit(net);

	return res;
}
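/*
 * The per-netns defaults set in tcp_sk_init() surface as sysctls; for
 * example (illustrative only):
 *
 *	sysctl -w net.ipv4.tcp_syncookies=1
 *	sysctl -w net.ipv4.tcp_tw_reuse=1
 *
 * write net->ipv4.sysctl_tcp_syncookies and net->ipv4.sysctl_tcp_tw_reuse
 * respectively.
 */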
static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	struct net *net;

	inet_twsk_purge(&tcp_hashinfo, AF_INET);

	list_for_each_entry(net, net_exit_list, exit_list)
		tcp_fastopen_ctx_destroy(net);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
       .init	   = tcp_sk_init,
       .exit	   = tcp_sk_exit,
       .exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}