/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);
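
/*
 * sysctl_tcp_tw_reuse allows a new outgoing connection to take over a
 * TIME-WAIT port pair when timestamps prove it safe (see
 * tcp_twsk_unique() below); sysctl_tcp_low_latency makes the receive
 * path skip the prequeue and deliver segments directly.
 */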
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
                                                   __be32 addr);
static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
                               __be32 daddr, __be32 saddr, struct tcphdr *th);
#else
static inline
struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
        return NULL;
}
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
{
        return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
                                          ip_hdr(skb)->saddr,
                                          tcp_hdr(skb)->dest,
                                          tcp_hdr(skb)->source);
}
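
/*
 * tcp_twsk_unique() decides whether a TIME-WAIT socket may be reused
 * for a new outgoing connection to the same port pair.  When it
 * returns 1, the caller inherits the old timestamp state and an
 * initial sequence number placed just past the old send space, so
 * stray segments from the previous incarnation cannot be accepted.
 */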
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
        const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
        struct tcp_sock *tp = tcp_sk(sk);

        /* With PAWS, it is safe from the viewpoint
           of data integrity. Even without PAWS it is safe provided sequence
           spaces do not overlap i.e. at data rates <= 80Mbit/sec.

           Actually, the idea is close to VJ's one, only the timestamp cache
           is held not per host, but per port pair, and the TW bucket is used
           as state holder.

           If the TW bucket has already been destroyed we fall back to VJ's
           scheme and use the initial timestamp retrieved from the peer table.
         */
        if (tcptw->tw_ts_recent_stamp &&
            (twp == NULL || (sysctl_tcp_tw_reuse &&
                             get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
                tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
                if (tp->write_seq == 0)
                        tp->write_seq = 1;
                tp->rx_opt.ts_recent       = tcptw->tw_ts_recent;
                tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
                sock_hold(sktw);
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
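
/*
 * tcp_v4_connect() resolves the route, selects a source address and
 * port, chooses the initial sequence number and finally transmits the
 * SYN via tcp_connect().  Note the timestamp state reset when the
 * destination changes: ts_recent must never be carried over between
 * different peers, or PAWS would reject valid segments.
 */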
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
        struct inet_sock *inet = inet_sk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
        struct rtable *rt;
        __be32 daddr, nexthop;
        int tmp;
        int err;

        if (addr_len < sizeof(struct sockaddr_in))
                return -EINVAL;

        if (usin->sin_family != AF_INET)
                return -EAFNOSUPPORT;

        nexthop = daddr = usin->sin_addr.s_addr;
        if (inet->opt && inet->opt->srr) {
                if (!daddr)
                        return -EINVAL;
                nexthop = inet->opt->faddr;
        }

        tmp = ip_route_connect(&rt, nexthop, inet->inet_saddr,
                               RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
                               IPPROTO_TCP,
                               inet->inet_sport, usin->sin_port, sk, 1);
        if (tmp < 0) {
                if (tmp == -ENETUNREACH)
                        IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
                return tmp;
        }

        if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
                ip_rt_put(rt);
                return -ENETUNREACH;
        }

        if (!inet->opt || !inet->opt->srr)
                daddr = rt->rt_dst;

        if (!inet->inet_saddr)
                inet->inet_saddr = rt->rt_src;
        inet->inet_rcv_saddr = inet->inet_saddr;

        if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
                /* Reset inherited state */
                tp->rx_opt.ts_recent       = 0;
                tp->rx_opt.ts_recent_stamp = 0;
                tp->write_seq              = 0;
        }

        if (tcp_death_row.sysctl_tw_recycle &&
            !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
                struct inet_peer *peer = rt_get_peer(rt);
                /*
                 * VJ's idea. We save the last timestamp seen from
                 * the destination in the peer table, when entering state
                 * TIME-WAIT, and initialize rx_opt.ts_recent from it,
                 * when trying a new connection.
                 */
                if (peer) {
                        inet_peer_refcheck(peer);
                        if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
                                tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
                                tp->rx_opt.ts_recent = peer->tcp_ts;
                        }
                }
        }

        inet->inet_dport = usin->sin_port;
        inet->inet_daddr = daddr;

        inet_csk(sk)->icsk_ext_hdr_len = 0;
        if (inet->opt)
                inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;

        tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

        /* Socket identity is still unknown (sport may be zero).
         * However, we set the state to SYN-SENT and, without releasing the
         * socket lock, select a source port, enter ourselves into the hash
         * tables and complete initialization after this.
         */
        tcp_set_state(sk, TCP_SYN_SENT);
        err = inet_hash_connect(&tcp_death_row, sk);
        if (err)
                goto failure;

        err = ip_route_newports(&rt, IPPROTO_TCP,
                                inet->inet_sport, inet->inet_dport, sk);
        if (err)
                goto failure;

        /* OK, now commit destination to socket.  */
        sk->sk_gso_type = SKB_GSO_TCPV4;
        sk_setup_caps(sk, &rt->dst);

        if (!tp->write_seq)
                tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
                                                           inet->inet_daddr,
                                                           inet->inet_sport,
                                                           usin->sin_port);

        inet->inet_id = tp->write_seq ^ jiffies;

        err = tcp_connect(sk);
        rt = NULL;
        if (err)
                goto failure;

        return 0;

failure:
        /*
         * This unhashes the socket and releases the local port,
         * if necessary.
         */
        tcp_set_state(sk, TCP_CLOSE);
        ip_rt_put(rt);
        sk->sk_route_caps = 0;
        inet->inet_dport = 0;
        return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
{
        struct dst_entry *dst;
        struct inet_sock *inet = inet_sk(sk);

        /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
         * sent out by Linux are always <576 bytes, so they should go through
         * unfragmented).
         */
        if (sk->sk_state == TCP_LISTEN)
                return;

        /* We don't check in the destentry if pmtu discovery is forbidden
         * on this route. We just assume that no packet-too-big packets
         * are sent back when pmtu discovery is not active.
         * There is a small race when the user changes this flag in the
         * route, but I think that's acceptable.
         */
        if ((dst = __sk_dst_check(sk, 0)) == NULL)
                return;

        dst->ops->update_pmtu(dst, mtu);

        /* Something is about to be wrong... Remember the soft error
         * for the case that this connection will not be able to recover.
         */
        if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
                sk->sk_err_soft = EMSGSIZE;

        mtu = dst_mtu(dst);

        if (inet->pmtudisc != IP_PMTUDISC_DONT &&
            inet_csk(sk)->icsk_pmtu_cookie > mtu) {
                tcp_sync_mss(sk, mtu);

                /* Resend the TCP packet because it's
                 * clear that the old packet has been
                 * dropped. This is the new "fast" path mtu
                 * discovery.
                 */
                tcp_simple_retransmit(sk);
        } /* else let the usual retransmit timer handle it */
}
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
        struct iphdr *iph = (struct iphdr *)icmp_skb->data;
        struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
        struct inet_connection_sock *icsk;
        struct tcp_sock *tp;
        struct inet_sock *inet;
        const int type = icmp_hdr(icmp_skb)->type;
        const int code = icmp_hdr(icmp_skb)->code;
        struct sock *sk;
        struct sk_buff *skb;
        __u32 seq;
        __u32 remaining;
        int err;
        struct net *net = dev_net(icmp_skb->dev);

        if (icmp_skb->len < (iph->ihl << 2) + 8) {
                ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
                return;
        }

        sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
                         iph->saddr, th->source, inet_iif(icmp_skb));
        if (!sk) {
                ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
                return;
        }
        if (sk->sk_state == TCP_TIME_WAIT) {
                inet_twsk_put(inet_twsk(sk));
                return;
        }

        bh_lock_sock(sk);
        /* If too many ICMPs get dropped on busy
         * servers this needs to be solved differently.
         */
        if (sock_owned_by_user(sk))
                NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

        if (sk->sk_state == TCP_CLOSE)
                goto out;

        if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
                NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
                goto out;
        }

        icsk = inet_csk(sk);
        tp = tcp_sk(sk);
        seq = ntohl(th->seq);
        if (sk->sk_state != TCP_LISTEN &&
            !between(seq, tp->snd_una, tp->snd_nxt)) {
                NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }

        switch (type) {
        case ICMP_SOURCE_QUENCH:
                /* Just silently ignore these. */
                goto out;
        case ICMP_PARAMETERPROB:
                err = EPROTO;
                break;
        case ICMP_DEST_UNREACH:
                if (code > NR_ICMP_UNREACH)
                        goto out;

                if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
                        if (!sock_owned_by_user(sk))
                                do_pmtu_discovery(sk, iph, info);
                        goto out;
                }

                err = icmp_err_convert[code].errno;
                /* check if icmp_skb allows revert of backoff
                 * (see draft-zimmermann-tcp-lcd) */
                if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
                        break;
                if (seq != tp->snd_una || !icsk->icsk_retransmits ||
                    !icsk->icsk_backoff)
                        break;

                if (sock_owned_by_user(sk))
                        break;

                icsk->icsk_backoff--;
                inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) <<
                                         icsk->icsk_backoff;

                skb = tcp_write_queue_head(sk);
                BUG_ON(!skb);

                remaining = icsk->icsk_rto - min(icsk->icsk_rto,
                                tcp_time_stamp - TCP_SKB_CB(skb)->when);

                if (remaining) {
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                                  remaining, TCP_RTO_MAX);
                } else {
                        /* RTO revert clocked out retransmission.
                         * Will retransmit now */
                        tcp_retransmit_timer(sk);
                }

                break;
        case ICMP_TIME_EXCEEDED:
                err = EHOSTUNREACH;
                break;
        default:
                goto out;
        }

        switch (sk->sk_state) {
                struct request_sock *req, **prev;
        case TCP_LISTEN:
                if (sock_owned_by_user(sk))
                        goto out;

                req = inet_csk_search_req(sk, &prev, th->dest,
                                          iph->daddr, iph->saddr);
                if (!req)
                        goto out;

                /* ICMPs are not backlogged, hence we cannot get
                   an established socket here.
                 */
                WARN_ON(req->sk);

                if (seq != tcp_rsk(req)->snt_isn) {
                        NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
                        goto out;
                }

                /*
                 * Still in SYN_RECV, just remove it silently.
                 * There is no good way to pass the error to the newly
                 * created socket, and POSIX does not want network
                 * errors returned from accept().
                 */
                inet_csk_reqsk_queue_drop(sk, req, prev);
                goto out;

        case TCP_SYN_SENT:
        case TCP_SYN_RECV:  /* Cannot happen.
                               It can f.e. if SYNs crossed.
                             */
                if (!sock_owned_by_user(sk)) {
                        sk->sk_err = err;

                        sk->sk_error_report(sk);

                        tcp_done(sk);
                } else {
                        sk->sk_err_soft = err;
                }
                goto out;
        }

        /* If we've already connected we will keep trying
         * until we time out, or the user gives up.
         *
         * rfc1122 4.2.3.9 allows us to consider as hard errors
         * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
         * but it is obsoleted by pmtu discovery).
         *
         * Note that in the modern internet, where routing is unreliable
         * and in each dark corner broken firewalls sit, sending random
         * errors ordered by their masters, even these two messages finally
         * lose their original sense (even Linux sends invalid PORT_UNREACHs).
         *
         * Now we are in compliance with RFCs.
         */

        inet = inet_sk(sk);
        if (!sock_owned_by_user(sk) && inet->recverr) {
                sk->sk_err = err;
                sk->sk_error_report(sk);
        } else  { /* Only an error on timeout */
                sk->sk_err_soft = err;
        }

out:
        bh_unlock_sock(sk);
        sock_put(sk);
}
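
/*
 * __tcp_v4_send_check() fills in the TCP checksum field.  With
 * CHECKSUM_PARTIAL only the pseudo-header sum is stored and
 * csum_start/csum_offset tell the NIC (or software fallback) where to
 * finish the job; otherwise the complete checksum is computed here.
 */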
static void __tcp_v4_send_check(struct sk_buff *skb,
                                __be32 saddr, __be32 daddr)
{
        struct tcphdr *th = tcp_hdr(skb);

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
                skb->csum_start = skb_transport_header(skb) - skb->head;
                skb->csum_offset = offsetof(struct tcphdr, check);
        } else {
                th->check = tcp_v4_check(skb->len, saddr, daddr,
                                         csum_partial(th,
                                                      th->doff << 2,
                                                      skb->csum));
        }
}
/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
        struct inet_sock *inet = inet_sk(sk);

        __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
int tcp_v4_gso_send_check(struct sk_buff *skb)
{
        const struct iphdr *iph;
        struct tcphdr *th;

        if (!pskb_may_pull(skb, sizeof(*th)))
                return -EINVAL;

        iph = ip_hdr(skb);
        th = tcp_hdr(skb);

        th->check = 0;
        skb->ip_summed = CHECKSUM_PARTIAL;
        __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
        return 0;
}
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused an RST, it is not for a socket
 *		existing in our system; if it is matched to a socket,
 *		it is just a duplicate segment or a bug in the other side's
 *		TCP. So we build the reply based only on the parameters that
 *		arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
        struct tcphdr *th = tcp_hdr(skb);
        struct {
                struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
                __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
        } rep;
        struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key *key;
#endif
        struct net *net;

        /* Never send a reset in response to a reset. */
        if (th->rst)
                return;

        if (skb_rtable(skb)->rt_type != RTN_LOCAL)
                return;

        /* Swap the send and the receive. */
        memset(&rep, 0, sizeof(rep));
        rep.th.dest   = th->source;
        rep.th.source = th->dest;
        rep.th.doff   = sizeof(struct tcphdr) / 4;
        rep.th.rst    = 1;

        if (th->ack) {
                rep.th.seq = th->ack_seq;
        } else {
                rep.th.ack = 1;
                rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
                                       skb->len - (th->doff << 2));
        }

        memset(&arg, 0, sizeof(arg));
        arg.iov[0].iov_base = (unsigned char *)&rep;
        arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
        key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
        if (key) {
                rep.opt[0] = htonl((TCPOPT_NOP << 24) |
                                   (TCPOPT_NOP << 16) |
                                   (TCPOPT_MD5SIG << 8) |
                                   TCPOLEN_MD5SIG);
                /* Update length and the length the header thinks exists */
                arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
                rep.th.doff = arg.iov[0].iov_len / 4;

                tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
                                    key, ip_hdr(skb)->saddr,
                                    ip_hdr(skb)->daddr, &rep.th);
        }
#endif
        arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
                                      ip_hdr(skb)->saddr, /* XXX */
                                      arg.iov[0].iov_len, IPPROTO_TCP, 0);
        arg.csumoffset = offsetof(struct tcphdr, check) / 2;
        arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;

        net = dev_net(skb_dst(skb)->dev);
        ip_send_reply(net->ipv4.tcp_sock, skb,
                      &arg, arg.iov[0].iov_len);

        TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
        TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
}
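
/*
 * tcp_v4_send_ack() is the ACK counterpart of tcp_v4_send_reset(): it
 * assembles a bare ACK (optionally carrying timestamp and MD5 options)
 * on the stack and sends it as an IP reply, without ever owning a full
 * socket.  The TIME-WAIT and SYN-RECV paths below use it.
 */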
/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
                            u32 win, u32 ts, int oif,
                            struct tcp_md5sig_key *key,
                            int reply_flags)
{
        struct tcphdr *th = tcp_hdr(skb);
        struct {
                struct tcphdr th;
                __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
                           + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
                        ];
        } rep;
        struct ip_reply_arg arg;
        struct net *net = dev_net(skb_dst(skb)->dev);

        memset(&rep.th, 0, sizeof(struct tcphdr));
        memset(&arg, 0, sizeof(arg));

        arg.iov[0].iov_base = (unsigned char *)&rep;
        arg.iov[0].iov_len  = sizeof(rep.th);
        if (ts) {
                rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                                   (TCPOPT_TIMESTAMP << 8) |
                                   TCPOLEN_TIMESTAMP);
                rep.opt[1] = htonl(tcp_time_stamp);
                rep.opt[2] = htonl(ts);
                arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
        }

        /* Swap the send and the receive. */
        rep.th.dest    = th->source;
        rep.th.source  = th->dest;
        rep.th.doff    = arg.iov[0].iov_len / 4;
        rep.th.seq     = htonl(seq);
        rep.th.ack_seq = htonl(ack);
        rep.th.ack     = 1;
        rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
        if (key) {
                int offset = (ts) ? 3 : 0;

                rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
                                          (TCPOPT_NOP << 16) |
                                          (TCPOPT_MD5SIG << 8) |
                                          TCPOLEN_MD5SIG);
                arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
                rep.th.doff = arg.iov[0].iov_len/4;

                tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
                                    key, ip_hdr(skb)->saddr,
                                    ip_hdr(skb)->daddr, &rep.th);
        }
#endif
        arg.flags = reply_flags;
        arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
                                      ip_hdr(skb)->saddr, /* XXX */
                                      arg.iov[0].iov_len, IPPROTO_TCP, 0);
        arg.csumoffset = offsetof(struct tcphdr, check) / 2;
        if (oif)
                arg.bound_dev_if = oif;

        ip_send_reply(net->ipv4.tcp_sock, skb,
                      &arg, arg.iov[0].iov_len);

        TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
        struct inet_timewait_sock *tw = inet_twsk(sk);
        struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

        tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
                        tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
                        tcptw->tw_ts_recent,
                        tw->tw_bound_dev_if,
                        tcp_twsk_md5_key(tcptw),
                        tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0
                        );

        inet_twsk_put(tw);
}
static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req)
{
        tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
                        tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
                        req->ts_recent,
                        0,
                        tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr),
                        inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
                              struct request_sock *req,
                              struct request_values *rvp)
{
        const struct inet_request_sock *ireq = inet_rsk(req);
        int err = -1;
        struct sk_buff *skb;

        /* First, grab a route. */
        if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
                return -1;

        skb = tcp_make_synack(sk, dst, req, rvp);

        if (skb) {
                __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);

                err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
                                            ireq->rmt_addr,
                                            ireq->opt);
                err = net_xmit_eval(err);
        }

        dst_release(dst);
        return err;
}

static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
                             struct request_values *rvp)
{
        TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
        return tcp_v4_send_synack(sk, NULL, req, rvp);
}
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
        kfree(inet_rsk(req)->opt);
}
static void syn_flood_warning(const struct sk_buff *skb)
{
        const char *msg;

#ifdef CONFIG_SYN_COOKIES
        if (sysctl_tcp_syncookies)
                msg = "Sending cookies";
        else
#endif
                msg = "Dropping request";

        pr_info("TCP: Possible SYN flooding on port %d. %s.\n",
                ntohs(tcp_hdr(skb)->dest), msg);
}
/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options *tcp_v4_save_options(struct sock *sk,
                                              struct sk_buff *skb)
{
        struct ip_options *opt = &(IPCB(skb)->opt);
        struct ip_options *dopt = NULL;

        if (opt && opt->optlen) {
                int opt_size = optlength(opt);
                dopt = kmalloc(opt_size, GFP_ATOMIC);
                if (dopt) {
                        if (ip_options_echo(dopt, skb)) {
                                kfree(dopt);
                                dopt = NULL;
                        }
                }
        }
        return dopt;
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
static struct tcp_md5sig_key *
                        tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int i;

        if (!tp->md5sig_info || !tp->md5sig_info->entries4)
                return NULL;
        for (i = 0; i < tp->md5sig_info->entries4; i++) {
                if (tp->md5sig_info->keys4[i].addr == addr)
                        return &tp->md5sig_info->keys4[i].base;
        }
        return NULL;
}

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
                                         struct sock *addr_sk)
{
        return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);
static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
                                                      struct request_sock *req)
{
        return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
}
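
/*
 * The per-socket MD5 keys are kept in a flat array that grows one
 * entry at a time and is searched linearly.  That is acceptable here:
 * RFC 2385 deployments (typically BGP) configure only a handful of
 * peers per socket.
 */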
/* This can be called on a newly created socket, from other files */
int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
                      u8 *newkey, u8 newkeylen)
{
        /* Add Key to the list */
        struct tcp_md5sig_key *key;
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp4_md5sig_key *keys;

        key = tcp_v4_md5_do_lookup(sk, addr);
        if (key) {
                /* Pre-existing entry - just update that one. */
                kfree(key->key);
                key->key = newkey;
                key->keylen = newkeylen;
        } else {
                struct tcp_md5sig_info *md5sig;

                if (!tp->md5sig_info) {
                        tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
                                                  GFP_ATOMIC);
                        if (!tp->md5sig_info) {
                                kfree(newkey);
                                return -ENOMEM;
                        }
                        sk_nocaps_add(sk, NETIF_F_GSO_MASK);
                }
                if (tcp_alloc_md5sig_pool(sk) == NULL) {
                        kfree(newkey);
                        return -ENOMEM;
                }
                md5sig = tp->md5sig_info;

                if (md5sig->alloced4 == md5sig->entries4) {
                        keys = kmalloc((sizeof(*keys) *
                                        (md5sig->entries4 + 1)), GFP_ATOMIC);
                        if (!keys) {
                                kfree(newkey);
                                tcp_free_md5sig_pool();
                                return -ENOMEM;
                        }

                        if (md5sig->entries4)
                                memcpy(keys, md5sig->keys4,
                                       sizeof(*keys) * md5sig->entries4);

                        /* Free old key list, and reference new one */
                        kfree(md5sig->keys4);
                        md5sig->keys4 = keys;
                        md5sig->alloced4++;
                }
                md5sig->entries4++;
                md5sig->keys4[md5sig->entries4 - 1].addr        = addr;
                md5sig->keys4[md5sig->entries4 - 1].base.key    = newkey;
                md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
        }
        return 0;
}
EXPORT_SYMBOL(tcp_v4_md5_do_add);
static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
                               u8 *newkey, u8 newkeylen)
{
        return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->inet_daddr,
                                 newkey, newkeylen);
}
int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int i;

        for (i = 0; i < tp->md5sig_info->entries4; i++) {
                if (tp->md5sig_info->keys4[i].addr == addr) {
                        /* Free the key */
                        kfree(tp->md5sig_info->keys4[i].base.key);
                        tp->md5sig_info->entries4--;

                        if (tp->md5sig_info->entries4 == 0) {
                                kfree(tp->md5sig_info->keys4);
                                tp->md5sig_info->keys4 = NULL;
                                tp->md5sig_info->alloced4 = 0;
                        } else if (tp->md5sig_info->entries4 != i) {
                                /* Need to do some manipulation */
                                memmove(&tp->md5sig_info->keys4[i],
                                        &tp->md5sig_info->keys4[i+1],
                                        (tp->md5sig_info->entries4 - i) *
                                         sizeof(struct tcp4_md5sig_key));
                        }
                        tcp_free_md5sig_pool();
                        return 0;
                }
        }
        return -ENOENT;
}
EXPORT_SYMBOL(tcp_v4_md5_do_del);
static void tcp_v4_clear_md5_list(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);

        /* Free each key, then the set of keys,
         * the crypto element, and then decrement our
         * hold on the last resort crypto.
         */
        if (tp->md5sig_info->entries4) {
                int i;
                for (i = 0; i < tp->md5sig_info->entries4; i++)
                        kfree(tp->md5sig_info->keys4[i].base.key);
                tp->md5sig_info->entries4 = 0;
                tcp_free_md5sig_pool();
        }
        if (tp->md5sig_info->keys4) {
                kfree(tp->md5sig_info->keys4);
                tp->md5sig_info->keys4 = NULL;
                tp->md5sig_info->alloced4  = 0;
        }
}
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
                                 int optlen)
{
        struct tcp_md5sig cmd;
        struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
        u8 *newkey;

        if (optlen < sizeof(cmd))
                return -EINVAL;

        if (copy_from_user(&cmd, optval, sizeof(cmd)))
                return -EFAULT;

        if (sin->sin_family != AF_INET)
                return -EINVAL;

        if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
                if (!tcp_sk(sk)->md5sig_info)
                        return -ENOENT;
                return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
        }

        if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
                return -EINVAL;

        if (!tcp_sk(sk)->md5sig_info) {
                struct tcp_sock *tp = tcp_sk(sk);
                struct tcp_md5sig_info *p;

                p = kzalloc(sizeof(*p), sk->sk_allocation);
                if (!p)
                        return -EINVAL;

                tp->md5sig_info = p;
                sk_nocaps_add(sk, NETIF_F_GSO_MASK);
        }

        newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation);
        if (!newkey)
                return -ENOMEM;
        return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
                                 newkey, cmd.tcpm_keylen);
}
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
                                        __be32 daddr, __be32 saddr, int nbytes)
{
        struct tcp4_pseudohdr *bp;
        struct scatterlist sg;

        bp = &hp->md5_blk.ip4;

        /*
         * 1. the TCP pseudo-header (in the order: source IP address,
         * destination IP address, zero-padded protocol number, and
         * segment length)
         */
        bp->saddr = saddr;
        bp->daddr = daddr;
        bp->pad = 0;
        bp->protocol = IPPROTO_TCP;
        bp->len = cpu_to_be16(nbytes);

        sg_init_one(&sg, bp, sizeof(*bp));
        return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
                               __be32 daddr, __be32 saddr, struct tcphdr *th)
{
        struct tcp_md5sig_pool *hp;
        struct hash_desc *desc;

        hp = tcp_get_md5sig_pool();
        if (!hp)
                goto clear_hash_noput;
        desc = &hp->md5_desc;

        if (crypto_hash_init(desc))
                goto clear_hash;
        if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
                goto clear_hash;
        if (tcp_md5_hash_header(hp, th))
                goto clear_hash;
        if (tcp_md5_hash_key(hp, key))
                goto clear_hash;
        if (crypto_hash_final(desc, md5_hash))
                goto clear_hash;

        tcp_put_md5sig_pool();
        return 0;

clear_hash:
        tcp_put_md5sig_pool();
clear_hash_noput:
        memset(md5_hash, 0, 16);
        return 1;
}
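
/*
 * tcp_v4_md5_hash_skb() hashes the full segment in RFC 2385 order:
 * pseudo-header, TCP header with the checksum field zeroed, payload,
 * and finally the key itself.  The addresses come from the socket,
 * the request or the packet, whichever is available.
 */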
int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
                        struct sock *sk, struct request_sock *req,
                        struct sk_buff *skb)
{
        struct tcp_md5sig_pool *hp;
        struct hash_desc *desc;
        struct tcphdr *th = tcp_hdr(skb);
        __be32 saddr, daddr;

        if (sk) {
                saddr = inet_sk(sk)->inet_saddr;
                daddr = inet_sk(sk)->inet_daddr;
        } else if (req) {
                saddr = inet_rsk(req)->loc_addr;
                daddr = inet_rsk(req)->rmt_addr;
        } else {
                const struct iphdr *iph = ip_hdr(skb);
                saddr = iph->saddr;
                daddr = iph->daddr;
        }

        hp = tcp_get_md5sig_pool();
        if (!hp)
                goto clear_hash_noput;
        desc = &hp->md5_desc;

        if (crypto_hash_init(desc))
                goto clear_hash;

        if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
                goto clear_hash;
        if (tcp_md5_hash_header(hp, th))
                goto clear_hash;
        if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
                goto clear_hash;
        if (tcp_md5_hash_key(hp, key))
                goto clear_hash;
        if (crypto_hash_final(desc, md5_hash))
                goto clear_hash;

        tcp_put_md5sig_pool();
        return 0;

clear_hash:
        tcp_put_md5sig_pool();
clear_hash_noput:
        memset(md5_hash, 0, 16);
        return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
{
        /*
         * This gets called for each TCP segment that arrives
         * so we want to be efficient.
         * We have 3 drop cases:
         * o No MD5 hash and one expected.
         * o MD5 hash and we're not expecting one.
         * o MD5 hash and it's wrong.
         */
        __u8 *hash_location = NULL;
        struct tcp_md5sig_key *hash_expected;
        const struct iphdr *iph = ip_hdr(skb);
        struct tcphdr *th = tcp_hdr(skb);
        int genhash;
        unsigned char newhash[16];

        hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
        hash_location = tcp_parse_md5sig_option(th);

        /* We've parsed the options - do we have a hash? */
        if (!hash_expected && !hash_location)
                return 0;

        if (hash_expected && !hash_location) {
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
                return 1;
        }

        if (!hash_expected && hash_location) {
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
                return 1;
        }

        /* Okay, so this is hash_expected and hash_location -
         * so we need to calculate the checksum.
         */
        genhash = tcp_v4_md5_hash_skb(newhash,
                                      hash_expected,
                                      NULL, NULL, skb);

        if (genhash || memcmp(hash_location, newhash, 16) != 0) {
                if (net_ratelimit()) {
                        printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
                               &iph->saddr, ntohs(th->source),
                               &iph->daddr, ntohs(th->dest),
                               genhash ? " tcp_v4_calc_md5_hash failed" : "");
                }
                return 1;
        }
        return 0;
}

#endif
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
        .family         =       PF_INET,
        .obj_size       =       sizeof(struct tcp_request_sock),
        .rtx_syn_ack    =       tcp_v4_rtx_synack,
        .send_ack       =       tcp_v4_reqsk_send_ack,
        .destructor     =       tcp_v4_reqsk_destructor,
        .send_reset     =       tcp_v4_send_reset,
        .syn_ack_timeout =      tcp_syn_ack_timeout,
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
        .md5_lookup     =       tcp_v4_reqsk_md5_lookup,
        .calc_md5_hash  =       tcp_v4_md5_hash_skb,
};
#endif
static struct timewait_sock_ops tcp_timewait_sock_ops = {
        .twsk_obj_size  = sizeof(struct tcp_timewait_sock),
        .twsk_unique    = tcp_twsk_unique,
        .twsk_destructor = tcp_twsk_destructor,
};
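
/*
 * tcp_v4_conn_request() handles an incoming SYN on a listening
 * socket: it allocates a request_sock, parses the options (including
 * TCP cookie transactions), applies the SYN-cookie and tw_recycle
 * PAWS checks, and answers with a SYN-ACK.  No full socket exists
 * until the final ACK of the handshake arrives.
 */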
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_extend_values tmp_ext;
        struct tcp_options_received tmp_opt;
        u8 *hash_location;
        struct request_sock *req;
        struct inet_request_sock *ireq;
        struct tcp_sock *tp = tcp_sk(sk);
        struct dst_entry *dst = NULL;
        __be32 saddr = ip_hdr(skb)->saddr;
        __be32 daddr = ip_hdr(skb)->daddr;
        __u32 isn = TCP_SKB_CB(skb)->when;
#ifdef CONFIG_SYN_COOKIES
        int want_cookie = 0;
#else
#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
#endif

        /* Never answer to SYNs sent to broadcast or multicast */
        if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
                goto drop;

        /* TW buckets are converted to open requests without
         * limitations, they conserve resources and the peer is
         * evidently a real one.
         */
        if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
                if (net_ratelimit())
                        syn_flood_warning(skb);
#ifdef CONFIG_SYN_COOKIES
                if (sysctl_tcp_syncookies) {
                        want_cookie = 1;
                } else
#endif
                goto drop;
        }

        /* Accept backlog is full. If we have already queued enough
         * of warm entries in syn queue, drop request. It is better than
         * clogging syn queue with openreqs with exponentially increasing
         * timeout.
         */
        if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
                goto drop;

        req = inet_reqsk_alloc(&tcp_request_sock_ops);
        if (!req)
                goto drop;

#ifdef CONFIG_TCP_MD5SIG
        tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

        tcp_clear_options(&tmp_opt);
        tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
        tmp_opt.user_mss  = tp->rx_opt.user_mss;
        tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

        if (tmp_opt.cookie_plus > 0 &&
            tmp_opt.saw_tstamp &&
            !tp->rx_opt.cookie_out_never &&
            (sysctl_tcp_cookie_size > 0 ||
             (tp->cookie_values != NULL &&
              tp->cookie_values->cookie_desired > 0))) {
                u8 *c;
                u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
                int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

                if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
                        goto drop_and_release;

                /* Secret recipe starts with IP addresses */
                *mess++ ^= (__force u32)daddr;
                *mess++ ^= (__force u32)saddr;

                /* plus variable length Initiator Cookie */
                c = (u8 *)mess;
                while (l-- > 0)
                        *c++ ^= *hash_location++;

#ifdef CONFIG_SYN_COOKIES
                want_cookie = 0;        /* not our kind of cookie */
#endif
                tmp_ext.cookie_out_never = 0; /* false */
                tmp_ext.cookie_plus = tmp_opt.cookie_plus;
        } else if (!tp->rx_opt.cookie_in_always) {
                /* redundant indications, but ensure initialization. */
                tmp_ext.cookie_out_never = 1; /* true */
                tmp_ext.cookie_plus = 0;
        } else {
                goto drop_and_release;
        }
        tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

        if (want_cookie && !tmp_opt.saw_tstamp)
                tcp_clear_options(&tmp_opt);

        tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
        tcp_openreq_init(req, &tmp_opt, skb);

        ireq = inet_rsk(req);
        ireq->loc_addr = daddr;
        ireq->rmt_addr = saddr;
        ireq->no_srccheck = inet_sk(sk)->transparent;
        ireq->opt = tcp_v4_save_options(sk, skb);

        if (security_inet_conn_request(sk, skb, req))
                goto drop_and_free;

        if (!want_cookie || tmp_opt.tstamp_ok)
                TCP_ECN_create_request(req, tcp_hdr(skb));

        if (want_cookie) {
                isn = cookie_v4_init_sequence(sk, skb, &req->mss);
                req->cookie_ts = tmp_opt.tstamp_ok;
        } else if (!isn) {
                struct inet_peer *peer = NULL;

                /* VJ's idea. We save last timestamp seen
                 * from the destination in peer table, when entering
                 * state TIME-WAIT, and check against it before
                 * accepting new connection request.
                 *
                 * If "isn" is not zero, this request hit alive
                 * timewait bucket, so that all the necessary checks
                 * are made in the function processing timewait state.
                 */
                if (tmp_opt.saw_tstamp &&
                    tcp_death_row.sysctl_tw_recycle &&
                    (dst = inet_csk_route_req(sk, req)) != NULL &&
                    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
                    peer->daddr.a4 == saddr) {
                        inet_peer_refcheck(peer);
                        if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
                            (s32)(peer->tcp_ts - req->ts_recent) >
                                                        TCP_PAWS_WINDOW) {
                                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
                                goto drop_and_release;
                        }
                }
                /* Kill the following clause, if you dislike this way. */
                else if (!sysctl_tcp_syncookies &&
                         (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
                          (sysctl_max_syn_backlog >> 2)) &&
                         (!peer || !peer->tcp_ts_stamp) &&
                         (!dst || !dst_metric(dst, RTAX_RTT))) {
                        /* Without syncookies last quarter of
                         * backlog is filled with destinations,
                         * proven to be alive.
                         * It means that we continue to communicate
                         * to destinations, already remembered
                         * to the moment of synflood.
                         */
                        LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n",
                                       &saddr, ntohs(tcp_hdr(skb)->source));
                        goto drop_and_release;
                }

                isn = tcp_v4_init_sequence(skb);
        }
        tcp_rsk(req)->snt_isn = isn;

        if (tcp_v4_send_synack(sk, dst, req,
                               (struct request_values *)&tmp_ext) ||
            want_cookie)
                goto drop_and_free;

        inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
        return 0;

drop_and_release:
        dst_release(dst);
drop_and_free:
        reqsk_free(req);
drop:
        return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req,
                                  struct dst_entry *dst)
{
        struct inet_request_sock *ireq;
        struct inet_sock *newinet;
        struct tcp_sock *newtp;
        struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key *key;
#endif

        if (sk_acceptq_is_full(sk))
                goto exit_overflow;

        if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
                goto exit;

        newsk = tcp_create_openreq_child(sk, req, skb);
        if (!newsk)
                goto exit_nonewsk;

        newsk->sk_gso_type = SKB_GSO_TCPV4;
        sk_setup_caps(newsk, dst);

        newtp                 = tcp_sk(newsk);
        newinet               = inet_sk(newsk);
        ireq                  = inet_rsk(req);
        newinet->inet_daddr   = ireq->rmt_addr;
        newinet->inet_rcv_saddr = ireq->loc_addr;
        newinet->inet_saddr   = ireq->loc_addr;
        newinet->opt          = ireq->opt;
        ireq->opt             = NULL;
        newinet->mc_index     = inet_iif(skb);
        newinet->mc_ttl       = ip_hdr(skb)->ttl;
        inet_csk(newsk)->icsk_ext_hdr_len = 0;
        if (newinet->opt)
                inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
        newinet->inet_id = newtp->write_seq ^ jiffies;

        tcp_mtup_init(newsk);
        tcp_sync_mss(newsk, dst_mtu(dst));
        newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
        if (tcp_sk(sk)->rx_opt.user_mss &&
            tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
                newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

        tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
        /* Copy over the MD5 key from the original socket */
        key = tcp_v4_md5_do_lookup(sk, newinet->inet_daddr);
        if (key != NULL) {
                /*
                 * We're using one, so create a matching key
                 * on the newsk structure. If we fail to get
                 * memory, then we end up not copying the key
                 * across. Shucks.
                 */
                char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
                if (newkey != NULL)
                        tcp_v4_md5_do_add(newsk, newinet->inet_daddr,
                                          newkey, key->keylen);
                sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
        }
#endif

        if (__inet_inherit_port(sk, newsk) < 0) {
                sock_put(newsk);
                goto exit;
        }
        __inet_hash_nolisten(newsk, NULL);

        return newsk;

exit_overflow:
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
        dst_release(dst);
exit:
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return NULL;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
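
/*
 * tcp_v4_hnd_req() decides, for a segment arriving on a listening
 * socket, whether it completes a pending open request, belongs to an
 * established socket found by lookup, or should be validated against
 * SYN cookies.
 */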
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
        struct tcphdr *th = tcp_hdr(skb);
        const struct iphdr *iph = ip_hdr(skb);
        struct sock *nsk;
        struct request_sock **prev;
        /* Find possible connection requests. */
        struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
                                                       iph->saddr, iph->daddr);
        if (req)
                return tcp_check_req(sk, skb, req, prev);

        nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
                        th->source, iph->daddr, th->dest, inet_iif(skb));

        if (nsk) {
                if (nsk->sk_state != TCP_TIME_WAIT) {
                        bh_lock_sock(nsk);
                        return nsk;
                }
                inet_twsk_put(inet_twsk(nsk));
                return NULL;
        }

#ifdef CONFIG_SYN_COOKIES
        if (!th->syn)
                sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
        return sk;
}
static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
        const struct iphdr *iph = ip_hdr(skb);

        if (skb->ip_summed == CHECKSUM_COMPLETE) {
                if (!tcp_v4_check(skb->len, iph->saddr,
                                  iph->daddr, skb->csum)) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        return 0;
                }
        }

        skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
                                       skb->len, IPPROTO_TCP, 0);

        if (skb->len <= 76) {
                return __skb_checksum_complete(skb);
        }
        return 0;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
        struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
        /*
         * We really want to reject the packet as early as possible
         * if:
         *  o We're expecting an MD5'd packet and this is no MD5 tcp option
         *  o There is an MD5 option and we're not expecting one
         */
        if (tcp_v4_inbound_md5_hash(sk, skb))
                goto discard;
#endif

        if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
                sock_rps_save_rxhash(sk, skb->rxhash);
                TCP_CHECK_TIMER(sk);
                if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
                        rsk = sk;
                        goto reset;
                }
                TCP_CHECK_TIMER(sk);
                return 0;
        }

        if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
                goto csum_err;

        if (sk->sk_state == TCP_LISTEN) {
                struct sock *nsk = tcp_v4_hnd_req(sk, skb);
                if (!nsk)
                        goto discard;

                if (nsk != sk) {
                        if (tcp_child_process(sk, nsk, skb)) {
                                rsk = nsk;
                                goto reset;
                        }
                        return 0;
                }
        } else
                sock_rps_save_rxhash(sk, skb->rxhash);

        TCP_CHECK_TIMER(sk);
        if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
                rsk = sk;
                goto reset;
        }
        TCP_CHECK_TIMER(sk);
        return 0;

reset:
        tcp_v4_send_reset(rsk, skb);
discard:
        kfree_skb(skb);
        /* Be careful here. If this function gets more complicated and
         * gcc suffers from register pressure on the x86, sk (in %ebx)
         * might be destroyed here. This current version compiles correctly,
         * but you have been warned.
         */
        return 0;

csum_err:
        TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
        goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
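
/*
 * tcp_v4_rcv() is the protocol entry point called from the IP layer.
 * It validates the header and checksum, looks up the owning socket
 * and then processes the segment directly, prequeues it, or backlogs
 * it when the socket is currently owned by user context.
 */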
int tcp_v4_rcv(struct sk_buff *skb)
{
        const struct iphdr *iph;
        struct tcphdr *th;
        struct sock *sk;
        int ret;
        struct net *net = dev_net(skb->dev);

        if (skb->pkt_type != PACKET_HOST)
                goto discard_it;

        /* Count it even if it's bad */
        TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

        if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
                goto discard_it;

        th = tcp_hdr(skb);

        if (th->doff < sizeof(struct tcphdr) / 4)
                goto bad_packet;
        if (!pskb_may_pull(skb, th->doff * 4))
                goto discard_it;

        /* An explanation is required here, I think.
         * Packet length and doff are validated by header prediction,
         * provided the case of th->doff==0 is eliminated.
         * So, we defer the checks. */
        if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
                goto bad_packet;

        th = tcp_hdr(skb);
        iph = ip_hdr(skb);
        TCP_SKB_CB(skb)->seq = ntohl(th->seq);
        TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
                                    skb->len - th->doff * 4);
        TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
        TCP_SKB_CB(skb)->when    = 0;
        TCP_SKB_CB(skb)->flags   = iph->tos;
        TCP_SKB_CB(skb)->sacked  = 0;

        sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
        if (!sk)
                goto no_tcp_socket;

process:
        if (sk->sk_state == TCP_TIME_WAIT)
                goto do_time_wait;

        if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
                NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
                goto discard_and_relse;
        }

        if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_and_relse;
        nf_reset(skb);

        if (sk_filter(sk, skb))
                goto discard_and_relse;

        skb->dev = NULL;

        bh_lock_sock_nested(sk);
        ret = 0;
        if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
                struct tcp_sock *tp = tcp_sk(sk);
                if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
                        tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
                if (tp->ucopy.dma_chan)
                        ret = tcp_v4_do_rcv(sk, skb);
                else
#endif
                {
                        if (!tcp_prequeue(sk, skb))
                                ret = tcp_v4_do_rcv(sk, skb);
                }
        } else if (unlikely(sk_add_backlog(sk, skb))) {
                bh_unlock_sock(sk);
                NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
                goto discard_and_relse;
        }
        bh_unlock_sock(sk);

        sock_put(sk);

        return ret;

no_tcp_socket:
        if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto discard_it;

        if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
                TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
        } else {
                tcp_v4_send_reset(NULL, skb);
        }

discard_it:
        /* Discard frame. */
        kfree_skb(skb);
        return 0;

discard_and_relse:
        sock_put(sk);
        goto discard_it;

do_time_wait:
        if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
                inet_twsk_put(inet_twsk(sk));
                goto discard_it;
        }

        if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
                TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
                inet_twsk_put(inet_twsk(sk));
                goto discard_it;
        }
        switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
        case TCP_TW_SYN: {
                struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
                                                        &tcp_hashinfo,
                                                        iph->daddr, th->dest,
                                                        inet_iif(skb));
                if (sk2) {
                        inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
                        inet_twsk_put(inet_twsk(sk));
                        sk = sk2;
                        goto process;
                }
                /* Fall through to ACK */
        }
        case TCP_TW_ACK:
                tcp_v4_timewait_ack(sk, skb);
                break;
        case TCP_TW_RST:
                goto no_tcp_socket;
        case TCP_TW_SUCCESS:;
        }
        goto discard_it;
}
struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it)
{
        struct rtable *rt = (struct rtable *) __sk_dst_get(sk);
        struct inet_sock *inet = inet_sk(sk);
        struct inet_peer *peer;

        if (!rt || rt->rt_dst != inet->inet_daddr) {
                peer = inet_getpeer_v4(inet->inet_daddr, 1);
                *release_it = true;
        } else {
                if (!rt->peer)
                        rt_bind_peer(rt, 1);
                peer = rt->peer;
                *release_it = false;
        }

        return peer;
}
EXPORT_SYMBOL(tcp_v4_get_peer);
int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
{
        struct inet_peer *peer = inet_getpeer_v4(tw->tw_daddr, 1);

        if (peer) {
                const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

                if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
                    ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
                     peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
                        peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
                        peer->tcp_ts       = tcptw->tw_ts_recent;
                }
                inet_putpeer(peer);
                return 1;
        }

        return 0;
}
const struct inet_connection_sock_af_ops ipv4_specific = {
        .queue_xmit        = ip_queue_xmit,
        .send_check        = tcp_v4_send_check,
        .rebuild_header    = inet_sk_rebuild_header,
        .conn_request      = tcp_v4_conn_request,
        .syn_recv_sock     = tcp_v4_syn_recv_sock,
        .get_peer          = tcp_v4_get_peer,
        .net_header_len    = sizeof(struct iphdr),
        .setsockopt        = ip_setsockopt,
        .getsockopt        = ip_getsockopt,
        .addr2sockaddr     = inet_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in),
        .bind_conflict     = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ip_setsockopt,
        .compat_getsockopt = compat_ip_getsockopt,
#endif
};
EXPORT_SYMBOL(ipv4_specific);
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
        .md5_lookup     = tcp_v4_md5_lookup,
        .calc_md5_hash  = tcp_v4_md5_hash_skb,
        .md5_add        = tcp_v4_md5_add_func,
        .md5_parse      = tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);

        skb_queue_head_init(&tp->out_of_order_queue);
        tcp_init_xmit_timers(sk);
        tcp_prequeue_init(tp);

        icsk->icsk_rto = TCP_TIMEOUT_INIT;
        tp->mdev = TCP_TIMEOUT_INIT;

        /* So many TCP implementations out there (incorrectly) count the
         * initial SYN frame in their delayed-ACK and congestion control
         * algorithms that we must have the following bandaid to talk
         * efficiently to them.  -DaveM
         */
        tp->snd_cwnd = 2;

        /* See draft-stevens-tcpca-spec-01 for discussion of the
         * initialization of these values.
         */
        tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
        tp->snd_cwnd_clamp = ~0;
        tp->mss_cache = TCP_MSS_DEFAULT;

        tp->reordering = sysctl_tcp_reordering;
        icsk->icsk_ca_ops = &tcp_init_congestion_ops;

        sk->sk_state = TCP_CLOSE;

        sk->sk_write_space = sk_stream_write_space;
        sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

        icsk->icsk_af_ops = &ipv4_specific;
        icsk->icsk_sync_mss = tcp_sync_mss;
#ifdef CONFIG_TCP_MD5SIG
        tp->af_specific = &tcp_sock_ipv4_specific;
#endif

        /* TCP Cookie Transactions */
        if (sysctl_tcp_cookie_size > 0) {
                /* Default, cookies without s_data_payload. */
                tp->cookie_values =
                        kzalloc(sizeof(*tp->cookie_values),
                                sk->sk_allocation);
                if (tp->cookie_values != NULL)
                        kref_init(&tp->cookie_values->kref);
        }
        /* Presumed zeroed, in order of appearance:
         *      cookie_in_always, cookie_out_never,
         *      s_data_constant, s_data_in, s_data_out
         */
        sk->sk_sndbuf = sysctl_tcp_wmem[1];
        sk->sk_rcvbuf = sysctl_tcp_rmem[1];

        local_bh_disable();
        percpu_counter_inc(&tcp_sockets_allocated);
        local_bh_enable();

        return 0;
}
*sk
)
1903 struct tcp_sock
*tp
= tcp_sk(sk
);
1905 tcp_clear_xmit_timers(sk
);
1907 tcp_cleanup_congestion_control(sk
);
1909 /* Cleanup up the write buffer. */
1910 tcp_write_queue_purge(sk
);
1912 /* Cleans up our, hopefully empty, out_of_order_queue. */
1913 __skb_queue_purge(&tp
->out_of_order_queue
);
1915 #ifdef CONFIG_TCP_MD5SIG
1916 /* Clean up the MD5 key list, if any */
1917 if (tp
->md5sig_info
) {
1918 tcp_v4_clear_md5_list(sk
);
1919 kfree(tp
->md5sig_info
);
1920 tp
->md5sig_info
= NULL
;
1924 #ifdef CONFIG_NET_DMA
1925 /* Cleans up our sk_async_wait_queue */
1926 __skb_queue_purge(&sk
->sk_async_wait_queue
);
1929 /* Clean prequeue, it must be empty really */
1930 __skb_queue_purge(&tp
->ucopy
.prequeue
);
1932 /* Clean up a referenced TCP bind bucket. */
1933 if (inet_csk(sk
)->icsk_bind_hash
)
1937 * If sendmsg cached page exists, toss it.
1939 if (sk
->sk_sndmsg_page
) {
1940 __free_page(sk
->sk_sndmsg_page
);
1941 sk
->sk_sndmsg_page
= NULL
;
1944 /* TCP Cookie Transactions */
1945 if (tp
->cookie_values
!= NULL
) {
1946 kref_put(&tp
->cookie_values
->kref
,
1947 tcp_cookie_values_release
);
1948 tp
->cookie_values
= NULL
;
1951 percpu_counter_dec(&tcp_sockets_allocated
);
1953 EXPORT_SYMBOL(tcp_v4_destroy_sock
);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
        return hlist_nulls_empty(head) ? NULL :
                list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
        return !is_a_nulls(tw->tw_node.next) ?
                hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}
/*
 * Get the next listener socket following cur.  If cur is NULL, get the first
 * socket starting from the bucket given in st->bucket; when st->bucket is
 * zero the very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
        struct inet_connection_sock *icsk;
        struct hlist_nulls_node *node;
        struct sock *sk = cur;
        struct inet_listen_hashbucket *ilb;
        struct tcp_iter_state *st = seq->private;
        struct net *net = seq_file_net(seq);

        if (!sk) {
                ilb = &tcp_hashinfo.listening_hash[st->bucket];
                spin_lock_bh(&ilb->lock);
                sk = sk_nulls_head(&ilb->head);
                st->offset = 0;
                goto get_sk;
        }
        ilb = &tcp_hashinfo.listening_hash[st->bucket];
        ++st->num;
        ++st->offset;

        if (st->state == TCP_SEQ_STATE_OPENREQ) {
                struct request_sock *req = cur;

                icsk = inet_csk(st->syn_wait_sk);
                req = req->dl_next;
                while (1) {
                        while (req) {
                                if (req->rsk_ops->family == st->family) {
                                        cur = req;
                                        goto out;
                                }
                                req = req->dl_next;
                        }
                        if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
                                break;
get_req:
                        req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
                }
                sk        = sk_next(st->syn_wait_sk);
                st->state = TCP_SEQ_STATE_LISTENING;
                read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
        } else {
                icsk = inet_csk(sk);
                read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                if (reqsk_queue_len(&icsk->icsk_accept_queue))
                        goto start_req;
                read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                sk = sk_next(sk);
        }
get_sk:
        sk_nulls_for_each_from(sk, node) {
                if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
                        cur = sk;
                        goto out;
                }
                icsk = inet_csk(sk);
                read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
                        st->uid         = sock_i_uid(sk);
                        st->syn_wait_sk = sk;
                        st->state       = TCP_SEQ_STATE_OPENREQ;
                        st->sbucket     = 0;
                        goto get_req;
                }
                read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
        }
        spin_unlock_bh(&ilb->lock);
        st->offset = 0;
        if (++st->bucket < INET_LHTABLE_SIZE) {
                ilb = &tcp_hashinfo.listening_hash[st->bucket];
                spin_lock_bh(&ilb->lock);
                sk = sk_nulls_head(&ilb->head);
                goto get_sk;
        }
        cur = NULL;
out:
        return cur;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
        struct tcp_iter_state *st = seq->private;
        void *rc;

        st->bucket = 0;
        st->offset = 0;
        rc = listening_get_next(seq, NULL);

        while (rc && *pos) {
                rc = listening_get_next(seq, rc);
                --*pos;
        }
        return rc;
}
static inline int empty_bucket(struct tcp_iter_state *st)
{
        return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
                hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}
/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
        struct tcp_iter_state *st = seq->private;
        struct net *net = seq_file_net(seq);
        void *rc = NULL;

        st->offset = 0;
        for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
                struct sock *sk;
                struct hlist_nulls_node *node;
                struct inet_timewait_sock *tw;
                spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

                /* Lockless fast path for the common case of empty buckets */
                if (empty_bucket(st))
                        continue;

                spin_lock_bh(lock);
                sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
                        if (sk->sk_family != st->family ||
                            !net_eq(sock_net(sk), net)) {
                                continue;
                        }
                        rc = sk;
                        goto out;
                }
                st->state = TCP_SEQ_STATE_TIME_WAIT;
                inet_twsk_for_each(tw, node,
                                   &tcp_hashinfo.ehash[st->bucket].twchain) {
                        if (tw->tw_family != st->family ||
                            !net_eq(twsk_net(tw), net)) {
                                continue;
                        }
                        rc = tw;
                        goto out;
                }
                spin_unlock_bh(lock);
                st->state = TCP_SEQ_STATE_ESTABLISHED;
        }
out:
        return rc;
}
static void *established_get_next(struct seq_file *seq, void *cur)
{
        struct sock *sk = cur;
        struct inet_timewait_sock *tw;
        struct hlist_nulls_node *node;
        struct tcp_iter_state *st = seq->private;
        struct net *net = seq_file_net(seq);

        ++st->num;
        ++st->offset;

        if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
                tw = cur;
                tw = tw_next(tw);
get_tw:
                while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
                        tw = tw_next(tw);
                }
                if (tw) {
                        cur = tw;
                        goto out;
                }
                spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
                st->state = TCP_SEQ_STATE_ESTABLISHED;

                /* Look for next non empty bucket */
                st->offset = 0;
                while (++st->bucket <= tcp_hashinfo.ehash_mask &&
                                empty_bucket(st))
                        ;
                if (st->bucket > tcp_hashinfo.ehash_mask)
                        return NULL;

                spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
                sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
        } else
                sk = sk_nulls_next(sk);

        sk_nulls_for_each_from(sk, node) {
                if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
                        goto found;
        }

        st->state = TCP_SEQ_STATE_TIME_WAIT;
        tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
        goto get_tw;
found:
        cur = sk;
out:
        return cur;
}
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
        struct tcp_iter_state *st = seq->private;
        void *rc;

        st->bucket = 0;
        rc = established_get_first(seq);

        while (rc && pos) {
                rc = established_get_next(seq, rc);
                --pos;
        }
        return rc;
}
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
        void *rc;
        struct tcp_iter_state *st = seq->private;

        st->state = TCP_SEQ_STATE_LISTENING;
        rc        = listening_get_idx(seq, &pos);

        if (!rc) {
                st->state = TCP_SEQ_STATE_ESTABLISHED;
                rc        = established_get_idx(seq, pos);
        }

        return rc;
}
static void *tcp_seek_last_pos(struct seq_file *seq)
{
        struct tcp_iter_state *st = seq->private;
        int offset = st->offset;
        int orig_num = st->num;
        void *rc = NULL;

        switch (st->state) {
        case TCP_SEQ_STATE_OPENREQ:
        case TCP_SEQ_STATE_LISTENING:
                if (st->bucket >= INET_LHTABLE_SIZE)
                        break;
                st->state = TCP_SEQ_STATE_LISTENING;
                rc = listening_get_next(seq, NULL);
                while (offset-- && rc)
                        rc = listening_get_next(seq, rc);
                if (rc)
                        break;
                st->bucket = 0;
                /* Fallthrough */
        case TCP_SEQ_STATE_ESTABLISHED:
        case TCP_SEQ_STATE_TIME_WAIT:
                st->state = TCP_SEQ_STATE_ESTABLISHED;
                if (st->bucket > tcp_hashinfo.ehash_mask)
                        break;
                rc = established_get_first(seq);
                while (offset-- && rc)
                        rc = established_get_next(seq, rc);
        }

        st->num = orig_num;

        return rc;
}
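
/*
 * The iterator below restarts cheaply for sequential reads of
 * /proc/net/tcp: when *pos matches the remembered st->last_pos,
 * tcp_seq_start() resumes from the saved bucket and offset instead of
 * rewalking the hash tables from the beginning.
 */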
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
        struct tcp_iter_state *st = seq->private;
        void *rc;

        if (*pos && *pos == st->last_pos) {
                rc = tcp_seek_last_pos(seq);
                if (rc)
                        goto out;
        }

        st->state = TCP_SEQ_STATE_LISTENING;
        st->num = 0;
        st->bucket = 0;
        st->offset = 0;
        rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
        st->last_pos = *pos;
        return rc;
}
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct tcp_iter_state *st = seq->private;
        void *rc = NULL;

        if (v == SEQ_START_TOKEN) {
                rc = tcp_get_idx(seq, 0);
                goto out;
        }

        switch (st->state) {
        case TCP_SEQ_STATE_OPENREQ:
        case TCP_SEQ_STATE_LISTENING:
                rc = listening_get_next(seq, v);
                if (!rc) {
                        st->state = TCP_SEQ_STATE_ESTABLISHED;
                        st->bucket = 0;
                        st->offset = 0;
                        rc        = established_get_first(seq);
                }
                break;
        case TCP_SEQ_STATE_ESTABLISHED:
        case TCP_SEQ_STATE_TIME_WAIT:
                rc = established_get_next(seq, v);
                break;
        }
out:
        ++*pos;
        st->last_pos = *pos;
        return rc;
}
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
        struct tcp_iter_state *st = seq->private;

        switch (st->state) {
        case TCP_SEQ_STATE_OPENREQ:
                if (v) {
                        struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
                        read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                }
        case TCP_SEQ_STATE_LISTENING:
                if (v != SEQ_START_TOKEN)
                        spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
                break;
        case TCP_SEQ_STATE_TIME_WAIT:
        case TCP_SEQ_STATE_ESTABLISHED:
                if (v)
                        spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
                break;
        }
}
static int tcp_seq_open(struct inode *inode, struct file *file)
{
        struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
        struct tcp_iter_state *s;
        int err;

        err = seq_open_net(inode, file, &afinfo->seq_ops,
                           sizeof(struct tcp_iter_state));
        if (err < 0)
                return err;

        s = ((struct seq_file *)file->private_data)->private;
        s->family  = afinfo->family;
        s->last_pos = 0;
        return 0;
}
int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
        int rc = 0;
        struct proc_dir_entry *p;

        afinfo->seq_fops.open    = tcp_seq_open;
        afinfo->seq_fops.read    = seq_read;
        afinfo->seq_fops.llseek  = seq_lseek;
        afinfo->seq_fops.release = seq_release_net;

        afinfo->seq_ops.start = tcp_seq_start;
        afinfo->seq_ops.next  = tcp_seq_next;
        afinfo->seq_ops.stop  = tcp_seq_stop;

        p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
                             &afinfo->seq_fops, afinfo);
        if (!p)
                rc = -ENOMEM;
        return rc;
}
EXPORT_SYMBOL(tcp_proc_register);
void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
        proc_net_remove(net, afinfo->name);
}
EXPORT_SYMBOL(tcp_proc_unregister);
static void get_openreq4(struct sock *sk, struct request_sock *req,
                         struct seq_file *f, int i, int uid, int *len)
{
        const struct inet_request_sock *ireq = inet_rsk(req);
        int ttd = req->expires - jiffies;

        seq_printf(f, "%4d: %08X:%04X %08X:%04X"
                " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n",
                i,
                ireq->loc_addr,
                ntohs(inet_sk(sk)->inet_sport),
                ireq->rmt_addr,
                ntohs(ireq->rmt_port),
                TCP_SYN_RECV,
                0, 0, /* could print option size, but that is af dependent. */
                1,    /* timers active (only the expire timer) */
                jiffies_to_clock_t(ttd),
                req->retrans,
                uid,
                0,  /* non standard timer */
                0, /* open_requests have no inode */
                atomic_read(&sk->sk_refcnt),
                req,
                len);
}
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
        int timer_active;
        unsigned long timer_expires;
        struct tcp_sock *tp = tcp_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet = inet_sk(sk);
        __be32 dest = inet->inet_daddr;
        __be32 src = inet->inet_rcv_saddr;
        __u16 destp = ntohs(inet->inet_dport);
        __u16 srcp = ntohs(inet->inet_sport);
        int rx_queue;

        if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
                timer_active    = 1;
                timer_expires   = icsk->icsk_timeout;
        } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
                timer_active    = 4;
                timer_expires   = icsk->icsk_timeout;
        } else if (timer_pending(&sk->sk_timer)) {
                timer_active    = 2;
                timer_expires   = sk->sk_timer.expires;
        } else {
                timer_active    = 0;
                timer_expires = jiffies;
        }

        if (sk->sk_state == TCP_LISTEN)
                rx_queue = sk->sk_ack_backlog;
        else
                /*
                 * Because we don't lock the socket, we might find a
                 * transient negative value.
                 */
                rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

        seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
                        "%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n",
                i, src, srcp, dest, destp, sk->sk_state,
                tp->write_seq - tp->snd_una,
                rx_queue,
                timer_active,
                jiffies_to_clock_t(timer_expires - jiffies),
                icsk->icsk_retransmits,
                sock_i_uid(sk),
                icsk->icsk_probes_out,
                sock_i_ino(sk),
                atomic_read(&sk->sk_refcnt), sk,
                jiffies_to_clock_t(icsk->icsk_rto),
                jiffies_to_clock_t(icsk->icsk_ack.ato),
                (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
                tp->snd_cwnd,
                tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
                len);
}
static void get_timewait4_sock(struct inet_timewait_sock *tw,
                               struct seq_file *f, int i, int *len)
{
        __be32 dest, src;
        __u16 destp, srcp;
        int ttd = tw->tw_ttd - jiffies;

        if (ttd < 0)
                ttd = 0;

        dest  = tw->tw_daddr;
        src   = tw->tw_rcv_saddr;
        destp = ntohs(tw->tw_dport);
        srcp  = ntohs(tw->tw_sport);

        seq_printf(f, "%4d: %08X:%04X %08X:%04X"
                " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
                i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
                3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
                atomic_read(&tw->tw_refcnt), tw, len);
}

#define TMPSZ 150
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
        struct tcp_iter_state *st;
        int len;

        if (v == SEQ_START_TOKEN) {
                seq_printf(seq, "%-*s\n", TMPSZ - 1,
                           "  sl  local_address rem_address   st tx_queue "
                           "rx_queue tr tm->when retrnsmt   uid  timeout "
                           "inode");
                goto out;
        }
        st = seq->private;

        switch (st->state) {
        case TCP_SEQ_STATE_LISTENING:
        case TCP_SEQ_STATE_ESTABLISHED:
                get_tcp4_sock(v, seq, st->num, &len);
                break;
        case TCP_SEQ_STATE_OPENREQ:
                get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
                break;
        case TCP_SEQ_STATE_TIME_WAIT:
                get_timewait4_sock(v, seq, st->num, &len);
                break;
        }
        seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
        return 0;
}
static struct tcp_seq_afinfo tcp4_seq_afinfo = {
        .name           = "tcp",
        .family         = AF_INET,
        .seq_fops       = {
                .owner          = THIS_MODULE,
        },
        .seq_ops        = {
                .show           = tcp4_seq_show,
        },
};
static int __net_init tcp4_proc_init_net(struct net *net)
{
        return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
        tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
        .init = tcp4_proc_init_net,
        .exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
        return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
        unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
        struct iphdr *iph = skb_gro_network_header(skb);

        switch (skb->ip_summed) {
        case CHECKSUM_COMPLETE:
                if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
                                  skb->csum)) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        break;
                }

                /* fall through */
        case CHECKSUM_NONE:
                NAPI_GRO_CB(skb)->flush = 1;
                return NULL;
        }

        return tcp_gro_receive(head, skb);
}
int tcp4_gro_complete(struct sk_buff *skb)
{
        struct iphdr *iph = ip_hdr(skb);
        struct tcphdr *th = tcp_hdr(skb);

        th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
                                  iph->saddr, iph->daddr, 0);
        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

        return tcp_gro_complete(skb);
}
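
/*
 * tcp_prot wires the IPv4 TCP implementation into the generic socket
 * layer; every address-family specific operation that the socket core
 * needs goes through this table.
 */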
struct proto tcp_prot = {
        .name                   = "TCP",
        .owner                  = THIS_MODULE,
        .close                  = tcp_close,
        .connect                = tcp_v4_connect,
        .disconnect             = tcp_disconnect,
        .accept                 = inet_csk_accept,
        .ioctl                  = tcp_ioctl,
        .init                   = tcp_v4_init_sock,
        .destroy                = tcp_v4_destroy_sock,
        .shutdown               = tcp_shutdown,
        .setsockopt             = tcp_setsockopt,
        .getsockopt             = tcp_getsockopt,
        .recvmsg                = tcp_recvmsg,
        .sendmsg                = tcp_sendmsg,
        .sendpage               = tcp_sendpage,
        .backlog_rcv            = tcp_v4_do_rcv,
        .hash                   = inet_hash,
        .unhash                 = inet_unhash,
        .get_port               = inet_csk_get_port,
        .enter_memory_pressure  = tcp_enter_memory_pressure,
        .sockets_allocated      = &tcp_sockets_allocated,
        .orphan_count           = &tcp_orphan_count,
        .memory_allocated       = &tcp_memory_allocated,
        .memory_pressure        = &tcp_memory_pressure,
        .sysctl_mem             = sysctl_tcp_mem,
        .sysctl_wmem            = sysctl_tcp_wmem,
        .sysctl_rmem            = sysctl_tcp_rmem,
        .max_header             = MAX_TCP_HEADER,
        .obj_size               = sizeof(struct tcp_sock),
        .slab_flags             = SLAB_DESTROY_BY_RCU,
        .twsk_prot              = &tcp_timewait_sock_ops,
        .rsk_prot               = &tcp_request_sock_ops,
        .h.hashinfo             = &tcp_hashinfo,
        .no_autobind            = true,
#ifdef CONFIG_COMPAT
        .compat_setsockopt      = compat_tcp_setsockopt,
        .compat_getsockopt      = compat_tcp_getsockopt,
#endif
};
EXPORT_SYMBOL(tcp_prot);
static int __net_init tcp_sk_init(struct net *net)
{
        return inet_ctl_sock_create(&net->ipv4.tcp_sock,
                                    PF_INET, SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcp_sk_exit(struct net *net)
{
        inet_ctl_sock_destroy(net->ipv4.tcp_sock);
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
        inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
        .init       = tcp_sk_init,
        .exit       = tcp_sk_exit,
        .exit_batch = tcp_sk_exit_batch,
};
void __init tcp_v4_init(void)
{
        inet_hashinfo_init(&tcp_hashinfo);
        if (register_pernet_subsys(&tcp_sk_ops))
                panic("Failed to create the TCP control socket.\n");
}