/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>
static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}
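
/*
 * Note (not part of the original file): the initial sequence number is
 * derived from the connection 4-tuple hashed with a boot-time secret (see
 * secure_tcpv6_sequence_number()), which keeps ISNs hard to guess for
 * off-path attackers.
 */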
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */
	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
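
/*
 * Illustrative (non-kernel) sketch, not part of the original file: the path
 * above is driven by an ordinary connect() on an AF_INET6 stream socket, and
 * a v4-mapped destination (::ffff:a.b.c.d) takes the IPV6_ADDR_MAPPED branch
 * and is handed to tcp_v4_connect().
 *
 *	int fd = socket(AF_INET6, SOCK_STREAM, 0);
 *	struct sockaddr_in6 a = { .sin6_family = AF_INET6,
 *				  .sin6_port = htons(80) };
 *	inet_pton(AF_INET6, "::ffff:192.0.2.1", &a.sin6_addr);
 *	connect(fd, (struct sockaddr *)&a, sizeof(a));
 */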
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex);

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs send out by Linux are always <576bytes so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}

	/* Might be for an request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
			       np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}


static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet_rsk(req)->pktopts);
}
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
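
/*
 * Illustrative (non-kernel) sketch, not part of the original file: userspace
 * installs a per-peer key with setsockopt(TCP_MD5SIG); for a v4-mapped peer
 * address the key is stored as an AF_INET key, as the code above shows.
 *
 *	struct tcp_md5sig md5 = { 0 };
 *	struct sockaddr_in6 *a = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *	a->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &a->sin6_addr);
 *	md5.tcpm_keylen = 6;
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */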
static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}
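
/*
 * Note (not part of the original file): struct tcp6_pseudohdr is the
 * RFC 2460 checksum pseudo-header (source address, destination address,
 * upper-layer length, next header = IPPROTO_TCP). The MD5 digest covers
 * that pseudo-header, the TCP header with a zeroed checksum field, the
 * segment payload and finally the key, per RFC 2385.
 */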
static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}
static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
};
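
/*
 * Note (not part of the original file): with IPV6_MIN_MTU = 1280,
 * sizeof(struct tcphdr) = 20 and sizeof(struct ipv6hdr) = 40, the
 * mss_clamp above works out to 1280 - 20 - 40 = 1220 bytes.
 */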
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, __be32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup either it is for RST
	 * Underlying function will use this to retrieve the network
	 * namespace
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}
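
/*
 * Note (not part of the original file): replies built here (RSTs and ACKs)
 * are transmitted through the per-namespace control socket
 * (net->ipv6.tcp_sk) rather than the flow's own socket, since a reply may
 * have to be generated when no local full socket exists, e.g. a RST for an
 * unknown connection.
 */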
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not loose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, NULL, 0,
					    &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			goto out;

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto out;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}
static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    __be32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */
		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = tcp_v6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt	  = NULL;
	newnp->mcast_oif  = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more one thing there: reattach optmem
	   to newsk.
	 */
	opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_mask(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_mask(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions)
				skb_set_owner_r(newnp->pktoptions, newsk);
		}
	}

	return newsk;

out_overflow:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	tcp_listendrop(sk);
	return NULL;
}
/* The socket must have it's spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(skb);
	return 0;
}
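
/*
 * Note (not part of the original file): the options latched into
 * np->pktoptions above are what a subsequent getsockopt(IPV6_PKTOPTIONS)
 * on the TCP socket reports; only the most recently latched segment's
 * options are kept, as the comment at the top of this function explains.
 */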
static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
}

static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	bool refcounted;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr)/4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
				th->source, th->dest, inet6_iif(skb),
				&refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		tcp_v6_fill_cb(skb, hdr, th);
		if (tcp_v6_inbound_md5_hash(sk, skb)) {
			reqsk_put(req);
			goto discard_it;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		refcounted = true;
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		__NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	if (refcounted)
		sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    skb, __tcp_hdrlen(th),
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb));
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			refcounted = false;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_restore_cb(skb);
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
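
/*
 * Note (not part of the original file): early demux runs from the IPv6
 * receive path before routing; by finding the established socket first it
 * can reuse the dst cached by inet6_sk_rx_dst_set() above and skip a route
 * lookup for the common case of packets to a connected socket.
 */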
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct request_sock *req, int i)
{
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq),
				    sock_i_uid(req->rsk_listener)),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	int rx_queue;
	int state;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = sk_state_load(sp);
	if (state == TCP_LISTEN)
		rx_queue = sp->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   state,
		   tp->write_seq - tp->snd_una,
		   rx_queue,
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   state == TCP_LISTEN ?
			fastopenq->max_qlen :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}
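
/*
 * Note (not part of the original file): each line printed here follows the
 * same layout as IPv4's /proc/net/tcp, except that the local and remote
 * addresses are 128-bit IPv6 addresses printed as four 32-bit hex words.
 */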
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait6_sock(seq, v, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq6(seq, v, st->num);
	else
		get_tcp6_sock(seq, v, st->num);
out:
	return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.clear_sk		= tcp_v6_clear_sk,
	.diag_destroy		= tcp_abort,
};
static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}