/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;	/* stub for !CONFIG_TCP_MD5SIG builds */
}
#endif
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}
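
/* Annotation: the initial sequence number mixes both IPv6 addresses and
 * both ports through a keyed hash (secure_tcpv6_sequence_number()), so an
 * off-path attacker cannot predict the ISN of a new connection while the
 * sequence space stays monotonic per four-tuple.
 */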
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
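
/* Annotation: for a v4-mapped destination (::ffff:a.b.c.d) the connect
 * path above swaps in the ipv6_mapped ops and delegates to
 * tcp_v4_connect(); on failure it restores the IPv6 ops. This is how a
 * single tcp6 socket transparently carries TCP-over-IPv4 traffic.
 */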
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}
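
/* Annotation: note the one-way check above — the cached path MTU is only
 * ever shrunk here; growing it again is left to the normal PMTU probing
 * machinery. tcp_simple_retransmit() re-sends queued segments that no
 * longer fit the reduced MSS.
 */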
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex);

	if (!sk) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}

	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      bool attach_req)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, attach_req);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
			       np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet_rsk(req)->pktopts);
}
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
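
/* Annotation: an illustrative (hypothetical) userspace counterpart of the
 * option parsed above; the struct and constants come from <linux/tcp.h>:
 *
 *	struct tcp_md5sig cmd = { 0 };
 *	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
 *
 *	sin6->sin6_family = AF_INET6;
 *	sin6->sin6_addr = peer_addr;		// hypothetical peer address
 *	cmd.tcpm_keylen = strlen(secret);	// <= TCP_MD5SIG_MAXKEYLEN
 *	memcpy(cmd.tcpm_key, secret, cmd.tcpm_keylen);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &cmd, sizeof(cmd));
 *
 * A zero tcpm_keylen deletes the key (the tcp_md5_do_del() branch above);
 * a v4-mapped address installs an AF_INET key instead.
 */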
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
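
/* Annotation: the tcp6_pseudohdr hashed above mirrors the RFC 2460
 * checksum pseudo-header — source address, destination address, length
 * and next-header (IPPROTO_TCP) — so the MD5 signature also covers the
 * addresses carried by the IPv6 header.
 */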
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}
static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}
static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
};
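
/* Annotation: tcp_conn_request() itself is address-family agnostic;
 * everything v6-specific about SYN handling (route lookup, ISN
 * generation, MD5, SYN cookies, SYN-ACK emission) is injected through
 * the two ops tables above.
 */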
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, u32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else
		fl6.flowi6_oif = oif;
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup even when it is for RST.
	 * The underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}
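
/* Annotation: tcp_v6_send_response() is the common backend for both RSTs
 * and bare ACKs sent without a full socket: the reply is built from the
 * incoming segment alone (addresses and ports swapped) and transmitted
 * via the per-netns control socket, so it also works for TIME_WAIT
 * sockets or when no socket matched at all.
 */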
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * We do not lose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    u32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
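
/* Annotation: when the SYN queue overflowed, the listener answered with
 * SYN cookies instead of keeping request socks. A later ACK that matches
 * no request lands here, and cookie_v6_check() reconstructs the
 * connection state from the sequence number alone (non-SYN segments only).
 */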
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0; /* don't send reset */
}
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->mcast_oif   = tcp_v6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->mcast_oif  = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we do one more thing here: reattach optmem to newsk.
	 */
	opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_mask(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_mask(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions)
				skb_set_owner_r(newnp->pktoptions, newsk);
		}
	}

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
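
/* Annotation: the ipv6_pktoptions tail above implements the
 * IPV6_PKTOPTIONS latching described in the comment: a clone of the most
 * recent in-order segment replaces np->pktoptions via xchg(), and
 * recvmsg() later converts it into ancillary data for the user.
 */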
static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
}
static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}
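
/* Annotation: tcp_v6_fill_cb() overwrites IP6CB(skb) because both live in
 * skb->cb. tcp_v6_restore_cb() is its inverse and must run before any
 * path that re-enters xfrm6_policy_check() or tcp_v6_fill_cb() with the
 * same skb, such as the TIME_WAIT re-lookup in tcp_v6_rcv() below.
 */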
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
				inet6_iif(skb));
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk = NULL;

		sk = req->rsk_listener;
		tcp_v6_fill_cb(skb, hdr, th);
		if (tcp_v6_inbound_md5_hash(sk, skb)) {
			reqsk_put(req);
			goto discard_it;
		}
		if (likely(sk->sk_state == TCP_LISTEN)) {
			nsk = tcp_check_req(sk, skb, req, false);
		} else {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		if (!nsk) {
			reqsk_put(req);
			goto discard_it;
		}
		if (nsk == sk) {
			sock_hold(sk);
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_it;
		} else {
			return 0;
		}
	}
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb));
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_restore_cb(skb);
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
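
/* Annotation: early demux runs before routing. When an established socket
 * matches, the socket and its cached rx dst are attached to the skb, so
 * the normal input path can skip both the socket hash lookup and the FIB
 * lookup for this segment.
 */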
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}
static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct request_sock *req, int i)
{
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq),
				    sock_i_uid(req->rsk_listener)),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	int rx_queue;
	int state;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = sk_state_load(sp);
	if (state == TCP_LISTEN)
		rx_queue = sp->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   state,
		   tp->write_seq - tp->snd_una,
		   rx_queue,
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   state == TCP_LISTEN ?
			fastopenq->max_qlen :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait6_sock(seq, v, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq6(seq, v, st->num);
	else
		get_tcp6_sock(seq, v, st->num);
out:
	return 0;
}
static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};
int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}
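
/* Annotation: tcpv6 sockets live in a SLAB_DESTROY_BY_RCU cache (see
 * tcpv6_prot below), so lockless lookups may still dereference a socket
 * that is being recycled; keeping pinet6 intact across the memset above
 * prevents such readers from following a wild pointer.
 */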
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.clear_sk		= tcp_v6_clear_sk,
	.diag_destroy		= tcp_abort,
};
static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}