/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	const struct rt6_info *rt = (const struct rt6_info *)dst;

	dst_hold(dst);
	sk->sk_rx_dst = dst;
	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	if (rt->rt6i_node)
		inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
}
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			usin->sin6_addr = flowlabel->dst;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
		ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
				       &sk->sk_v6_rcv_saddr);

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
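/*
 * Illustrative userspace sketch (not part of the kernel build): the two
 * connect() paths above as seen from an application.  A v4-mapped
 * destination such as ::ffff:192.0.2.1 takes the ipv6_mapped branch and
 * is handed to tcp_v4_connect(); a native IPv6 destination takes the
 * flow-label/route path.  Address and port below are made up.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <netinet/in.h>

static int connect_v6(const char *addr, unsigned short port)
{
	struct sockaddr_in6 sin6;
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;

	memset(&sin6, 0, sizeof(sin6));
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	inet_pton(AF_INET6, addr, &sin6.sin6_addr);

	/* "::ffff:192.0.2.1" exercises the IPV6_ADDR_MAPPED branch. */
	if (connect(fd, (struct sockaddr *)&sin6, sizeof(sin6)) < 0) {
		perror("connect");
		close(fd);
		return -1;
	}
	return fd;
}
#endif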
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi6 *fl6,
			      struct request_sock *req,
			      u16 queue_mapping)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, NULL);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		skb_set_queue_mapping(skb, queue_mapping);
		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	return err;
}

static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req)
{
	struct flowi6 fl6;
	int res;

	res = tcp_v6_send_synack(sk, NULL, &fl6, req, 0);
	if (!res)
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return res;
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet_rsk(req)->pktopts);
}
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
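/*
 * For reference, the block hashed above is the RFC 2460 upper-layer
 * pseudo-header.  At the time of writing, net/tcp.h lays it out roughly
 * as sketched below (quoted here as an assumption for readability, not
 * a redefinition):
 *
 *	struct tcp6_pseudohdr {
 *		struct in6_addr	saddr;
 *		struct in6_addr	daddr;
 *		__be32		len;
 *		__be32		protocol;	(includes padding)
 *	};
 *
 * so saddr/daddr/len/protocol are fed to MD5 as the first block, before
 * the TCP header and payload.
 */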
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &sk->sk_v6_daddr;
	} else if (req) {
		saddr = &inet_rsk(req)->ir_v6_loc_addr;
		daddr = &inet_rsk(req)->ir_v6_rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return 1;
	}
	return 0;
}
#endif
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
#endif
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 tsval, u32 tsecr,
				 struct tcp_md5sig_key *key, int rst, u8 tclass)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
		fl6.flowi6_oif = inet6_iif(skb);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup even if it is for an RST.
	 * The underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not lose security here:
		 * the incoming packet is checked against the md5 hash of the
		 * found key, and no RST is generated if the hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					   &tcp_hashinfo, &ipv6h->saddr,
					   th->source, &ipv6h->daddr,
					   ntohs(th->source), inet6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, 0, key, 1, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr,
			    struct tcp_md5sig_key *key, u8 tclass)
{
	tcp_v6_send_response(skb, seq, ack, win, tsval, tsecr, key, 0, tclass);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass);

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1,
			req->rcv_wnd, tcp_time_stamp, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
}
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_options_received tmp_opt;
	struct request_sock *req;
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
	struct flowi6 fl6;
	bool want_cookie = false;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if ((sysctl_tcp_syncookies == 2 ||
	     inet_csk_reqsk_queue_is_full(sk)) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
		if (!want_cookie)
			goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
		goto drop;
	}

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, 0, NULL);

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, skb, sock_net(sk));

	ireq->ir_iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = inet6_iif(skb);

	if (!isn) {
		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			ireq->pktopts = skb;
		}

		if (want_cookie) {
			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
			req->cookie_ts = tmp_opt.tstamp_ok;
			goto have_isn;
		}

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet6_csk_route_req(sk, &fl6, req)) != NULL) {
			if (!tcp_peer_is_proven(req, dst, true)) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 !tcp_peer_is_proven(req, dst, false)) {
			/* Without syncookies the last quarter of the
			 * backlog is filled with destinations proven
			 * to be alive, which means we keep communicating
			 * with destinations already remembered at the
			 * moment of the synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
				       &ireq->ir_v6_rmt_addr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v6_init_sequence(skb);
	}
have_isn:
	tcp_rsk(req)->snt_isn = isn;

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_release;

	if (tcp_v6_send_synack(sk, dst, &fl6, req,
			       skb_get_queue_mapping(skb)) ||
	    want_cookie)
		goto drop_and_free;

	tcp_rsk(req)->snt_synack = tcp_time_stamp;
	tcp_rsk(req)->listener = NULL;
	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0; /* don't send reset */
}
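/*
 * Illustrative sketch (not part of the kernel build): the syncookie
 * decision above is driven by net.ipv4.tcp_syncookies (shared with IPv6);
 * the value 2 forces cookies unconditionally.  A userspace check could
 * read the sysctl like this.
 */
#if 0
#include <stdio.h>

static int tcp_syncookies_mode(void)
{
	int mode = -1;
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_syncookies", "r");

	if (f) {
		if (fscanf(f, "%d", &mode) != 1)
			mode = -1;
		fclose(f);
	}
	return mode;	/* 0 = off, 1 = on under pressure, 2 = always */
}
#endif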
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		newsk->sk_v6_rcv_saddr = newnp->saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_tclass  = ipv6_get_dsfield(ipv6_hdr(skb));
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(ireq->pktopts,
					      sk_gfp_atomic(sk, GFP_ATOMIC));
		consume_skb(ireq->pktopts);
		ireq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(skb));
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping a reference count would be much more clever,
	   but we do one more thing here: reattach the optmem
	   to newsk.
	 */
	if (np->opt)
		newnp->opt = ipv6_dup_options(newsk, np->opt);

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_atomic(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
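/*
 * For reference: tcp_v6_check() used above is, at the time of writing, a
 * thin wrapper around csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base)
 * (an assumption based on net/ip6_checksum.h), which folds the IPv6
 * pseudo-header into the running checksum.  A zero result in the
 * CHECKSUM_COMPLETE branch therefore means the checksum verified.  Short
 * packets (<= 76 bytes) are cheap enough to verify immediately; for longer
 * ones the partial csum is kept and completed later if needed.
 */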
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   does not look very well thought out. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxtclass)
			np->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(opt_skb));
		if (np->rxopt.bits.rxflow)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
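/*
 * Illustrative userspace sketch (not part of the kernel build): the
 * latched options stored in np->pktoptions above are what a TCP
 * application retrieves with the Stevens-style IPV6_PKTOPTIONS
 * getsockopt, after enabling the individual IPV6_RECV* options.  The
 * exact cmsg layout returned is not spelled out here.
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>

static void fetch_latched_pktoptions(int fd)
{
	int on = 1;
	char cbuf[256];
	socklen_t len = sizeof(cbuf);

	/* Ask TCP to latch per-packet info from incoming segments. */
	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &on, sizeof(on));
	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVHOPLIMIT, &on, sizeof(on));

	/* Retrieve the options latched from the most recent segment. */
	getsockopt(fd, IPPROTO_IPV6, IPV6_PKTOPTIONS, cbuf, &len);
}
#endif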
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto csum_error;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	sk_mark_napi_id(sk, skb);
	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2)) {
		inet_twsk_put(inet_twsk(sk));
		goto bad_packet;
	}
	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */

static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
{
	int ttd = req->expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq), uid),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq - tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	s32 delta = tw->tw_ttd - inet_tw_time_stamp();

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (sk->sk_state == TCP_TIME_WAIT)
			get_timewait6_sock(seq, v, st->num);
		else
			get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	}
out:
	return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
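/*
 * Illustrative userspace sketch (not part of the kernel build): the
 * seq_file output above is what tools like netstat consume.  Addresses
 * are printed as four hex words per the s6_addr32 layout used in
 * get_tcp6_sock(), so a minimal reader looks like this.
 */
#if 0
#include <stdio.h>

static void dump_tcp6_sockets(void)
{
	char line[512];
	FILE *f = fopen("/proc/net/tcp6", "r");

	if (!f)
		return;
	fgets(line, sizeof(line), f);	/* skip header */
	while (fgets(line, sizeof(line), f)) {
		unsigned int sl, state;
		char local[64], remote[64];

		/* "%4d: local:port remote:port st ..." */
		if (sscanf(line, "%u: %45[0-9A-Fa-f:] %45[0-9A-Fa-f:] %x",
			   &sl, local, remote, &state) == 4)
			printf("%s -> %s state %#x\n", local, remote, state);
	}
	fclose(f);
}
#endif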
static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}

struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.mtu_reduced		= tcp_v6_mtu_reduced,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
	.clear_sk		= tcp_v6_clear_sk,
};

static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}