/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static struct inet_connection_sock_af_ops ipv6_mapped;
static struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   struct in6_addr *addr)
{
	return NULL;
}
#endif
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk);
		local_bh_enable();
	}
}
static __inline__ __sum16 tcp_v6_check(int len,
				       struct in6_addr *saddr,
				       struct in6_addr *daddr,
				       __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}
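
/*
 * Usage sketch (mirroring the SYN-ACK transmit path below): callers pass
 * the partial checksum over the TCP header and payload as the last
 * argument, and tcp_v6_check() folds in the IPv6 pseudo-header:
 *
 *	th->check = tcp_v6_check(skb->len, &treq->loc_addr, &treq->rmt_addr,
 *				 csum_partial(th, skb->len, skb->csum));
 */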
static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl, 0, sizeof(fl));

	if (np->sndflow) {
		fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}
	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;
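
	/* Example: a userspace connect() to the unspecified address is thus
	 * treated as a connect to the IPv6 loopback; a hypothetical snippet:
	 *
	 *	struct sockaddr_in6 sin6 = { .sin6_family = AF_INET6,
	 *				     .sin6_addr   = IN6ADDR_ANY_INIT,
	 *				     .sin6_port   = htons(80) };
	 *	connect(fd, (struct sockaddr *)&sin6, sizeof(sin6));
	 *
	 * ends up connecting to ::1, port 80.
	 */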
	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If an interface was set while binding, the
			 * indices must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connecting to a link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}
	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;
	/*
	 *	TCP over IPv4
	 */
	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
				      inet->saddr);
			ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
				      inet->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;
	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src,
		       (saddr ? saddr : &np->saddr));
	fl.oif = sk->sk_bound_dev_if;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->sport;

	if (np->opt && np->opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	security_sk_classify_flow(sk, &fl);

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
	if (err < 0) {
		if (err == -EREMOTE)
			err = ip6_dst_blackhole(sk, &dst, &fl);
		if (err < 0)
			goto failure;
	}
	if (saddr == NULL) {
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
			       sizeof(struct ipv6hdr);

	inet->dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->sport,
							     inet->dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
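
/*
 * Note on the v4-mapped branch above: an AF_INET6 socket connecting to
 * ::ffff:a.b.c.d is handed to tcp_v4_connect(), and icsk_af_ops is switched
 * to ipv6_mapped so all further per-segment work runs the IPv4 code paths.
 * If the application set IPV6_V6ONLY (checked via __ipv6_only_sock()), the
 * connect fails with -ENETUNREACH instead.
 */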
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       int type, int code, int offset, __be32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);
	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			  th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}
	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi fl;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle the rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl, 0, sizeof(fl));
			fl.proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
			fl.oif = sk->sk_bound_dev_if;
			fl.fl_ip_dport = inet->dport;
			fl.fl_ip_sport = inet->sport;
			security_skb_classify_flow(skb, &fl);

			if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
				sk->sk_err_soft = -err;
				goto out;
			}

			if ((err = xfrm_lookup(net, &dst, &fl, sk, 0)) < 0) {
				sk->sk_err_soft = -err;
				goto out;
			}
		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}
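
	/* The ICMPV6_PKT_TOOBIG branch above is the TCPv6 side of path MTU
	 * discovery: tcp_sync_mss() shrinks the cached MSS to fit the new
	 * route MTU, and tcp_simple_retransmit() re-sends the in-flight
	 * segments that no longer fit, without treating the event as an
	 * ordinary congestion loss.
	 */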
	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:	/* Cannot happen.
				   It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int err = -1;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
	ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
	fl.fl6_flowlabel = 0;
	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
	fl.fl_ip_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, &fl);

	opt = np->opt;
	if (opt && opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto done;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);
	if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
		goto done;

	skb = tcp_make_synack(sk, dst, req);
	if (skb) {
		struct tcphdr *th = tcp_hdr(skb);

		th->check = tcp_v6_check(skb->len,
					 &treq->loc_addr, &treq->rmt_addr,
					 csum_partial(th, skb->len, skb->csum));

		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl, opt, 0);
		err = net_xmit_eval(err);
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	return err;
}
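
/*
 * tcp_v6_send_synack() also serves as the rtx_syn_ack handler in
 * tcp6_request_sock_ops below, so the same routine produces both the
 * initial SYN-ACK and the retransmissions driven by the request-sock timer.
 */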
static inline void syn_flood_warning(struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies)
		printk(KERN_INFO
		       "TCPv6: Possible SYN flooding on port %d. "
		       "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
	else
#endif
		printk(KERN_INFO
		       "TCPv6: Possible SYN flooding on port %d. "
		       "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
}
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet6_rsk(req)->pktopts);
}
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   struct in6_addr *addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	BUG_ON(tp == NULL);

	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
		return NULL;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
			return &tp->md5sig_info->keys6[i].base;
	}
	return NULL;
}
static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}
static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
			     char *newkey, u8 newkeylen)
{
	/* Add key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp6_md5sig_key *keys;

	key = tcp_v6_md5_do_lookup(sk, peer);
	if (key) {
		/* modify existing entry - just update that one */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		/* reallocate new list if current one is full. */
		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
						  GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		}
		if (tcp_alloc_md5sig_pool() == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
					(tp->md5sig_info->entries6 + 1)),
				       GFP_ATOMIC);
			if (!keys) {
				tcp_free_md5sig_pool();
				kfree(newkey);
				return -ENOMEM;
			}

			if (tp->md5sig_info->entries6)
				memmove(keys, tp->md5sig_info->keys6,
					(sizeof (tp->md5sig_info->keys6[0]) *
					 tp->md5sig_info->entries6));

			kfree(tp->md5sig_info->keys6);
			tp->md5sig_info->keys6 = keys;
			tp->md5sig_info->alloced6++;
		}

		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
			       peer);
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;

		tp->md5sig_info->entries6++;
	}
	return 0;
}
static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, __u8 newkeylen)
{
	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}
static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
			/* Free the key */
			kfree(tp->md5sig_info->keys6[i].base.key);
			tp->md5sig_info->entries6--;

			if (tp->md5sig_info->entries6 == 0) {
				kfree(tp->md5sig_info->keys6);
				tp->md5sig_info->keys6 = NULL;
				tp->md5sig_info->alloced6 = 0;
			} else {
				/* shrink the database */
				if (tp->md5sig_info->entries6 != i)
					memmove(&tp->md5sig_info->keys6[i],
						&tp->md5sig_info->keys6[i+1],
						(tp->md5sig_info->entries6 - i)
						* sizeof (tp->md5sig_info->keys6[0]));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}
static void tcp_v6_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (tp->md5sig_info->entries6) {
		for (i = 0; i < tp->md5sig_info->entries6; i++)
			kfree(tp->md5sig_info->keys6[i].base.key);
		tp->md5sig_info->entries6 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys6);
	tp->md5sig_info->keys6 = NULL;
	tp->md5sig_info->alloced6 = 0;

	if (tp->md5sig_info->entries4) {
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys4);
	tp->md5sig_info->keys4 = NULL;
	tp->md5sig_info->alloced4 = 0;
}
static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		tp->md5sig_info = p;
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
					 newkey, cmd.tcpm_keylen);
	}
	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
}
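
/*
 * Userspace reaches tcp_v6_parse_md5_keys() through setsockopt(TCP_MD5SIG).
 * A minimal sketch (values are illustrative only):
 *
 *	struct tcp_md5sig md5;
 *
 *	memset(&md5, 0, sizeof(md5));
 *	memcpy(&md5.tcpm_addr, &peer_sin6, sizeof(peer_sin6));
 *	md5.tcpm_keylen = 6;
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * A zero tcpm_keylen deletes the key for that peer, as handled above.
 */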
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					struct in6_addr *daddr,
					struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	ipv6_addr_copy(&bp->saddr, saddr);
	ipv6_addr_copy(&bp->daddr, daddr);
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
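
/*
 * The tcp6_pseudohdr filled in above follows the RFC 2460 upper-layer
 * checksum layout: 16-byte source address, 16-byte destination address,
 * a 32-bit upper-layer packet length, then 24 zero bits and the
 * next-header value (IPPROTO_TCP) packed into one word by cpu_to_be32().
 */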
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       struct in6_addr *daddr, struct in6_addr *saddr,
			       struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       struct sock *sk, struct request_sock *req,
			       struct sk_buff *skb)
{
	struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
{
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash %s for (%pI6, %u)->(%pI6, %u)\n",
			       genhash ? "failed" : "mismatch",
			       &ip6h->saddr, ntohs(th->source),
			       &ip6h->daddr, ntohs(th->dest));
		}
		return 1;
	}
	return 0;
}
#endif
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_send_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset
};

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
};
#endif

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	 = sizeof(struct tcp6_timewait_sock),
	.twsk_unique	 = tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len,
					     IPPROTO_TCP, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len,
					    IPPROTO_TCP,
					    csum_partial(th, th->doff << 2,
							 skb->csum));
	}
}
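
/*
 * In the CHECKSUM_PARTIAL case above, only the pseudo-header sum is written
 * to th->check; csum_start/csum_offset then tell the NIC (or the software
 * fallback) where to fold in the checksum over the TCP header and payload
 * at transmit time. The else branch computes the full checksum in software.
 */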
static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
				     IPPROTO_TCP, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;
	return 0;
}
static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	struct ipv6hdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

static int tcp6_gro_complete(struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

	return tcp_gro_complete(skb);
}
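
/*
 * tcp6_gro_complete() runs once the merged super-packet leaves GRO: it
 * rewrites th->check as a pseudo-header-only sum for the new total length
 * and marks the skb SKB_GSO_TCPV6 so it can be resegmented by GSO/TSO if
 * it is forwarded rather than delivered locally.
 */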
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 ts, struct tcp_md5sig_key *key, int rst)
{
	struct tcphdr *th = tcp_hdr(skb), *t1;
	struct sk_buff *buff;
	struct flowi fl;
	struct net *net = dev_net(skb->dst->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	__be32 *topt;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	buff->csum = csum_partial(t1, tot_len, 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    tot_len, IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;
	security_skb_classify_flow(skb, &fl);

	/* Pass a socket to ip6_dst_lookup even if it is for an RST;
	 * the underlying function will use it to retrieve the network
	 * namespace.
	 */
	if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) {
		if (xfrm_lookup(net, &buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
			if (rst)
				TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
			return;
		}
	}

	kfree_skb(buff);
}
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	if (sk)
		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
}
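
/*
 * The seq/ack choice above follows RFC 793's reset generation rules: echo
 * the incoming ACK number as our sequence when the segment had ACK set,
 * otherwise ACK everything the segment occupied (SYN and FIN each count
 * for one) so the peer accepts the RST.
 */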
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
			    u32 ts, struct tcp_md5sig_key *key)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
}
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));

	inet_twsk_put(tw);
}
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
}
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
					 &ipv6_hdr(skb)->saddr, th->source,
					 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
					 inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_options_received tmp_opt;
	struct tcp_sock *tp = tcp_sk(sk);
	struct request_sock *req = NULL;
	__u32 isn = TCP_SKB_CB(skb)->when;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0
#endif

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			syn_flood_warning(skb);
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies)
			want_cookie = 1;
		else
#endif
		goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
			    sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
	if (!want_cookie)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (want_cookie) {
		isn = cookie_v6_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}
		treq->iif = sk->sk_bound_dev_if;

		/* So that link locals have meaning */
		if (!sk->sk_bound_dev_if &&
		    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
			treq->iif = inet6_iif(skb);

		isn = tcp_v6_init_sequence(skb);
	}

	tcp_rsk(req)->snt_isn = isn;

	security_inet_conn_request(sk, skb, req);

	if (tcp_v6_send_synack(sk, req))
		goto drop;

	if (!want_cookie) {
		inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
		return 0;
	}

drop:
	if (req)
		reqsk_free(req);

	return 0; /* don't send reset */
}
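
/*
 * When the SYN backlog overflows and syncookies are enabled, no request
 * sock state is kept: the ISN computed by cookie_v6_init_sequence() encodes
 * the connection parameters, and cookie_v6_check() (see tcp_v6_hnd_req())
 * reconstructs the request from the returning ACK.
 */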
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */
		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
			      newinet->daddr);

		ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
			      newinet->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}
	treq = inet6_rsk(req);
	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (dst == NULL) {
		struct in6_addr *final_p = NULL, final;
		struct flowi fl;

		memset(&fl, 0, sizeof(fl));
		fl.proto = IPPROTO_TCP;
		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}
		ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
		fl.oif = sk->sk_bound_dev_if;
		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
		fl.fl_ip_sport = inet_rsk(req)->loc_port;
		security_req_classify_flow(req, &fl);

		if (ip6_dst_lookup(sk, &dst, &fl))
			goto out;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
			goto out;
	}
	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->opt = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much more clever,
	   but we do one more thing here: reattach optmem
	   to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

	newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr,
					  newkey, key->keylen);
	}
#endif

	__inet6_hash(newsk);
	__inet_inherit_port(sk, newsk);

	return newsk;
out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return NULL;
}
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but that is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code where we
	   may make it not affect IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, the whole idea behind IPV6_PKTOPTIONS
	   looks not very well thought out. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just short-circuit this and continue with
		 * the new socket.
		 */
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;

ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
static int tcp_v6_rcv(struct sk_buff *skb)
{
	struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);
	sock_put(sk);

	return ret ? -1 : 0;
no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	/*
	 *	Discard the frame.
	 */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;
do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static int tcp_v6_remember_stamp(struct sock *sk)
{
	/* Alas, not yet... */
	return 0;
}
static struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v6_remember_stamp,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	= tcp_v6_md5_lookup,
	.calc_md5_hash	= tcp_v6_md5_hash_skb,
	.md5_add	= tcp_v6_md5_add_func,
	.md5_parse	= tcp_v6_parse_md5_keys,
};
#endif
/*
 *	TCP over IPv4 via INET6 API
 */
static struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v4_remember_stamp,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_add	= tcp_v6_md5_add_func,
	.md5_parse	= tcp_v6_parse_md5_keys,
};
#endif
/* NOTE: A lot of things are set to zero explicitly by the call to
 *	 sk_alloc(), so they need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for a discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = 0x7fffffff;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = 536;

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

	icsk->icsk_af_ops = &ipv6_specific;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
	icsk->icsk_sync_mss = tcp_sync_mss;
	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv6_specific;
#endif

	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	percpu_counter_inc(&tcp_sockets_allocated);

	return 0;
}
static void tcp_v6_destroy_sock(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list */
	if (tcp_sk(sk)->md5sig_info)
		tcp_v6_clear_md5_list(sk);
#endif
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 struct sock *sk, struct request_sock *req,
			 int i, int uid)
{
	int ttd = req->expires - jiffies;
	struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_rsk(req)->loc_port),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,  /* non standard timer */
		   0,  /* open_requests have no inode */
		   0, req);
}
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	struct inet_sock *inet = inet_sk(sp);
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->dport);
	srcp  = ntohs(inet->sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq - tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ?
			sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh
		   );
}
*seq
,
1973 struct inet_timewait_sock
*tw
, int i
)
1975 struct in6_addr
*dest
, *src
;
1977 struct inet6_timewait_sock
*tw6
= inet6_twsk((struct sock
*)tw
);
1978 int ttd
= tw
->tw_ttd
- jiffies
;
1983 dest
= &tw6
->tw_v6_daddr
;
1984 src
= &tw6
->tw_v6_rcv_saddr
;
1985 destp
= ntohs(tw
->tw_dport
);
1986 srcp
= ntohs(tw
->tw_sport
);
1989 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1990 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
1992 src
->s6_addr32
[0], src
->s6_addr32
[1],
1993 src
->s6_addr32
[2], src
->s6_addr32
[3], srcp
,
1994 dest
->s6_addr32
[0], dest
->s6_addr32
[1],
1995 dest
->s6_addr32
[2], dest
->s6_addr32
[3], destp
,
1996 tw
->tw_substate
, 0, 0,
1997 3, jiffies_to_clock_t(ttd
), 0, 0, 0, 0,
1998 atomic_read(&tw
->tw_refcnt
), tw
);
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= {
		.owner		= THIS_MODULE,
	},
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
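
/*
 * The seq_file above backs /proc/net/tcp6; a quick way to eyeball it
 * (hypothetical shell session):
 *
 *	$ cat /proc/net/tcp6
 *	  sl  local_address ... st tx_queue rx_queue tr tm->when retrnsmt ...
 *
 * Each row is produced by get_tcp6_sock(), get_openreq6() or
 * get_timewait6_sock() depending on the iterator state.
 */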
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};
static struct inet6_protocol tcpv6_protocol = {
	.handler	= tcp_v6_rcv,
	.err_handler	= tcp_v6_err,
	.gso_send_check	= tcp_v6_gso_send_check,
	.gso_segment	= tcp_tso_segment,
	.gro_receive	= tcp6_gro_receive,
	.gro_complete	= tcp6_gro_complete,
	.flags		= INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};
static struct inet_protosw tcpv6_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_TCP,
	.prot		= &tcpv6_prot,
	.ops		= &inet6_stream_ops,
	.capability	= -1,
	.no_check	= 0,
	.flags		= INET_PROTOSW_PERMANENT |
			  INET_PROTOSW_ICSK,
};
static int tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
	inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init = tcpv6_net_init,
	.exit = tcpv6_net_exit,
};
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
	goto out_tcpv6_protocol;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}