[mirror_ubuntu-zesty-kernel.git] net/ipv6/tcp_ipv6.c
1da177e4
LT
1/*
2 * TCP over IPv6
1ab1457c 3 * Linux INET6 implementation
1da177e4
LT
4 *
5 * Authors:
1ab1457c 6 * Pedro Roque <roque@di.fc.ul.pt>
1da177e4 7 *
1ab1457c 8 * Based on:
1da177e4
LT
9 * linux/net/ipv4/tcp.c
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
12 *
13 * Fixes:
14 * Hideaki YOSHIFUJI : sin6_scope_id support
15 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
16 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
17 * a single port at the same time.
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
eb4dea58 26#include <linux/bottom_half.h>
1da177e4 27#include <linux/module.h>
1da177e4
LT
28#include <linux/errno.h>
29#include <linux/types.h>
30#include <linux/socket.h>
31#include <linux/sockios.h>
32#include <linux/net.h>
33#include <linux/jiffies.h>
34#include <linux/in.h>
35#include <linux/in6.h>
36#include <linux/netdevice.h>
37#include <linux/init.h>
38#include <linux/jhash.h>
39#include <linux/ipsec.h>
40#include <linux/times.h>
5a0e3ad6 41#include <linux/slab.h>
4aa956d8 42#include <linux/uaccess.h>
1da177e4
LT
43#include <linux/ipv6.h>
44#include <linux/icmpv6.h>
45#include <linux/random.h>
46
47#include <net/tcp.h>
48#include <net/ndisc.h>
5324a040 49#include <net/inet6_hashtables.h>
8129765a 50#include <net/inet6_connection_sock.h>
1da177e4
LT
51#include <net/ipv6.h>
52#include <net/transp_v6.h>
53#include <net/addrconf.h>
54#include <net/ip6_route.h>
55#include <net/ip6_checksum.h>
56#include <net/inet_ecn.h>
57#include <net/protocol.h>
58#include <net/xfrm.h>
1da177e4
LT
59#include <net/snmp.h>
60#include <net/dsfield.h>
6d6ee43e 61#include <net/timewait_sock.h>
3d58b5fa 62#include <net/inet_common.h>
6e5714ea 63#include <net/secure_seq.h>
076bb0c8 64#include <net/busy_poll.h>
1da177e4 65
1da177e4
LT
66#include <linux/proc_fs.h>
67#include <linux/seq_file.h>
68
cf80e0e4 69#include <crypto/hash.h>
cfb6eeb4
YH
70#include <linux/scatterlist.h>
71
a00e7444
ED
72static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
73static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
6edafaaf 74 struct request_sock *req);
1da177e4
LT
75
76static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
1da177e4 77
3b401a81
SH
78static const struct inet_connection_sock_af_ops ipv6_mapped;
79static const struct inet_connection_sock_af_ops ipv6_specific;
a928630a 80#ifdef CONFIG_TCP_MD5SIG
b2e4b3de
SH
81static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
82static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
9501f972 83#else
51723935 84static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
b71d1d42 85 const struct in6_addr *addr)
9501f972
YH
86{
87 return NULL;
88}
a928630a 89#endif
1da177e4 90
fae6ef87
NC
91static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
92{
93 struct dst_entry *dst = skb_dst(skb);
fae6ef87 94
5037e9ef 95 if (dst && dst_hold_safe(dst)) {
ca777eff
ED
96 const struct rt6_info *rt = (const struct rt6_info *)dst;
97
ca777eff
ED
98 sk->sk_rx_dst = dst;
99 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
b197df4f 100 inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
ca777eff 101 }
fae6ef87
NC
102}
103
95a22cae 104static u32 tcp_v6_init_sequence(const struct sk_buff *skb, u32 *tsoff)
1da177e4 105{
0660e03f
ACM
106 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
107 ipv6_hdr(skb)->saddr.s6_addr32,
aa8223c7 108 tcp_hdr(skb)->dest,
95a22cae 109 tcp_hdr(skb)->source, tsoff);
1da177e4
LT
110}
111
1ab1457c 112static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
1da177e4
LT
113 int addr_len)
114{
115 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
1ab1457c 116 struct inet_sock *inet = inet_sk(sk);
d83d8461 117 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
118 struct ipv6_pinfo *np = inet6_sk(sk);
119 struct tcp_sock *tp = tcp_sk(sk);
20c59de2 120 struct in6_addr *saddr = NULL, *final_p, final;
45f6fad8 121 struct ipv6_txoptions *opt;
4c9483b2 122 struct flowi6 fl6;
1da177e4
LT
123 struct dst_entry *dst;
124 int addr_type;
125 int err;
126
1ab1457c 127 if (addr_len < SIN6_LEN_RFC2133)
1da177e4
LT
128 return -EINVAL;
129
1ab1457c 130 if (usin->sin6_family != AF_INET6)
a02cec21 131 return -EAFNOSUPPORT;
1da177e4 132
4c9483b2 133 memset(&fl6, 0, sizeof(fl6));
1da177e4
LT
134
135 if (np->sndflow) {
4c9483b2
DM
136 fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
137 IP6_ECN_flow_init(fl6.flowlabel);
138 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
1da177e4 139 struct ip6_flowlabel *flowlabel;
4c9483b2 140 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
63159f29 141 if (!flowlabel)
1da177e4 142 return -EINVAL;
1da177e4
LT
143 fl6_sock_release(flowlabel);
144 }
145 }
146
147 /*
1ab1457c
YH
148 * connect() to INADDR_ANY means loopback (BSD'ism).
149 */
150
052d2369
JL
151 if (ipv6_addr_any(&usin->sin6_addr)) {
152 if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
153 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
154 &usin->sin6_addr);
155 else
156 usin->sin6_addr = in6addr_loopback;
157 }
1da177e4
LT
158
159 addr_type = ipv6_addr_type(&usin->sin6_addr);
160
4c99aa40 161 if (addr_type & IPV6_ADDR_MULTICAST)
1da177e4
LT
162 return -ENETUNREACH;
163
164 if (addr_type&IPV6_ADDR_LINKLOCAL) {
165 if (addr_len >= sizeof(struct sockaddr_in6) &&
166 usin->sin6_scope_id) {
167 /* If interface is set while binding, indices
168 * must coincide.
169 */
170 if (sk->sk_bound_dev_if &&
171 sk->sk_bound_dev_if != usin->sin6_scope_id)
172 return -EINVAL;
173
174 sk->sk_bound_dev_if = usin->sin6_scope_id;
175 }
176
 177 /* Connecting to a link-local address requires an interface */
178 if (!sk->sk_bound_dev_if)
179 return -EINVAL;
180 }
181
182 if (tp->rx_opt.ts_recent_stamp &&
efe4208f 183 !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
1da177e4
LT
184 tp->rx_opt.ts_recent = 0;
185 tp->rx_opt.ts_recent_stamp = 0;
186 tp->write_seq = 0;
187 }
188
efe4208f 189 sk->sk_v6_daddr = usin->sin6_addr;
4c9483b2 190 np->flow_label = fl6.flowlabel;
1da177e4
LT
191
192 /*
193 * TCP over IPv4
194 */
195
052d2369 196 if (addr_type & IPV6_ADDR_MAPPED) {
d83d8461 197 u32 exthdrlen = icsk->icsk_ext_hdr_len;
1da177e4
LT
198 struct sockaddr_in sin;
199
200 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
201
202 if (__ipv6_only_sock(sk))
203 return -ENETUNREACH;
204
205 sin.sin_family = AF_INET;
206 sin.sin_port = usin->sin6_port;
207 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
208
d83d8461 209 icsk->icsk_af_ops = &ipv6_mapped;
1da177e4 210 sk->sk_backlog_rcv = tcp_v4_do_rcv;
cfb6eeb4
YH
211#ifdef CONFIG_TCP_MD5SIG
212 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
213#endif
1da177e4
LT
214
215 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
216
217 if (err) {
d83d8461
ACM
218 icsk->icsk_ext_hdr_len = exthdrlen;
219 icsk->icsk_af_ops = &ipv6_specific;
1da177e4 220 sk->sk_backlog_rcv = tcp_v6_do_rcv;
cfb6eeb4
YH
221#ifdef CONFIG_TCP_MD5SIG
222 tp->af_specific = &tcp_sock_ipv6_specific;
223#endif
1da177e4 224 goto failure;
1da177e4 225 }
d1e559d0 226 np->saddr = sk->sk_v6_rcv_saddr;
1da177e4
LT
227
228 return err;
229 }
230
efe4208f
ED
231 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
232 saddr = &sk->sk_v6_rcv_saddr;
1da177e4 233
4c9483b2 234 fl6.flowi6_proto = IPPROTO_TCP;
efe4208f 235 fl6.daddr = sk->sk_v6_daddr;
4e3fd7a0 236 fl6.saddr = saddr ? *saddr : np->saddr;
4c9483b2
DM
237 fl6.flowi6_oif = sk->sk_bound_dev_if;
238 fl6.flowi6_mark = sk->sk_mark;
1958b856
DM
239 fl6.fl6_dport = usin->sin6_port;
240 fl6.fl6_sport = inet->inet_sport;
e2d118a1 241 fl6.flowi6_uid = sk->sk_uid;
1da177e4 242
1e1d04e6 243 opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
45f6fad8 244 final_p = fl6_update_dst(&fl6, opt, &final);
1da177e4 245
4c9483b2 246 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
beb8d13b 247
0e0d44ab 248 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
68d0c6d3
DM
249 if (IS_ERR(dst)) {
250 err = PTR_ERR(dst);
1da177e4 251 goto failure;
14e50e57 252 }
1da177e4 253
63159f29 254 if (!saddr) {
4c9483b2 255 saddr = &fl6.saddr;
efe4208f 256 sk->sk_v6_rcv_saddr = *saddr;
1da177e4
LT
257 }
258
259 /* set the source address */
4e3fd7a0 260 np->saddr = *saddr;
c720c7e8 261 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
1da177e4 262
f83ef8c0 263 sk->sk_gso_type = SKB_GSO_TCPV6;
6bd4f355 264 ip6_dst_store(sk, dst, NULL, NULL);
1da177e4 265
493f377d
DM
266 if (tcp_death_row.sysctl_tw_recycle &&
267 !tp->rx_opt.ts_recent_stamp &&
fd0273d7 268 ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
81166dd6 269 tcp_fetch_timewait_stamp(sk, dst);
493f377d 270
d83d8461 271 icsk->icsk_ext_hdr_len = 0;
45f6fad8
ED
272 if (opt)
273 icsk->icsk_ext_hdr_len = opt->opt_flen +
274 opt->opt_nflen;
1da177e4
LT
275
276 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
277
c720c7e8 278 inet->inet_dport = usin->sin6_port;
1da177e4
LT
279
280 tcp_set_state(sk, TCP_SYN_SENT);
d8313f5c 281 err = inet6_hash_connect(&tcp_death_row, sk);
1da177e4
LT
282 if (err)
283 goto late_failure;
284
877d1f62 285 sk_set_txhash(sk);
9e7ceb06 286
2b916477 287 if (!tp->write_seq && likely(!tp->repair))
1da177e4 288 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
efe4208f 289 sk->sk_v6_daddr.s6_addr32,
c720c7e8 290 inet->inet_sport,
95a22cae
FW
291 inet->inet_dport,
292 &tp->tsoffset);
1da177e4
LT
293
294 err = tcp_connect(sk);
295 if (err)
296 goto late_failure;
297
298 return 0;
299
300late_failure:
301 tcp_set_state(sk, TCP_CLOSE);
302 __sk_dst_reset(sk);
303failure:
c720c7e8 304 inet->inet_dport = 0;
1da177e4
LT
305 sk->sk_route_caps = 0;
306 return err;
307}
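
/*
 * Illustrative userspace sketch (descriptor, address and port below are
 * assumptions, not taken from this code): connect()ing an AF_INET6 socket
 * to a v4-mapped address such as ::ffff:192.0.2.1 takes the
 * IPV6_ADDR_MAPPED branch above, so the connection is handed to
 * tcp_v4_connect() and icsk_af_ops is switched to ipv6_mapped.
 *
 *	int fd = socket(AF_INET6, SOCK_STREAM, 0);
 *	struct sockaddr_in6 sa = { .sin6_family = AF_INET6,
 *				   .sin6_port = htons(80) };
 *
 *	inet_pton(AF_INET6, "::ffff:192.0.2.1", &sa.sin6_addr);
 *	connect(fd, (struct sockaddr *)&sa, sizeof(sa));
 */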
308
563d34d0
ED
309static void tcp_v6_mtu_reduced(struct sock *sk)
310{
311 struct dst_entry *dst;
312
313 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
314 return;
315
316 dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
317 if (!dst)
318 return;
319
320 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
321 tcp_sync_mss(sk, dst_mtu(dst));
322 tcp_simple_retransmit(sk);
323 }
324}
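
/*
 * Worked example (numbers assumed): if icsk_pmtu_cookie is 1500 and an
 * ICMPV6_PKT_TOOBIG reports an MTU of 1280, tcp_sync_mss() brings the MSS
 * down to roughly 1280 - 40 (IPv6 header) - 20 (TCP header) = 1220 bytes
 * (less any extension headers and TCP options), and
 * tcp_simple_retransmit() resends the queued data in smaller segments.
 */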
325
1da177e4 326static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
d5fdd6ba 327 u8 type, u8 code, int offset, __be32 info)
1da177e4 328{
4c99aa40 329 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
505cbfc5 330 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
2215089b
ED
331 struct net *net = dev_net(skb->dev);
332 struct request_sock *fastopen;
1da177e4 333 struct ipv6_pinfo *np;
1ab1457c 334 struct tcp_sock *tp;
0a672f74 335 __u32 seq, snd_una;
2215089b 336 struct sock *sk;
9cf74903 337 bool fatal;
2215089b 338 int err;
1da177e4 339
2215089b
ED
340 sk = __inet6_lookup_established(net, &tcp_hashinfo,
341 &hdr->daddr, th->dest,
342 &hdr->saddr, ntohs(th->source),
343 skb->dev->ifindex);
1da177e4 344
2215089b 345 if (!sk) {
a16292a0
ED
346 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
347 ICMP6_MIB_INERRORS);
1da177e4
LT
348 return;
349 }
350
351 if (sk->sk_state == TCP_TIME_WAIT) {
9469c7b4 352 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
353 return;
354 }
2215089b 355 seq = ntohl(th->seq);
9cf74903 356 fatal = icmpv6_err_convert(type, code, &err);
2215089b 357 if (sk->sk_state == TCP_NEW_SYN_RECV)
9cf74903 358 return tcp_req_err(sk, seq, fatal);
1da177e4
LT
359
360 bh_lock_sock(sk);
563d34d0 361 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
02a1d6e7 362 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
1da177e4
LT
363
364 if (sk->sk_state == TCP_CLOSE)
365 goto out;
366
e802af9c 367 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
02a1d6e7 368 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
e802af9c
SH
369 goto out;
370 }
371
1da177e4 372 tp = tcp_sk(sk);
0a672f74
YC
 373 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
374 fastopen = tp->fastopen_rsk;
375 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
1da177e4 376 if (sk->sk_state != TCP_LISTEN &&
0a672f74 377 !between(seq, snd_una, tp->snd_nxt)) {
02a1d6e7 378 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
1da177e4
LT
379 goto out;
380 }
381
382 np = inet6_sk(sk);
383
ec18d9a2
DM
384 if (type == NDISC_REDIRECT) {
385 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
386
1ed5c48f 387 if (dst)
6700c270 388 dst->ops->redirect(dst, sk, skb);
50a75a89 389 goto out;
ec18d9a2
DM
390 }
391
1da177e4 392 if (type == ICMPV6_PKT_TOOBIG) {
0d4f0608
ED
393 /* We are not interested in TCP_LISTEN and open_requests
 394 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
395 * they should go through unfragmented).
396 */
397 if (sk->sk_state == TCP_LISTEN)
398 goto out;
399
93b36cf3
HFS
400 if (!ip6_sk_accept_pmtu(sk))
401 goto out;
402
563d34d0
ED
403 tp->mtu_info = ntohl(info);
404 if (!sock_owned_by_user(sk))
405 tcp_v6_mtu_reduced(sk);
d013ef2a 406 else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
7aa5470c 407 &sk->sk_tsq_flags))
d013ef2a 408 sock_hold(sk);
1da177e4
LT
409 goto out;
410 }
411
1da177e4 412
60236fdd 413 /* Might be for an request_sock */
1da177e4 414 switch (sk->sk_state) {
1da177e4 415 case TCP_SYN_SENT:
0a672f74
YC
416 case TCP_SYN_RECV:
 417 /* Only in fast or simultaneous open. If a fast open socket is
 418 * already accepted, it is treated as a connected one below.
419 */
63159f29 420 if (fastopen && !fastopen->sk)
0a672f74
YC
421 break;
422
1da177e4 423 if (!sock_owned_by_user(sk)) {
1da177e4
LT
424 sk->sk_err = err;
425 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
426
427 tcp_done(sk);
428 } else
429 sk->sk_err_soft = err;
430 goto out;
431 }
432
433 if (!sock_owned_by_user(sk) && np->recverr) {
434 sk->sk_err = err;
435 sk->sk_error_report(sk);
436 } else
437 sk->sk_err_soft = err;
438
439out:
440 bh_unlock_sock(sk);
441 sock_put(sk);
442}
443
444
0f935dbe 445static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
d6274bd8 446 struct flowi *fl,
3840a06e 447 struct request_sock *req,
ca6fb065 448 struct tcp_fastopen_cookie *foc,
b3d05147 449 enum tcp_synack_type synack_type)
1da177e4 450{
634fb979 451 struct inet_request_sock *ireq = inet_rsk(req);
1da177e4 452 struct ipv6_pinfo *np = inet6_sk(sk);
56ac42bc 453 struct ipv6_txoptions *opt;
d6274bd8 454 struct flowi6 *fl6 = &fl->u.ip6;
4c99aa40 455 struct sk_buff *skb;
9494218f 456 int err = -ENOMEM;
1da177e4 457
9f10d3f6 458 /* First, grab a route. */
f76b33c3
ED
459 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
460 IPPROTO_TCP)) == NULL)
fd80eb94 461 goto done;
9494218f 462
b3d05147 463 skb = tcp_make_synack(sk, dst, req, foc, synack_type);
9494218f 464
1da177e4 465 if (skb) {
634fb979
ED
466 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
467 &ireq->ir_v6_rmt_addr);
1da177e4 468
634fb979 469 fl6->daddr = ireq->ir_v6_rmt_addr;
53b24b8f 470 if (np->repflow && ireq->pktopts)
df3687ff
FF
471 fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
472
3e4006f0 473 rcu_read_lock();
56ac42bc
HD
474 opt = ireq->ipv6_opt;
475 if (!opt)
476 opt = rcu_dereference(np->opt);
92e55f41 477 err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
3e4006f0 478 rcu_read_unlock();
b9df3cb8 479 err = net_xmit_eval(err);
1da177e4
LT
480 }
481
482done:
1da177e4
LT
483 return err;
484}
485
72659ecc 486
60236fdd 487static void tcp_v6_reqsk_destructor(struct request_sock *req)
1da177e4 488{
56ac42bc 489 kfree(inet_rsk(req)->ipv6_opt);
634fb979 490 kfree_skb(inet_rsk(req)->pktopts);
1da177e4
LT
491}
492
cfb6eeb4 493#ifdef CONFIG_TCP_MD5SIG
b83e3deb 494static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
b71d1d42 495 const struct in6_addr *addr)
cfb6eeb4 496{
a915da9b 497 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
cfb6eeb4
YH
498}
499
b83e3deb 500static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
fd3a154a 501 const struct sock *addr_sk)
cfb6eeb4 502{
efe4208f 503 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
cfb6eeb4
YH
504}
505
4aa956d8
WY
506static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
507 int optlen)
cfb6eeb4
YH
508{
509 struct tcp_md5sig cmd;
510 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
cfb6eeb4
YH
511
512 if (optlen < sizeof(cmd))
513 return -EINVAL;
514
515 if (copy_from_user(&cmd, optval, sizeof(cmd)))
516 return -EFAULT;
517
518 if (sin6->sin6_family != AF_INET6)
519 return -EINVAL;
520
521 if (!cmd.tcpm_keylen) {
e773e4fa 522 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
a915da9b
ED
523 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
524 AF_INET);
525 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
526 AF_INET6);
cfb6eeb4
YH
527 }
528
529 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
530 return -EINVAL;
531
a915da9b
ED
532 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
533 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
534 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
cfb6eeb4 535
a915da9b
ED
536 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
537 AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
cfb6eeb4
YH
538}
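
/*
 * Illustrative sketch of how this parser is reached from userspace (the
 * descriptor, peer address and key below are assumptions):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in6 *peer = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *
 *	peer->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &peer->sin6_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */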
539
19689e38
ED
540static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
541 const struct in6_addr *daddr,
542 const struct in6_addr *saddr,
543 const struct tcphdr *th, int nbytes)
cfb6eeb4 544{
cfb6eeb4 545 struct tcp6_pseudohdr *bp;
49a72dfb 546 struct scatterlist sg;
19689e38 547 struct tcphdr *_th;
8d26d76d 548
19689e38 549 bp = hp->scratch;
cfb6eeb4 550 /* 1. TCP pseudo-header (RFC2460) */
4e3fd7a0
AD
551 bp->saddr = *saddr;
552 bp->daddr = *daddr;
49a72dfb 553 bp->protocol = cpu_to_be32(IPPROTO_TCP);
00b1304c 554 bp->len = cpu_to_be32(nbytes);
cfb6eeb4 555
19689e38
ED
556 _th = (struct tcphdr *)(bp + 1);
557 memcpy(_th, th, sizeof(*th));
558 _th->check = 0;
559
560 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
561 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
562 sizeof(*bp) + sizeof(*th));
cf80e0e4 563 return crypto_ahash_update(hp->md5_req);
49a72dfb 564}
c7da57a1 565
19689e38 566static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
b71d1d42 567 const struct in6_addr *daddr, struct in6_addr *saddr,
318cf7aa 568 const struct tcphdr *th)
49a72dfb
AL
569{
570 struct tcp_md5sig_pool *hp;
cf80e0e4 571 struct ahash_request *req;
49a72dfb
AL
572
573 hp = tcp_get_md5sig_pool();
574 if (!hp)
575 goto clear_hash_noput;
cf80e0e4 576 req = hp->md5_req;
49a72dfb 577
cf80e0e4 578 if (crypto_ahash_init(req))
49a72dfb 579 goto clear_hash;
19689e38 580 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
49a72dfb
AL
581 goto clear_hash;
582 if (tcp_md5_hash_key(hp, key))
583 goto clear_hash;
cf80e0e4
HX
584 ahash_request_set_crypt(req, NULL, md5_hash, 0);
585 if (crypto_ahash_final(req))
cfb6eeb4 586 goto clear_hash;
cfb6eeb4 587
cfb6eeb4 588 tcp_put_md5sig_pool();
cfb6eeb4 589 return 0;
49a72dfb 590
cfb6eeb4
YH
591clear_hash:
592 tcp_put_md5sig_pool();
593clear_hash_noput:
594 memset(md5_hash, 0, 16);
49a72dfb 595 return 1;
cfb6eeb4
YH
596}
597
39f8e58e
ED
598static int tcp_v6_md5_hash_skb(char *md5_hash,
599 const struct tcp_md5sig_key *key,
318cf7aa 600 const struct sock *sk,
318cf7aa 601 const struct sk_buff *skb)
cfb6eeb4 602{
b71d1d42 603 const struct in6_addr *saddr, *daddr;
49a72dfb 604 struct tcp_md5sig_pool *hp;
cf80e0e4 605 struct ahash_request *req;
318cf7aa 606 const struct tcphdr *th = tcp_hdr(skb);
cfb6eeb4 607
39f8e58e
ED
608 if (sk) { /* valid for establish/request sockets */
609 saddr = &sk->sk_v6_rcv_saddr;
efe4208f 610 daddr = &sk->sk_v6_daddr;
49a72dfb 611 } else {
b71d1d42 612 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
49a72dfb
AL
613 saddr = &ip6h->saddr;
614 daddr = &ip6h->daddr;
cfb6eeb4 615 }
49a72dfb
AL
616
617 hp = tcp_get_md5sig_pool();
618 if (!hp)
619 goto clear_hash_noput;
cf80e0e4 620 req = hp->md5_req;
49a72dfb 621
cf80e0e4 622 if (crypto_ahash_init(req))
49a72dfb
AL
623 goto clear_hash;
624
19689e38 625 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
49a72dfb
AL
626 goto clear_hash;
627 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
628 goto clear_hash;
629 if (tcp_md5_hash_key(hp, key))
630 goto clear_hash;
cf80e0e4
HX
631 ahash_request_set_crypt(req, NULL, md5_hash, 0);
632 if (crypto_ahash_final(req))
49a72dfb
AL
633 goto clear_hash;
634
635 tcp_put_md5sig_pool();
636 return 0;
637
638clear_hash:
639 tcp_put_md5sig_pool();
640clear_hash_noput:
641 memset(md5_hash, 0, 16);
642 return 1;
cfb6eeb4
YH
643}
644
ba8e275a
ED
645#endif
646
647static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
648 const struct sk_buff *skb)
cfb6eeb4 649{
ba8e275a 650#ifdef CONFIG_TCP_MD5SIG
cf533ea5 651 const __u8 *hash_location = NULL;
cfb6eeb4 652 struct tcp_md5sig_key *hash_expected;
b71d1d42 653 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
318cf7aa 654 const struct tcphdr *th = tcp_hdr(skb);
cfb6eeb4 655 int genhash;
cfb6eeb4
YH
656 u8 newhash[16];
657
658 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
7d5d5525 659 hash_location = tcp_parse_md5sig_option(th);
cfb6eeb4 660
785957d3
DM
661 /* We've parsed the options - do we have a hash? */
662 if (!hash_expected && !hash_location)
ff74e23f 663 return false;
785957d3
DM
664
665 if (hash_expected && !hash_location) {
c10d9310 666 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
ff74e23f 667 return true;
cfb6eeb4
YH
668 }
669
785957d3 670 if (!hash_expected && hash_location) {
c10d9310 671 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
ff74e23f 672 return true;
cfb6eeb4
YH
673 }
674
675 /* check the signature */
49a72dfb
AL
676 genhash = tcp_v6_md5_hash_skb(newhash,
677 hash_expected,
39f8e58e 678 NULL, skb);
49a72dfb 679
cfb6eeb4 680 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
72145a68 681 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
e87cc472
JP
682 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
683 genhash ? "failed" : "mismatch",
684 &ip6h->saddr, ntohs(th->source),
685 &ip6h->daddr, ntohs(th->dest));
ff74e23f 686 return true;
cfb6eeb4 687 }
ba8e275a 688#endif
ff74e23f 689 return false;
cfb6eeb4 690}
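
/*
 * Summary of the checks above (drop == return true):
 *
 *	expected key | MD5 option present | verdict
 *	-------------+--------------------+--------------------------------
 *	     no      |        no          | accept
 *	     yes     |        no          | drop (TCPMD5NOTFOUND)
 *	     no      |        yes         | drop (TCPMD5UNEXPECTED)
 *	     yes     |        yes         | accept only if the hash matches
 */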
cfb6eeb4 691
b40cf18e
ED
692static void tcp_v6_init_req(struct request_sock *req,
693 const struct sock *sk_listener,
16bea70a
OP
694 struct sk_buff *skb)
695{
696 struct inet_request_sock *ireq = inet_rsk(req);
b40cf18e 697 const struct ipv6_pinfo *np = inet6_sk(sk_listener);
16bea70a
OP
698
699 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
700 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
701
16bea70a 702 /* So that link locals have meaning */
b40cf18e 703 if (!sk_listener->sk_bound_dev_if &&
16bea70a 704 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
870c3151 705 ireq->ir_iif = tcp_v6_iif(skb);
16bea70a 706
04317daf 707 if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
b40cf18e 708 (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
a224772d 709 np->rxopt.bits.rxinfo ||
16bea70a
OP
710 np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
711 np->rxopt.bits.rxohlim || np->repflow)) {
712 atomic_inc(&skb->users);
713 ireq->pktopts = skb;
714 }
715}
716
f964629e
ED
717static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
718 struct flowi *fl,
d94e0417
OP
719 const struct request_sock *req,
720 bool *strict)
721{
722 if (strict)
723 *strict = true;
f76b33c3 724 return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
d94e0417
OP
725}
726
c6aefafb 727struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
1da177e4 728 .family = AF_INET6,
2e6599cb 729 .obj_size = sizeof(struct tcp6_request_sock),
5db92c99 730 .rtx_syn_ack = tcp_rtx_synack,
60236fdd
ACM
731 .send_ack = tcp_v6_reqsk_send_ack,
732 .destructor = tcp_v6_reqsk_destructor,
72659ecc 733 .send_reset = tcp_v6_send_reset,
4aa956d8 734 .syn_ack_timeout = tcp_syn_ack_timeout,
1da177e4
LT
735};
736
b2e4b3de 737static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
2aec4a29
OP
738 .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
739 sizeof(struct ipv6hdr),
16bea70a 740#ifdef CONFIG_TCP_MD5SIG
fd3a154a 741 .req_md5_lookup = tcp_v6_md5_lookup,
e3afe7b7 742 .calc_md5_hash = tcp_v6_md5_hash_skb,
b6332e6c 743#endif
16bea70a 744 .init_req = tcp_v6_init_req,
fb7b37a7
OP
745#ifdef CONFIG_SYN_COOKIES
746 .cookie_init_seq = cookie_v6_init_sequence,
747#endif
d94e0417 748 .route_req = tcp_v6_route_req,
936b8bdb 749 .init_seq = tcp_v6_init_sequence,
d6274bd8 750 .send_synack = tcp_v6_send_synack,
16bea70a 751};
cfb6eeb4 752
a00e7444 753static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
0f85feae
ED
754 u32 ack, u32 win, u32 tsval, u32 tsecr,
755 int oif, struct tcp_md5sig_key *key, int rst,
5119bd16 756 u8 tclass, __be32 label)
1da177e4 757{
cf533ea5
ED
758 const struct tcphdr *th = tcp_hdr(skb);
759 struct tcphdr *t1;
1da177e4 760 struct sk_buff *buff;
4c9483b2 761 struct flowi6 fl6;
0f85feae 762 struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
e5047992 763 struct sock *ctl_sk = net->ipv6.tcp_sk;
77c676da 764 unsigned int tot_len = sizeof(struct tcphdr);
adf30907 765 struct dst_entry *dst;
81ada62d 766 __be32 *topt;
1da177e4 767
ee684b6f 768 if (tsecr)
626e264d 769 tot_len += TCPOLEN_TSTAMP_ALIGNED;
cfb6eeb4 770#ifdef CONFIG_TCP_MD5SIG
cfb6eeb4
YH
771 if (key)
772 tot_len += TCPOLEN_MD5SIG_ALIGNED;
773#endif
774
cfb6eeb4 775 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1da177e4 776 GFP_ATOMIC);
63159f29 777 if (!buff)
1ab1457c 778 return;
1da177e4 779
cfb6eeb4 780 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1da177e4 781
cfb6eeb4 782 t1 = (struct tcphdr *) skb_push(buff, tot_len);
6651ffc8 783 skb_reset_transport_header(buff);
1da177e4
LT
784
785 /* Swap the send and the receive. */
786 memset(t1, 0, sizeof(*t1));
787 t1->dest = th->source;
788 t1->source = th->dest;
cfb6eeb4 789 t1->doff = tot_len / 4;
626e264d
IJ
790 t1->seq = htonl(seq);
791 t1->ack_seq = htonl(ack);
792 t1->ack = !rst || !th->ack;
793 t1->rst = rst;
794 t1->window = htons(win);
1da177e4 795
81ada62d
IJ
796 topt = (__be32 *)(t1 + 1);
797
ee684b6f 798 if (tsecr) {
626e264d
IJ
799 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
800 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
ee684b6f
AV
801 *topt++ = htonl(tsval);
802 *topt++ = htonl(tsecr);
626e264d
IJ
803 }
804
cfb6eeb4
YH
805#ifdef CONFIG_TCP_MD5SIG
806 if (key) {
81ada62d
IJ
807 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
808 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
809 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
78e645cb
IJ
810 &ipv6_hdr(skb)->saddr,
811 &ipv6_hdr(skb)->daddr, t1);
cfb6eeb4
YH
812 }
813#endif
814
4c9483b2 815 memset(&fl6, 0, sizeof(fl6));
4e3fd7a0
AD
816 fl6.daddr = ipv6_hdr(skb)->saddr;
817 fl6.saddr = ipv6_hdr(skb)->daddr;
1d13a96c 818 fl6.flowlabel = label;
1da177e4 819
e5700aff
DM
820 buff->ip_summed = CHECKSUM_PARTIAL;
821 buff->csum = 0;
822
4c9483b2 823 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
1da177e4 824
4c9483b2 825 fl6.flowi6_proto = IPPROTO_TCP;
a36dbdb2 826 if (rt6_need_strict(&fl6.daddr) && !oif)
870c3151 827 fl6.flowi6_oif = tcp_v6_iif(skb);
9b6c14d5
DA
828 else {
829 if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
830 oif = skb->skb_iif;
831
832 fl6.flowi6_oif = oif;
833 }
1d2f7b2d 834
e110861f 835 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
1958b856
DM
836 fl6.fl6_dport = t1->dest;
837 fl6.fl6_sport = t1->source;
e2d118a1 838 fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
4c9483b2 839 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
1da177e4 840
c20121ae
DL
 841 /* Pass a socket to ip6_dst_lookup even when it is for an RST.
 842 * The underlying function will use it to retrieve the network
 843 * namespace.
844 */
0e0d44ab 845 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
68d0c6d3
DM
846 if (!IS_ERR(dst)) {
847 skb_dst_set(buff, dst);
92e55f41 848 ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
c10d9310 849 TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
68d0c6d3 850 if (rst)
c10d9310 851 TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
68d0c6d3 852 return;
1da177e4
LT
853 }
854
855 kfree_skb(buff);
856}
857
a00e7444 858static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
1da177e4 859{
cf533ea5 860 const struct tcphdr *th = tcp_hdr(skb);
626e264d 861 u32 seq = 0, ack_seq = 0;
fa3e5b4e 862 struct tcp_md5sig_key *key = NULL;
658ddaaf
SL
863#ifdef CONFIG_TCP_MD5SIG
864 const __u8 *hash_location = NULL;
865 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
866 unsigned char newhash[16];
867 int genhash;
868 struct sock *sk1 = NULL;
869#endif
9c76a114 870 int oif;
1da177e4 871
626e264d 872 if (th->rst)
1da177e4
LT
873 return;
874
c3658e8d
ED
 875 /* If sk is not NULL, it means we did a successful lookup and the incoming
876 * route had to be correct. prequeue might have dropped our dst.
877 */
878 if (!sk && !ipv6_unicast_destination(skb))
626e264d 879 return;
1da177e4 880
cfb6eeb4 881#ifdef CONFIG_TCP_MD5SIG
3b24d854 882 rcu_read_lock();
658ddaaf 883 hash_location = tcp_parse_md5sig_option(th);
271c3b9b 884 if (sk && sk_fullsock(sk)) {
e46787f0
FW
885 key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
886 } else if (hash_location) {
658ddaaf
SL
887 /*
888 * active side is lost. Try to find listening socket through
889 * source port, and then find md5 key through listening socket.
890 * we are not loose security here:
891 * Incoming packet is checked with md5 hash with finding key,
892 * no RST generated if md5 hash doesn't match.
893 */
894 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
a583636a
CG
895 &tcp_hashinfo, NULL, 0,
896 &ipv6h->saddr,
5ba24953 897 th->source, &ipv6h->daddr,
870c3151 898 ntohs(th->source), tcp_v6_iif(skb));
658ddaaf 899 if (!sk1)
3b24d854 900 goto out;
658ddaaf 901
658ddaaf
SL
902 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
903 if (!key)
3b24d854 904 goto out;
658ddaaf 905
39f8e58e 906 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
658ddaaf 907 if (genhash || memcmp(hash_location, newhash, 16) != 0)
3b24d854 908 goto out;
658ddaaf 909 }
cfb6eeb4
YH
910#endif
911
626e264d
IJ
912 if (th->ack)
913 seq = ntohl(th->ack_seq);
914 else
915 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
916 (th->doff << 2);
1da177e4 917
9c76a114 918 oif = sk ? sk->sk_bound_dev_if : 0;
0f85feae 919 tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
658ddaaf
SL
920
921#ifdef CONFIG_TCP_MD5SIG
3b24d854
ED
922out:
923 rcu_read_unlock();
658ddaaf 924#endif
626e264d 925}
1da177e4 926
a00e7444 927static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
0f85feae 928 u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
1d13a96c 929 struct tcp_md5sig_key *key, u8 tclass,
5119bd16 930 __be32 label)
626e264d 931{
0f85feae
ED
932 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
933 tclass, label);
1da177e4
LT
934}
935
936static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
937{
8feaf0c0 938 struct inet_timewait_sock *tw = inet_twsk(sk);
cfb6eeb4 939 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1da177e4 940
0f85feae 941 tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
8feaf0c0 942 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
ee684b6f 943 tcp_time_stamp + tcptw->tw_ts_offset,
9c76a114 944 tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
21858cd0 945 tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
1da177e4 946
8feaf0c0 947 inet_twsk_put(tw);
1da177e4
LT
948}
949
a00e7444 950static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
6edafaaf 951 struct request_sock *req)
1da177e4 952{
3a19ce0e
DL
953 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
954 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
955 */
20a2b49f
ED
956 /* RFC 7323 2.3
957 * The window field (SEG.WND) of every outgoing segment, with the
958 * exception of <SYN> segments, MUST be right-shifted by
959 * Rcv.Wind.Shift bits:
960 */
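	/* Worked example (values assumed): with rcv_wscale == 7, a receive
	 * window of 28800 bytes is advertised on the wire as
	 * 28800 >> 7 == 225.
	 */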
0f85feae 961 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
3a19ce0e 962 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
20a2b49f
ED
963 tcp_rsk(req)->rcv_nxt,
964 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
95a22cae
FW
965 tcp_time_stamp + tcp_rsk(req)->ts_off,
966 req->ts_recent, sk->sk_bound_dev_if,
1d13a96c
FF
967 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
968 0, 0);
1da177e4
LT
969}
970
971
079096f1 972static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
1da177e4 973{
079096f1 974#ifdef CONFIG_SYN_COOKIES
aa8223c7 975 const struct tcphdr *th = tcp_hdr(skb);
1da177e4 976
af9b4738 977 if (!th->syn)
c6aefafb 978 sk = cookie_v6_check(sk, skb);
1da177e4
LT
979#endif
980 return sk;
981}
982
1da177e4
LT
983static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
984{
1da177e4
LT
985 if (skb->protocol == htons(ETH_P_IP))
986 return tcp_v4_conn_request(sk, skb);
987
988 if (!ipv6_unicast_destination(skb))
1ab1457c 989 goto drop;
1da177e4 990
1fb6f159
OP
991 return tcp_conn_request(&tcp6_request_sock_ops,
992 &tcp_request_sock_ipv6_ops, sk, skb);
1da177e4
LT
993
994drop:
9caad864 995 tcp_listendrop(sk);
1da177e4
LT
996 return 0; /* don't send reset */
997}
998
ebf6c9cb
ED
999static void tcp_v6_restore_cb(struct sk_buff *skb)
1000{
1001 /* We need to move header back to the beginning if xfrm6_policy_check()
1002 * and tcp_v6_fill_cb() are going to be called again.
1003 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1004 */
1005 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1006 sizeof(struct inet6_skb_parm));
1007}
1008
0c27171e 1009static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
4c99aa40 1010 struct request_sock *req,
5e0724d0
ED
1011 struct dst_entry *dst,
1012 struct request_sock *req_unhash,
1013 bool *own_req)
1da177e4 1014{
634fb979 1015 struct inet_request_sock *ireq;
0c27171e
ED
1016 struct ipv6_pinfo *newnp;
1017 const struct ipv6_pinfo *np = inet6_sk(sk);
45f6fad8 1018 struct ipv6_txoptions *opt;
1da177e4
LT
1019 struct tcp6_sock *newtcp6sk;
1020 struct inet_sock *newinet;
1021 struct tcp_sock *newtp;
1022 struct sock *newsk;
cfb6eeb4
YH
1023#ifdef CONFIG_TCP_MD5SIG
1024 struct tcp_md5sig_key *key;
1025#endif
3840a06e 1026 struct flowi6 fl6;
1da177e4
LT
1027
1028 if (skb->protocol == htons(ETH_P_IP)) {
1029 /*
1030 * v6 mapped
1031 */
1032
5e0724d0
ED
1033 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1034 req_unhash, own_req);
1da177e4 1035
63159f29 1036 if (!newsk)
1da177e4
LT
1037 return NULL;
1038
1039 newtcp6sk = (struct tcp6_sock *)newsk;
1040 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1041
1042 newinet = inet_sk(newsk);
1043 newnp = inet6_sk(newsk);
1044 newtp = tcp_sk(newsk);
1045
1046 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1047
d1e559d0 1048 newnp->saddr = newsk->sk_v6_rcv_saddr;
1da177e4 1049
8292a17a 1050 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1da177e4 1051 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
cfb6eeb4
YH
1052#ifdef CONFIG_TCP_MD5SIG
1053 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1054#endif
1055
676a1184
YZ
1056 newnp->ipv6_ac_list = NULL;
1057 newnp->ipv6_fl_list = NULL;
1da177e4
LT
1058 newnp->pktoptions = NULL;
1059 newnp->opt = NULL;
870c3151 1060 newnp->mcast_oif = tcp_v6_iif(skb);
0660e03f 1061 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1397ed35 1062 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
df3687ff
FF
1063 if (np->repflow)
1064 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1da177e4 1065
e6848976
ACM
1066 /*
1067 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1068 * here, tcp_create_openreq_child now does this for us, see the comment in
1069 * that function for the gory details. -acme
1da177e4 1070 */
1da177e4
LT
1071
 1072 /* It is a tricky place. Until this moment IPv4 tcp
8292a17a 1073 worked with IPv6 icsk.icsk_af_ops.
1da177e4
LT
1074 Sync it now.
1075 */
d83d8461 1076 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1da177e4
LT
1077
1078 return newsk;
1079 }
1080
634fb979 1081 ireq = inet_rsk(req);
1da177e4
LT
1082
1083 if (sk_acceptq_is_full(sk))
1084 goto out_overflow;
1085
493f377d 1086 if (!dst) {
f76b33c3 1087 dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
493f377d 1088 if (!dst)
1da177e4 1089 goto out;
1ab1457c 1090 }
1da177e4
LT
1091
1092 newsk = tcp_create_openreq_child(sk, req, skb);
63159f29 1093 if (!newsk)
093d2823 1094 goto out_nonewsk;
1da177e4 1095
e6848976
ACM
1096 /*
1097 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1098 * count here, tcp_create_openreq_child now does this for us, see the
1099 * comment in that function for the gory details. -acme
1100 */
1da177e4 1101
59eed279 1102 newsk->sk_gso_type = SKB_GSO_TCPV6;
6bd4f355 1103 ip6_dst_store(newsk, dst, NULL, NULL);
fae6ef87 1104 inet6_sk_rx_dst_set(newsk, skb);
1da177e4
LT
1105
1106 newtcp6sk = (struct tcp6_sock *)newsk;
1107 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1108
1109 newtp = tcp_sk(newsk);
1110 newinet = inet_sk(newsk);
1111 newnp = inet6_sk(newsk);
1112
1113 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1114
634fb979
ED
1115 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1116 newnp->saddr = ireq->ir_v6_loc_addr;
1117 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1118 newsk->sk_bound_dev_if = ireq->ir_iif;
1da177e4 1119
1ab1457c 1120 /* Now IPv6 options...
1da177e4
LT
1121
1122 First: no IPv4 options.
1123 */
f6d8bd05 1124 newinet->inet_opt = NULL;
676a1184 1125 newnp->ipv6_ac_list = NULL;
d35690be 1126 newnp->ipv6_fl_list = NULL;
1da177e4
LT
1127
1128 /* Clone RX bits */
1129 newnp->rxopt.all = np->rxopt.all;
1130
1da177e4 1131 newnp->pktoptions = NULL;
1da177e4 1132 newnp->opt = NULL;
870c3151 1133 newnp->mcast_oif = tcp_v6_iif(skb);
0660e03f 1134 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1397ed35 1135 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
df3687ff
FF
1136 if (np->repflow)
1137 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1da177e4
LT
1138
1139 /* Clone native IPv6 options from listening socket (if any)
1140
1141 Yes, keeping reference count would be much more clever,
 1142 but we do one more thing here: reattach optmem
1143 to newsk.
1144 */
56ac42bc
HD
1145 opt = ireq->ipv6_opt;
1146 if (!opt)
1147 opt = rcu_dereference(np->opt);
45f6fad8
ED
1148 if (opt) {
1149 opt = ipv6_dup_options(newsk, opt);
1150 RCU_INIT_POINTER(newnp->opt, opt);
1151 }
d83d8461 1152 inet_csk(newsk)->icsk_ext_hdr_len = 0;
45f6fad8
ED
1153 if (opt)
1154 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1155 opt->opt_flen;
1da177e4 1156
81164413
DB
1157 tcp_ca_openreq_child(newsk, dst);
1158
1da177e4 1159 tcp_sync_mss(newsk, dst_mtu(dst));
0dbaee3b 1160 newtp->advmss = dst_metric_advmss(dst);
d135c522
NC
1161 if (tcp_sk(sk)->rx_opt.user_mss &&
1162 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1163 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1164
1da177e4
LT
1165 tcp_initialize_rcv_mss(newsk);
1166
c720c7e8
ED
1167 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1168 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1da177e4 1169
cfb6eeb4
YH
1170#ifdef CONFIG_TCP_MD5SIG
1171 /* Copy over the MD5 key from the original socket */
4aa956d8 1172 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
53b24b8f 1173 if (key) {
cfb6eeb4
YH
1174 /* We're using one, so create a matching key
1175 * on the newsk structure. If we fail to get
1176 * memory, then we end up not copying the key
1177 * across. Shucks.
1178 */
efe4208f 1179 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
99a1dec7 1180 AF_INET6, key->key, key->keylen,
7450aaf6 1181 sk_gfp_mask(sk, GFP_ATOMIC));
cfb6eeb4
YH
1182 }
1183#endif
1184
093d2823 1185 if (__inet_inherit_port(sk, newsk) < 0) {
e337e24d
CP
1186 inet_csk_prepare_forced_close(newsk);
1187 tcp_done(newsk);
093d2823
BS
1188 goto out;
1189 }
5e0724d0 1190 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
805c4bc0 1191 if (*own_req) {
49a496c9 1192 tcp_move_syn(newtp, req);
805c4bc0
ED
1193
1194 /* Clone pktoptions received with SYN, if we own the req */
1195 if (ireq->pktopts) {
1196 newnp->pktoptions = skb_clone(ireq->pktopts,
7450aaf6 1197 sk_gfp_mask(sk, GFP_ATOMIC));
805c4bc0
ED
1198 consume_skb(ireq->pktopts);
1199 ireq->pktopts = NULL;
ebf6c9cb
ED
1200 if (newnp->pktoptions) {
1201 tcp_v6_restore_cb(newnp->pktoptions);
805c4bc0 1202 skb_set_owner_r(newnp->pktoptions, newsk);
ebf6c9cb 1203 }
805c4bc0 1204 }
ce105008 1205 }
1da177e4
LT
1206
1207 return newsk;
1208
1209out_overflow:
02a1d6e7 1210 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
093d2823 1211out_nonewsk:
1da177e4 1212 dst_release(dst);
093d2823 1213out:
9caad864 1214 tcp_listendrop(sk);
1da177e4
LT
1215 return NULL;
1216}
1217
1da177e4 1218/* The socket must have its spinlock held when we get
e994b2f0 1219 * here, unless it is a TCP_LISTEN socket.
1da177e4
LT
1220 *
1221 * We have a potential double-lock case here, so even when
1222 * doing backlog processing we use the BH locking scheme.
1223 * This is because we cannot sleep with the original spinlock
1224 * held.
1225 */
1226static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1227{
1228 struct ipv6_pinfo *np = inet6_sk(sk);
1229 struct tcp_sock *tp;
1230 struct sk_buff *opt_skb = NULL;
1231
1232 /* Imagine: socket is IPv6. IPv4 packet arrives,
 1233 goes to the IPv4 receive handler and is backlogged.
 1234 From the backlog it always goes here. Kerboom...
 1235 Fortunately, tcp_rcv_established and rcv_established
 1236 handle them correctly, but that is not the case with
1237 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1238 */
1239
1240 if (skb->protocol == htons(ETH_P_IP))
1241 return tcp_v4_do_rcv(sk, skb);
1242
ac6e7800 1243 if (tcp_filter(sk, skb))
1da177e4
LT
1244 goto discard;
1245
1246 /*
1247 * socket locking is here for SMP purposes as backlog rcv
1248 * is currently called with bh processing disabled.
1249 */
1250
1251 /* Do Stevens' IPV6_PKTOPTIONS.
1252
 1253 Yes, guys, it is the only place in our code where we
 1254 can make it without affecting IPv4.
 1255 The rest of the code is protocol independent,
 1256 and I do not like the idea of uglifying IPv4.
 1257
 1258 Actually, the whole idea behind IPV6_PKTOPTIONS
 1259 does not look very well thought out. For now we latch
 1260 the options received in the last packet enqueued
 1261 by tcp. Feel free to propose a better solution.
1ab1457c 1262 --ANK (980728)
1da177e4
LT
1263 */
1264 if (np->rxopt.all)
7450aaf6 1265 opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
1da177e4
LT
1266
1267 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
5d299f3d
ED
1268 struct dst_entry *dst = sk->sk_rx_dst;
1269
bdeab991 1270 sock_rps_save_rxhash(sk, skb);
3d97379a 1271 sk_mark_napi_id(sk, skb);
5d299f3d
ED
1272 if (dst) {
1273 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1274 dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1275 dst_release(dst);
1276 sk->sk_rx_dst = NULL;
1277 }
1278 }
1279
c995ae22 1280 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1da177e4
LT
1281 if (opt_skb)
1282 goto ipv6_pktoptions;
1283 return 0;
1284 }
1285
12e25e10 1286 if (tcp_checksum_complete(skb))
1da177e4
LT
1287 goto csum_err;
1288
1ab1457c 1289 if (sk->sk_state == TCP_LISTEN) {
079096f1
ED
1290 struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1291
1da177e4
LT
1292 if (!nsk)
1293 goto discard;
1294
4c99aa40 1295 if (nsk != sk) {
bdeab991 1296 sock_rps_save_rxhash(nsk, skb);
38cb5245 1297 sk_mark_napi_id(nsk, skb);
1da177e4
LT
1298 if (tcp_child_process(sk, nsk, skb))
1299 goto reset;
1300 if (opt_skb)
1301 __kfree_skb(opt_skb);
1302 return 0;
1303 }
47482f13 1304 } else
bdeab991 1305 sock_rps_save_rxhash(sk, skb);
1da177e4 1306
72ab4a86 1307 if (tcp_rcv_state_process(sk, skb))
1da177e4 1308 goto reset;
1da177e4
LT
1309 if (opt_skb)
1310 goto ipv6_pktoptions;
1311 return 0;
1312
1313reset:
cfb6eeb4 1314 tcp_v6_send_reset(sk, skb);
1da177e4
LT
1315discard:
1316 if (opt_skb)
1317 __kfree_skb(opt_skb);
1318 kfree_skb(skb);
1319 return 0;
1320csum_err:
c10d9310
ED
1321 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1322 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1da177e4
LT
1323 goto discard;
1324
1325
1326ipv6_pktoptions:
 1327 /* You may ask: what is this?
1328
1329 1. skb was enqueued by tcp.
1330 2. skb is added to tail of read queue, rather than out of order.
1331 3. socket is not in passive state.
1332 4. Finally, it really contains options, which user wants to receive.
1333 */
1334 tp = tcp_sk(sk);
1335 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1336 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
333fad53 1337 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
870c3151 1338 np->mcast_oif = tcp_v6_iif(opt_skb);
333fad53 1339 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
0660e03f 1340 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
82e9f105 1341 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1397ed35 1342 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
df3687ff
FF
1343 if (np->repflow)
1344 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
a224772d 1345 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1da177e4 1346 skb_set_owner_r(opt_skb, sk);
8ce48623 1347 tcp_v6_restore_cb(opt_skb);
1da177e4
LT
1348 opt_skb = xchg(&np->pktoptions, opt_skb);
1349 } else {
1350 __kfree_skb(opt_skb);
1351 opt_skb = xchg(&np->pktoptions, NULL);
1352 }
1353 }
1354
800d55f1 1355 kfree_skb(opt_skb);
1da177e4
LT
1356 return 0;
1357}
1358
2dc49d16
ND
1359static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1360 const struct tcphdr *th)
1361{
1362 /* This is tricky: we move IP6CB at its correct location into
1363 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1364 * _decode_session6() uses IP6CB().
1365 * barrier() makes sure compiler won't play aliasing games.
1366 */
1367 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1368 sizeof(struct inet6_skb_parm));
1369 barrier();
1370
1371 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1372 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1373 skb->len - th->doff*4);
1374 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1375 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1376 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1377 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1378 TCP_SKB_CB(skb)->sacked = 0;
1379}
1380
e5bbef20 1381static int tcp_v6_rcv(struct sk_buff *skb)
1da177e4 1382{
cf533ea5 1383 const struct tcphdr *th;
b71d1d42 1384 const struct ipv6hdr *hdr;
3b24d854 1385 bool refcounted;
1da177e4
LT
1386 struct sock *sk;
1387 int ret;
a86b1e30 1388 struct net *net = dev_net(skb->dev);
1da177e4
LT
1389
1390 if (skb->pkt_type != PACKET_HOST)
1391 goto discard_it;
1392
1393 /*
1394 * Count it even if it's bad.
1395 */
90bbcc60 1396 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1da177e4
LT
1397
1398 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1399 goto discard_it;
1400
ea1627c2 1401 th = (const struct tcphdr *)skb->data;
1da177e4 1402
ea1627c2 1403 if (unlikely(th->doff < sizeof(struct tcphdr)/4))
1da177e4
LT
1404 goto bad_packet;
1405 if (!pskb_may_pull(skb, th->doff*4))
1406 goto discard_it;
1407
e4f45b7f 1408 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
6a5dc9e5 1409 goto csum_error;
1da177e4 1410
ea1627c2 1411 th = (const struct tcphdr *)skb->data;
e802af9c 1412 hdr = ipv6_hdr(skb);
1da177e4 1413
4bdc3d66 1414lookup:
a583636a 1415 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
3b24d854
ED
1416 th->source, th->dest, inet6_iif(skb),
1417 &refcounted);
1da177e4
LT
1418 if (!sk)
1419 goto no_tcp_socket;
1420
1421process:
1422 if (sk->sk_state == TCP_TIME_WAIT)
1423 goto do_time_wait;
1424
079096f1
ED
1425 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1426 struct request_sock *req = inet_reqsk(sk);
7716682c 1427 struct sock *nsk;
079096f1
ED
1428
1429 sk = req->rsk_listener;
1430 tcp_v6_fill_cb(skb, hdr, th);
1431 if (tcp_v6_inbound_md5_hash(sk, skb)) {
e65c332d 1432 sk_drops_add(sk, skb);
079096f1
ED
1433 reqsk_put(req);
1434 goto discard_it;
1435 }
7716682c 1436 if (unlikely(sk->sk_state != TCP_LISTEN)) {
f03f2e15 1437 inet_csk_reqsk_queue_drop_and_put(sk, req);
4bdc3d66
ED
1438 goto lookup;
1439 }
7716682c 1440 sock_hold(sk);
3b24d854 1441 refcounted = true;
7716682c 1442 nsk = tcp_check_req(sk, skb, req, false);
079096f1
ED
1443 if (!nsk) {
1444 reqsk_put(req);
7716682c 1445 goto discard_and_relse;
079096f1
ED
1446 }
1447 if (nsk == sk) {
079096f1
ED
1448 reqsk_put(req);
1449 tcp_v6_restore_cb(skb);
1450 } else if (tcp_child_process(sk, nsk, skb)) {
1451 tcp_v6_send_reset(nsk, skb);
7716682c 1452 goto discard_and_relse;
079096f1 1453 } else {
7716682c 1454 sock_put(sk);
079096f1
ED
1455 return 0;
1456 }
1457 }
e802af9c 1458 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
02a1d6e7 1459 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
e802af9c
SH
1460 goto discard_and_relse;
1461 }
1462
1da177e4
LT
1463 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1464 goto discard_and_relse;
1465
2dc49d16
ND
1466 tcp_v6_fill_cb(skb, hdr, th);
1467
9ea88a15
DP
1468 if (tcp_v6_inbound_md5_hash(sk, skb))
1469 goto discard_and_relse;
9ea88a15 1470
ac6e7800 1471 if (tcp_filter(sk, skb))
1da177e4 1472 goto discard_and_relse;
ac6e7800
ED
1473 th = (const struct tcphdr *)skb->data;
1474 hdr = ipv6_hdr(skb);
1da177e4
LT
1475
1476 skb->dev = NULL;
1477
e994b2f0
ED
1478 if (sk->sk_state == TCP_LISTEN) {
1479 ret = tcp_v6_do_rcv(sk, skb);
1480 goto put_and_return;
1481 }
1482
1483 sk_incoming_cpu_update(sk);
1484
293b9c42 1485 bh_lock_sock_nested(sk);
a44d6eac 1486 tcp_segs_in(tcp_sk(sk), skb);
1da177e4
LT
1487 ret = 0;
1488 if (!sock_owned_by_user(sk)) {
7bced397 1489 if (!tcp_prequeue(sk, skb))
1ab1457c 1490 ret = tcp_v6_do_rcv(sk, skb);
c9c33212 1491 } else if (tcp_add_backlog(sk, skb)) {
6b03a53a
ZY
1492 goto discard_and_relse;
1493 }
1da177e4
LT
1494 bh_unlock_sock(sk);
1495
e994b2f0 1496put_and_return:
3b24d854
ED
1497 if (refcounted)
1498 sock_put(sk);
1da177e4
LT
1499 return ret ? -1 : 0;
1500
1501no_tcp_socket:
1502 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1503 goto discard_it;
1504
2dc49d16
ND
1505 tcp_v6_fill_cb(skb, hdr, th);
1506
12e25e10 1507 if (tcp_checksum_complete(skb)) {
6a5dc9e5 1508csum_error:
90bbcc60 1509 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1da177e4 1510bad_packet:
90bbcc60 1511 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1da177e4 1512 } else {
cfb6eeb4 1513 tcp_v6_send_reset(NULL, skb);
1da177e4
LT
1514 }
1515
1516discard_it:
1da177e4
LT
1517 kfree_skb(skb);
1518 return 0;
1519
1520discard_and_relse:
532182cd 1521 sk_drops_add(sk, skb);
3b24d854
ED
1522 if (refcounted)
1523 sock_put(sk);
1da177e4
LT
1524 goto discard_it;
1525
1526do_time_wait:
1527 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
9469c7b4 1528 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1529 goto discard_it;
1530 }
1531
2dc49d16
ND
1532 tcp_v6_fill_cb(skb, hdr, th);
1533
6a5dc9e5
ED
1534 if (tcp_checksum_complete(skb)) {
1535 inet_twsk_put(inet_twsk(sk));
1536 goto csum_error;
1da177e4
LT
1537 }
1538
9469c7b4 1539 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1da177e4
LT
1540 case TCP_TW_SYN:
1541 {
1542 struct sock *sk2;
1543
c346dca1 1544 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
a583636a 1545 skb, __tcp_hdrlen(th),
5ba24953 1546 &ipv6_hdr(skb)->saddr, th->source,
0660e03f 1547 &ipv6_hdr(skb)->daddr,
870c3151 1548 ntohs(th->dest), tcp_v6_iif(skb));
53b24b8f 1549 if (sk2) {
295ff7ed 1550 struct inet_timewait_sock *tw = inet_twsk(sk);
dbe7faa4 1551 inet_twsk_deschedule_put(tw);
1da177e4 1552 sk = sk2;
4ad19de8 1553 tcp_v6_restore_cb(skb);
3b24d854 1554 refcounted = false;
1da177e4
LT
1555 goto process;
1556 }
1557 /* Fall through to ACK */
1558 }
1559 case TCP_TW_ACK:
1560 tcp_v6_timewait_ack(sk, skb);
1561 break;
1562 case TCP_TW_RST:
4ad19de8 1563 tcp_v6_restore_cb(skb);
271c3b9b
FW
1564 tcp_v6_send_reset(sk, skb);
1565 inet_twsk_deschedule_put(inet_twsk(sk));
1566 goto discard_it;
4aa956d8
WY
1567 case TCP_TW_SUCCESS:
1568 ;
1da177e4
LT
1569 }
1570 goto discard_it;
1571}
1572
c7109986
ED
1573static void tcp_v6_early_demux(struct sk_buff *skb)
1574{
1575 const struct ipv6hdr *hdr;
1576 const struct tcphdr *th;
1577 struct sock *sk;
1578
1579 if (skb->pkt_type != PACKET_HOST)
1580 return;
1581
1582 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1583 return;
1584
1585 hdr = ipv6_hdr(skb);
1586 th = tcp_hdr(skb);
1587
1588 if (th->doff < sizeof(struct tcphdr) / 4)
1589 return;
1590
870c3151 1591 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
c7109986
ED
1592 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1593 &hdr->saddr, th->source,
1594 &hdr->daddr, ntohs(th->dest),
1595 inet6_iif(skb));
1596 if (sk) {
1597 skb->sk = sk;
1598 skb->destructor = sock_edemux;
f7e4eb03 1599 if (sk_fullsock(sk)) {
d0c294c5 1600 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
f3f12135 1601
c7109986 1602 if (dst)
5d299f3d 1603 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
c7109986 1604 if (dst &&
f3f12135 1605 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
c7109986
ED
1606 skb_dst_set_noref(skb, dst);
1607 }
1608 }
1609}
1610
ccb7c410
DM
1611static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1612 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1613 .twsk_unique = tcp_twsk_unique,
4aa956d8 1614 .twsk_destructor = tcp_twsk_destructor,
ccb7c410
DM
1615};
1616
3b401a81 1617static const struct inet_connection_sock_af_ops ipv6_specific = {
543d9cfe
ACM
1618 .queue_xmit = inet6_csk_xmit,
1619 .send_check = tcp_v6_send_check,
1620 .rebuild_header = inet6_sk_rebuild_header,
5d299f3d 1621 .sk_rx_dst_set = inet6_sk_rx_dst_set,
543d9cfe
ACM
1622 .conn_request = tcp_v6_conn_request,
1623 .syn_recv_sock = tcp_v6_syn_recv_sock,
543d9cfe 1624 .net_header_len = sizeof(struct ipv6hdr),
67469601 1625 .net_frag_header_len = sizeof(struct frag_hdr),
543d9cfe
ACM
1626 .setsockopt = ipv6_setsockopt,
1627 .getsockopt = ipv6_getsockopt,
1628 .addr2sockaddr = inet6_csk_addr2sockaddr,
1629 .sockaddr_len = sizeof(struct sockaddr_in6),
ab1e0a13 1630 .bind_conflict = inet6_csk_bind_conflict,
3fdadf7d 1631#ifdef CONFIG_COMPAT
543d9cfe
ACM
1632 .compat_setsockopt = compat_ipv6_setsockopt,
1633 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1634#endif
4fab9071 1635 .mtu_reduced = tcp_v6_mtu_reduced,
1da177e4
LT
1636};
1637
cfb6eeb4 1638#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1639static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
cfb6eeb4 1640 .md5_lookup = tcp_v6_md5_lookup,
49a72dfb 1641 .calc_md5_hash = tcp_v6_md5_hash_skb,
cfb6eeb4 1642 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1643};
a928630a 1644#endif
cfb6eeb4 1645
1da177e4
LT
1646/*
1647 * TCP over IPv4 via INET6 API
1648 */
3b401a81 1649static const struct inet_connection_sock_af_ops ipv6_mapped = {
543d9cfe
ACM
1650 .queue_xmit = ip_queue_xmit,
1651 .send_check = tcp_v4_send_check,
1652 .rebuild_header = inet_sk_rebuild_header,
63d02d15 1653 .sk_rx_dst_set = inet_sk_rx_dst_set,
543d9cfe
ACM
1654 .conn_request = tcp_v6_conn_request,
1655 .syn_recv_sock = tcp_v6_syn_recv_sock,
543d9cfe
ACM
1656 .net_header_len = sizeof(struct iphdr),
1657 .setsockopt = ipv6_setsockopt,
1658 .getsockopt = ipv6_getsockopt,
1659 .addr2sockaddr = inet6_csk_addr2sockaddr,
1660 .sockaddr_len = sizeof(struct sockaddr_in6),
ab1e0a13 1661 .bind_conflict = inet6_csk_bind_conflict,
3fdadf7d 1662#ifdef CONFIG_COMPAT
543d9cfe
ACM
1663 .compat_setsockopt = compat_ipv6_setsockopt,
1664 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1665#endif
4fab9071 1666 .mtu_reduced = tcp_v4_mtu_reduced,
1da177e4
LT
1667};
1668
cfb6eeb4 1669#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1670static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
cfb6eeb4 1671 .md5_lookup = tcp_v4_md5_lookup,
49a72dfb 1672 .calc_md5_hash = tcp_v4_md5_hash_skb,
cfb6eeb4 1673 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1674};
a928630a 1675#endif
cfb6eeb4 1676
1da177e4
LT
1677/* NOTE: A lot of things set to zero explicitly by call to
1678 * sk_alloc() so need not be done here.
1679 */
1680static int tcp_v6_init_sock(struct sock *sk)
1681{
6687e988 1682 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4 1683
900f65d3 1684 tcp_init_sock(sk);
1da177e4 1685
8292a17a 1686 icsk->icsk_af_ops = &ipv6_specific;
1da177e4 1687
cfb6eeb4 1688#ifdef CONFIG_TCP_MD5SIG
ac807fa8 1689 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
cfb6eeb4
YH
1690#endif
1691
1da177e4
LT
1692 return 0;
1693}
1694
7d06b2e0 1695static void tcp_v6_destroy_sock(struct sock *sk)
1da177e4 1696{
1da177e4 1697 tcp_v4_destroy_sock(sk);
7d06b2e0 1698 inet6_destroy_sock(sk);
1da177e4
LT
1699}
1700
952a10be 1701#ifdef CONFIG_PROC_FS
1da177e4 1702/* Proc filesystem TCPv6 sock list dumping. */
1ab1457c 1703static void get_openreq6(struct seq_file *seq,
aa3a0c8c 1704 const struct request_sock *req, int i)
1da177e4 1705{
fa76ce73 1706 long ttd = req->rsk_timer.expires - jiffies;
634fb979
ED
1707 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1708 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1da177e4
LT
1709
1710 if (ttd < 0)
1711 ttd = 0;
1712
1da177e4
LT
1713 seq_printf(seq,
1714 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
d14c5ab6 1715 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1da177e4
LT
1716 i,
1717 src->s6_addr32[0], src->s6_addr32[1],
1718 src->s6_addr32[2], src->s6_addr32[3],
b44084c2 1719 inet_rsk(req)->ir_num,
1da177e4
LT
1720 dest->s6_addr32[0], dest->s6_addr32[1],
1721 dest->s6_addr32[2], dest->s6_addr32[3],
634fb979 1722 ntohs(inet_rsk(req)->ir_rmt_port),
1da177e4 1723 TCP_SYN_RECV,
4c99aa40 1724 0, 0, /* could print option size, but that is af dependent. */
1ab1457c
YH
1725 1, /* timers active (only the expire timer) */
1726 jiffies_to_clock_t(ttd),
e6c022a4 1727 req->num_timeout,
aa3a0c8c
ED
1728 from_kuid_munged(seq_user_ns(seq),
1729 sock_i_uid(req->rsk_listener)),
1ab1457c 1730 0, /* non standard timer */
1da177e4
LT
1731 0, /* open_requests have no inode */
1732 0, req);
1733}
1734
1735static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1736{
b71d1d42 1737 const struct in6_addr *dest, *src;
1da177e4
LT
1738 __u16 destp, srcp;
1739 int timer_active;
1740 unsigned long timer_expires;
cf533ea5
ED
1741 const struct inet_sock *inet = inet_sk(sp);
1742 const struct tcp_sock *tp = tcp_sk(sp);
463c84b9 1743 const struct inet_connection_sock *icsk = inet_csk(sp);
0536fcc0 1744 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
00fd38d9
ED
1745 int rx_queue;
1746 int state;
1da177e4 1747
efe4208f
ED
1748 dest = &sp->sk_v6_daddr;
1749 src = &sp->sk_v6_rcv_saddr;
c720c7e8
ED
1750 destp = ntohs(inet->inet_dport);
1751 srcp = ntohs(inet->inet_sport);
463c84b9 1752
ce3cf4ec
YC
1753 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
1754 icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
1755 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
1da177e4 1756 timer_active = 1;
463c84b9
ACM
1757 timer_expires = icsk->icsk_timeout;
1758 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1da177e4 1759 timer_active = 4;
463c84b9 1760 timer_expires = icsk->icsk_timeout;
1da177e4
LT
1761 } else if (timer_pending(&sp->sk_timer)) {
1762 timer_active = 2;
1763 timer_expires = sp->sk_timer.expires;
1764 } else {
1765 timer_active = 0;
1766 timer_expires = jiffies;
1767 }
1768
00fd38d9
ED
1769 state = sk_state_load(sp);
1770 if (state == TCP_LISTEN)
1771 rx_queue = sp->sk_ack_backlog;
1772 else
1773 /* Because we don't lock the socket,
1774 * we might find a transient negative value.
1775 */
1776 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1777
1da177e4
LT
1778 seq_printf(seq,
1779 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
d14c5ab6 1780 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1da177e4
LT
1781 i,
1782 src->s6_addr32[0], src->s6_addr32[1],
1783 src->s6_addr32[2], src->s6_addr32[3], srcp,
1784 dest->s6_addr32[0], dest->s6_addr32[1],
1785 dest->s6_addr32[2], dest->s6_addr32[3], destp,
00fd38d9
ED
1786 state,
1787 tp->write_seq - tp->snd_una,
1788 rx_queue,
1da177e4 1789 timer_active,
a399a805 1790 jiffies_delta_to_clock_t(timer_expires - jiffies),
463c84b9 1791 icsk->icsk_retransmits,
a7cb5a49 1792 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
6687e988 1793 icsk->icsk_probes_out,
1da177e4
LT
1794 sock_i_ino(sp),
1795 atomic_read(&sp->sk_refcnt), sp,
7be87351
SH
1796 jiffies_to_clock_t(icsk->icsk_rto),
1797 jiffies_to_clock_t(icsk->icsk_ack.ato),
4c99aa40 1798 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
0b6a05c1 1799 tp->snd_cwnd,
00fd38d9 1800 state == TCP_LISTEN ?
0536fcc0 1801 fastopenq->max_qlen :
0a672f74 1802 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1da177e4
LT
1803 );
1804}
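/*
 * Editor's illustration (not kernel code): get_tcp6_sock() above prints
 * each IPv6 address as the four s6_addr32 words in %08X, i.e. as
 * host-endian integers, followed by ":port" in hex.  A minimal userspace
 * sketch that parses one such local_address field from /proc/net/tcp6
 * back into a struct in6_addr; the field layout is assumed solely from
 * the seq_printf() format string, and the sample value below is what a
 * little-endian host would print for ::1 port 8080.
 */
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <arpa/inet.h>
#include <netinet/in.h>

static int parse_tcp6_addr(const char *field, struct in6_addr *addr,
			   unsigned int *port)
{
	uint32_t w[4];

	if (sscanf(field, "%8" SCNx32 "%8" SCNx32 "%8" SCNx32 "%8" SCNx32 ":%x",
		   &w[0], &w[1], &w[2], &w[3], port) != 5)
		return -1;

	/* Each word was printed from memory as a host-endian integer, so
	 * writing it back the same way on the same machine restores the
	 * original network-order bytes of the address. */
	memcpy(addr->s6_addr, w, sizeof(w));
	return 0;
}

int main(void)
{
	struct in6_addr a;
	unsigned int port;
	char buf[INET6_ADDRSTRLEN];

	if (!parse_tcp6_addr("00000000000000000000000001000000:1F90", &a, &port)) {
		inet_ntop(AF_INET6, &a, buf, sizeof(buf));
		printf("%s port %u\n", buf, port);
	}
	return 0;
}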
1805
1ab1457c 1806static void get_timewait6_sock(struct seq_file *seq,
8feaf0c0 1807 struct inet_timewait_sock *tw, int i)
1da177e4 1808{
789f558c 1809 long delta = tw->tw_timer.expires - jiffies;
b71d1d42 1810 const struct in6_addr *dest, *src;
1da177e4 1811 __u16 destp, srcp;
1da177e4 1812
efe4208f
ED
1813 dest = &tw->tw_v6_daddr;
1814 src = &tw->tw_v6_rcv_saddr;
1da177e4
LT
1815 destp = ntohs(tw->tw_dport);
1816 srcp = ntohs(tw->tw_sport);
1817
1818 seq_printf(seq,
1819 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
71338aa7 1820 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1da177e4
LT
1821 i,
1822 src->s6_addr32[0], src->s6_addr32[1],
1823 src->s6_addr32[2], src->s6_addr32[3], srcp,
1824 dest->s6_addr32[0], dest->s6_addr32[1],
1825 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1826 tw->tw_substate, 0, 0,
a399a805 1827 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1da177e4
LT
1828 atomic_read(&tw->tw_refcnt), tw);
1829}
1830
1da177e4
LT
1831static int tcp6_seq_show(struct seq_file *seq, void *v)
1832{
1833 struct tcp_iter_state *st;
05dbc7b5 1834 struct sock *sk = v;
1da177e4
LT
1835
1836 if (v == SEQ_START_TOKEN) {
1837 seq_puts(seq,
1838 " sl "
1839 "local_address "
1840 "remote_address "
1841 "st tx_queue rx_queue tr tm->when retrnsmt"
1842 " uid timeout inode\n");
1843 goto out;
1844 }
1845 st = seq->private;
1846
079096f1
ED
1847 if (sk->sk_state == TCP_TIME_WAIT)
1848 get_timewait6_sock(seq, v, st->num);
1849 else if (sk->sk_state == TCP_NEW_SYN_RECV)
aa3a0c8c 1850 get_openreq6(seq, v, st->num);
079096f1
ED
1851 else
1852 get_tcp6_sock(seq, v, st->num);
1da177e4
LT
1853out:
1854 return 0;
1855}
1856
73cb88ec
AV
1857static const struct file_operations tcp6_afinfo_seq_fops = {
1858 .owner = THIS_MODULE,
1859 .open = tcp_seq_open,
1860 .read = seq_read,
1861 .llseek = seq_lseek,
1862 .release = seq_release_net
1863};
1864
1da177e4 1865static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1da177e4
LT
1866 .name = "tcp6",
1867 .family = AF_INET6,
73cb88ec 1868 .seq_fops = &tcp6_afinfo_seq_fops,
9427c4b3
DL
1869 .seq_ops = {
1870 .show = tcp6_seq_show,
1871 },
1da177e4
LT
1872};
1873
2c8c1e72 1874int __net_init tcp6_proc_init(struct net *net)
1da177e4 1875{
6f8b13bc 1876 return tcp_proc_register(net, &tcp6_seq_afinfo);
1da177e4
LT
1877}
1878
6f8b13bc 1879void tcp6_proc_exit(struct net *net)
1da177e4 1880{
6f8b13bc 1881 tcp_proc_unregister(net, &tcp6_seq_afinfo);
1da177e4
LT
1882}
1883#endif
1884
1885struct proto tcpv6_prot = {
1886 .name = "TCPv6",
1887 .owner = THIS_MODULE,
1888 .close = tcp_close,
1889 .connect = tcp_v6_connect,
1890 .disconnect = tcp_disconnect,
463c84b9 1891 .accept = inet_csk_accept,
1da177e4
LT
1892 .ioctl = tcp_ioctl,
1893 .init = tcp_v6_init_sock,
1894 .destroy = tcp_v6_destroy_sock,
1895 .shutdown = tcp_shutdown,
1896 .setsockopt = tcp_setsockopt,
1897 .getsockopt = tcp_getsockopt,
1da177e4 1898 .recvmsg = tcp_recvmsg,
7ba42910
CG
1899 .sendmsg = tcp_sendmsg,
1900 .sendpage = tcp_sendpage,
1da177e4 1901 .backlog_rcv = tcp_v6_do_rcv,
46d3ceab 1902 .release_cb = tcp_release_cb,
496611d7 1903 .hash = inet6_hash,
ab1e0a13
ACM
1904 .unhash = inet_unhash,
1905 .get_port = inet_csk_get_port,
1da177e4 1906 .enter_memory_pressure = tcp_enter_memory_pressure,
c9bee3b7 1907 .stream_memory_free = tcp_stream_memory_free,
1da177e4
LT
1908 .sockets_allocated = &tcp_sockets_allocated,
1909 .memory_allocated = &tcp_memory_allocated,
1910 .memory_pressure = &tcp_memory_pressure,
0a5578cf 1911 .orphan_count = &tcp_orphan_count,
a4fe34bf 1912 .sysctl_mem = sysctl_tcp_mem,
1da177e4
LT
1913 .sysctl_wmem = sysctl_tcp_wmem,
1914 .sysctl_rmem = sysctl_tcp_rmem,
1915 .max_header = MAX_TCP_HEADER,
1916 .obj_size = sizeof(struct tcp6_sock),
3ab5aee7 1917 .slab_flags = SLAB_DESTROY_BY_RCU,
6d6ee43e 1918 .twsk_prot = &tcp6_timewait_sock_ops,
60236fdd 1919 .rsk_prot = &tcp6_request_sock_ops,
39d8cda7 1920 .h.hashinfo = &tcp_hashinfo,
7ba42910 1921 .no_autobind = true,
543d9cfe
ACM
1922#ifdef CONFIG_COMPAT
1923 .compat_setsockopt = compat_tcp_setsockopt,
1924 .compat_getsockopt = compat_tcp_getsockopt,
d1a4c0b3 1925#endif
c1e64e29 1926 .diag_destroy = tcp_abort,
1da177e4
LT
1927};
1928
41135cc8 1929static const struct inet6_protocol tcpv6_protocol = {
c7109986 1930 .early_demux = tcp_v6_early_demux,
1da177e4
LT
1931 .handler = tcp_v6_rcv,
1932 .err_handler = tcp_v6_err,
1933 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1934};
1935
1da177e4
LT
1936static struct inet_protosw tcpv6_protosw = {
1937 .type = SOCK_STREAM,
1938 .protocol = IPPROTO_TCP,
1939 .prot = &tcpv6_prot,
1940 .ops = &inet6_stream_ops,
d83d8461
ACM
1941 .flags = INET_PROTOSW_PERMANENT |
1942 INET_PROTOSW_ICSK,
1da177e4
LT
1943};
1944
2c8c1e72 1945static int __net_init tcpv6_net_init(struct net *net)
93ec926b 1946{
5677242f
DL
1947 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1948 SOCK_RAW, IPPROTO_TCP, net);
93ec926b
DL
1949}
1950
2c8c1e72 1951static void __net_exit tcpv6_net_exit(struct net *net)
93ec926b 1952{
5677242f 1953 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
b099ce26
EB
1954}
1955
2c8c1e72 1956static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
b099ce26
EB
1957{
1958 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
93ec926b
DL
1959}
1960
1961static struct pernet_operations tcpv6_net_ops = {
b099ce26
EB
1962 .init = tcpv6_net_init,
1963 .exit = tcpv6_net_exit,
1964 .exit_batch = tcpv6_net_exit_batch,
93ec926b
DL
1965};
1966
7f4e4868 1967int __init tcpv6_init(void)
1da177e4 1968{
7f4e4868
DL
1969 int ret;
1970
3336288a
VY
1971 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1972 if (ret)
c6b641a4 1973 goto out;
3336288a 1974
1da177e4 1975 /* register inet6 protocol */
7f4e4868
DL
1976 ret = inet6_register_protosw(&tcpv6_protosw);
1977 if (ret)
1978 goto out_tcpv6_protocol;
1979
93ec926b 1980 ret = register_pernet_subsys(&tcpv6_net_ops);
7f4e4868
DL
1981 if (ret)
1982 goto out_tcpv6_protosw;
1983out:
1984 return ret;
ae0f7d5f 1985
7f4e4868
DL
1986out_tcpv6_protosw:
1987 inet6_unregister_protosw(&tcpv6_protosw);
3336288a
VY
1988out_tcpv6_protocol:
1989 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
7f4e4868
DL
1990 goto out;
1991}
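/*
 * Editor's note (illustration only): tcpv6_init() above uses the common
 * kernel unwind idiom - each successful registration gets a matching
 * cleanup label, a failure jumps to the label that undoes everything
 * registered so far in reverse order, and the labels end with a jump
 * back to the single "out" return point.  A generic, self-contained
 * sketch of the same shape, with hypothetical step_a/step_b helpers that
 * stand in for the registration calls:
 */
#include <stdio.h>

static int step_a(void) { return 0; }	/* succeeds */
static int step_b(void) { return -1; }	/* fails, forcing the unwind */
static void undo_a(void) { puts("undo_a"); }

static int demo_init(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto out;

	ret = step_b();
	if (ret)
		goto out_undo_a;
out:
	return ret;

out_undo_a:
	undo_a();
	goto out;
}

int main(void)
{
	printf("demo_init() = %d\n", demo_init());
	return 0;
}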
1992
09f7709f 1993void tcpv6_exit(void)
7f4e4868 1994{
93ec926b 1995 unregister_pernet_subsys(&tcpv6_net_ops);
7f4e4868
DL
1996 inet6_unregister_protosw(&tcpv6_protosw);
1997 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1da177e4 1998}