]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - net/ipv6/tcp_ipv6.c
net/dccp: fix use after free in tw_timer_handler()
[mirror_ubuntu-jammy-kernel.git] / net / ipv6 / tcp_ipv6.c
CommitLineData
1da177e4
LT
1/*
2 * TCP over IPv6
1ab1457c 3 * Linux INET6 implementation
1da177e4
LT
4 *
5 * Authors:
1ab1457c 6 * Pedro Roque <roque@di.fc.ul.pt>
1da177e4 7 *
1ab1457c 8 * Based on:
1da177e4
LT
9 * linux/net/ipv4/tcp.c
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
12 *
13 * Fixes:
14 * Hideaki YOSHIFUJI : sin6_scope_id support
15 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
16 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
17 * a single port at the same time.
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
eb4dea58 26#include <linux/bottom_half.h>
1da177e4 27#include <linux/module.h>
1da177e4
LT
28#include <linux/errno.h>
29#include <linux/types.h>
30#include <linux/socket.h>
31#include <linux/sockios.h>
32#include <linux/net.h>
33#include <linux/jiffies.h>
34#include <linux/in.h>
35#include <linux/in6.h>
36#include <linux/netdevice.h>
37#include <linux/init.h>
38#include <linux/jhash.h>
39#include <linux/ipsec.h>
40#include <linux/times.h>
5a0e3ad6 41#include <linux/slab.h>
4aa956d8 42#include <linux/uaccess.h>
1da177e4
LT
43#include <linux/ipv6.h>
44#include <linux/icmpv6.h>
45#include <linux/random.h>
46
47#include <net/tcp.h>
48#include <net/ndisc.h>
5324a040 49#include <net/inet6_hashtables.h>
8129765a 50#include <net/inet6_connection_sock.h>
1da177e4
LT
51#include <net/ipv6.h>
52#include <net/transp_v6.h>
53#include <net/addrconf.h>
54#include <net/ip6_route.h>
55#include <net/ip6_checksum.h>
56#include <net/inet_ecn.h>
57#include <net/protocol.h>
58#include <net/xfrm.h>
1da177e4
LT
59#include <net/snmp.h>
60#include <net/dsfield.h>
6d6ee43e 61#include <net/timewait_sock.h>
3d58b5fa 62#include <net/inet_common.h>
6e5714ea 63#include <net/secure_seq.h>
076bb0c8 64#include <net/busy_poll.h>
1da177e4 65
1da177e4
LT
66#include <linux/proc_fs.h>
67#include <linux/seq_file.h>
68
cf80e0e4 69#include <crypto/hash.h>
cfb6eeb4
YH
70#include <linux/scatterlist.h>
71
a00e7444
ED
72static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
73static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
6edafaaf 74 struct request_sock *req);
1da177e4
LT
75
76static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
1da177e4 77
3b401a81
SH
78static const struct inet_connection_sock_af_ops ipv6_mapped;
79static const struct inet_connection_sock_af_ops ipv6_specific;
a928630a 80#ifdef CONFIG_TCP_MD5SIG
b2e4b3de
SH
81static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
82static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
9501f972 83#else
51723935 84static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
b71d1d42 85 const struct in6_addr *addr)
9501f972
YH
86{
87 return NULL;
88}
a928630a 89#endif
1da177e4 90
fae6ef87
NC
91static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
92{
93 struct dst_entry *dst = skb_dst(skb);
fae6ef87 94
5037e9ef 95 if (dst && dst_hold_safe(dst)) {
ca777eff
ED
96 const struct rt6_info *rt = (const struct rt6_info *)dst;
97
ca777eff
ED
98 sk->sk_rx_dst = dst;
99 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
b197df4f 100 inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
ca777eff 101 }
fae6ef87
NC
102}
103
95a22cae 104static u32 tcp_v6_init_sequence(const struct sk_buff *skb, u32 *tsoff)
1da177e4 105{
0660e03f
ACM
106 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
107 ipv6_hdr(skb)->saddr.s6_addr32,
aa8223c7 108 tcp_hdr(skb)->dest,
95a22cae 109 tcp_hdr(skb)->source, tsoff);
1da177e4
LT
110}
111
1ab1457c 112static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
1da177e4
LT
113 int addr_len)
114{
115 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
1ab1457c 116 struct inet_sock *inet = inet_sk(sk);
d83d8461 117 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
118 struct ipv6_pinfo *np = inet6_sk(sk);
119 struct tcp_sock *tp = tcp_sk(sk);
20c59de2 120 struct in6_addr *saddr = NULL, *final_p, final;
45f6fad8 121 struct ipv6_txoptions *opt;
4c9483b2 122 struct flowi6 fl6;
1da177e4
LT
123 struct dst_entry *dst;
124 int addr_type;
125 int err;
1946e672 126 struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
1da177e4 127
1ab1457c 128 if (addr_len < SIN6_LEN_RFC2133)
1da177e4
LT
129 return -EINVAL;
130
1ab1457c 131 if (usin->sin6_family != AF_INET6)
a02cec21 132 return -EAFNOSUPPORT;
1da177e4 133
4c9483b2 134 memset(&fl6, 0, sizeof(fl6));
1da177e4
LT
135
136 if (np->sndflow) {
4c9483b2
DM
137 fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
138 IP6_ECN_flow_init(fl6.flowlabel);
139 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
1da177e4 140 struct ip6_flowlabel *flowlabel;
4c9483b2 141 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
63159f29 142 if (!flowlabel)
1da177e4 143 return -EINVAL;
1da177e4
LT
144 fl6_sock_release(flowlabel);
145 }
146 }
147
148 /*
1ab1457c
YH
149 * connect() to INADDR_ANY means loopback (BSD'ism).
150 */
151
052d2369
JL
152 if (ipv6_addr_any(&usin->sin6_addr)) {
153 if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
154 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
155 &usin->sin6_addr);
156 else
157 usin->sin6_addr = in6addr_loopback;
158 }
1da177e4
LT
159
160 addr_type = ipv6_addr_type(&usin->sin6_addr);
161
4c99aa40 162 if (addr_type & IPV6_ADDR_MULTICAST)
1da177e4
LT
163 return -ENETUNREACH;
164
165 if (addr_type&IPV6_ADDR_LINKLOCAL) {
166 if (addr_len >= sizeof(struct sockaddr_in6) &&
167 usin->sin6_scope_id) {
168 /* If interface is set while binding, indices
169 * must coincide.
170 */
171 if (sk->sk_bound_dev_if &&
172 sk->sk_bound_dev_if != usin->sin6_scope_id)
173 return -EINVAL;
174
175 sk->sk_bound_dev_if = usin->sin6_scope_id;
176 }
177
178 /* Connect to link-local address requires an interface */
179 if (!sk->sk_bound_dev_if)
180 return -EINVAL;
181 }
182
183 if (tp->rx_opt.ts_recent_stamp &&
efe4208f 184 !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
1da177e4
LT
185 tp->rx_opt.ts_recent = 0;
186 tp->rx_opt.ts_recent_stamp = 0;
187 tp->write_seq = 0;
188 }
189
efe4208f 190 sk->sk_v6_daddr = usin->sin6_addr;
4c9483b2 191 np->flow_label = fl6.flowlabel;
1da177e4
LT
192
193 /*
194 * TCP over IPv4
195 */
196
052d2369 197 if (addr_type & IPV6_ADDR_MAPPED) {
d83d8461 198 u32 exthdrlen = icsk->icsk_ext_hdr_len;
1da177e4
LT
199 struct sockaddr_in sin;
200
201 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
202
203 if (__ipv6_only_sock(sk))
204 return -ENETUNREACH;
205
206 sin.sin_family = AF_INET;
207 sin.sin_port = usin->sin6_port;
208 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
209
d83d8461 210 icsk->icsk_af_ops = &ipv6_mapped;
1da177e4 211 sk->sk_backlog_rcv = tcp_v4_do_rcv;
cfb6eeb4
YH
212#ifdef CONFIG_TCP_MD5SIG
213 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
214#endif
1da177e4
LT
215
216 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
217
218 if (err) {
d83d8461
ACM
219 icsk->icsk_ext_hdr_len = exthdrlen;
220 icsk->icsk_af_ops = &ipv6_specific;
1da177e4 221 sk->sk_backlog_rcv = tcp_v6_do_rcv;
cfb6eeb4
YH
222#ifdef CONFIG_TCP_MD5SIG
223 tp->af_specific = &tcp_sock_ipv6_specific;
224#endif
1da177e4 225 goto failure;
1da177e4 226 }
d1e559d0 227 np->saddr = sk->sk_v6_rcv_saddr;
1da177e4
LT
228
229 return err;
230 }
231
efe4208f
ED
232 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
233 saddr = &sk->sk_v6_rcv_saddr;
1da177e4 234
4c9483b2 235 fl6.flowi6_proto = IPPROTO_TCP;
efe4208f 236 fl6.daddr = sk->sk_v6_daddr;
4e3fd7a0 237 fl6.saddr = saddr ? *saddr : np->saddr;
4c9483b2
DM
238 fl6.flowi6_oif = sk->sk_bound_dev_if;
239 fl6.flowi6_mark = sk->sk_mark;
1958b856
DM
240 fl6.fl6_dport = usin->sin6_port;
241 fl6.fl6_sport = inet->inet_sport;
e2d118a1 242 fl6.flowi6_uid = sk->sk_uid;
1da177e4 243
1e1d04e6 244 opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
45f6fad8 245 final_p = fl6_update_dst(&fl6, opt, &final);
1da177e4 246
4c9483b2 247 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
beb8d13b 248
0e0d44ab 249 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
68d0c6d3
DM
250 if (IS_ERR(dst)) {
251 err = PTR_ERR(dst);
1da177e4 252 goto failure;
14e50e57 253 }
1da177e4 254
63159f29 255 if (!saddr) {
4c9483b2 256 saddr = &fl6.saddr;
efe4208f 257 sk->sk_v6_rcv_saddr = *saddr;
1da177e4
LT
258 }
259
260 /* set the source address */
4e3fd7a0 261 np->saddr = *saddr;
c720c7e8 262 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
1da177e4 263
f83ef8c0 264 sk->sk_gso_type = SKB_GSO_TCPV6;
6bd4f355 265 ip6_dst_store(sk, dst, NULL, NULL);
1da177e4 266
1946e672 267 if (tcp_death_row->sysctl_tw_recycle &&
493f377d 268 !tp->rx_opt.ts_recent_stamp &&
fd0273d7 269 ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
81166dd6 270 tcp_fetch_timewait_stamp(sk, dst);
493f377d 271
d83d8461 272 icsk->icsk_ext_hdr_len = 0;
45f6fad8
ED
273 if (opt)
274 icsk->icsk_ext_hdr_len = opt->opt_flen +
275 opt->opt_nflen;
1da177e4
LT
276
277 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
278
c720c7e8 279 inet->inet_dport = usin->sin6_port;
1da177e4
LT
280
281 tcp_set_state(sk, TCP_SYN_SENT);
1946e672 282 err = inet6_hash_connect(tcp_death_row, sk);
1da177e4
LT
283 if (err)
284 goto late_failure;
285
877d1f62 286 sk_set_txhash(sk);
9e7ceb06 287
2b916477 288 if (!tp->write_seq && likely(!tp->repair))
1da177e4 289 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
efe4208f 290 sk->sk_v6_daddr.s6_addr32,
c720c7e8 291 inet->inet_sport,
95a22cae
FW
292 inet->inet_dport,
293 &tp->tsoffset);
1da177e4 294
19f6d3f3
WW
295 if (tcp_fastopen_defer_connect(sk, &err))
296 return err;
297 if (err)
298 goto late_failure;
299
1da177e4
LT
300 err = tcp_connect(sk);
301 if (err)
302 goto late_failure;
303
304 return 0;
305
306late_failure:
307 tcp_set_state(sk, TCP_CLOSE);
1da177e4 308failure:
c720c7e8 309 inet->inet_dport = 0;
1da177e4
LT
310 sk->sk_route_caps = 0;
311 return err;
312}
313
563d34d0
ED
314static void tcp_v6_mtu_reduced(struct sock *sk)
315{
316 struct dst_entry *dst;
317
318 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
319 return;
320
321 dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
322 if (!dst)
323 return;
324
325 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
326 tcp_sync_mss(sk, dst_mtu(dst));
327 tcp_simple_retransmit(sk);
328 }
329}
330
1da177e4 331static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
d5fdd6ba 332 u8 type, u8 code, int offset, __be32 info)
1da177e4 333{
4c99aa40 334 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
505cbfc5 335 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
2215089b
ED
336 struct net *net = dev_net(skb->dev);
337 struct request_sock *fastopen;
1da177e4 338 struct ipv6_pinfo *np;
1ab1457c 339 struct tcp_sock *tp;
0a672f74 340 __u32 seq, snd_una;
2215089b 341 struct sock *sk;
9cf74903 342 bool fatal;
2215089b 343 int err;
1da177e4 344
2215089b
ED
345 sk = __inet6_lookup_established(net, &tcp_hashinfo,
346 &hdr->daddr, th->dest,
347 &hdr->saddr, ntohs(th->source),
348 skb->dev->ifindex);
1da177e4 349
2215089b 350 if (!sk) {
a16292a0
ED
351 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
352 ICMP6_MIB_INERRORS);
1da177e4
LT
353 return;
354 }
355
356 if (sk->sk_state == TCP_TIME_WAIT) {
9469c7b4 357 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
358 return;
359 }
2215089b 360 seq = ntohl(th->seq);
9cf74903 361 fatal = icmpv6_err_convert(type, code, &err);
2215089b 362 if (sk->sk_state == TCP_NEW_SYN_RECV)
9cf74903 363 return tcp_req_err(sk, seq, fatal);
1da177e4
LT
364
365 bh_lock_sock(sk);
563d34d0 366 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
02a1d6e7 367 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
1da177e4
LT
368
369 if (sk->sk_state == TCP_CLOSE)
370 goto out;
371
e802af9c 372 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
02a1d6e7 373 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
e802af9c
SH
374 goto out;
375 }
376
1da177e4 377 tp = tcp_sk(sk);
0a672f74
YC
378 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
379 fastopen = tp->fastopen_rsk;
380 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
1da177e4 381 if (sk->sk_state != TCP_LISTEN &&
0a672f74 382 !between(seq, snd_una, tp->snd_nxt)) {
02a1d6e7 383 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
1da177e4
LT
384 goto out;
385 }
386
387 np = inet6_sk(sk);
388
ec18d9a2
DM
389 if (type == NDISC_REDIRECT) {
390 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
391
1ed5c48f 392 if (dst)
6700c270 393 dst->ops->redirect(dst, sk, skb);
50a75a89 394 goto out;
ec18d9a2
DM
395 }
396
1da177e4 397 if (type == ICMPV6_PKT_TOOBIG) {
0d4f0608
ED
398 /* We are not interested in TCP_LISTEN and open_requests
399 * (SYN-ACKs send out by Linux are always <576bytes so
400 * they should go through unfragmented).
401 */
402 if (sk->sk_state == TCP_LISTEN)
403 goto out;
404
93b36cf3
HFS
405 if (!ip6_sk_accept_pmtu(sk))
406 goto out;
407
563d34d0
ED
408 tp->mtu_info = ntohl(info);
409 if (!sock_owned_by_user(sk))
410 tcp_v6_mtu_reduced(sk);
d013ef2a 411 else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
7aa5470c 412 &sk->sk_tsq_flags))
d013ef2a 413 sock_hold(sk);
1da177e4
LT
414 goto out;
415 }
416
1da177e4 417
60236fdd 418 /* Might be for an request_sock */
1da177e4 419 switch (sk->sk_state) {
1da177e4 420 case TCP_SYN_SENT:
0a672f74
YC
421 case TCP_SYN_RECV:
422 /* Only in fast or simultaneous open. If a fast open socket is
423 * is already accepted it is treated as a connected one below.
424 */
63159f29 425 if (fastopen && !fastopen->sk)
0a672f74
YC
426 break;
427
1da177e4 428 if (!sock_owned_by_user(sk)) {
1da177e4
LT
429 sk->sk_err = err;
430 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
431
432 tcp_done(sk);
433 } else
434 sk->sk_err_soft = err;
435 goto out;
436 }
437
438 if (!sock_owned_by_user(sk) && np->recverr) {
439 sk->sk_err = err;
440 sk->sk_error_report(sk);
441 } else
442 sk->sk_err_soft = err;
443
444out:
445 bh_unlock_sock(sk);
446 sock_put(sk);
447}
448
449
0f935dbe 450static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
d6274bd8 451 struct flowi *fl,
3840a06e 452 struct request_sock *req,
ca6fb065 453 struct tcp_fastopen_cookie *foc,
b3d05147 454 enum tcp_synack_type synack_type)
1da177e4 455{
634fb979 456 struct inet_request_sock *ireq = inet_rsk(req);
1da177e4 457 struct ipv6_pinfo *np = inet6_sk(sk);
56ac42bc 458 struct ipv6_txoptions *opt;
d6274bd8 459 struct flowi6 *fl6 = &fl->u.ip6;
4c99aa40 460 struct sk_buff *skb;
9494218f 461 int err = -ENOMEM;
1da177e4 462
9f10d3f6 463 /* First, grab a route. */
f76b33c3
ED
464 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
465 IPPROTO_TCP)) == NULL)
fd80eb94 466 goto done;
9494218f 467
b3d05147 468 skb = tcp_make_synack(sk, dst, req, foc, synack_type);
9494218f 469
1da177e4 470 if (skb) {
634fb979
ED
471 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
472 &ireq->ir_v6_rmt_addr);
1da177e4 473
634fb979 474 fl6->daddr = ireq->ir_v6_rmt_addr;
53b24b8f 475 if (np->repflow && ireq->pktopts)
df3687ff
FF
476 fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
477
3e4006f0 478 rcu_read_lock();
56ac42bc
HD
479 opt = ireq->ipv6_opt;
480 if (!opt)
481 opt = rcu_dereference(np->opt);
92e55f41 482 err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
3e4006f0 483 rcu_read_unlock();
b9df3cb8 484 err = net_xmit_eval(err);
1da177e4
LT
485 }
486
487done:
1da177e4
LT
488 return err;
489}
490
72659ecc 491
60236fdd 492static void tcp_v6_reqsk_destructor(struct request_sock *req)
1da177e4 493{
56ac42bc 494 kfree(inet_rsk(req)->ipv6_opt);
634fb979 495 kfree_skb(inet_rsk(req)->pktopts);
1da177e4
LT
496}
497
cfb6eeb4 498#ifdef CONFIG_TCP_MD5SIG
b83e3deb 499static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
b71d1d42 500 const struct in6_addr *addr)
cfb6eeb4 501{
a915da9b 502 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
cfb6eeb4
YH
503}
504
b83e3deb 505static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
fd3a154a 506 const struct sock *addr_sk)
cfb6eeb4 507{
efe4208f 508 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
cfb6eeb4
YH
509}
510
4aa956d8
WY
511static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
512 int optlen)
cfb6eeb4
YH
513{
514 struct tcp_md5sig cmd;
515 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
cfb6eeb4
YH
516
517 if (optlen < sizeof(cmd))
518 return -EINVAL;
519
520 if (copy_from_user(&cmd, optval, sizeof(cmd)))
521 return -EFAULT;
522
523 if (sin6->sin6_family != AF_INET6)
524 return -EINVAL;
525
526 if (!cmd.tcpm_keylen) {
e773e4fa 527 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
a915da9b
ED
528 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
529 AF_INET);
530 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
531 AF_INET6);
cfb6eeb4
YH
532 }
533
534 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
535 return -EINVAL;
536
a915da9b
ED
537 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
538 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
539 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
cfb6eeb4 540
a915da9b
ED
541 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
542 AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
cfb6eeb4
YH
543}
544
19689e38
ED
545static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
546 const struct in6_addr *daddr,
547 const struct in6_addr *saddr,
548 const struct tcphdr *th, int nbytes)
cfb6eeb4 549{
cfb6eeb4 550 struct tcp6_pseudohdr *bp;
49a72dfb 551 struct scatterlist sg;
19689e38 552 struct tcphdr *_th;
8d26d76d 553
19689e38 554 bp = hp->scratch;
cfb6eeb4 555 /* 1. TCP pseudo-header (RFC2460) */
4e3fd7a0
AD
556 bp->saddr = *saddr;
557 bp->daddr = *daddr;
49a72dfb 558 bp->protocol = cpu_to_be32(IPPROTO_TCP);
00b1304c 559 bp->len = cpu_to_be32(nbytes);
cfb6eeb4 560
19689e38
ED
561 _th = (struct tcphdr *)(bp + 1);
562 memcpy(_th, th, sizeof(*th));
563 _th->check = 0;
564
565 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
566 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
567 sizeof(*bp) + sizeof(*th));
cf80e0e4 568 return crypto_ahash_update(hp->md5_req);
49a72dfb 569}
c7da57a1 570
19689e38 571static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
b71d1d42 572 const struct in6_addr *daddr, struct in6_addr *saddr,
318cf7aa 573 const struct tcphdr *th)
49a72dfb
AL
574{
575 struct tcp_md5sig_pool *hp;
cf80e0e4 576 struct ahash_request *req;
49a72dfb
AL
577
578 hp = tcp_get_md5sig_pool();
579 if (!hp)
580 goto clear_hash_noput;
cf80e0e4 581 req = hp->md5_req;
49a72dfb 582
cf80e0e4 583 if (crypto_ahash_init(req))
49a72dfb 584 goto clear_hash;
19689e38 585 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
49a72dfb
AL
586 goto clear_hash;
587 if (tcp_md5_hash_key(hp, key))
588 goto clear_hash;
cf80e0e4
HX
589 ahash_request_set_crypt(req, NULL, md5_hash, 0);
590 if (crypto_ahash_final(req))
cfb6eeb4 591 goto clear_hash;
cfb6eeb4 592
cfb6eeb4 593 tcp_put_md5sig_pool();
cfb6eeb4 594 return 0;
49a72dfb 595
cfb6eeb4
YH
596clear_hash:
597 tcp_put_md5sig_pool();
598clear_hash_noput:
599 memset(md5_hash, 0, 16);
49a72dfb 600 return 1;
cfb6eeb4
YH
601}
602
39f8e58e
ED
603static int tcp_v6_md5_hash_skb(char *md5_hash,
604 const struct tcp_md5sig_key *key,
318cf7aa 605 const struct sock *sk,
318cf7aa 606 const struct sk_buff *skb)
cfb6eeb4 607{
b71d1d42 608 const struct in6_addr *saddr, *daddr;
49a72dfb 609 struct tcp_md5sig_pool *hp;
cf80e0e4 610 struct ahash_request *req;
318cf7aa 611 const struct tcphdr *th = tcp_hdr(skb);
cfb6eeb4 612
39f8e58e
ED
613 if (sk) { /* valid for establish/request sockets */
614 saddr = &sk->sk_v6_rcv_saddr;
efe4208f 615 daddr = &sk->sk_v6_daddr;
49a72dfb 616 } else {
b71d1d42 617 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
49a72dfb
AL
618 saddr = &ip6h->saddr;
619 daddr = &ip6h->daddr;
cfb6eeb4 620 }
49a72dfb
AL
621
622 hp = tcp_get_md5sig_pool();
623 if (!hp)
624 goto clear_hash_noput;
cf80e0e4 625 req = hp->md5_req;
49a72dfb 626
cf80e0e4 627 if (crypto_ahash_init(req))
49a72dfb
AL
628 goto clear_hash;
629
19689e38 630 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
49a72dfb
AL
631 goto clear_hash;
632 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
633 goto clear_hash;
634 if (tcp_md5_hash_key(hp, key))
635 goto clear_hash;
cf80e0e4
HX
636 ahash_request_set_crypt(req, NULL, md5_hash, 0);
637 if (crypto_ahash_final(req))
49a72dfb
AL
638 goto clear_hash;
639
640 tcp_put_md5sig_pool();
641 return 0;
642
643clear_hash:
644 tcp_put_md5sig_pool();
645clear_hash_noput:
646 memset(md5_hash, 0, 16);
647 return 1;
cfb6eeb4
YH
648}
649
ba8e275a
ED
650#endif
651
652static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
653 const struct sk_buff *skb)
cfb6eeb4 654{
ba8e275a 655#ifdef CONFIG_TCP_MD5SIG
cf533ea5 656 const __u8 *hash_location = NULL;
cfb6eeb4 657 struct tcp_md5sig_key *hash_expected;
b71d1d42 658 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
318cf7aa 659 const struct tcphdr *th = tcp_hdr(skb);
cfb6eeb4 660 int genhash;
cfb6eeb4
YH
661 u8 newhash[16];
662
663 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
7d5d5525 664 hash_location = tcp_parse_md5sig_option(th);
cfb6eeb4 665
785957d3
DM
666 /* We've parsed the options - do we have a hash? */
667 if (!hash_expected && !hash_location)
ff74e23f 668 return false;
785957d3
DM
669
670 if (hash_expected && !hash_location) {
c10d9310 671 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
ff74e23f 672 return true;
cfb6eeb4
YH
673 }
674
785957d3 675 if (!hash_expected && hash_location) {
c10d9310 676 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
ff74e23f 677 return true;
cfb6eeb4
YH
678 }
679
680 /* check the signature */
49a72dfb
AL
681 genhash = tcp_v6_md5_hash_skb(newhash,
682 hash_expected,
39f8e58e 683 NULL, skb);
49a72dfb 684
cfb6eeb4 685 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
72145a68 686 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
e87cc472
JP
687 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
688 genhash ? "failed" : "mismatch",
689 &ip6h->saddr, ntohs(th->source),
690 &ip6h->daddr, ntohs(th->dest));
ff74e23f 691 return true;
cfb6eeb4 692 }
ba8e275a 693#endif
ff74e23f 694 return false;
cfb6eeb4 695}
cfb6eeb4 696
b40cf18e
ED
697static void tcp_v6_init_req(struct request_sock *req,
698 const struct sock *sk_listener,
16bea70a
OP
699 struct sk_buff *skb)
700{
701 struct inet_request_sock *ireq = inet_rsk(req);
b40cf18e 702 const struct ipv6_pinfo *np = inet6_sk(sk_listener);
16bea70a
OP
703
704 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
705 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
706
16bea70a 707 /* So that link locals have meaning */
b40cf18e 708 if (!sk_listener->sk_bound_dev_if &&
16bea70a 709 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
870c3151 710 ireq->ir_iif = tcp_v6_iif(skb);
16bea70a 711
04317daf 712 if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
b40cf18e 713 (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
a224772d 714 np->rxopt.bits.rxinfo ||
16bea70a
OP
715 np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
716 np->rxopt.bits.rxohlim || np->repflow)) {
717 atomic_inc(&skb->users);
718 ireq->pktopts = skb;
719 }
720}
721
f964629e
ED
722static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
723 struct flowi *fl,
d94e0417
OP
724 const struct request_sock *req,
725 bool *strict)
726{
727 if (strict)
728 *strict = true;
f76b33c3 729 return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
d94e0417
OP
730}
731
c6aefafb 732struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
1da177e4 733 .family = AF_INET6,
2e6599cb 734 .obj_size = sizeof(struct tcp6_request_sock),
5db92c99 735 .rtx_syn_ack = tcp_rtx_synack,
60236fdd
ACM
736 .send_ack = tcp_v6_reqsk_send_ack,
737 .destructor = tcp_v6_reqsk_destructor,
72659ecc 738 .send_reset = tcp_v6_send_reset,
4aa956d8 739 .syn_ack_timeout = tcp_syn_ack_timeout,
1da177e4
LT
740};
741
b2e4b3de 742static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
2aec4a29
OP
743 .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
744 sizeof(struct ipv6hdr),
16bea70a 745#ifdef CONFIG_TCP_MD5SIG
fd3a154a 746 .req_md5_lookup = tcp_v6_md5_lookup,
e3afe7b7 747 .calc_md5_hash = tcp_v6_md5_hash_skb,
b6332e6c 748#endif
16bea70a 749 .init_req = tcp_v6_init_req,
fb7b37a7
OP
750#ifdef CONFIG_SYN_COOKIES
751 .cookie_init_seq = cookie_v6_init_sequence,
752#endif
d94e0417 753 .route_req = tcp_v6_route_req,
936b8bdb 754 .init_seq = tcp_v6_init_sequence,
d6274bd8 755 .send_synack = tcp_v6_send_synack,
16bea70a 756};
cfb6eeb4 757
a00e7444 758static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
0f85feae
ED
759 u32 ack, u32 win, u32 tsval, u32 tsecr,
760 int oif, struct tcp_md5sig_key *key, int rst,
5119bd16 761 u8 tclass, __be32 label)
1da177e4 762{
cf533ea5
ED
763 const struct tcphdr *th = tcp_hdr(skb);
764 struct tcphdr *t1;
1da177e4 765 struct sk_buff *buff;
4c9483b2 766 struct flowi6 fl6;
0f85feae 767 struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
e5047992 768 struct sock *ctl_sk = net->ipv6.tcp_sk;
77c676da 769 unsigned int tot_len = sizeof(struct tcphdr);
adf30907 770 struct dst_entry *dst;
81ada62d 771 __be32 *topt;
1da177e4 772
ee684b6f 773 if (tsecr)
626e264d 774 tot_len += TCPOLEN_TSTAMP_ALIGNED;
cfb6eeb4 775#ifdef CONFIG_TCP_MD5SIG
cfb6eeb4
YH
776 if (key)
777 tot_len += TCPOLEN_MD5SIG_ALIGNED;
778#endif
779
cfb6eeb4 780 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1da177e4 781 GFP_ATOMIC);
63159f29 782 if (!buff)
1ab1457c 783 return;
1da177e4 784
cfb6eeb4 785 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1da177e4 786
cfb6eeb4 787 t1 = (struct tcphdr *) skb_push(buff, tot_len);
6651ffc8 788 skb_reset_transport_header(buff);
1da177e4
LT
789
790 /* Swap the send and the receive. */
791 memset(t1, 0, sizeof(*t1));
792 t1->dest = th->source;
793 t1->source = th->dest;
cfb6eeb4 794 t1->doff = tot_len / 4;
626e264d
IJ
795 t1->seq = htonl(seq);
796 t1->ack_seq = htonl(ack);
797 t1->ack = !rst || !th->ack;
798 t1->rst = rst;
799 t1->window = htons(win);
1da177e4 800
81ada62d
IJ
801 topt = (__be32 *)(t1 + 1);
802
ee684b6f 803 if (tsecr) {
626e264d
IJ
804 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
805 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
ee684b6f
AV
806 *topt++ = htonl(tsval);
807 *topt++ = htonl(tsecr);
626e264d
IJ
808 }
809
cfb6eeb4
YH
810#ifdef CONFIG_TCP_MD5SIG
811 if (key) {
81ada62d
IJ
812 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
813 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
814 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
78e645cb
IJ
815 &ipv6_hdr(skb)->saddr,
816 &ipv6_hdr(skb)->daddr, t1);
cfb6eeb4
YH
817 }
818#endif
819
4c9483b2 820 memset(&fl6, 0, sizeof(fl6));
4e3fd7a0
AD
821 fl6.daddr = ipv6_hdr(skb)->saddr;
822 fl6.saddr = ipv6_hdr(skb)->daddr;
1d13a96c 823 fl6.flowlabel = label;
1da177e4 824
e5700aff
DM
825 buff->ip_summed = CHECKSUM_PARTIAL;
826 buff->csum = 0;
827
4c9483b2 828 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
1da177e4 829
4c9483b2 830 fl6.flowi6_proto = IPPROTO_TCP;
a36dbdb2 831 if (rt6_need_strict(&fl6.daddr) && !oif)
870c3151 832 fl6.flowi6_oif = tcp_v6_iif(skb);
9b6c14d5
DA
833 else {
834 if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
835 oif = skb->skb_iif;
836
837 fl6.flowi6_oif = oif;
838 }
1d2f7b2d 839
e110861f 840 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
1958b856
DM
841 fl6.fl6_dport = t1->dest;
842 fl6.fl6_sport = t1->source;
e2d118a1 843 fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
4c9483b2 844 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
1da177e4 845
c20121ae
DL
846 /* Pass a socket to ip6_dst_lookup either it is for RST
847 * Underlying function will use this to retrieve the network
848 * namespace
849 */
0e0d44ab 850 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
68d0c6d3
DM
851 if (!IS_ERR(dst)) {
852 skb_dst_set(buff, dst);
92e55f41 853 ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
c10d9310 854 TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
68d0c6d3 855 if (rst)
c10d9310 856 TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
68d0c6d3 857 return;
1da177e4
LT
858 }
859
860 kfree_skb(buff);
861}
862
a00e7444 863static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
1da177e4 864{
cf533ea5 865 const struct tcphdr *th = tcp_hdr(skb);
626e264d 866 u32 seq = 0, ack_seq = 0;
fa3e5b4e 867 struct tcp_md5sig_key *key = NULL;
658ddaaf
SL
868#ifdef CONFIG_TCP_MD5SIG
869 const __u8 *hash_location = NULL;
870 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
871 unsigned char newhash[16];
872 int genhash;
873 struct sock *sk1 = NULL;
874#endif
9c76a114 875 int oif;
1da177e4 876
626e264d 877 if (th->rst)
1da177e4
LT
878 return;
879
c3658e8d
ED
880 /* If sk not NULL, it means we did a successful lookup and incoming
881 * route had to be correct. prequeue might have dropped our dst.
882 */
883 if (!sk && !ipv6_unicast_destination(skb))
626e264d 884 return;
1da177e4 885
cfb6eeb4 886#ifdef CONFIG_TCP_MD5SIG
3b24d854 887 rcu_read_lock();
658ddaaf 888 hash_location = tcp_parse_md5sig_option(th);
271c3b9b 889 if (sk && sk_fullsock(sk)) {
e46787f0
FW
890 key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
891 } else if (hash_location) {
658ddaaf
SL
892 /*
893 * active side is lost. Try to find listening socket through
894 * source port, and then find md5 key through listening socket.
895 * we are not loose security here:
896 * Incoming packet is checked with md5 hash with finding key,
897 * no RST generated if md5 hash doesn't match.
898 */
899 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
a583636a
CG
900 &tcp_hashinfo, NULL, 0,
901 &ipv6h->saddr,
5ba24953 902 th->source, &ipv6h->daddr,
870c3151 903 ntohs(th->source), tcp_v6_iif(skb));
658ddaaf 904 if (!sk1)
3b24d854 905 goto out;
658ddaaf 906
658ddaaf
SL
907 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
908 if (!key)
3b24d854 909 goto out;
658ddaaf 910
39f8e58e 911 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
658ddaaf 912 if (genhash || memcmp(hash_location, newhash, 16) != 0)
3b24d854 913 goto out;
658ddaaf 914 }
cfb6eeb4
YH
915#endif
916
626e264d
IJ
917 if (th->ack)
918 seq = ntohl(th->ack_seq);
919 else
920 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
921 (th->doff << 2);
1da177e4 922
9c76a114 923 oif = sk ? sk->sk_bound_dev_if : 0;
0f85feae 924 tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
658ddaaf
SL
925
926#ifdef CONFIG_TCP_MD5SIG
3b24d854
ED
927out:
928 rcu_read_unlock();
658ddaaf 929#endif
626e264d 930}
1da177e4 931
a00e7444 932static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
0f85feae 933 u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
1d13a96c 934 struct tcp_md5sig_key *key, u8 tclass,
5119bd16 935 __be32 label)
626e264d 936{
0f85feae
ED
937 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
938 tclass, label);
1da177e4
LT
939}
940
941static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
942{
8feaf0c0 943 struct inet_timewait_sock *tw = inet_twsk(sk);
cfb6eeb4 944 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1da177e4 945
0f85feae 946 tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
8feaf0c0 947 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
ee684b6f 948 tcp_time_stamp + tcptw->tw_ts_offset,
9c76a114 949 tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
21858cd0 950 tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
1da177e4 951
8feaf0c0 952 inet_twsk_put(tw);
1da177e4
LT
953}
954
a00e7444 955static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
6edafaaf 956 struct request_sock *req)
1da177e4 957{
3a19ce0e
DL
958 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
959 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
960 */
20a2b49f
ED
961 /* RFC 7323 2.3
962 * The window field (SEG.WND) of every outgoing segment, with the
963 * exception of <SYN> segments, MUST be right-shifted by
964 * Rcv.Wind.Shift bits:
965 */
0f85feae 966 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
3a19ce0e 967 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
20a2b49f
ED
968 tcp_rsk(req)->rcv_nxt,
969 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
95a22cae
FW
970 tcp_time_stamp + tcp_rsk(req)->ts_off,
971 req->ts_recent, sk->sk_bound_dev_if,
1d13a96c
FF
972 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
973 0, 0);
1da177e4
LT
974}
975
976
079096f1 977static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
1da177e4 978{
079096f1 979#ifdef CONFIG_SYN_COOKIES
aa8223c7 980 const struct tcphdr *th = tcp_hdr(skb);
1da177e4 981
af9b4738 982 if (!th->syn)
c6aefafb 983 sk = cookie_v6_check(sk, skb);
1da177e4
LT
984#endif
985 return sk;
986}
987
1da177e4
LT
988static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
989{
1da177e4
LT
990 if (skb->protocol == htons(ETH_P_IP))
991 return tcp_v4_conn_request(sk, skb);
992
993 if (!ipv6_unicast_destination(skb))
1ab1457c 994 goto drop;
1da177e4 995
1fb6f159
OP
996 return tcp_conn_request(&tcp6_request_sock_ops,
997 &tcp_request_sock_ipv6_ops, sk, skb);
1da177e4
LT
998
999drop:
9caad864 1000 tcp_listendrop(sk);
1da177e4
LT
1001 return 0; /* don't send reset */
1002}
1003
ebf6c9cb
ED
1004static void tcp_v6_restore_cb(struct sk_buff *skb)
1005{
1006 /* We need to move header back to the beginning if xfrm6_policy_check()
1007 * and tcp_v6_fill_cb() are going to be called again.
1008 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1009 */
1010 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1011 sizeof(struct inet6_skb_parm));
1012}
1013
0c27171e 1014static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
4c99aa40 1015 struct request_sock *req,
5e0724d0
ED
1016 struct dst_entry *dst,
1017 struct request_sock *req_unhash,
1018 bool *own_req)
1da177e4 1019{
634fb979 1020 struct inet_request_sock *ireq;
0c27171e
ED
1021 struct ipv6_pinfo *newnp;
1022 const struct ipv6_pinfo *np = inet6_sk(sk);
45f6fad8 1023 struct ipv6_txoptions *opt;
1da177e4
LT
1024 struct tcp6_sock *newtcp6sk;
1025 struct inet_sock *newinet;
1026 struct tcp_sock *newtp;
1027 struct sock *newsk;
cfb6eeb4
YH
1028#ifdef CONFIG_TCP_MD5SIG
1029 struct tcp_md5sig_key *key;
1030#endif
3840a06e 1031 struct flowi6 fl6;
1da177e4
LT
1032
1033 if (skb->protocol == htons(ETH_P_IP)) {
1034 /*
1035 * v6 mapped
1036 */
1037
5e0724d0
ED
1038 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1039 req_unhash, own_req);
1da177e4 1040
63159f29 1041 if (!newsk)
1da177e4
LT
1042 return NULL;
1043
1044 newtcp6sk = (struct tcp6_sock *)newsk;
1045 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1046
1047 newinet = inet_sk(newsk);
1048 newnp = inet6_sk(newsk);
1049 newtp = tcp_sk(newsk);
1050
1051 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1052
d1e559d0 1053 newnp->saddr = newsk->sk_v6_rcv_saddr;
1da177e4 1054
8292a17a 1055 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1da177e4 1056 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
cfb6eeb4
YH
1057#ifdef CONFIG_TCP_MD5SIG
1058 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1059#endif
1060
676a1184
YZ
1061 newnp->ipv6_ac_list = NULL;
1062 newnp->ipv6_fl_list = NULL;
1da177e4
LT
1063 newnp->pktoptions = NULL;
1064 newnp->opt = NULL;
870c3151 1065 newnp->mcast_oif = tcp_v6_iif(skb);
0660e03f 1066 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1397ed35 1067 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
df3687ff
FF
1068 if (np->repflow)
1069 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1da177e4 1070
e6848976
ACM
1071 /*
1072 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1073 * here, tcp_create_openreq_child now does this for us, see the comment in
1074 * that function for the gory details. -acme
1da177e4 1075 */
1da177e4
LT
1076
1077 /* It is tricky place. Until this moment IPv4 tcp
8292a17a 1078 worked with IPv6 icsk.icsk_af_ops.
1da177e4
LT
1079 Sync it now.
1080 */
d83d8461 1081 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1da177e4
LT
1082
1083 return newsk;
1084 }
1085
634fb979 1086 ireq = inet_rsk(req);
1da177e4
LT
1087
1088 if (sk_acceptq_is_full(sk))
1089 goto out_overflow;
1090
493f377d 1091 if (!dst) {
f76b33c3 1092 dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
493f377d 1093 if (!dst)
1da177e4 1094 goto out;
1ab1457c 1095 }
1da177e4
LT
1096
1097 newsk = tcp_create_openreq_child(sk, req, skb);
63159f29 1098 if (!newsk)
093d2823 1099 goto out_nonewsk;
1da177e4 1100
e6848976
ACM
1101 /*
1102 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1103 * count here, tcp_create_openreq_child now does this for us, see the
1104 * comment in that function for the gory details. -acme
1105 */
1da177e4 1106
59eed279 1107 newsk->sk_gso_type = SKB_GSO_TCPV6;
6bd4f355 1108 ip6_dst_store(newsk, dst, NULL, NULL);
fae6ef87 1109 inet6_sk_rx_dst_set(newsk, skb);
1da177e4
LT
1110
1111 newtcp6sk = (struct tcp6_sock *)newsk;
1112 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1113
1114 newtp = tcp_sk(newsk);
1115 newinet = inet_sk(newsk);
1116 newnp = inet6_sk(newsk);
1117
1118 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1119
634fb979
ED
1120 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1121 newnp->saddr = ireq->ir_v6_loc_addr;
1122 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1123 newsk->sk_bound_dev_if = ireq->ir_iif;
1da177e4 1124
1ab1457c 1125 /* Now IPv6 options...
1da177e4
LT
1126
1127 First: no IPv4 options.
1128 */
f6d8bd05 1129 newinet->inet_opt = NULL;
676a1184 1130 newnp->ipv6_ac_list = NULL;
d35690be 1131 newnp->ipv6_fl_list = NULL;
1da177e4
LT
1132
1133 /* Clone RX bits */
1134 newnp->rxopt.all = np->rxopt.all;
1135
1da177e4 1136 newnp->pktoptions = NULL;
1da177e4 1137 newnp->opt = NULL;
870c3151 1138 newnp->mcast_oif = tcp_v6_iif(skb);
0660e03f 1139 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1397ed35 1140 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
df3687ff
FF
1141 if (np->repflow)
1142 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1da177e4
LT
1143
1144 /* Clone native IPv6 options from listening socket (if any)
1145
1146 Yes, keeping reference count would be much more clever,
1147 but we make one more one thing there: reattach optmem
1148 to newsk.
1149 */
56ac42bc
HD
1150 opt = ireq->ipv6_opt;
1151 if (!opt)
1152 opt = rcu_dereference(np->opt);
45f6fad8
ED
1153 if (opt) {
1154 opt = ipv6_dup_options(newsk, opt);
1155 RCU_INIT_POINTER(newnp->opt, opt);
1156 }
d83d8461 1157 inet_csk(newsk)->icsk_ext_hdr_len = 0;
45f6fad8
ED
1158 if (opt)
1159 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1160 opt->opt_flen;
1da177e4 1161
81164413
DB
1162 tcp_ca_openreq_child(newsk, dst);
1163
1da177e4 1164 tcp_sync_mss(newsk, dst_mtu(dst));
3541f9e8 1165 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
d135c522 1166
1da177e4
LT
1167 tcp_initialize_rcv_mss(newsk);
1168
c720c7e8
ED
1169 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1170 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1da177e4 1171
cfb6eeb4
YH
1172#ifdef CONFIG_TCP_MD5SIG
1173 /* Copy over the MD5 key from the original socket */
4aa956d8 1174 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
53b24b8f 1175 if (key) {
cfb6eeb4
YH
1176 /* We're using one, so create a matching key
1177 * on the newsk structure. If we fail to get
1178 * memory, then we end up not copying the key
1179 * across. Shucks.
1180 */
efe4208f 1181 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
99a1dec7 1182 AF_INET6, key->key, key->keylen,
7450aaf6 1183 sk_gfp_mask(sk, GFP_ATOMIC));
cfb6eeb4
YH
1184 }
1185#endif
1186
093d2823 1187 if (__inet_inherit_port(sk, newsk) < 0) {
e337e24d
CP
1188 inet_csk_prepare_forced_close(newsk);
1189 tcp_done(newsk);
093d2823
BS
1190 goto out;
1191 }
5e0724d0 1192 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
805c4bc0 1193 if (*own_req) {
49a496c9 1194 tcp_move_syn(newtp, req);
805c4bc0
ED
1195
1196 /* Clone pktoptions received with SYN, if we own the req */
1197 if (ireq->pktopts) {
1198 newnp->pktoptions = skb_clone(ireq->pktopts,
7450aaf6 1199 sk_gfp_mask(sk, GFP_ATOMIC));
805c4bc0
ED
1200 consume_skb(ireq->pktopts);
1201 ireq->pktopts = NULL;
ebf6c9cb
ED
1202 if (newnp->pktoptions) {
1203 tcp_v6_restore_cb(newnp->pktoptions);
805c4bc0 1204 skb_set_owner_r(newnp->pktoptions, newsk);
ebf6c9cb 1205 }
805c4bc0 1206 }
ce105008 1207 }
1da177e4
LT
1208
1209 return newsk;
1210
1211out_overflow:
02a1d6e7 1212 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
093d2823 1213out_nonewsk:
1da177e4 1214 dst_release(dst);
093d2823 1215out:
9caad864 1216 tcp_listendrop(sk);
1da177e4
LT
1217 return NULL;
1218}
1219
1da177e4 1220/* The socket must have it's spinlock held when we get
e994b2f0 1221 * here, unless it is a TCP_LISTEN socket.
1da177e4
LT
1222 *
1223 * We have a potential double-lock case here, so even when
1224 * doing backlog processing we use the BH locking scheme.
1225 * This is because we cannot sleep with the original spinlock
1226 * held.
1227 */
1228static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1229{
1230 struct ipv6_pinfo *np = inet6_sk(sk);
1231 struct tcp_sock *tp;
1232 struct sk_buff *opt_skb = NULL;
1233
1234 /* Imagine: socket is IPv6. IPv4 packet arrives,
1235 goes to IPv4 receive handler and backlogged.
1236 From backlog it always goes here. Kerboom...
1237 Fortunately, tcp_rcv_established and rcv_established
1238 handle them correctly, but it is not case with
1239 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1240 */
1241
1242 if (skb->protocol == htons(ETH_P_IP))
1243 return tcp_v4_do_rcv(sk, skb);
1244
ac6e7800 1245 if (tcp_filter(sk, skb))
1da177e4
LT
1246 goto discard;
1247
1248 /*
1249 * socket locking is here for SMP purposes as backlog rcv
1250 * is currently called with bh processing disabled.
1251 */
1252
1253 /* Do Stevens' IPV6_PKTOPTIONS.
1254
1255 Yes, guys, it is the only place in our code, where we
1256 may make it not affecting IPv4.
1257 The rest of code is protocol independent,
1258 and I do not like idea to uglify IPv4.
1259
1260 Actually, all the idea behind IPV6_PKTOPTIONS
1261 looks not very well thought. For now we latch
1262 options, received in the last packet, enqueued
1263 by tcp. Feel free to propose better solution.
1ab1457c 1264 --ANK (980728)
1da177e4
LT
1265 */
1266 if (np->rxopt.all)
7450aaf6 1267 opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
1da177e4
LT
1268
1269 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
5d299f3d
ED
1270 struct dst_entry *dst = sk->sk_rx_dst;
1271
bdeab991 1272 sock_rps_save_rxhash(sk, skb);
3d97379a 1273 sk_mark_napi_id(sk, skb);
5d299f3d
ED
1274 if (dst) {
1275 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1276 dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1277 dst_release(dst);
1278 sk->sk_rx_dst = NULL;
1279 }
1280 }
1281
c995ae22 1282 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1da177e4
LT
1283 if (opt_skb)
1284 goto ipv6_pktoptions;
1285 return 0;
1286 }
1287
12e25e10 1288 if (tcp_checksum_complete(skb))
1da177e4
LT
1289 goto csum_err;
1290
1ab1457c 1291 if (sk->sk_state == TCP_LISTEN) {
079096f1
ED
1292 struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1293
1da177e4
LT
1294 if (!nsk)
1295 goto discard;
1296
4c99aa40 1297 if (nsk != sk) {
bdeab991 1298 sock_rps_save_rxhash(nsk, skb);
38cb5245 1299 sk_mark_napi_id(nsk, skb);
1da177e4
LT
1300 if (tcp_child_process(sk, nsk, skb))
1301 goto reset;
1302 if (opt_skb)
1303 __kfree_skb(opt_skb);
1304 return 0;
1305 }
47482f13 1306 } else
bdeab991 1307 sock_rps_save_rxhash(sk, skb);
1da177e4 1308
72ab4a86 1309 if (tcp_rcv_state_process(sk, skb))
1da177e4 1310 goto reset;
1da177e4
LT
1311 if (opt_skb)
1312 goto ipv6_pktoptions;
1313 return 0;
1314
1315reset:
cfb6eeb4 1316 tcp_v6_send_reset(sk, skb);
1da177e4
LT
1317discard:
1318 if (opt_skb)
1319 __kfree_skb(opt_skb);
1320 kfree_skb(skb);
1321 return 0;
1322csum_err:
c10d9310
ED
1323 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1324 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1da177e4
LT
1325 goto discard;
1326
1327
1328ipv6_pktoptions:
1329 /* Do you ask, what is it?
1330
1331 1. skb was enqueued by tcp.
1332 2. skb is added to tail of read queue, rather than out of order.
1333 3. socket is not in passive state.
1334 4. Finally, it really contains options, which user wants to receive.
1335 */
1336 tp = tcp_sk(sk);
1337 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1338 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
333fad53 1339 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
870c3151 1340 np->mcast_oif = tcp_v6_iif(opt_skb);
333fad53 1341 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
0660e03f 1342 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
82e9f105 1343 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1397ed35 1344 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
df3687ff
FF
1345 if (np->repflow)
1346 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
a224772d 1347 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1da177e4 1348 skb_set_owner_r(opt_skb, sk);
8ce48623 1349 tcp_v6_restore_cb(opt_skb);
1da177e4
LT
1350 opt_skb = xchg(&np->pktoptions, opt_skb);
1351 } else {
1352 __kfree_skb(opt_skb);
1353 opt_skb = xchg(&np->pktoptions, NULL);
1354 }
1355 }
1356
800d55f1 1357 kfree_skb(opt_skb);
1da177e4
LT
1358 return 0;
1359}
1360
2dc49d16
ND
1361static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1362 const struct tcphdr *th)
1363{
1364 /* This is tricky: we move IP6CB at its correct location into
1365 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1366 * _decode_session6() uses IP6CB().
1367 * barrier() makes sure compiler won't play aliasing games.
1368 */
1369 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1370 sizeof(struct inet6_skb_parm));
1371 barrier();
1372
1373 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1374 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1375 skb->len - th->doff*4);
1376 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1377 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1378 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1379 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1380 TCP_SKB_CB(skb)->sacked = 0;
1381}
1382
e5bbef20 1383static int tcp_v6_rcv(struct sk_buff *skb)
1da177e4 1384{
cf533ea5 1385 const struct tcphdr *th;
b71d1d42 1386 const struct ipv6hdr *hdr;
3b24d854 1387 bool refcounted;
1da177e4
LT
1388 struct sock *sk;
1389 int ret;
a86b1e30 1390 struct net *net = dev_net(skb->dev);
1da177e4
LT
1391
1392 if (skb->pkt_type != PACKET_HOST)
1393 goto discard_it;
1394
1395 /*
1396 * Count it even if it's bad.
1397 */
90bbcc60 1398 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1da177e4
LT
1399
1400 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1401 goto discard_it;
1402
ea1627c2 1403 th = (const struct tcphdr *)skb->data;
1da177e4 1404
ea1627c2 1405 if (unlikely(th->doff < sizeof(struct tcphdr)/4))
1da177e4
LT
1406 goto bad_packet;
1407 if (!pskb_may_pull(skb, th->doff*4))
1408 goto discard_it;
1409
e4f45b7f 1410 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
6a5dc9e5 1411 goto csum_error;
1da177e4 1412
ea1627c2 1413 th = (const struct tcphdr *)skb->data;
e802af9c 1414 hdr = ipv6_hdr(skb);
1da177e4 1415
4bdc3d66 1416lookup:
a583636a 1417 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
3b24d854
ED
1418 th->source, th->dest, inet6_iif(skb),
1419 &refcounted);
1da177e4
LT
1420 if (!sk)
1421 goto no_tcp_socket;
1422
1423process:
1424 if (sk->sk_state == TCP_TIME_WAIT)
1425 goto do_time_wait;
1426
079096f1
ED
1427 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1428 struct request_sock *req = inet_reqsk(sk);
7716682c 1429 struct sock *nsk;
079096f1
ED
1430
1431 sk = req->rsk_listener;
1432 tcp_v6_fill_cb(skb, hdr, th);
1433 if (tcp_v6_inbound_md5_hash(sk, skb)) {
e65c332d 1434 sk_drops_add(sk, skb);
079096f1
ED
1435 reqsk_put(req);
1436 goto discard_it;
1437 }
7716682c 1438 if (unlikely(sk->sk_state != TCP_LISTEN)) {
f03f2e15 1439 inet_csk_reqsk_queue_drop_and_put(sk, req);
4bdc3d66
ED
1440 goto lookup;
1441 }
7716682c 1442 sock_hold(sk);
3b24d854 1443 refcounted = true;
7716682c 1444 nsk = tcp_check_req(sk, skb, req, false);
079096f1
ED
1445 if (!nsk) {
1446 reqsk_put(req);
7716682c 1447 goto discard_and_relse;
079096f1
ED
1448 }
1449 if (nsk == sk) {
079096f1
ED
1450 reqsk_put(req);
1451 tcp_v6_restore_cb(skb);
1452 } else if (tcp_child_process(sk, nsk, skb)) {
1453 tcp_v6_send_reset(nsk, skb);
7716682c 1454 goto discard_and_relse;
079096f1 1455 } else {
7716682c 1456 sock_put(sk);
079096f1
ED
1457 return 0;
1458 }
1459 }
e802af9c 1460 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
02a1d6e7 1461 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
e802af9c
SH
1462 goto discard_and_relse;
1463 }
1464
1da177e4
LT
1465 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1466 goto discard_and_relse;
1467
2dc49d16
ND
1468 tcp_v6_fill_cb(skb, hdr, th);
1469
9ea88a15
DP
1470 if (tcp_v6_inbound_md5_hash(sk, skb))
1471 goto discard_and_relse;
9ea88a15 1472
ac6e7800 1473 if (tcp_filter(sk, skb))
1da177e4 1474 goto discard_and_relse;
ac6e7800
ED
1475 th = (const struct tcphdr *)skb->data;
1476 hdr = ipv6_hdr(skb);
1da177e4
LT
1477
1478 skb->dev = NULL;
1479
e994b2f0
ED
1480 if (sk->sk_state == TCP_LISTEN) {
1481 ret = tcp_v6_do_rcv(sk, skb);
1482 goto put_and_return;
1483 }
1484
1485 sk_incoming_cpu_update(sk);
1486
293b9c42 1487 bh_lock_sock_nested(sk);
a44d6eac 1488 tcp_segs_in(tcp_sk(sk), skb);
1da177e4
LT
1489 ret = 0;
1490 if (!sock_owned_by_user(sk)) {
7bced397 1491 if (!tcp_prequeue(sk, skb))
1ab1457c 1492 ret = tcp_v6_do_rcv(sk, skb);
c9c33212 1493 } else if (tcp_add_backlog(sk, skb)) {
6b03a53a
ZY
1494 goto discard_and_relse;
1495 }
1da177e4
LT
1496 bh_unlock_sock(sk);
1497
e994b2f0 1498put_and_return:
3b24d854
ED
1499 if (refcounted)
1500 sock_put(sk);
1da177e4
LT
1501 return ret ? -1 : 0;
1502
1503no_tcp_socket:
1504 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1505 goto discard_it;
1506
2dc49d16
ND
1507 tcp_v6_fill_cb(skb, hdr, th);
1508
12e25e10 1509 if (tcp_checksum_complete(skb)) {
6a5dc9e5 1510csum_error:
90bbcc60 1511 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1da177e4 1512bad_packet:
90bbcc60 1513 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1da177e4 1514 } else {
cfb6eeb4 1515 tcp_v6_send_reset(NULL, skb);
1da177e4
LT
1516 }
1517
1518discard_it:
1da177e4
LT
1519 kfree_skb(skb);
1520 return 0;
1521
1522discard_and_relse:
532182cd 1523 sk_drops_add(sk, skb);
3b24d854
ED
1524 if (refcounted)
1525 sock_put(sk);
1da177e4
LT
1526 goto discard_it;
1527
1528do_time_wait:
1529 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
9469c7b4 1530 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1531 goto discard_it;
1532 }
1533
2dc49d16
ND
1534 tcp_v6_fill_cb(skb, hdr, th);
1535
6a5dc9e5
ED
1536 if (tcp_checksum_complete(skb)) {
1537 inet_twsk_put(inet_twsk(sk));
1538 goto csum_error;
1da177e4
LT
1539 }
1540
9469c7b4 1541 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1da177e4
LT
1542 case TCP_TW_SYN:
1543 {
1544 struct sock *sk2;
1545
c346dca1 1546 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
a583636a 1547 skb, __tcp_hdrlen(th),
5ba24953 1548 &ipv6_hdr(skb)->saddr, th->source,
0660e03f 1549 &ipv6_hdr(skb)->daddr,
870c3151 1550 ntohs(th->dest), tcp_v6_iif(skb));
53b24b8f 1551 if (sk2) {
295ff7ed 1552 struct inet_timewait_sock *tw = inet_twsk(sk);
dbe7faa4 1553 inet_twsk_deschedule_put(tw);
1da177e4 1554 sk = sk2;
4ad19de8 1555 tcp_v6_restore_cb(skb);
3b24d854 1556 refcounted = false;
1da177e4
LT
1557 goto process;
1558 }
1559 /* Fall through to ACK */
1560 }
1561 case TCP_TW_ACK:
1562 tcp_v6_timewait_ack(sk, skb);
1563 break;
1564 case TCP_TW_RST:
4ad19de8 1565 tcp_v6_restore_cb(skb);
271c3b9b
FW
1566 tcp_v6_send_reset(sk, skb);
1567 inet_twsk_deschedule_put(inet_twsk(sk));
1568 goto discard_it;
4aa956d8
WY
1569 case TCP_TW_SUCCESS:
1570 ;
1da177e4
LT
1571 }
1572 goto discard_it;
1573}
1574
c7109986
ED
1575static void tcp_v6_early_demux(struct sk_buff *skb)
1576{
1577 const struct ipv6hdr *hdr;
1578 const struct tcphdr *th;
1579 struct sock *sk;
1580
1581 if (skb->pkt_type != PACKET_HOST)
1582 return;
1583
1584 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1585 return;
1586
1587 hdr = ipv6_hdr(skb);
1588 th = tcp_hdr(skb);
1589
1590 if (th->doff < sizeof(struct tcphdr) / 4)
1591 return;
1592
870c3151 1593 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
c7109986
ED
1594 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1595 &hdr->saddr, th->source,
1596 &hdr->daddr, ntohs(th->dest),
1597 inet6_iif(skb));
1598 if (sk) {
1599 skb->sk = sk;
1600 skb->destructor = sock_edemux;
f7e4eb03 1601 if (sk_fullsock(sk)) {
d0c294c5 1602 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
f3f12135 1603
c7109986 1604 if (dst)
5d299f3d 1605 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
c7109986 1606 if (dst &&
f3f12135 1607 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
c7109986
ED
1608 skb_dst_set_noref(skb, dst);
1609 }
1610 }
1611}
1612
ccb7c410
DM
1613static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1614 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1615 .twsk_unique = tcp_twsk_unique,
4aa956d8 1616 .twsk_destructor = tcp_twsk_destructor,
ccb7c410
DM
1617};
1618
3b401a81 1619static const struct inet_connection_sock_af_ops ipv6_specific = {
543d9cfe
ACM
1620 .queue_xmit = inet6_csk_xmit,
1621 .send_check = tcp_v6_send_check,
1622 .rebuild_header = inet6_sk_rebuild_header,
5d299f3d 1623 .sk_rx_dst_set = inet6_sk_rx_dst_set,
543d9cfe
ACM
1624 .conn_request = tcp_v6_conn_request,
1625 .syn_recv_sock = tcp_v6_syn_recv_sock,
543d9cfe 1626 .net_header_len = sizeof(struct ipv6hdr),
67469601 1627 .net_frag_header_len = sizeof(struct frag_hdr),
543d9cfe
ACM
1628 .setsockopt = ipv6_setsockopt,
1629 .getsockopt = ipv6_getsockopt,
1630 .addr2sockaddr = inet6_csk_addr2sockaddr,
1631 .sockaddr_len = sizeof(struct sockaddr_in6),
3fdadf7d 1632#ifdef CONFIG_COMPAT
543d9cfe
ACM
1633 .compat_setsockopt = compat_ipv6_setsockopt,
1634 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1635#endif
4fab9071 1636 .mtu_reduced = tcp_v6_mtu_reduced,
1da177e4
LT
1637};
1638
cfb6eeb4 1639#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1640static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
cfb6eeb4 1641 .md5_lookup = tcp_v6_md5_lookup,
49a72dfb 1642 .calc_md5_hash = tcp_v6_md5_hash_skb,
cfb6eeb4 1643 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1644};
a928630a 1645#endif
cfb6eeb4 1646
1da177e4
LT
1647/*
1648 * TCP over IPv4 via INET6 API
1649 */
3b401a81 1650static const struct inet_connection_sock_af_ops ipv6_mapped = {
543d9cfe
ACM
1651 .queue_xmit = ip_queue_xmit,
1652 .send_check = tcp_v4_send_check,
1653 .rebuild_header = inet_sk_rebuild_header,
63d02d15 1654 .sk_rx_dst_set = inet_sk_rx_dst_set,
543d9cfe
ACM
1655 .conn_request = tcp_v6_conn_request,
1656 .syn_recv_sock = tcp_v6_syn_recv_sock,
543d9cfe
ACM
1657 .net_header_len = sizeof(struct iphdr),
1658 .setsockopt = ipv6_setsockopt,
1659 .getsockopt = ipv6_getsockopt,
1660 .addr2sockaddr = inet6_csk_addr2sockaddr,
1661 .sockaddr_len = sizeof(struct sockaddr_in6),
3fdadf7d 1662#ifdef CONFIG_COMPAT
543d9cfe
ACM
1663 .compat_setsockopt = compat_ipv6_setsockopt,
1664 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1665#endif
4fab9071 1666 .mtu_reduced = tcp_v4_mtu_reduced,
1da177e4
LT
1667};
1668
cfb6eeb4 1669#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1670static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
cfb6eeb4 1671 .md5_lookup = tcp_v4_md5_lookup,
49a72dfb 1672 .calc_md5_hash = tcp_v4_md5_hash_skb,
cfb6eeb4 1673 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1674};
a928630a 1675#endif
cfb6eeb4 1676
1da177e4
LT
1677/* NOTE: A lot of things set to zero explicitly by call to
1678 * sk_alloc() so need not be done here.
1679 */
1680static int tcp_v6_init_sock(struct sock *sk)
1681{
6687e988 1682 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4 1683
900f65d3 1684 tcp_init_sock(sk);
1da177e4 1685
8292a17a 1686 icsk->icsk_af_ops = &ipv6_specific;
1da177e4 1687
cfb6eeb4 1688#ifdef CONFIG_TCP_MD5SIG
ac807fa8 1689 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
cfb6eeb4
YH
1690#endif
1691
1da177e4
LT
1692 return 0;
1693}
1694
7d06b2e0 1695static void tcp_v6_destroy_sock(struct sock *sk)
1da177e4 1696{
1da177e4 1697 tcp_v4_destroy_sock(sk);
7d06b2e0 1698 inet6_destroy_sock(sk);
1da177e4
LT
1699}
1700
952a10be 1701#ifdef CONFIG_PROC_FS
1da177e4 1702/* Proc filesystem TCPv6 sock list dumping. */
1ab1457c 1703static void get_openreq6(struct seq_file *seq,
aa3a0c8c 1704 const struct request_sock *req, int i)
1da177e4 1705{
fa76ce73 1706 long ttd = req->rsk_timer.expires - jiffies;
634fb979
ED
1707 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1708 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1da177e4
LT
1709
1710 if (ttd < 0)
1711 ttd = 0;
1712
1da177e4
LT
1713 seq_printf(seq,
1714 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
d14c5ab6 1715 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1da177e4
LT
1716 i,
1717 src->s6_addr32[0], src->s6_addr32[1],
1718 src->s6_addr32[2], src->s6_addr32[3],
b44084c2 1719 inet_rsk(req)->ir_num,
1da177e4
LT
1720 dest->s6_addr32[0], dest->s6_addr32[1],
1721 dest->s6_addr32[2], dest->s6_addr32[3],
634fb979 1722 ntohs(inet_rsk(req)->ir_rmt_port),
1da177e4 1723 TCP_SYN_RECV,
4c99aa40 1724 0, 0, /* could print option size, but that is af dependent. */
1ab1457c
YH
1725 1, /* timers active (only the expire timer) */
1726 jiffies_to_clock_t(ttd),
e6c022a4 1727 req->num_timeout,
aa3a0c8c
ED
1728 from_kuid_munged(seq_user_ns(seq),
1729 sock_i_uid(req->rsk_listener)),
1ab1457c 1730 0, /* non-standard timer */
1da177e4
LT
1731 0, /* open_requests have no inode */
1732 0, req);
1733}
1734
1735static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1736{
b71d1d42 1737 const struct in6_addr *dest, *src;
1da177e4
LT
1738 __u16 destp, srcp;
1739 int timer_active;
1740 unsigned long timer_expires;
cf533ea5
ED
1741 const struct inet_sock *inet = inet_sk(sp);
1742 const struct tcp_sock *tp = tcp_sk(sp);
463c84b9 1743 const struct inet_connection_sock *icsk = inet_csk(sp);
0536fcc0 1744 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
00fd38d9
ED
1745 int rx_queue;
1746 int state;
1da177e4 1747
efe4208f
ED
1748 dest = &sp->sk_v6_daddr;
1749 src = &sp->sk_v6_rcv_saddr;
c720c7e8
ED
1750 destp = ntohs(inet->inet_dport);
1751 srcp = ntohs(inet->inet_sport);
463c84b9 1752
ce3cf4ec 1753 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
57dde7f7 1754 icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
ce3cf4ec 1755 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
1da177e4 1756 timer_active = 1;
463c84b9
ACM
1757 timer_expires = icsk->icsk_timeout;
1758 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1da177e4 1759 timer_active = 4;
463c84b9 1760 timer_expires = icsk->icsk_timeout;
1da177e4
LT
1761 } else if (timer_pending(&sp->sk_timer)) {
1762 timer_active = 2;
1763 timer_expires = sp->sk_timer.expires;
1764 } else {
1765 timer_active = 0;
1766 timer_expires = jiffies;
1767 }
1768
00fd38d9
ED
1769 state = sk_state_load(sp);
1770 if (state == TCP_LISTEN)
1771 rx_queue = sp->sk_ack_backlog;
1772 else
1773 /* Because we don't lock the socket,
1774 * we might find a transient negative value.
1775 */
1776 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1777
1da177e4
LT
1778 seq_printf(seq,
1779 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
d14c5ab6 1780 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1da177e4
LT
1781 i,
1782 src->s6_addr32[0], src->s6_addr32[1],
1783 src->s6_addr32[2], src->s6_addr32[3], srcp,
1784 dest->s6_addr32[0], dest->s6_addr32[1],
1785 dest->s6_addr32[2], dest->s6_addr32[3], destp,
00fd38d9
ED
1786 state,
1787 tp->write_seq - tp->snd_una,
1788 rx_queue,
1da177e4 1789 timer_active,
a399a805 1790 jiffies_delta_to_clock_t(timer_expires - jiffies),
463c84b9 1791 icsk->icsk_retransmits,
a7cb5a49 1792 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
6687e988 1793 icsk->icsk_probes_out,
1da177e4
LT
1794 sock_i_ino(sp),
1795 atomic_read(&sp->sk_refcnt), sp,
7be87351
SH
1796 jiffies_to_clock_t(icsk->icsk_rto),
1797 jiffies_to_clock_t(icsk->icsk_ack.ato),
4c99aa40 1798 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
0b6a05c1 1799 tp->snd_cwnd,
00fd38d9 1800 state == TCP_LISTEN ?
0536fcc0 1801 fastopenq->max_qlen :
0a672f74 1802 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1da177e4
LT
1803 );
1804}
1805
1ab1457c 1806static void get_timewait6_sock(struct seq_file *seq,
8feaf0c0 1807 struct inet_timewait_sock *tw, int i)
1da177e4 1808{
789f558c 1809 long delta = tw->tw_timer.expires - jiffies;
b71d1d42 1810 const struct in6_addr *dest, *src;
1da177e4 1811 __u16 destp, srcp;
1da177e4 1812
efe4208f
ED
1813 dest = &tw->tw_v6_daddr;
1814 src = &tw->tw_v6_rcv_saddr;
1da177e4
LT
1815 destp = ntohs(tw->tw_dport);
1816 srcp = ntohs(tw->tw_sport);
1817
1818 seq_printf(seq,
1819 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
71338aa7 1820 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1da177e4
LT
1821 i,
1822 src->s6_addr32[0], src->s6_addr32[1],
1823 src->s6_addr32[2], src->s6_addr32[3], srcp,
1824 dest->s6_addr32[0], dest->s6_addr32[1],
1825 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1826 tw->tw_substate, 0, 0,
a399a805 1827 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1da177e4
LT
1828 atomic_read(&tw->tw_refcnt), tw);
1829}
1830
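/*
 * In the dumps above, the "tr" column (timer_active) encodes which timer
 * is pending: 1 retransmit/loss-probe, 4 zero-window probe, 2 the
 * keepalive sk_timer, 0 none; time-wait entries always report 3.  The
 * following "tm->when" field is the remaining time in clock ticks.
 */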
1da177e4
LT
1831static int tcp6_seq_show(struct seq_file *seq, void *v)
1832{
1833 struct tcp_iter_state *st;
05dbc7b5 1834 struct sock *sk = v;
1da177e4
LT
1835
1836 if (v == SEQ_START_TOKEN) {
1837 seq_puts(seq,
1838 " sl "
1839 "local_address "
1840 "remote_address "
1841 "st tx_queue rx_queue tr tm->when retrnsmt"
1842 " uid timeout inode\n");
1843 goto out;
1844 }
1845 st = seq->private;
1846
079096f1
ED
1847 if (sk->sk_state == TCP_TIME_WAIT)
1848 get_timewait6_sock(seq, v, st->num);
1849 else if (sk->sk_state == TCP_NEW_SYN_RECV)
aa3a0c8c 1850 get_openreq6(seq, v, st->num);
079096f1
ED
1851 else
1852 get_tcp6_sock(seq, v, st->num);
1da177e4
LT
1853out:
1854 return 0;
1855}
1856
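/*
 * Reading the output (illustrative userspace sketch, not part of this
 * file): the local/remote addresses above are printed as the four 32-bit
 * words of struct in6_addr in host byte order, so a decoder running on
 * the same machine can store the parsed words straight back and let
 * inet_ntop() render them.  decode_tcp6_addr() is a hypothetical helper.
 */
#include <stdio.h>
#include <arpa/inet.h>
#include <netinet/in.h>

/* e.g. "00000000000000000000000001000000:1F90" is [::1]:8080 on a
 * little-endian host. */
static int decode_tcp6_addr(const char *field, char *buf, size_t len,
			    unsigned int *port)
{
	struct in6_addr a;
	unsigned int w[4];
	int i;

	if (sscanf(field, "%8x%8x%8x%8x:%x",
		   &w[0], &w[1], &w[2], &w[3], port) != 5)
		return -1;
	for (i = 0; i < 4; i++)
		a.s6_addr32[i] = w[i];	/* same host order the kernel printed */
	return inet_ntop(AF_INET6, &a, buf, len) ? 0 : -1;
}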
73cb88ec
AV
1857static const struct file_operations tcp6_afinfo_seq_fops = {
1858 .owner = THIS_MODULE,
1859 .open = tcp_seq_open,
1860 .read = seq_read,
1861 .llseek = seq_lseek,
1862 .release = seq_release_net
1863};
1864
1da177e4 1865static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1da177e4
LT
1866 .name = "tcp6",
1867 .family = AF_INET6,
73cb88ec 1868 .seq_fops = &tcp6_afinfo_seq_fops,
9427c4b3
DL
1869 .seq_ops = {
1870 .show = tcp6_seq_show,
1871 },
1da177e4
LT
1872};
1873
2c8c1e72 1874int __net_init tcp6_proc_init(struct net *net)
1da177e4 1875{
6f8b13bc 1876 return tcp_proc_register(net, &tcp6_seq_afinfo);
1da177e4
LT
1877}
1878
6f8b13bc 1879void tcp6_proc_exit(struct net *net)
1da177e4 1880{
6f8b13bc 1881 tcp_proc_unregister(net, &tcp6_seq_afinfo);
1da177e4
LT
1882}
1883#endif
1884
1885struct proto tcpv6_prot = {
1886 .name = "TCPv6",
1887 .owner = THIS_MODULE,
1888 .close = tcp_close,
1889 .connect = tcp_v6_connect,
1890 .disconnect = tcp_disconnect,
463c84b9 1891 .accept = inet_csk_accept,
1da177e4
LT
1892 .ioctl = tcp_ioctl,
1893 .init = tcp_v6_init_sock,
1894 .destroy = tcp_v6_destroy_sock,
1895 .shutdown = tcp_shutdown,
1896 .setsockopt = tcp_setsockopt,
1897 .getsockopt = tcp_getsockopt,
4b9d07a4 1898 .keepalive = tcp_set_keepalive,
1da177e4 1899 .recvmsg = tcp_recvmsg,
7ba42910
CG
1900 .sendmsg = tcp_sendmsg,
1901 .sendpage = tcp_sendpage,
1da177e4 1902 .backlog_rcv = tcp_v6_do_rcv,
46d3ceab 1903 .release_cb = tcp_release_cb,
496611d7 1904 .hash = inet6_hash,
ab1e0a13
ACM
1905 .unhash = inet_unhash,
1906 .get_port = inet_csk_get_port,
1da177e4 1907 .enter_memory_pressure = tcp_enter_memory_pressure,
c9bee3b7 1908 .stream_memory_free = tcp_stream_memory_free,
1da177e4
LT
1909 .sockets_allocated = &tcp_sockets_allocated,
1910 .memory_allocated = &tcp_memory_allocated,
1911 .memory_pressure = &tcp_memory_pressure,
0a5578cf 1912 .orphan_count = &tcp_orphan_count,
a4fe34bf 1913 .sysctl_mem = sysctl_tcp_mem,
1da177e4
LT
1914 .sysctl_wmem = sysctl_tcp_wmem,
1915 .sysctl_rmem = sysctl_tcp_rmem,
1916 .max_header = MAX_TCP_HEADER,
1917 .obj_size = sizeof(struct tcp6_sock),
3ab5aee7 1918 .slab_flags = SLAB_DESTROY_BY_RCU,
6d6ee43e 1919 .twsk_prot = &tcp6_timewait_sock_ops,
60236fdd 1920 .rsk_prot = &tcp6_request_sock_ops,
39d8cda7 1921 .h.hashinfo = &tcp_hashinfo,
7ba42910 1922 .no_autobind = true,
543d9cfe
ACM
1923#ifdef CONFIG_COMPAT
1924 .compat_setsockopt = compat_tcp_setsockopt,
1925 .compat_getsockopt = compat_tcp_getsockopt,
d1a4c0b3 1926#endif
c1e64e29 1927 .diag_destroy = tcp_abort,
1da177e4
LT
1928};
1929
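/*
 * A note on the table above: .obj_size makes the protocol slab cache hand
 * out full tcp6_sock objects for this family, and SLAB_DESTROY_BY_RCU is
 * what lets the established-hash lookups in the receive path run under RCU
 * without a lock: within a grace period a freed socket's memory can only be
 * reused for another object from the same cache, so a lookup may safely
 * dereference a candidate and then re-check its keys.
 */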
41135cc8 1930static const struct inet6_protocol tcpv6_protocol = {
c7109986 1931 .early_demux = tcp_v6_early_demux,
1da177e4
LT
1932 .handler = tcp_v6_rcv,
1933 .err_handler = tcp_v6_err,
1934 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1935};
1936
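/*
 * How the inet6_protocol entry above is consumed (sketch, not part of this
 * file): the IPv6 input path looks this protocol up in
 * inet6_protos[IPPROTO_TCP] and calls, roughly,
 *
 *	if (ipprot->early_demux && !skb_dst(skb))
 *		ipprot->early_demux(skb);	(-> tcp_v6_early_demux)
 *	...
 *	ipprot->handler(skb);			(-> tcp_v6_rcv)
 *
 * INET6_PROTO_NOPOLICY makes the demux loop skip the generic XFRM policy
 * check (tcp_v6_rcv performs its own), and INET6_PROTO_FINAL marks TCP as
 * a terminal header, so no further extension-header parsing follows it.
 */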
1da177e4
LT
1937static struct inet_protosw tcpv6_protosw = {
1938 .type = SOCK_STREAM,
1939 .protocol = IPPROTO_TCP,
1940 .prot = &tcpv6_prot,
1941 .ops = &inet6_stream_ops,
d83d8461
ACM
1942 .flags = INET_PROTOSW_PERMANENT |
1943 INET_PROTOSW_ICSK,
1da177e4
LT
1944};
1945
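/*
 * Userspace view of the protosw entry above (illustrative sketch, not part
 * of this file): inet6_create() matches (SOCK_STREAM, IPPROTO_TCP) against
 * this table, so the socket below is backed by tcpv6_prot and, with
 * IPV6_V6ONLY left off (the usual default), also accepts IPv4 peers as
 * ::ffff:a.b.c.d mapped addresses through the ipv6_mapped ops.
 * make_tcp6_listener() is a hypothetical helper.
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>

static int make_tcp6_listener(unsigned short port)
{
	struct sockaddr_in6 a;
	int fd = socket(AF_INET6, SOCK_STREAM, 0);	/* -> tcpv6_prot */

	if (fd < 0)
		return -1;

	memset(&a, 0, sizeof(a));
	a.sin6_family = AF_INET6;
	a.sin6_port = htons(port);
	a.sin6_addr = in6addr_any;

	if (bind(fd, (struct sockaddr *)&a, sizeof(a)) < 0 ||
	    listen(fd, 128) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}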
2c8c1e72 1946static int __net_init tcpv6_net_init(struct net *net)
93ec926b 1947{
5677242f
DL
1948 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1949 SOCK_RAW, IPPROTO_TCP, net);
93ec926b
DL
1950}
1951
2c8c1e72 1952static void __net_exit tcpv6_net_exit(struct net *net)
93ec926b 1953{
5677242f 1954 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
b099ce26
EB
1955}
1956
2c8c1e72 1957static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
b099ce26 1958{
1946e672 1959 inet_twsk_purge(&tcp_hashinfo, AF_INET6);
93ec926b
DL
1960}
1961
1962static struct pernet_operations tcpv6_net_ops = {
b099ce26
EB
1963 .init = tcpv6_net_init,
1964 .exit = tcpv6_net_exit,
1965 .exit_batch = tcpv6_net_exit_batch,
93ec926b
DL
1966};
1967
7f4e4868 1968int __init tcpv6_init(void)
1da177e4 1969{
7f4e4868
DL
1970 int ret;
1971
3336288a
VY
1972 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1973 if (ret)
c6b641a4 1974 goto out;
3336288a 1975
1da177e4 1976 /* register inet6 protocol */
7f4e4868
DL
1977 ret = inet6_register_protosw(&tcpv6_protosw);
1978 if (ret)
1979 goto out_tcpv6_protocol;
1980
93ec926b 1981 ret = register_pernet_subsys(&tcpv6_net_ops);
7f4e4868
DL
1982 if (ret)
1983 goto out_tcpv6_protosw;
1984out:
1985 return ret;
ae0f7d5f 1986
7f4e4868
DL
1987out_tcpv6_protosw:
1988 inet6_unregister_protosw(&tcpv6_protosw);
3336288a
VY
1989out_tcpv6_protocol:
1990 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
7f4e4868
DL
1991 goto out;
1992}
1993
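/*
 * Note on the error handling above: the three registrations are undone in
 * reverse order through the goto labels, and tcpv6_exit() below performs
 * the same teardown in the same order on a normal unload.
 */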
09f7709f 1994void tcpv6_exit(void)
7f4e4868 1995{
93ec926b 1996 unregister_pernet_subsys(&tcpv6_net_ops);
7f4e4868
DL
1997 inet6_unregister_protosw(&tcpv6_protosw);
1998 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1da177e4 1999}