]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - net/ipv6/tcp_ipv6.c
Linux 4.11-rc2
[mirror_ubuntu-jammy-kernel.git] / net / ipv6 / tcp_ipv6.c
CommitLineData
1da177e4
LT
1/*
2 * TCP over IPv6
1ab1457c 3 * Linux INET6 implementation
1da177e4
LT
4 *
5 * Authors:
1ab1457c 6 * Pedro Roque <roque@di.fc.ul.pt>
1da177e4 7 *
1ab1457c 8 * Based on:
1da177e4
LT
9 * linux/net/ipv4/tcp.c
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
12 *
13 * Fixes:
14 * Hideaki YOSHIFUJI : sin6_scope_id support
15 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
16 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
17 * a single port at the same time.
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
eb4dea58 26#include <linux/bottom_half.h>
1da177e4 27#include <linux/module.h>
1da177e4
LT
28#include <linux/errno.h>
29#include <linux/types.h>
30#include <linux/socket.h>
31#include <linux/sockios.h>
32#include <linux/net.h>
33#include <linux/jiffies.h>
34#include <linux/in.h>
35#include <linux/in6.h>
36#include <linux/netdevice.h>
37#include <linux/init.h>
38#include <linux/jhash.h>
39#include <linux/ipsec.h>
40#include <linux/times.h>
5a0e3ad6 41#include <linux/slab.h>
4aa956d8 42#include <linux/uaccess.h>
1da177e4
LT
43#include <linux/ipv6.h>
44#include <linux/icmpv6.h>
45#include <linux/random.h>
46
47#include <net/tcp.h>
48#include <net/ndisc.h>
5324a040 49#include <net/inet6_hashtables.h>
8129765a 50#include <net/inet6_connection_sock.h>
1da177e4
LT
51#include <net/ipv6.h>
52#include <net/transp_v6.h>
53#include <net/addrconf.h>
54#include <net/ip6_route.h>
55#include <net/ip6_checksum.h>
56#include <net/inet_ecn.h>
57#include <net/protocol.h>
58#include <net/xfrm.h>
1da177e4
LT
59#include <net/snmp.h>
60#include <net/dsfield.h>
6d6ee43e 61#include <net/timewait_sock.h>
3d58b5fa 62#include <net/inet_common.h>
6e5714ea 63#include <net/secure_seq.h>
076bb0c8 64#include <net/busy_poll.h>
1da177e4 65
1da177e4
LT
66#include <linux/proc_fs.h>
67#include <linux/seq_file.h>
68
cf80e0e4 69#include <crypto/hash.h>
cfb6eeb4
YH
70#include <linux/scatterlist.h>
71
a00e7444
ED
72static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
73static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
6edafaaf 74 struct request_sock *req);
1da177e4
LT
75
76static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
1da177e4 77
3b401a81
SH
78static const struct inet_connection_sock_af_ops ipv6_mapped;
79static const struct inet_connection_sock_af_ops ipv6_specific;
a928630a 80#ifdef CONFIG_TCP_MD5SIG
b2e4b3de
SH
81static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
82static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
9501f972 83#else
51723935 84static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
b71d1d42 85 const struct in6_addr *addr)
9501f972
YH
86{
87 return NULL;
88}
a928630a 89#endif
1da177e4 90
fae6ef87
NC
91static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
92{
93 struct dst_entry *dst = skb_dst(skb);
fae6ef87 94
5037e9ef 95 if (dst && dst_hold_safe(dst)) {
ca777eff
ED
96 const struct rt6_info *rt = (const struct rt6_info *)dst;
97
ca777eff
ED
98 sk->sk_rx_dst = dst;
99 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
b197df4f 100 inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
ca777eff 101 }
fae6ef87
NC
102}
103
95a22cae 104static u32 tcp_v6_init_sequence(const struct sk_buff *skb, u32 *tsoff)
1da177e4 105{
0660e03f
ACM
106 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
107 ipv6_hdr(skb)->saddr.s6_addr32,
aa8223c7 108 tcp_hdr(skb)->dest,
95a22cae 109 tcp_hdr(skb)->source, tsoff);
1da177e4
LT
110}
111
1ab1457c 112static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
1da177e4
LT
113 int addr_len)
114{
115 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
1ab1457c 116 struct inet_sock *inet = inet_sk(sk);
d83d8461 117 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
118 struct ipv6_pinfo *np = inet6_sk(sk);
119 struct tcp_sock *tp = tcp_sk(sk);
20c59de2 120 struct in6_addr *saddr = NULL, *final_p, final;
45f6fad8 121 struct ipv6_txoptions *opt;
4c9483b2 122 struct flowi6 fl6;
1da177e4
LT
123 struct dst_entry *dst;
124 int addr_type;
00355fa5 125 u32 seq;
1da177e4 126 int err;
1946e672 127 struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
1da177e4 128
1ab1457c 129 if (addr_len < SIN6_LEN_RFC2133)
1da177e4
LT
130 return -EINVAL;
131
1ab1457c 132 if (usin->sin6_family != AF_INET6)
a02cec21 133 return -EAFNOSUPPORT;
1da177e4 134
4c9483b2 135 memset(&fl6, 0, sizeof(fl6));
1da177e4
LT
136
137 if (np->sndflow) {
4c9483b2
DM
138 fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
139 IP6_ECN_flow_init(fl6.flowlabel);
140 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
1da177e4 141 struct ip6_flowlabel *flowlabel;
4c9483b2 142 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
63159f29 143 if (!flowlabel)
1da177e4 144 return -EINVAL;
1da177e4
LT
145 fl6_sock_release(flowlabel);
146 }
147 }
148
149 /*
1ab1457c
YH
150 * connect() to INADDR_ANY means loopback (BSD'ism).
151 */
152
052d2369
JL
153 if (ipv6_addr_any(&usin->sin6_addr)) {
154 if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
155 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
156 &usin->sin6_addr);
157 else
158 usin->sin6_addr = in6addr_loopback;
159 }
1da177e4
LT
160
161 addr_type = ipv6_addr_type(&usin->sin6_addr);
162
4c99aa40 163 if (addr_type & IPV6_ADDR_MULTICAST)
1da177e4
LT
164 return -ENETUNREACH;
165
166 if (addr_type&IPV6_ADDR_LINKLOCAL) {
167 if (addr_len >= sizeof(struct sockaddr_in6) &&
168 usin->sin6_scope_id) {
169 /* If interface is set while binding, indices
170 * must coincide.
171 */
172 if (sk->sk_bound_dev_if &&
173 sk->sk_bound_dev_if != usin->sin6_scope_id)
174 return -EINVAL;
175
176 sk->sk_bound_dev_if = usin->sin6_scope_id;
177 }
178
179 /* Connect to link-local address requires an interface */
180 if (!sk->sk_bound_dev_if)
181 return -EINVAL;
182 }
183
184 if (tp->rx_opt.ts_recent_stamp &&
efe4208f 185 !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
1da177e4
LT
186 tp->rx_opt.ts_recent = 0;
187 tp->rx_opt.ts_recent_stamp = 0;
188 tp->write_seq = 0;
189 }
190
efe4208f 191 sk->sk_v6_daddr = usin->sin6_addr;
4c9483b2 192 np->flow_label = fl6.flowlabel;
1da177e4
LT
193
194 /*
195 * TCP over IPv4
196 */
197
052d2369 198 if (addr_type & IPV6_ADDR_MAPPED) {
d83d8461 199 u32 exthdrlen = icsk->icsk_ext_hdr_len;
1da177e4
LT
200 struct sockaddr_in sin;
201
202 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
203
204 if (__ipv6_only_sock(sk))
205 return -ENETUNREACH;
206
207 sin.sin_family = AF_INET;
208 sin.sin_port = usin->sin6_port;
209 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
210
d83d8461 211 icsk->icsk_af_ops = &ipv6_mapped;
1da177e4 212 sk->sk_backlog_rcv = tcp_v4_do_rcv;
cfb6eeb4
YH
213#ifdef CONFIG_TCP_MD5SIG
214 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
215#endif
1da177e4
LT
216
217 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
218
219 if (err) {
d83d8461
ACM
220 icsk->icsk_ext_hdr_len = exthdrlen;
221 icsk->icsk_af_ops = &ipv6_specific;
1da177e4 222 sk->sk_backlog_rcv = tcp_v6_do_rcv;
cfb6eeb4
YH
223#ifdef CONFIG_TCP_MD5SIG
224 tp->af_specific = &tcp_sock_ipv6_specific;
225#endif
1da177e4 226 goto failure;
1da177e4 227 }
d1e559d0 228 np->saddr = sk->sk_v6_rcv_saddr;
1da177e4
LT
229
230 return err;
231 }
232
efe4208f
ED
233 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
234 saddr = &sk->sk_v6_rcv_saddr;
1da177e4 235
4c9483b2 236 fl6.flowi6_proto = IPPROTO_TCP;
efe4208f 237 fl6.daddr = sk->sk_v6_daddr;
4e3fd7a0 238 fl6.saddr = saddr ? *saddr : np->saddr;
4c9483b2
DM
239 fl6.flowi6_oif = sk->sk_bound_dev_if;
240 fl6.flowi6_mark = sk->sk_mark;
1958b856
DM
241 fl6.fl6_dport = usin->sin6_port;
242 fl6.fl6_sport = inet->inet_sport;
e2d118a1 243 fl6.flowi6_uid = sk->sk_uid;
1da177e4 244
1e1d04e6 245 opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
45f6fad8 246 final_p = fl6_update_dst(&fl6, opt, &final);
1da177e4 247
4c9483b2 248 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
beb8d13b 249
0e0d44ab 250 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
68d0c6d3
DM
251 if (IS_ERR(dst)) {
252 err = PTR_ERR(dst);
1da177e4 253 goto failure;
14e50e57 254 }
1da177e4 255
63159f29 256 if (!saddr) {
4c9483b2 257 saddr = &fl6.saddr;
efe4208f 258 sk->sk_v6_rcv_saddr = *saddr;
1da177e4
LT
259 }
260
261 /* set the source address */
4e3fd7a0 262 np->saddr = *saddr;
c720c7e8 263 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
1da177e4 264
f83ef8c0 265 sk->sk_gso_type = SKB_GSO_TCPV6;
6bd4f355 266 ip6_dst_store(sk, dst, NULL, NULL);
1da177e4 267
1946e672 268 if (tcp_death_row->sysctl_tw_recycle &&
493f377d 269 !tp->rx_opt.ts_recent_stamp &&
fd0273d7 270 ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
81166dd6 271 tcp_fetch_timewait_stamp(sk, dst);
493f377d 272
d83d8461 273 icsk->icsk_ext_hdr_len = 0;
45f6fad8
ED
274 if (opt)
275 icsk->icsk_ext_hdr_len = opt->opt_flen +
276 opt->opt_nflen;
1da177e4
LT
277
278 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
279
c720c7e8 280 inet->inet_dport = usin->sin6_port;
1da177e4
LT
281
282 tcp_set_state(sk, TCP_SYN_SENT);
1946e672 283 err = inet6_hash_connect(tcp_death_row, sk);
1da177e4
LT
284 if (err)
285 goto late_failure;
286
877d1f62 287 sk_set_txhash(sk);
9e7ceb06 288
00355fa5
AK
289 if (likely(!tp->repair)) {
290 seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
291 sk->sk_v6_daddr.s6_addr32,
292 inet->inet_sport,
293 inet->inet_dport,
294 &tp->tsoffset);
295 if (!tp->write_seq)
296 tp->write_seq = seq;
297 }
1da177e4 298
19f6d3f3
WW
299 if (tcp_fastopen_defer_connect(sk, &err))
300 return err;
301 if (err)
302 goto late_failure;
303
1da177e4
LT
304 err = tcp_connect(sk);
305 if (err)
306 goto late_failure;
307
308 return 0;
309
310late_failure:
311 tcp_set_state(sk, TCP_CLOSE);
1da177e4 312failure:
c720c7e8 313 inet->inet_dport = 0;
1da177e4
LT
314 sk->sk_route_caps = 0;
315 return err;
316}
317
563d34d0
ED
318static void tcp_v6_mtu_reduced(struct sock *sk)
319{
320 struct dst_entry *dst;
321
322 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
323 return;
324
325 dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
326 if (!dst)
327 return;
328
329 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
330 tcp_sync_mss(sk, dst_mtu(dst));
331 tcp_simple_retransmit(sk);
332 }
333}
334
1da177e4 335static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
d5fdd6ba 336 u8 type, u8 code, int offset, __be32 info)
1da177e4 337{
4c99aa40 338 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
505cbfc5 339 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
2215089b
ED
340 struct net *net = dev_net(skb->dev);
341 struct request_sock *fastopen;
1da177e4 342 struct ipv6_pinfo *np;
1ab1457c 343 struct tcp_sock *tp;
0a672f74 344 __u32 seq, snd_una;
2215089b 345 struct sock *sk;
9cf74903 346 bool fatal;
2215089b 347 int err;
1da177e4 348
2215089b
ED
349 sk = __inet6_lookup_established(net, &tcp_hashinfo,
350 &hdr->daddr, th->dest,
351 &hdr->saddr, ntohs(th->source),
352 skb->dev->ifindex);
1da177e4 353
2215089b 354 if (!sk) {
a16292a0
ED
355 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
356 ICMP6_MIB_INERRORS);
1da177e4
LT
357 return;
358 }
359
360 if (sk->sk_state == TCP_TIME_WAIT) {
9469c7b4 361 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
362 return;
363 }
2215089b 364 seq = ntohl(th->seq);
9cf74903 365 fatal = icmpv6_err_convert(type, code, &err);
2215089b 366 if (sk->sk_state == TCP_NEW_SYN_RECV)
9cf74903 367 return tcp_req_err(sk, seq, fatal);
1da177e4
LT
368
369 bh_lock_sock(sk);
563d34d0 370 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
02a1d6e7 371 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
1da177e4
LT
372
373 if (sk->sk_state == TCP_CLOSE)
374 goto out;
375
e802af9c 376 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
02a1d6e7 377 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
e802af9c
SH
378 goto out;
379 }
380
1da177e4 381 tp = tcp_sk(sk);
0a672f74
YC
382 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
383 fastopen = tp->fastopen_rsk;
384 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
1da177e4 385 if (sk->sk_state != TCP_LISTEN &&
0a672f74 386 !between(seq, snd_una, tp->snd_nxt)) {
02a1d6e7 387 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
1da177e4
LT
388 goto out;
389 }
390
391 np = inet6_sk(sk);
392
ec18d9a2
DM
393 if (type == NDISC_REDIRECT) {
394 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
395
1ed5c48f 396 if (dst)
6700c270 397 dst->ops->redirect(dst, sk, skb);
50a75a89 398 goto out;
ec18d9a2
DM
399 }
400
1da177e4 401 if (type == ICMPV6_PKT_TOOBIG) {
0d4f0608
ED
402 /* We are not interested in TCP_LISTEN and open_requests
403 * (SYN-ACKs send out by Linux are always <576bytes so
404 * they should go through unfragmented).
405 */
406 if (sk->sk_state == TCP_LISTEN)
407 goto out;
408
93b36cf3
HFS
409 if (!ip6_sk_accept_pmtu(sk))
410 goto out;
411
563d34d0
ED
412 tp->mtu_info = ntohl(info);
413 if (!sock_owned_by_user(sk))
414 tcp_v6_mtu_reduced(sk);
d013ef2a 415 else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
7aa5470c 416 &sk->sk_tsq_flags))
d013ef2a 417 sock_hold(sk);
1da177e4
LT
418 goto out;
419 }
420
1da177e4 421
60236fdd 422 /* Might be for an request_sock */
1da177e4 423 switch (sk->sk_state) {
1da177e4 424 case TCP_SYN_SENT:
0a672f74
YC
425 case TCP_SYN_RECV:
426 /* Only in fast or simultaneous open. If a fast open socket is
427 * is already accepted it is treated as a connected one below.
428 */
63159f29 429 if (fastopen && !fastopen->sk)
0a672f74
YC
430 break;
431
1da177e4 432 if (!sock_owned_by_user(sk)) {
1da177e4
LT
433 sk->sk_err = err;
434 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
435
436 tcp_done(sk);
437 } else
438 sk->sk_err_soft = err;
439 goto out;
440 }
441
442 if (!sock_owned_by_user(sk) && np->recverr) {
443 sk->sk_err = err;
444 sk->sk_error_report(sk);
445 } else
446 sk->sk_err_soft = err;
447
448out:
449 bh_unlock_sock(sk);
450 sock_put(sk);
451}
452
453
0f935dbe 454static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
d6274bd8 455 struct flowi *fl,
3840a06e 456 struct request_sock *req,
ca6fb065 457 struct tcp_fastopen_cookie *foc,
b3d05147 458 enum tcp_synack_type synack_type)
1da177e4 459{
634fb979 460 struct inet_request_sock *ireq = inet_rsk(req);
1da177e4 461 struct ipv6_pinfo *np = inet6_sk(sk);
56ac42bc 462 struct ipv6_txoptions *opt;
d6274bd8 463 struct flowi6 *fl6 = &fl->u.ip6;
4c99aa40 464 struct sk_buff *skb;
9494218f 465 int err = -ENOMEM;
1da177e4 466
9f10d3f6 467 /* First, grab a route. */
f76b33c3
ED
468 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
469 IPPROTO_TCP)) == NULL)
fd80eb94 470 goto done;
9494218f 471
b3d05147 472 skb = tcp_make_synack(sk, dst, req, foc, synack_type);
9494218f 473
1da177e4 474 if (skb) {
634fb979
ED
475 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
476 &ireq->ir_v6_rmt_addr);
1da177e4 477
634fb979 478 fl6->daddr = ireq->ir_v6_rmt_addr;
53b24b8f 479 if (np->repflow && ireq->pktopts)
df3687ff
FF
480 fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
481
3e4006f0 482 rcu_read_lock();
56ac42bc
HD
483 opt = ireq->ipv6_opt;
484 if (!opt)
485 opt = rcu_dereference(np->opt);
92e55f41 486 err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
3e4006f0 487 rcu_read_unlock();
b9df3cb8 488 err = net_xmit_eval(err);
1da177e4
LT
489 }
490
491done:
1da177e4
LT
492 return err;
493}
494
72659ecc 495
60236fdd 496static void tcp_v6_reqsk_destructor(struct request_sock *req)
1da177e4 497{
56ac42bc 498 kfree(inet_rsk(req)->ipv6_opt);
634fb979 499 kfree_skb(inet_rsk(req)->pktopts);
1da177e4
LT
500}
501
cfb6eeb4 502#ifdef CONFIG_TCP_MD5SIG
b83e3deb 503static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
b71d1d42 504 const struct in6_addr *addr)
cfb6eeb4 505{
a915da9b 506 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
cfb6eeb4
YH
507}
508
b83e3deb 509static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
fd3a154a 510 const struct sock *addr_sk)
cfb6eeb4 511{
efe4208f 512 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
cfb6eeb4
YH
513}
514
4aa956d8
WY
515static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
516 int optlen)
cfb6eeb4
YH
517{
518 struct tcp_md5sig cmd;
519 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
cfb6eeb4
YH
520
521 if (optlen < sizeof(cmd))
522 return -EINVAL;
523
524 if (copy_from_user(&cmd, optval, sizeof(cmd)))
525 return -EFAULT;
526
527 if (sin6->sin6_family != AF_INET6)
528 return -EINVAL;
529
530 if (!cmd.tcpm_keylen) {
e773e4fa 531 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
a915da9b
ED
532 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
533 AF_INET);
534 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
535 AF_INET6);
cfb6eeb4
YH
536 }
537
538 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
539 return -EINVAL;
540
a915da9b
ED
541 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
542 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
543 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
cfb6eeb4 544
a915da9b
ED
545 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
546 AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
cfb6eeb4
YH
547}
548
19689e38
ED
549static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
550 const struct in6_addr *daddr,
551 const struct in6_addr *saddr,
552 const struct tcphdr *th, int nbytes)
cfb6eeb4 553{
cfb6eeb4 554 struct tcp6_pseudohdr *bp;
49a72dfb 555 struct scatterlist sg;
19689e38 556 struct tcphdr *_th;
8d26d76d 557
19689e38 558 bp = hp->scratch;
cfb6eeb4 559 /* 1. TCP pseudo-header (RFC2460) */
4e3fd7a0
AD
560 bp->saddr = *saddr;
561 bp->daddr = *daddr;
49a72dfb 562 bp->protocol = cpu_to_be32(IPPROTO_TCP);
00b1304c 563 bp->len = cpu_to_be32(nbytes);
cfb6eeb4 564
19689e38
ED
565 _th = (struct tcphdr *)(bp + 1);
566 memcpy(_th, th, sizeof(*th));
567 _th->check = 0;
568
569 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
570 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
571 sizeof(*bp) + sizeof(*th));
cf80e0e4 572 return crypto_ahash_update(hp->md5_req);
49a72dfb 573}
c7da57a1 574
19689e38 575static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
b71d1d42 576 const struct in6_addr *daddr, struct in6_addr *saddr,
318cf7aa 577 const struct tcphdr *th)
49a72dfb
AL
578{
579 struct tcp_md5sig_pool *hp;
cf80e0e4 580 struct ahash_request *req;
49a72dfb
AL
581
582 hp = tcp_get_md5sig_pool();
583 if (!hp)
584 goto clear_hash_noput;
cf80e0e4 585 req = hp->md5_req;
49a72dfb 586
cf80e0e4 587 if (crypto_ahash_init(req))
49a72dfb 588 goto clear_hash;
19689e38 589 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
49a72dfb
AL
590 goto clear_hash;
591 if (tcp_md5_hash_key(hp, key))
592 goto clear_hash;
cf80e0e4
HX
593 ahash_request_set_crypt(req, NULL, md5_hash, 0);
594 if (crypto_ahash_final(req))
cfb6eeb4 595 goto clear_hash;
cfb6eeb4 596
cfb6eeb4 597 tcp_put_md5sig_pool();
cfb6eeb4 598 return 0;
49a72dfb 599
cfb6eeb4
YH
600clear_hash:
601 tcp_put_md5sig_pool();
602clear_hash_noput:
603 memset(md5_hash, 0, 16);
49a72dfb 604 return 1;
cfb6eeb4
YH
605}
606
39f8e58e
ED
607static int tcp_v6_md5_hash_skb(char *md5_hash,
608 const struct tcp_md5sig_key *key,
318cf7aa 609 const struct sock *sk,
318cf7aa 610 const struct sk_buff *skb)
cfb6eeb4 611{
b71d1d42 612 const struct in6_addr *saddr, *daddr;
49a72dfb 613 struct tcp_md5sig_pool *hp;
cf80e0e4 614 struct ahash_request *req;
318cf7aa 615 const struct tcphdr *th = tcp_hdr(skb);
cfb6eeb4 616
39f8e58e
ED
617 if (sk) { /* valid for establish/request sockets */
618 saddr = &sk->sk_v6_rcv_saddr;
efe4208f 619 daddr = &sk->sk_v6_daddr;
49a72dfb 620 } else {
b71d1d42 621 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
49a72dfb
AL
622 saddr = &ip6h->saddr;
623 daddr = &ip6h->daddr;
cfb6eeb4 624 }
49a72dfb
AL
625
626 hp = tcp_get_md5sig_pool();
627 if (!hp)
628 goto clear_hash_noput;
cf80e0e4 629 req = hp->md5_req;
49a72dfb 630
cf80e0e4 631 if (crypto_ahash_init(req))
49a72dfb
AL
632 goto clear_hash;
633
19689e38 634 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
49a72dfb
AL
635 goto clear_hash;
636 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
637 goto clear_hash;
638 if (tcp_md5_hash_key(hp, key))
639 goto clear_hash;
cf80e0e4
HX
640 ahash_request_set_crypt(req, NULL, md5_hash, 0);
641 if (crypto_ahash_final(req))
49a72dfb
AL
642 goto clear_hash;
643
644 tcp_put_md5sig_pool();
645 return 0;
646
647clear_hash:
648 tcp_put_md5sig_pool();
649clear_hash_noput:
650 memset(md5_hash, 0, 16);
651 return 1;
cfb6eeb4
YH
652}
653
ba8e275a
ED
654#endif
655
656static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
657 const struct sk_buff *skb)
cfb6eeb4 658{
ba8e275a 659#ifdef CONFIG_TCP_MD5SIG
cf533ea5 660 const __u8 *hash_location = NULL;
cfb6eeb4 661 struct tcp_md5sig_key *hash_expected;
b71d1d42 662 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
318cf7aa 663 const struct tcphdr *th = tcp_hdr(skb);
cfb6eeb4 664 int genhash;
cfb6eeb4
YH
665 u8 newhash[16];
666
667 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
7d5d5525 668 hash_location = tcp_parse_md5sig_option(th);
cfb6eeb4 669
785957d3
DM
670 /* We've parsed the options - do we have a hash? */
671 if (!hash_expected && !hash_location)
ff74e23f 672 return false;
785957d3
DM
673
674 if (hash_expected && !hash_location) {
c10d9310 675 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
ff74e23f 676 return true;
cfb6eeb4
YH
677 }
678
785957d3 679 if (!hash_expected && hash_location) {
c10d9310 680 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
ff74e23f 681 return true;
cfb6eeb4
YH
682 }
683
684 /* check the signature */
49a72dfb
AL
685 genhash = tcp_v6_md5_hash_skb(newhash,
686 hash_expected,
39f8e58e 687 NULL, skb);
49a72dfb 688
cfb6eeb4 689 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
72145a68 690 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
e87cc472
JP
691 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
692 genhash ? "failed" : "mismatch",
693 &ip6h->saddr, ntohs(th->source),
694 &ip6h->daddr, ntohs(th->dest));
ff74e23f 695 return true;
cfb6eeb4 696 }
ba8e275a 697#endif
ff74e23f 698 return false;
cfb6eeb4 699}
cfb6eeb4 700
b40cf18e
ED
701static void tcp_v6_init_req(struct request_sock *req,
702 const struct sock *sk_listener,
16bea70a
OP
703 struct sk_buff *skb)
704{
705 struct inet_request_sock *ireq = inet_rsk(req);
b40cf18e 706 const struct ipv6_pinfo *np = inet6_sk(sk_listener);
16bea70a
OP
707
708 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
709 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
710
16bea70a 711 /* So that link locals have meaning */
b40cf18e 712 if (!sk_listener->sk_bound_dev_if &&
16bea70a 713 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
870c3151 714 ireq->ir_iif = tcp_v6_iif(skb);
16bea70a 715
04317daf 716 if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
b40cf18e 717 (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
a224772d 718 np->rxopt.bits.rxinfo ||
16bea70a
OP
719 np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
720 np->rxopt.bits.rxohlim || np->repflow)) {
721 atomic_inc(&skb->users);
722 ireq->pktopts = skb;
723 }
724}
725
f964629e
ED
726static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
727 struct flowi *fl,
d94e0417
OP
728 const struct request_sock *req,
729 bool *strict)
730{
731 if (strict)
732 *strict = true;
f76b33c3 733 return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
d94e0417
OP
734}
735
c6aefafb 736struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
1da177e4 737 .family = AF_INET6,
2e6599cb 738 .obj_size = sizeof(struct tcp6_request_sock),
5db92c99 739 .rtx_syn_ack = tcp_rtx_synack,
60236fdd
ACM
740 .send_ack = tcp_v6_reqsk_send_ack,
741 .destructor = tcp_v6_reqsk_destructor,
72659ecc 742 .send_reset = tcp_v6_send_reset,
4aa956d8 743 .syn_ack_timeout = tcp_syn_ack_timeout,
1da177e4
LT
744};
745
b2e4b3de 746static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
2aec4a29
OP
747 .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
748 sizeof(struct ipv6hdr),
16bea70a 749#ifdef CONFIG_TCP_MD5SIG
fd3a154a 750 .req_md5_lookup = tcp_v6_md5_lookup,
e3afe7b7 751 .calc_md5_hash = tcp_v6_md5_hash_skb,
b6332e6c 752#endif
16bea70a 753 .init_req = tcp_v6_init_req,
fb7b37a7
OP
754#ifdef CONFIG_SYN_COOKIES
755 .cookie_init_seq = cookie_v6_init_sequence,
756#endif
d94e0417 757 .route_req = tcp_v6_route_req,
936b8bdb 758 .init_seq = tcp_v6_init_sequence,
d6274bd8 759 .send_synack = tcp_v6_send_synack,
16bea70a 760};
cfb6eeb4 761
a00e7444 762static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
0f85feae
ED
763 u32 ack, u32 win, u32 tsval, u32 tsecr,
764 int oif, struct tcp_md5sig_key *key, int rst,
5119bd16 765 u8 tclass, __be32 label)
1da177e4 766{
cf533ea5
ED
767 const struct tcphdr *th = tcp_hdr(skb);
768 struct tcphdr *t1;
1da177e4 769 struct sk_buff *buff;
4c9483b2 770 struct flowi6 fl6;
0f85feae 771 struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
e5047992 772 struct sock *ctl_sk = net->ipv6.tcp_sk;
77c676da 773 unsigned int tot_len = sizeof(struct tcphdr);
adf30907 774 struct dst_entry *dst;
81ada62d 775 __be32 *topt;
1da177e4 776
ee684b6f 777 if (tsecr)
626e264d 778 tot_len += TCPOLEN_TSTAMP_ALIGNED;
cfb6eeb4 779#ifdef CONFIG_TCP_MD5SIG
cfb6eeb4
YH
780 if (key)
781 tot_len += TCPOLEN_MD5SIG_ALIGNED;
782#endif
783
cfb6eeb4 784 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1da177e4 785 GFP_ATOMIC);
63159f29 786 if (!buff)
1ab1457c 787 return;
1da177e4 788
cfb6eeb4 789 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1da177e4 790
cfb6eeb4 791 t1 = (struct tcphdr *) skb_push(buff, tot_len);
6651ffc8 792 skb_reset_transport_header(buff);
1da177e4
LT
793
794 /* Swap the send and the receive. */
795 memset(t1, 0, sizeof(*t1));
796 t1->dest = th->source;
797 t1->source = th->dest;
cfb6eeb4 798 t1->doff = tot_len / 4;
626e264d
IJ
799 t1->seq = htonl(seq);
800 t1->ack_seq = htonl(ack);
801 t1->ack = !rst || !th->ack;
802 t1->rst = rst;
803 t1->window = htons(win);
1da177e4 804
81ada62d
IJ
805 topt = (__be32 *)(t1 + 1);
806
ee684b6f 807 if (tsecr) {
626e264d
IJ
808 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
809 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
ee684b6f
AV
810 *topt++ = htonl(tsval);
811 *topt++ = htonl(tsecr);
626e264d
IJ
812 }
813
cfb6eeb4
YH
814#ifdef CONFIG_TCP_MD5SIG
815 if (key) {
81ada62d
IJ
816 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
817 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
818 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
78e645cb
IJ
819 &ipv6_hdr(skb)->saddr,
820 &ipv6_hdr(skb)->daddr, t1);
cfb6eeb4
YH
821 }
822#endif
823
4c9483b2 824 memset(&fl6, 0, sizeof(fl6));
4e3fd7a0
AD
825 fl6.daddr = ipv6_hdr(skb)->saddr;
826 fl6.saddr = ipv6_hdr(skb)->daddr;
1d13a96c 827 fl6.flowlabel = label;
1da177e4 828
e5700aff
DM
829 buff->ip_summed = CHECKSUM_PARTIAL;
830 buff->csum = 0;
831
4c9483b2 832 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
1da177e4 833
4c9483b2 834 fl6.flowi6_proto = IPPROTO_TCP;
a36dbdb2 835 if (rt6_need_strict(&fl6.daddr) && !oif)
870c3151 836 fl6.flowi6_oif = tcp_v6_iif(skb);
9b6c14d5
DA
837 else {
838 if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
839 oif = skb->skb_iif;
840
841 fl6.flowi6_oif = oif;
842 }
1d2f7b2d 843
e110861f 844 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
1958b856
DM
845 fl6.fl6_dport = t1->dest;
846 fl6.fl6_sport = t1->source;
e2d118a1 847 fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
4c9483b2 848 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
1da177e4 849
c20121ae
DL
850 /* Pass a socket to ip6_dst_lookup either it is for RST
851 * Underlying function will use this to retrieve the network
852 * namespace
853 */
0e0d44ab 854 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
68d0c6d3
DM
855 if (!IS_ERR(dst)) {
856 skb_dst_set(buff, dst);
92e55f41 857 ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
c10d9310 858 TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
68d0c6d3 859 if (rst)
c10d9310 860 TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
68d0c6d3 861 return;
1da177e4
LT
862 }
863
864 kfree_skb(buff);
865}
866
a00e7444 867static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
1da177e4 868{
cf533ea5 869 const struct tcphdr *th = tcp_hdr(skb);
626e264d 870 u32 seq = 0, ack_seq = 0;
fa3e5b4e 871 struct tcp_md5sig_key *key = NULL;
658ddaaf
SL
872#ifdef CONFIG_TCP_MD5SIG
873 const __u8 *hash_location = NULL;
874 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
875 unsigned char newhash[16];
876 int genhash;
877 struct sock *sk1 = NULL;
878#endif
9c76a114 879 int oif;
1da177e4 880
626e264d 881 if (th->rst)
1da177e4
LT
882 return;
883
c3658e8d
ED
884 /* If sk not NULL, it means we did a successful lookup and incoming
885 * route had to be correct. prequeue might have dropped our dst.
886 */
887 if (!sk && !ipv6_unicast_destination(skb))
626e264d 888 return;
1da177e4 889
cfb6eeb4 890#ifdef CONFIG_TCP_MD5SIG
3b24d854 891 rcu_read_lock();
658ddaaf 892 hash_location = tcp_parse_md5sig_option(th);
271c3b9b 893 if (sk && sk_fullsock(sk)) {
e46787f0
FW
894 key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
895 } else if (hash_location) {
658ddaaf
SL
896 /*
897 * active side is lost. Try to find listening socket through
898 * source port, and then find md5 key through listening socket.
899 * we are not loose security here:
900 * Incoming packet is checked with md5 hash with finding key,
901 * no RST generated if md5 hash doesn't match.
902 */
903 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
a583636a
CG
904 &tcp_hashinfo, NULL, 0,
905 &ipv6h->saddr,
5ba24953 906 th->source, &ipv6h->daddr,
870c3151 907 ntohs(th->source), tcp_v6_iif(skb));
658ddaaf 908 if (!sk1)
3b24d854 909 goto out;
658ddaaf 910
658ddaaf
SL
911 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
912 if (!key)
3b24d854 913 goto out;
658ddaaf 914
39f8e58e 915 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
658ddaaf 916 if (genhash || memcmp(hash_location, newhash, 16) != 0)
3b24d854 917 goto out;
658ddaaf 918 }
cfb6eeb4
YH
919#endif
920
626e264d
IJ
921 if (th->ack)
922 seq = ntohl(th->ack_seq);
923 else
924 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
925 (th->doff << 2);
1da177e4 926
9c76a114 927 oif = sk ? sk->sk_bound_dev_if : 0;
0f85feae 928 tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
658ddaaf
SL
929
930#ifdef CONFIG_TCP_MD5SIG
3b24d854
ED
931out:
932 rcu_read_unlock();
658ddaaf 933#endif
626e264d 934}
1da177e4 935
a00e7444 936static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
0f85feae 937 u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
1d13a96c 938 struct tcp_md5sig_key *key, u8 tclass,
5119bd16 939 __be32 label)
626e264d 940{
0f85feae
ED
941 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
942 tclass, label);
1da177e4
LT
943}
944
945static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
946{
8feaf0c0 947 struct inet_timewait_sock *tw = inet_twsk(sk);
cfb6eeb4 948 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1da177e4 949
0f85feae 950 tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
8feaf0c0 951 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
ee684b6f 952 tcp_time_stamp + tcptw->tw_ts_offset,
9c76a114 953 tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
21858cd0 954 tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
1da177e4 955
8feaf0c0 956 inet_twsk_put(tw);
1da177e4
LT
957}
958
a00e7444 959static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
6edafaaf 960 struct request_sock *req)
1da177e4 961{
3a19ce0e
DL
962 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
963 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
964 */
20a2b49f
ED
965 /* RFC 7323 2.3
966 * The window field (SEG.WND) of every outgoing segment, with the
967 * exception of <SYN> segments, MUST be right-shifted by
968 * Rcv.Wind.Shift bits:
969 */
0f85feae 970 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
3a19ce0e 971 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
20a2b49f
ED
972 tcp_rsk(req)->rcv_nxt,
973 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
95a22cae
FW
974 tcp_time_stamp + tcp_rsk(req)->ts_off,
975 req->ts_recent, sk->sk_bound_dev_if,
1d13a96c
FF
976 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
977 0, 0);
1da177e4
LT
978}
979
980
079096f1 981static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
1da177e4 982{
079096f1 983#ifdef CONFIG_SYN_COOKIES
aa8223c7 984 const struct tcphdr *th = tcp_hdr(skb);
1da177e4 985
af9b4738 986 if (!th->syn)
c6aefafb 987 sk = cookie_v6_check(sk, skb);
1da177e4
LT
988#endif
989 return sk;
990}
991
1da177e4
LT
992static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
993{
1da177e4
LT
994 if (skb->protocol == htons(ETH_P_IP))
995 return tcp_v4_conn_request(sk, skb);
996
997 if (!ipv6_unicast_destination(skb))
1ab1457c 998 goto drop;
1da177e4 999
1fb6f159
OP
1000 return tcp_conn_request(&tcp6_request_sock_ops,
1001 &tcp_request_sock_ipv6_ops, sk, skb);
1da177e4
LT
1002
1003drop:
9caad864 1004 tcp_listendrop(sk);
1da177e4
LT
1005 return 0; /* don't send reset */
1006}
1007
ebf6c9cb
ED
1008static void tcp_v6_restore_cb(struct sk_buff *skb)
1009{
1010 /* We need to move header back to the beginning if xfrm6_policy_check()
1011 * and tcp_v6_fill_cb() are going to be called again.
1012 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1013 */
1014 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1015 sizeof(struct inet6_skb_parm));
1016}
1017
0c27171e 1018static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
4c99aa40 1019 struct request_sock *req,
5e0724d0
ED
1020 struct dst_entry *dst,
1021 struct request_sock *req_unhash,
1022 bool *own_req)
1da177e4 1023{
634fb979 1024 struct inet_request_sock *ireq;
0c27171e
ED
1025 struct ipv6_pinfo *newnp;
1026 const struct ipv6_pinfo *np = inet6_sk(sk);
45f6fad8 1027 struct ipv6_txoptions *opt;
1da177e4
LT
1028 struct tcp6_sock *newtcp6sk;
1029 struct inet_sock *newinet;
1030 struct tcp_sock *newtp;
1031 struct sock *newsk;
cfb6eeb4
YH
1032#ifdef CONFIG_TCP_MD5SIG
1033 struct tcp_md5sig_key *key;
1034#endif
3840a06e 1035 struct flowi6 fl6;
1da177e4
LT
1036
1037 if (skb->protocol == htons(ETH_P_IP)) {
1038 /*
1039 * v6 mapped
1040 */
1041
5e0724d0
ED
1042 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1043 req_unhash, own_req);
1da177e4 1044
63159f29 1045 if (!newsk)
1da177e4
LT
1046 return NULL;
1047
1048 newtcp6sk = (struct tcp6_sock *)newsk;
1049 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1050
1051 newinet = inet_sk(newsk);
1052 newnp = inet6_sk(newsk);
1053 newtp = tcp_sk(newsk);
1054
1055 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1056
d1e559d0 1057 newnp->saddr = newsk->sk_v6_rcv_saddr;
1da177e4 1058
8292a17a 1059 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1da177e4 1060 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
cfb6eeb4
YH
1061#ifdef CONFIG_TCP_MD5SIG
1062 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1063#endif
1064
676a1184
YZ
1065 newnp->ipv6_ac_list = NULL;
1066 newnp->ipv6_fl_list = NULL;
1da177e4
LT
1067 newnp->pktoptions = NULL;
1068 newnp->opt = NULL;
870c3151 1069 newnp->mcast_oif = tcp_v6_iif(skb);
0660e03f 1070 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1397ed35 1071 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
df3687ff
FF
1072 if (np->repflow)
1073 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1da177e4 1074
e6848976
ACM
1075 /*
1076 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1077 * here, tcp_create_openreq_child now does this for us, see the comment in
1078 * that function for the gory details. -acme
1da177e4 1079 */
1da177e4
LT
1080
1081 /* It is tricky place. Until this moment IPv4 tcp
8292a17a 1082 worked with IPv6 icsk.icsk_af_ops.
1da177e4
LT
1083 Sync it now.
1084 */
d83d8461 1085 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1da177e4
LT
1086
1087 return newsk;
1088 }
1089
634fb979 1090 ireq = inet_rsk(req);
1da177e4
LT
1091
1092 if (sk_acceptq_is_full(sk))
1093 goto out_overflow;
1094
493f377d 1095 if (!dst) {
f76b33c3 1096 dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
493f377d 1097 if (!dst)
1da177e4 1098 goto out;
1ab1457c 1099 }
1da177e4
LT
1100
1101 newsk = tcp_create_openreq_child(sk, req, skb);
63159f29 1102 if (!newsk)
093d2823 1103 goto out_nonewsk;
1da177e4 1104
e6848976
ACM
1105 /*
1106 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1107 * count here, tcp_create_openreq_child now does this for us, see the
1108 * comment in that function for the gory details. -acme
1109 */
1da177e4 1110
59eed279 1111 newsk->sk_gso_type = SKB_GSO_TCPV6;
6bd4f355 1112 ip6_dst_store(newsk, dst, NULL, NULL);
fae6ef87 1113 inet6_sk_rx_dst_set(newsk, skb);
1da177e4
LT
1114
1115 newtcp6sk = (struct tcp6_sock *)newsk;
1116 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1117
1118 newtp = tcp_sk(newsk);
1119 newinet = inet_sk(newsk);
1120 newnp = inet6_sk(newsk);
1121
1122 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1123
634fb979
ED
1124 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1125 newnp->saddr = ireq->ir_v6_loc_addr;
1126 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1127 newsk->sk_bound_dev_if = ireq->ir_iif;
1da177e4 1128
1ab1457c 1129 /* Now IPv6 options...
1da177e4
LT
1130
1131 First: no IPv4 options.
1132 */
f6d8bd05 1133 newinet->inet_opt = NULL;
676a1184 1134 newnp->ipv6_ac_list = NULL;
d35690be 1135 newnp->ipv6_fl_list = NULL;
1da177e4
LT
1136
1137 /* Clone RX bits */
1138 newnp->rxopt.all = np->rxopt.all;
1139
1da177e4 1140 newnp->pktoptions = NULL;
1da177e4 1141 newnp->opt = NULL;
870c3151 1142 newnp->mcast_oif = tcp_v6_iif(skb);
0660e03f 1143 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1397ed35 1144 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
df3687ff
FF
1145 if (np->repflow)
1146 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1da177e4
LT
1147
1148 /* Clone native IPv6 options from listening socket (if any)
1149
1150 Yes, keeping reference count would be much more clever,
1151 but we make one more one thing there: reattach optmem
1152 to newsk.
1153 */
56ac42bc
HD
1154 opt = ireq->ipv6_opt;
1155 if (!opt)
1156 opt = rcu_dereference(np->opt);
45f6fad8
ED
1157 if (opt) {
1158 opt = ipv6_dup_options(newsk, opt);
1159 RCU_INIT_POINTER(newnp->opt, opt);
1160 }
d83d8461 1161 inet_csk(newsk)->icsk_ext_hdr_len = 0;
45f6fad8
ED
1162 if (opt)
1163 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1164 opt->opt_flen;
1da177e4 1165
81164413
DB
1166 tcp_ca_openreq_child(newsk, dst);
1167
1da177e4 1168 tcp_sync_mss(newsk, dst_mtu(dst));
3541f9e8 1169 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
d135c522 1170
1da177e4
LT
1171 tcp_initialize_rcv_mss(newsk);
1172
c720c7e8
ED
1173 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1174 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1da177e4 1175
cfb6eeb4
YH
1176#ifdef CONFIG_TCP_MD5SIG
1177 /* Copy over the MD5 key from the original socket */
4aa956d8 1178 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
53b24b8f 1179 if (key) {
cfb6eeb4
YH
1180 /* We're using one, so create a matching key
1181 * on the newsk structure. If we fail to get
1182 * memory, then we end up not copying the key
1183 * across. Shucks.
1184 */
efe4208f 1185 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
99a1dec7 1186 AF_INET6, key->key, key->keylen,
7450aaf6 1187 sk_gfp_mask(sk, GFP_ATOMIC));
cfb6eeb4
YH
1188 }
1189#endif
1190
093d2823 1191 if (__inet_inherit_port(sk, newsk) < 0) {
e337e24d
CP
1192 inet_csk_prepare_forced_close(newsk);
1193 tcp_done(newsk);
093d2823
BS
1194 goto out;
1195 }
5e0724d0 1196 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
805c4bc0 1197 if (*own_req) {
49a496c9 1198 tcp_move_syn(newtp, req);
805c4bc0
ED
1199
1200 /* Clone pktoptions received with SYN, if we own the req */
1201 if (ireq->pktopts) {
1202 newnp->pktoptions = skb_clone(ireq->pktopts,
7450aaf6 1203 sk_gfp_mask(sk, GFP_ATOMIC));
805c4bc0
ED
1204 consume_skb(ireq->pktopts);
1205 ireq->pktopts = NULL;
ebf6c9cb
ED
1206 if (newnp->pktoptions) {
1207 tcp_v6_restore_cb(newnp->pktoptions);
805c4bc0 1208 skb_set_owner_r(newnp->pktoptions, newsk);
ebf6c9cb 1209 }
805c4bc0 1210 }
ce105008 1211 }
1da177e4
LT
1212
1213 return newsk;
1214
1215out_overflow:
02a1d6e7 1216 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
093d2823 1217out_nonewsk:
1da177e4 1218 dst_release(dst);
093d2823 1219out:
9caad864 1220 tcp_listendrop(sk);
1da177e4
LT
1221 return NULL;
1222}
1223
1da177e4 1224/* The socket must have it's spinlock held when we get
e994b2f0 1225 * here, unless it is a TCP_LISTEN socket.
1da177e4
LT
1226 *
1227 * We have a potential double-lock case here, so even when
1228 * doing backlog processing we use the BH locking scheme.
1229 * This is because we cannot sleep with the original spinlock
1230 * held.
1231 */
1232static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1233{
1234 struct ipv6_pinfo *np = inet6_sk(sk);
1235 struct tcp_sock *tp;
1236 struct sk_buff *opt_skb = NULL;
1237
1238 /* Imagine: socket is IPv6. IPv4 packet arrives,
1239 goes to IPv4 receive handler and backlogged.
1240 From backlog it always goes here. Kerboom...
1241 Fortunately, tcp_rcv_established and rcv_established
1242 handle them correctly, but it is not case with
1243 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1244 */
1245
1246 if (skb->protocol == htons(ETH_P_IP))
1247 return tcp_v4_do_rcv(sk, skb);
1248
ac6e7800 1249 if (tcp_filter(sk, skb))
1da177e4
LT
1250 goto discard;
1251
1252 /*
1253 * socket locking is here for SMP purposes as backlog rcv
1254 * is currently called with bh processing disabled.
1255 */
1256
1257 /* Do Stevens' IPV6_PKTOPTIONS.
1258
1259 Yes, guys, it is the only place in our code, where we
1260 may make it not affecting IPv4.
1261 The rest of code is protocol independent,
1262 and I do not like idea to uglify IPv4.
1263
1264 Actually, all the idea behind IPV6_PKTOPTIONS
1265 looks not very well thought. For now we latch
1266 options, received in the last packet, enqueued
1267 by tcp. Feel free to propose better solution.
1ab1457c 1268 --ANK (980728)
1da177e4
LT
1269 */
1270 if (np->rxopt.all)
7450aaf6 1271 opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
1da177e4
LT
1272
1273 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
5d299f3d
ED
1274 struct dst_entry *dst = sk->sk_rx_dst;
1275
bdeab991 1276 sock_rps_save_rxhash(sk, skb);
3d97379a 1277 sk_mark_napi_id(sk, skb);
5d299f3d
ED
1278 if (dst) {
1279 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1280 dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1281 dst_release(dst);
1282 sk->sk_rx_dst = NULL;
1283 }
1284 }
1285
c995ae22 1286 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1da177e4
LT
1287 if (opt_skb)
1288 goto ipv6_pktoptions;
1289 return 0;
1290 }
1291
12e25e10 1292 if (tcp_checksum_complete(skb))
1da177e4
LT
1293 goto csum_err;
1294
1ab1457c 1295 if (sk->sk_state == TCP_LISTEN) {
079096f1
ED
1296 struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1297
1da177e4
LT
1298 if (!nsk)
1299 goto discard;
1300
4c99aa40 1301 if (nsk != sk) {
bdeab991 1302 sock_rps_save_rxhash(nsk, skb);
38cb5245 1303 sk_mark_napi_id(nsk, skb);
1da177e4
LT
1304 if (tcp_child_process(sk, nsk, skb))
1305 goto reset;
1306 if (opt_skb)
1307 __kfree_skb(opt_skb);
1308 return 0;
1309 }
47482f13 1310 } else
bdeab991 1311 sock_rps_save_rxhash(sk, skb);
1da177e4 1312
72ab4a86 1313 if (tcp_rcv_state_process(sk, skb))
1da177e4 1314 goto reset;
1da177e4
LT
1315 if (opt_skb)
1316 goto ipv6_pktoptions;
1317 return 0;
1318
1319reset:
cfb6eeb4 1320 tcp_v6_send_reset(sk, skb);
1da177e4
LT
1321discard:
1322 if (opt_skb)
1323 __kfree_skb(opt_skb);
1324 kfree_skb(skb);
1325 return 0;
1326csum_err:
c10d9310
ED
1327 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1328 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1da177e4
LT
1329 goto discard;
1330
1331
1332ipv6_pktoptions:
1333 /* Do you ask, what is it?
1334
1335 1. skb was enqueued by tcp.
1336 2. skb is added to tail of read queue, rather than out of order.
1337 3. socket is not in passive state.
1338 4. Finally, it really contains options, which user wants to receive.
1339 */
1340 tp = tcp_sk(sk);
1341 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1342 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
333fad53 1343 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
870c3151 1344 np->mcast_oif = tcp_v6_iif(opt_skb);
333fad53 1345 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
0660e03f 1346 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
82e9f105 1347 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1397ed35 1348 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
df3687ff
FF
1349 if (np->repflow)
1350 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
a224772d 1351 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1da177e4 1352 skb_set_owner_r(opt_skb, sk);
8ce48623 1353 tcp_v6_restore_cb(opt_skb);
1da177e4
LT
1354 opt_skb = xchg(&np->pktoptions, opt_skb);
1355 } else {
1356 __kfree_skb(opt_skb);
1357 opt_skb = xchg(&np->pktoptions, NULL);
1358 }
1359 }
1360
800d55f1 1361 kfree_skb(opt_skb);
1da177e4
LT
1362 return 0;
1363}
1364
2dc49d16
ND
1365static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1366 const struct tcphdr *th)
1367{
1368 /* This is tricky: we move IP6CB at its correct location into
1369 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1370 * _decode_session6() uses IP6CB().
1371 * barrier() makes sure compiler won't play aliasing games.
1372 */
1373 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1374 sizeof(struct inet6_skb_parm));
1375 barrier();
1376
1377 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1378 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1379 skb->len - th->doff*4);
1380 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1381 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1382 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1383 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1384 TCP_SKB_CB(skb)->sacked = 0;
1385}
1386
e5bbef20 1387static int tcp_v6_rcv(struct sk_buff *skb)
1da177e4 1388{
cf533ea5 1389 const struct tcphdr *th;
b71d1d42 1390 const struct ipv6hdr *hdr;
3b24d854 1391 bool refcounted;
1da177e4
LT
1392 struct sock *sk;
1393 int ret;
a86b1e30 1394 struct net *net = dev_net(skb->dev);
1da177e4
LT
1395
1396 if (skb->pkt_type != PACKET_HOST)
1397 goto discard_it;
1398
1399 /*
1400 * Count it even if it's bad.
1401 */
90bbcc60 1402 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1da177e4
LT
1403
1404 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1405 goto discard_it;
1406
ea1627c2 1407 th = (const struct tcphdr *)skb->data;
1da177e4 1408
ea1627c2 1409 if (unlikely(th->doff < sizeof(struct tcphdr)/4))
1da177e4
LT
1410 goto bad_packet;
1411 if (!pskb_may_pull(skb, th->doff*4))
1412 goto discard_it;
1413
e4f45b7f 1414 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
6a5dc9e5 1415 goto csum_error;
1da177e4 1416
ea1627c2 1417 th = (const struct tcphdr *)skb->data;
e802af9c 1418 hdr = ipv6_hdr(skb);
1da177e4 1419
4bdc3d66 1420lookup:
a583636a 1421 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
3b24d854
ED
1422 th->source, th->dest, inet6_iif(skb),
1423 &refcounted);
1da177e4
LT
1424 if (!sk)
1425 goto no_tcp_socket;
1426
1427process:
1428 if (sk->sk_state == TCP_TIME_WAIT)
1429 goto do_time_wait;
1430
079096f1
ED
1431 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1432 struct request_sock *req = inet_reqsk(sk);
7716682c 1433 struct sock *nsk;
079096f1
ED
1434
1435 sk = req->rsk_listener;
1436 tcp_v6_fill_cb(skb, hdr, th);
1437 if (tcp_v6_inbound_md5_hash(sk, skb)) {
e65c332d 1438 sk_drops_add(sk, skb);
079096f1
ED
1439 reqsk_put(req);
1440 goto discard_it;
1441 }
7716682c 1442 if (unlikely(sk->sk_state != TCP_LISTEN)) {
f03f2e15 1443 inet_csk_reqsk_queue_drop_and_put(sk, req);
4bdc3d66
ED
1444 goto lookup;
1445 }
7716682c 1446 sock_hold(sk);
3b24d854 1447 refcounted = true;
7716682c 1448 nsk = tcp_check_req(sk, skb, req, false);
079096f1
ED
1449 if (!nsk) {
1450 reqsk_put(req);
7716682c 1451 goto discard_and_relse;
079096f1
ED
1452 }
1453 if (nsk == sk) {
079096f1
ED
1454 reqsk_put(req);
1455 tcp_v6_restore_cb(skb);
1456 } else if (tcp_child_process(sk, nsk, skb)) {
1457 tcp_v6_send_reset(nsk, skb);
7716682c 1458 goto discard_and_relse;
079096f1 1459 } else {
7716682c 1460 sock_put(sk);
079096f1
ED
1461 return 0;
1462 }
1463 }
e802af9c 1464 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
02a1d6e7 1465 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
e802af9c
SH
1466 goto discard_and_relse;
1467 }
1468
1da177e4
LT
1469 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1470 goto discard_and_relse;
1471
2dc49d16
ND
1472 tcp_v6_fill_cb(skb, hdr, th);
1473
9ea88a15
DP
1474 if (tcp_v6_inbound_md5_hash(sk, skb))
1475 goto discard_and_relse;
9ea88a15 1476
ac6e7800 1477 if (tcp_filter(sk, skb))
1da177e4 1478 goto discard_and_relse;
ac6e7800
ED
1479 th = (const struct tcphdr *)skb->data;
1480 hdr = ipv6_hdr(skb);
1da177e4
LT
1481
1482 skb->dev = NULL;
1483
e994b2f0
ED
1484 if (sk->sk_state == TCP_LISTEN) {
1485 ret = tcp_v6_do_rcv(sk, skb);
1486 goto put_and_return;
1487 }
1488
1489 sk_incoming_cpu_update(sk);
1490
293b9c42 1491 bh_lock_sock_nested(sk);
a44d6eac 1492 tcp_segs_in(tcp_sk(sk), skb);
1da177e4
LT
1493 ret = 0;
1494 if (!sock_owned_by_user(sk)) {
7bced397 1495 if (!tcp_prequeue(sk, skb))
1ab1457c 1496 ret = tcp_v6_do_rcv(sk, skb);
c9c33212 1497 } else if (tcp_add_backlog(sk, skb)) {
6b03a53a
ZY
1498 goto discard_and_relse;
1499 }
1da177e4
LT
1500 bh_unlock_sock(sk);
1501
e994b2f0 1502put_and_return:
3b24d854
ED
1503 if (refcounted)
1504 sock_put(sk);
1da177e4
LT
1505 return ret ? -1 : 0;
1506
1507no_tcp_socket:
1508 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1509 goto discard_it;
1510
2dc49d16
ND
1511 tcp_v6_fill_cb(skb, hdr, th);
1512
12e25e10 1513 if (tcp_checksum_complete(skb)) {
6a5dc9e5 1514csum_error:
90bbcc60 1515 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1da177e4 1516bad_packet:
90bbcc60 1517 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1da177e4 1518 } else {
cfb6eeb4 1519 tcp_v6_send_reset(NULL, skb);
1da177e4
LT
1520 }
1521
1522discard_it:
1da177e4
LT
1523 kfree_skb(skb);
1524 return 0;
1525
1526discard_and_relse:
532182cd 1527 sk_drops_add(sk, skb);
3b24d854
ED
1528 if (refcounted)
1529 sock_put(sk);
1da177e4
LT
1530 goto discard_it;
1531
1532do_time_wait:
1533 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
9469c7b4 1534 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1535 goto discard_it;
1536 }
1537
2dc49d16
ND
1538 tcp_v6_fill_cb(skb, hdr, th);
1539
6a5dc9e5
ED
1540 if (tcp_checksum_complete(skb)) {
1541 inet_twsk_put(inet_twsk(sk));
1542 goto csum_error;
1da177e4
LT
1543 }
1544
9469c7b4 1545 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1da177e4
LT
1546 case TCP_TW_SYN:
1547 {
1548 struct sock *sk2;
1549
c346dca1 1550 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
a583636a 1551 skb, __tcp_hdrlen(th),
5ba24953 1552 &ipv6_hdr(skb)->saddr, th->source,
0660e03f 1553 &ipv6_hdr(skb)->daddr,
870c3151 1554 ntohs(th->dest), tcp_v6_iif(skb));
53b24b8f 1555 if (sk2) {
295ff7ed 1556 struct inet_timewait_sock *tw = inet_twsk(sk);
dbe7faa4 1557 inet_twsk_deschedule_put(tw);
1da177e4 1558 sk = sk2;
4ad19de8 1559 tcp_v6_restore_cb(skb);
3b24d854 1560 refcounted = false;
1da177e4
LT
1561 goto process;
1562 }
1563 /* Fall through to ACK */
1564 }
1565 case TCP_TW_ACK:
1566 tcp_v6_timewait_ack(sk, skb);
1567 break;
1568 case TCP_TW_RST:
4ad19de8 1569 tcp_v6_restore_cb(skb);
271c3b9b
FW
1570 tcp_v6_send_reset(sk, skb);
1571 inet_twsk_deschedule_put(inet_twsk(sk));
1572 goto discard_it;
4aa956d8
WY
1573 case TCP_TW_SUCCESS:
1574 ;
1da177e4
LT
1575 }
1576 goto discard_it;
1577}
1578
c7109986
ED
1579static void tcp_v6_early_demux(struct sk_buff *skb)
1580{
1581 const struct ipv6hdr *hdr;
1582 const struct tcphdr *th;
1583 struct sock *sk;
1584
1585 if (skb->pkt_type != PACKET_HOST)
1586 return;
1587
1588 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1589 return;
1590
1591 hdr = ipv6_hdr(skb);
1592 th = tcp_hdr(skb);
1593
1594 if (th->doff < sizeof(struct tcphdr) / 4)
1595 return;
1596
870c3151 1597 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
c7109986
ED
1598 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1599 &hdr->saddr, th->source,
1600 &hdr->daddr, ntohs(th->dest),
1601 inet6_iif(skb));
1602 if (sk) {
1603 skb->sk = sk;
1604 skb->destructor = sock_edemux;
f7e4eb03 1605 if (sk_fullsock(sk)) {
d0c294c5 1606 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
f3f12135 1607
c7109986 1608 if (dst)
5d299f3d 1609 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
c7109986 1610 if (dst &&
f3f12135 1611 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
c7109986
ED
1612 skb_dst_set_noref(skb, dst);
1613 }
1614 }
1615}
1616
ccb7c410
DM
1617static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1618 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1619 .twsk_unique = tcp_twsk_unique,
4aa956d8 1620 .twsk_destructor = tcp_twsk_destructor,
ccb7c410
DM
1621};
1622
3b401a81 1623static const struct inet_connection_sock_af_ops ipv6_specific = {
543d9cfe
ACM
1624 .queue_xmit = inet6_csk_xmit,
1625 .send_check = tcp_v6_send_check,
1626 .rebuild_header = inet6_sk_rebuild_header,
5d299f3d 1627 .sk_rx_dst_set = inet6_sk_rx_dst_set,
543d9cfe
ACM
1628 .conn_request = tcp_v6_conn_request,
1629 .syn_recv_sock = tcp_v6_syn_recv_sock,
543d9cfe 1630 .net_header_len = sizeof(struct ipv6hdr),
67469601 1631 .net_frag_header_len = sizeof(struct frag_hdr),
543d9cfe
ACM
1632 .setsockopt = ipv6_setsockopt,
1633 .getsockopt = ipv6_getsockopt,
1634 .addr2sockaddr = inet6_csk_addr2sockaddr,
1635 .sockaddr_len = sizeof(struct sockaddr_in6),
3fdadf7d 1636#ifdef CONFIG_COMPAT
543d9cfe
ACM
1637 .compat_setsockopt = compat_ipv6_setsockopt,
1638 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1639#endif
4fab9071 1640 .mtu_reduced = tcp_v6_mtu_reduced,
1da177e4
LT
1641};
1642
cfb6eeb4 1643#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1644static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
cfb6eeb4 1645 .md5_lookup = tcp_v6_md5_lookup,
49a72dfb 1646 .calc_md5_hash = tcp_v6_md5_hash_skb,
cfb6eeb4 1647 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1648};
a928630a 1649#endif
cfb6eeb4 1650
1da177e4
LT
1651/*
1652 * TCP over IPv4 via INET6 API
1653 */
3b401a81 1654static const struct inet_connection_sock_af_ops ipv6_mapped = {
543d9cfe
ACM
1655 .queue_xmit = ip_queue_xmit,
1656 .send_check = tcp_v4_send_check,
1657 .rebuild_header = inet_sk_rebuild_header,
63d02d15 1658 .sk_rx_dst_set = inet_sk_rx_dst_set,
543d9cfe
ACM
1659 .conn_request = tcp_v6_conn_request,
1660 .syn_recv_sock = tcp_v6_syn_recv_sock,
543d9cfe
ACM
1661 .net_header_len = sizeof(struct iphdr),
1662 .setsockopt = ipv6_setsockopt,
1663 .getsockopt = ipv6_getsockopt,
1664 .addr2sockaddr = inet6_csk_addr2sockaddr,
1665 .sockaddr_len = sizeof(struct sockaddr_in6),
3fdadf7d 1666#ifdef CONFIG_COMPAT
543d9cfe
ACM
1667 .compat_setsockopt = compat_ipv6_setsockopt,
1668 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1669#endif
4fab9071 1670 .mtu_reduced = tcp_v4_mtu_reduced,
1da177e4
LT
1671};
1672
cfb6eeb4 1673#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1674static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
cfb6eeb4 1675 .md5_lookup = tcp_v4_md5_lookup,
49a72dfb 1676 .calc_md5_hash = tcp_v4_md5_hash_skb,
cfb6eeb4 1677 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1678};
a928630a 1679#endif
cfb6eeb4 1680
1da177e4
LT
1681/* NOTE: A lot of things set to zero explicitly by call to
1682 * sk_alloc() so need not be done here.
1683 */
1684static int tcp_v6_init_sock(struct sock *sk)
1685{
6687e988 1686 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4 1687
900f65d3 1688 tcp_init_sock(sk);
1da177e4 1689
8292a17a 1690 icsk->icsk_af_ops = &ipv6_specific;
1da177e4 1691
cfb6eeb4 1692#ifdef CONFIG_TCP_MD5SIG
ac807fa8 1693 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
cfb6eeb4
YH
1694#endif
1695
1da177e4
LT
1696 return 0;
1697}
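/*
 * A minimal userspace sketch (standard sockets API, not part of this file):
 * creating an AF_INET6 stream socket reaches tcpv6_prot.init, i.e.
 * tcp_v6_init_sock() above, which installs the ipv6_specific ops:
 *
 *	int fd = socket(AF_INET6, SOCK_STREAM, 0);
 */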
1698
7d06b2e0 1699static void tcp_v6_destroy_sock(struct sock *sk)
1da177e4 1700{
1da177e4 1701 tcp_v4_destroy_sock(sk);
7d06b2e0 1702 inet6_destroy_sock(sk);
1da177e4
LT
1703}
1704
952a10be 1705#ifdef CONFIG_PROC_FS
1da177e4 1706/* Proc filesystem TCPv6 sock list dumping. */
1ab1457c 1707static void get_openreq6(struct seq_file *seq,
aa3a0c8c 1708 const struct request_sock *req, int i)
1da177e4 1709{
fa76ce73 1710 long ttd = req->rsk_timer.expires - jiffies;
634fb979
ED
1711 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1712 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1da177e4
LT
1713
1714 if (ttd < 0)
1715 ttd = 0;
1716
1da177e4
LT
1717 seq_printf(seq,
1718 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
d14c5ab6 1719 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1da177e4
LT
1720 i,
1721 src->s6_addr32[0], src->s6_addr32[1],
1722 src->s6_addr32[2], src->s6_addr32[3],
b44084c2 1723 inet_rsk(req)->ir_num,
1da177e4
LT
1724 dest->s6_addr32[0], dest->s6_addr32[1],
1725 dest->s6_addr32[2], dest->s6_addr32[3],
634fb979 1726 ntohs(inet_rsk(req)->ir_rmt_port),
1da177e4 1727 TCP_SYN_RECV,
4c99aa40 1728 0, 0, /* could print option size, but that is af dependent. */
1ab1457c
YH
1729 1, /* timers active (only the expire timer) */
1730 jiffies_to_clock_t(ttd),
e6c022a4 1731 req->num_timeout,
aa3a0c8c
ED
1732 from_kuid_munged(seq_user_ns(seq),
1733 sock_i_uid(req->rsk_listener)),
1ab1457c 1734 0, /* non standard timer */
1da177e4
LT
1735 0, /* open_requests have no inode */
1736 0, req);
1737}
1738
1739static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1740{
b71d1d42 1741 const struct in6_addr *dest, *src;
1da177e4
LT
1742 __u16 destp, srcp;
1743 int timer_active;
1744 unsigned long timer_expires;
cf533ea5
ED
1745 const struct inet_sock *inet = inet_sk(sp);
1746 const struct tcp_sock *tp = tcp_sk(sp);
463c84b9 1747 const struct inet_connection_sock *icsk = inet_csk(sp);
0536fcc0 1748 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
00fd38d9
ED
1749 int rx_queue;
1750 int state;
1da177e4 1751
efe4208f
ED
1752 dest = &sp->sk_v6_daddr;
1753 src = &sp->sk_v6_rcv_saddr;
c720c7e8
ED
1754 destp = ntohs(inet->inet_dport);
1755 srcp = ntohs(inet->inet_sport);
463c84b9 1756
ce3cf4ec 1757 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
57dde7f7 1758 icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
ce3cf4ec 1759 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
1da177e4 1760 timer_active = 1;
463c84b9
ACM
1761 timer_expires = icsk->icsk_timeout;
1762 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1da177e4 1763 timer_active = 4;
463c84b9 1764 timer_expires = icsk->icsk_timeout;
1da177e4
LT
1765 } else if (timer_pending(&sp->sk_timer)) {
1766 timer_active = 2;
1767 timer_expires = sp->sk_timer.expires;
1768 } else {
1769 timer_active = 0;
1770 timer_expires = jiffies;
1771 }
1772
00fd38d9
ED
1773 state = sk_state_load(sp);
1774 if (state == TCP_LISTEN)
1775 rx_queue = sp->sk_ack_backlog;
1776 else
1777 /* Because we don't lock the socket,
1778 * we might find a transient negative value.
1779 */
1780 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1781
1da177e4
LT
1782 seq_printf(seq,
1783 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
d14c5ab6 1784 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1da177e4
LT
1785 i,
1786 src->s6_addr32[0], src->s6_addr32[1],
1787 src->s6_addr32[2], src->s6_addr32[3], srcp,
1788 dest->s6_addr32[0], dest->s6_addr32[1],
1789 dest->s6_addr32[2], dest->s6_addr32[3], destp,
00fd38d9
ED
1790 state,
1791 tp->write_seq - tp->snd_una,
1792 rx_queue,
1da177e4 1793 timer_active,
a399a805 1794 jiffies_delta_to_clock_t(timer_expires - jiffies),
463c84b9 1795 icsk->icsk_retransmits,
a7cb5a49 1796 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
6687e988 1797 icsk->icsk_probes_out,
1da177e4
LT
1798 sock_i_ino(sp),
1799 atomic_read(&sp->sk_refcnt), sp,
7be87351
SH
1800 jiffies_to_clock_t(icsk->icsk_rto),
1801 jiffies_to_clock_t(icsk->icsk_ack.ato),
4c99aa40 1802 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
0b6a05c1 1803 tp->snd_cwnd,
00fd38d9 1804 state == TCP_LISTEN ?
0536fcc0 1805 fastopenq->max_qlen :
0a672f74 1806 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1da177e4
LT
1807 );
1808}
1809
1ab1457c 1810static void get_timewait6_sock(struct seq_file *seq,
8feaf0c0 1811 struct inet_timewait_sock *tw, int i)
1da177e4 1812{
789f558c 1813 long delta = tw->tw_timer.expires - jiffies;
b71d1d42 1814 const struct in6_addr *dest, *src;
1da177e4 1815 __u16 destp, srcp;
1da177e4 1816
efe4208f
ED
1817 dest = &tw->tw_v6_daddr;
1818 src = &tw->tw_v6_rcv_saddr;
1da177e4
LT
1819 destp = ntohs(tw->tw_dport);
1820 srcp = ntohs(tw->tw_sport);
1821
1822 seq_printf(seq,
1823 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
71338aa7 1824 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1da177e4
LT
1825 i,
1826 src->s6_addr32[0], src->s6_addr32[1],
1827 src->s6_addr32[2], src->s6_addr32[3], srcp,
1828 dest->s6_addr32[0], dest->s6_addr32[1],
1829 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1830 tw->tw_substate, 0, 0,
a399a805 1831 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1da177e4
LT
1832 atomic_read(&tw->tw_refcnt), tw);
1833}
1834
1da177e4
LT
1835static int tcp6_seq_show(struct seq_file *seq, void *v)
1836{
1837 struct tcp_iter_state *st;
05dbc7b5 1838 struct sock *sk = v;
1da177e4
LT
1839
1840 if (v == SEQ_START_TOKEN) {
1841 seq_puts(seq,
1842 " sl "
1843 "local_address "
1844 "remote_address "
1845 "st tx_queue rx_queue tr tm->when retrnsmt"
1846 " uid timeout inode\n");
1847 goto out;
1848 }
1849 st = seq->private;
1850
079096f1
ED
1851 if (sk->sk_state == TCP_TIME_WAIT)
1852 get_timewait6_sock(seq, v, st->num);
1853 else if (sk->sk_state == TCP_NEW_SYN_RECV)
aa3a0c8c 1854 get_openreq6(seq, v, st->num);
079096f1
ED
1855 else
1856 get_tcp6_sock(seq, v, st->num);
1da177e4
LT
1857out:
1858 return 0;
1859}
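/*
 * Illustrative /proc/net/tcp6 row produced by the helpers above (values are
 * made up; an IPv6 listener on port 22 would look roughly like this):
 *
 *   sl  local_address                         remote_address ...       st ...
 *    0: 00000000000000000000000000000000:0016 ...:0000                 0A ...
 *
 * Each address is the four s6_addr32 words printed with %08X, so every
 * 32-bit word appears in host byte order; ports are hexadecimal, and "st"
 * is the numeric TCP state (0A == TCP_LISTEN).
 */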
1860
73cb88ec
AV
1861static const struct file_operations tcp6_afinfo_seq_fops = {
1862 .owner = THIS_MODULE,
1863 .open = tcp_seq_open,
1864 .read = seq_read,
1865 .llseek = seq_lseek,
1866 .release = seq_release_net
1867};
1868
1da177e4 1869static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1da177e4
LT
1870 .name = "tcp6",
1871 .family = AF_INET6,
73cb88ec 1872 .seq_fops = &tcp6_afinfo_seq_fops,
9427c4b3
DL
1873 .seq_ops = {
1874 .show = tcp6_seq_show,
1875 },
1da177e4
LT
1876};
1877
2c8c1e72 1878int __net_init tcp6_proc_init(struct net *net)
1da177e4 1879{
6f8b13bc 1880 return tcp_proc_register(net, &tcp6_seq_afinfo);
1da177e4
LT
1881}
1882
6f8b13bc 1883void tcp6_proc_exit(struct net *net)
1da177e4 1884{
6f8b13bc 1885 tcp_proc_unregister(net, &tcp6_seq_afinfo);
1da177e4
LT
1886}
1887#endif
1888
1889struct proto tcpv6_prot = {
1890 .name = "TCPv6",
1891 .owner = THIS_MODULE,
1892 .close = tcp_close,
1893 .connect = tcp_v6_connect,
1894 .disconnect = tcp_disconnect,
463c84b9 1895 .accept = inet_csk_accept,
1da177e4
LT
1896 .ioctl = tcp_ioctl,
1897 .init = tcp_v6_init_sock,
1898 .destroy = tcp_v6_destroy_sock,
1899 .shutdown = tcp_shutdown,
1900 .setsockopt = tcp_setsockopt,
1901 .getsockopt = tcp_getsockopt,
4b9d07a4 1902 .keepalive = tcp_set_keepalive,
1da177e4 1903 .recvmsg = tcp_recvmsg,
7ba42910
CG
1904 .sendmsg = tcp_sendmsg,
1905 .sendpage = tcp_sendpage,
1da177e4 1906 .backlog_rcv = tcp_v6_do_rcv,
46d3ceab 1907 .release_cb = tcp_release_cb,
496611d7 1908 .hash = inet6_hash,
ab1e0a13
ACM
1909 .unhash = inet_unhash,
1910 .get_port = inet_csk_get_port,
1da177e4 1911 .enter_memory_pressure = tcp_enter_memory_pressure,
c9bee3b7 1912 .stream_memory_free = tcp_stream_memory_free,
1da177e4
LT
1913 .sockets_allocated = &tcp_sockets_allocated,
1914 .memory_allocated = &tcp_memory_allocated,
1915 .memory_pressure = &tcp_memory_pressure,
0a5578cf 1916 .orphan_count = &tcp_orphan_count,
a4fe34bf 1917 .sysctl_mem = sysctl_tcp_mem,
1da177e4
LT
1918 .sysctl_wmem = sysctl_tcp_wmem,
1919 .sysctl_rmem = sysctl_tcp_rmem,
1920 .max_header = MAX_TCP_HEADER,
1921 .obj_size = sizeof(struct tcp6_sock),
3ab5aee7 1922 .slab_flags = SLAB_DESTROY_BY_RCU,
6d6ee43e 1923 .twsk_prot = &tcp6_timewait_sock_ops,
60236fdd 1924 .rsk_prot = &tcp6_request_sock_ops,
39d8cda7 1925 .h.hashinfo = &tcp_hashinfo,
7ba42910 1926 .no_autobind = true,
543d9cfe
ACM
1927#ifdef CONFIG_COMPAT
1928 .compat_setsockopt = compat_tcp_setsockopt,
1929 .compat_getsockopt = compat_tcp_getsockopt,
d1a4c0b3 1930#endif
c1e64e29 1931 .diag_destroy = tcp_abort,
1da177e4
LT
1932};
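/*
 * tcpv6_prot is the proto instance exposed for AF_INET6/IPPROTO_TCP via
 * tcpv6_protosw below; it mostly reuses the generic TCP entry points and
 * only overrides what differs for IPv6 (connect, init/destroy, backlog_rcv
 * and the hashing helpers).
 */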
1933
41135cc8 1934static const struct inet6_protocol tcpv6_protocol = {
c7109986 1935 .early_demux = tcp_v6_early_demux,
1da177e4
LT
1936 .handler = tcp_v6_rcv,
1937 .err_handler = tcp_v6_err,
1938 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1939};
1940
1da177e4
LT
1941static struct inet_protosw tcpv6_protosw = {
1942 .type = SOCK_STREAM,
1943 .protocol = IPPROTO_TCP,
1944 .prot = &tcpv6_prot,
1945 .ops = &inet6_stream_ops,
d83d8461
ACM
1946 .flags = INET_PROTOSW_PERMANENT |
1947 INET_PROTOSW_ICSK,
1da177e4
LT
1948};
1949
2c8c1e72 1950static int __net_init tcpv6_net_init(struct net *net)
93ec926b 1951{
5677242f
DL
1952 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1953 SOCK_RAW, IPPROTO_TCP, net);
93ec926b
DL
1954}
1955
2c8c1e72 1956static void __net_exit tcpv6_net_exit(struct net *net)
93ec926b 1957{
5677242f 1958 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
b099ce26
EB
1959}
1960
2c8c1e72 1961static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
b099ce26 1962{
1946e672 1963 inet_twsk_purge(&tcp_hashinfo, AF_INET6);
93ec926b
DL
1964}
1965
1966static struct pernet_operations tcpv6_net_ops = {
b099ce26
EB
1967 .init = tcpv6_net_init,
1968 .exit = tcpv6_net_exit,
1969 .exit_batch = tcpv6_net_exit_batch,
93ec926b
DL
1970};
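/*
 * Per-netns setup: tcpv6_net_init() creates net->ipv6.tcp_sk, a kernel
 * control socket used when TCP/IPv6 has to transmit packets (such as resets
 * and timewait ACKs) that are not tied to any user socket, while the
 * batched exit purges IPv6 timewait sockets from tcp_hashinfo.
 */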
1971
7f4e4868 1972int __init tcpv6_init(void)
1da177e4 1973{
7f4e4868
DL
1974 int ret;
1975
3336288a
VY
1976 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1977 if (ret)
c6b641a4 1978 goto out;
3336288a 1979
1da177e4 1980 /* register inet6 protocol */
7f4e4868
DL
1981 ret = inet6_register_protosw(&tcpv6_protosw);
1982 if (ret)
1983 goto out_tcpv6_protocol;
1984
93ec926b 1985 ret = register_pernet_subsys(&tcpv6_net_ops);
7f4e4868
DL
1986 if (ret)
1987 goto out_tcpv6_protosw;
1988out:
1989 return ret;
ae0f7d5f 1990
7f4e4868
DL
1991out_tcpv6_protosw:
1992 inet6_unregister_protosw(&tcpv6_protosw);
3336288a
VY
1993out_tcpv6_protocol:
1994 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
7f4e4868
DL
1995 goto out;
1996}
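/*
 * Init/exit ordering: the IPPROTO_TCP handler is added first, then the
 * protosw that exposes SOCK_STREAM sockets, then the per-netns state; the
 * error labels above (and tcpv6_exit() below) unwind in the reverse order.
 */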
1997
09f7709f 1998void tcpv6_exit(void)
7f4e4868 1999{
93ec926b 2000 unregister_pernet_subsys(&tcpv6_net_ops);
7f4e4868
DL
2001 inet6_unregister_protosw(&tcpv6_protosw);
2002 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1da177e4 2003}