]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - net/ipv6/tcp_ipv6.c
tcp: remove per-destination timestamp cache
[mirror_ubuntu-jammy-kernel.git] / net / ipv6 / tcp_ipv6.c
CommitLineData
1da177e4
LT
1/*
2 * TCP over IPv6
1ab1457c 3 * Linux INET6 implementation
1da177e4
LT
4 *
5 * Authors:
1ab1457c 6 * Pedro Roque <roque@di.fc.ul.pt>
1da177e4 7 *
1ab1457c 8 * Based on:
1da177e4
LT
9 * linux/net/ipv4/tcp.c
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
12 *
13 * Fixes:
14 * Hideaki YOSHIFUJI : sin6_scope_id support
15 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
16 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
17 * a single port at the same time.
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
eb4dea58 26#include <linux/bottom_half.h>
1da177e4 27#include <linux/module.h>
1da177e4
LT
28#include <linux/errno.h>
29#include <linux/types.h>
30#include <linux/socket.h>
31#include <linux/sockios.h>
32#include <linux/net.h>
33#include <linux/jiffies.h>
34#include <linux/in.h>
35#include <linux/in6.h>
36#include <linux/netdevice.h>
37#include <linux/init.h>
38#include <linux/jhash.h>
39#include <linux/ipsec.h>
40#include <linux/times.h>
5a0e3ad6 41#include <linux/slab.h>
4aa956d8 42#include <linux/uaccess.h>
1da177e4
LT
43#include <linux/ipv6.h>
44#include <linux/icmpv6.h>
45#include <linux/random.h>
46
47#include <net/tcp.h>
48#include <net/ndisc.h>
5324a040 49#include <net/inet6_hashtables.h>
8129765a 50#include <net/inet6_connection_sock.h>
1da177e4
LT
51#include <net/ipv6.h>
52#include <net/transp_v6.h>
53#include <net/addrconf.h>
54#include <net/ip6_route.h>
55#include <net/ip6_checksum.h>
56#include <net/inet_ecn.h>
57#include <net/protocol.h>
58#include <net/xfrm.h>
1da177e4
LT
59#include <net/snmp.h>
60#include <net/dsfield.h>
6d6ee43e 61#include <net/timewait_sock.h>
3d58b5fa 62#include <net/inet_common.h>
6e5714ea 63#include <net/secure_seq.h>
076bb0c8 64#include <net/busy_poll.h>
1da177e4 65
1da177e4
LT
66#include <linux/proc_fs.h>
67#include <linux/seq_file.h>
68
cf80e0e4 69#include <crypto/hash.h>
cfb6eeb4
YH
70#include <linux/scatterlist.h>
71
a00e7444
ED
72static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
73static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
6edafaaf 74 struct request_sock *req);
1da177e4
LT
75
76static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
1da177e4 77
3b401a81
SH
78static const struct inet_connection_sock_af_ops ipv6_mapped;
79static const struct inet_connection_sock_af_ops ipv6_specific;
a928630a 80#ifdef CONFIG_TCP_MD5SIG
b2e4b3de
SH
81static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
82static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
9501f972 83#else
51723935 84static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
b71d1d42 85 const struct in6_addr *addr)
9501f972
YH
86{
87 return NULL;
88}
a928630a 89#endif
1da177e4 90
fae6ef87
NC
91static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
92{
93 struct dst_entry *dst = skb_dst(skb);
fae6ef87 94
5037e9ef 95 if (dst && dst_hold_safe(dst)) {
ca777eff
ED
96 const struct rt6_info *rt = (const struct rt6_info *)dst;
97
ca777eff
ED
98 sk->sk_rx_dst = dst;
99 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
b197df4f 100 inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
ca777eff 101 }
fae6ef87
NC
102}
103
a30aad50 104static u32 tcp_v6_init_seq_and_tsoff(const struct sk_buff *skb, u32 *tsoff)
1da177e4 105{
a30aad50
AK
106 return secure_tcpv6_seq_and_tsoff(ipv6_hdr(skb)->daddr.s6_addr32,
107 ipv6_hdr(skb)->saddr.s6_addr32,
108 tcp_hdr(skb)->dest,
109 tcp_hdr(skb)->source, tsoff);
1da177e4
LT
110}
111
1ab1457c 112static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
1da177e4
LT
113 int addr_len)
114{
115 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
1ab1457c 116 struct inet_sock *inet = inet_sk(sk);
d83d8461 117 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
118 struct ipv6_pinfo *np = inet6_sk(sk);
119 struct tcp_sock *tp = tcp_sk(sk);
20c59de2 120 struct in6_addr *saddr = NULL, *final_p, final;
45f6fad8 121 struct ipv6_txoptions *opt;
4c9483b2 122 struct flowi6 fl6;
1da177e4
LT
123 struct dst_entry *dst;
124 int addr_type;
00355fa5 125 u32 seq;
1da177e4 126 int err;
1946e672 127 struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
1da177e4 128
1ab1457c 129 if (addr_len < SIN6_LEN_RFC2133)
1da177e4
LT
130 return -EINVAL;
131
1ab1457c 132 if (usin->sin6_family != AF_INET6)
a02cec21 133 return -EAFNOSUPPORT;
1da177e4 134
4c9483b2 135 memset(&fl6, 0, sizeof(fl6));
1da177e4
LT
136
137 if (np->sndflow) {
4c9483b2
DM
138 fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
139 IP6_ECN_flow_init(fl6.flowlabel);
140 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
1da177e4 141 struct ip6_flowlabel *flowlabel;
4c9483b2 142 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
63159f29 143 if (!flowlabel)
1da177e4 144 return -EINVAL;
1da177e4
LT
145 fl6_sock_release(flowlabel);
146 }
147 }
148
149 /*
1ab1457c
YH
150 * connect() to INADDR_ANY means loopback (BSD'ism).
151 */
152
052d2369
JL
153 if (ipv6_addr_any(&usin->sin6_addr)) {
154 if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
155 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
156 &usin->sin6_addr);
157 else
158 usin->sin6_addr = in6addr_loopback;
159 }
1da177e4
LT
160
161 addr_type = ipv6_addr_type(&usin->sin6_addr);
162
4c99aa40 163 if (addr_type & IPV6_ADDR_MULTICAST)
1da177e4
LT
164 return -ENETUNREACH;
165
166 if (addr_type&IPV6_ADDR_LINKLOCAL) {
167 if (addr_len >= sizeof(struct sockaddr_in6) &&
168 usin->sin6_scope_id) {
169 /* If interface is set while binding, indices
170 * must coincide.
171 */
172 if (sk->sk_bound_dev_if &&
173 sk->sk_bound_dev_if != usin->sin6_scope_id)
174 return -EINVAL;
175
176 sk->sk_bound_dev_if = usin->sin6_scope_id;
177 }
178
179 /* Connect to link-local address requires an interface */
180 if (!sk->sk_bound_dev_if)
181 return -EINVAL;
182 }
183
184 if (tp->rx_opt.ts_recent_stamp &&
efe4208f 185 !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
1da177e4
LT
186 tp->rx_opt.ts_recent = 0;
187 tp->rx_opt.ts_recent_stamp = 0;
188 tp->write_seq = 0;
189 }
190
efe4208f 191 sk->sk_v6_daddr = usin->sin6_addr;
4c9483b2 192 np->flow_label = fl6.flowlabel;
1da177e4
LT
193
194 /*
195 * TCP over IPv4
196 */
197
052d2369 198 if (addr_type & IPV6_ADDR_MAPPED) {
d83d8461 199 u32 exthdrlen = icsk->icsk_ext_hdr_len;
1da177e4
LT
200 struct sockaddr_in sin;
201
202 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
203
204 if (__ipv6_only_sock(sk))
205 return -ENETUNREACH;
206
207 sin.sin_family = AF_INET;
208 sin.sin_port = usin->sin6_port;
209 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
210
d83d8461 211 icsk->icsk_af_ops = &ipv6_mapped;
1da177e4 212 sk->sk_backlog_rcv = tcp_v4_do_rcv;
cfb6eeb4
YH
213#ifdef CONFIG_TCP_MD5SIG
214 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
215#endif
1da177e4
LT
216
217 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
218
219 if (err) {
d83d8461
ACM
220 icsk->icsk_ext_hdr_len = exthdrlen;
221 icsk->icsk_af_ops = &ipv6_specific;
1da177e4 222 sk->sk_backlog_rcv = tcp_v6_do_rcv;
cfb6eeb4
YH
223#ifdef CONFIG_TCP_MD5SIG
224 tp->af_specific = &tcp_sock_ipv6_specific;
225#endif
1da177e4 226 goto failure;
1da177e4 227 }
d1e559d0 228 np->saddr = sk->sk_v6_rcv_saddr;
1da177e4
LT
229
230 return err;
231 }
232
efe4208f
ED
233 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
234 saddr = &sk->sk_v6_rcv_saddr;
1da177e4 235
4c9483b2 236 fl6.flowi6_proto = IPPROTO_TCP;
efe4208f 237 fl6.daddr = sk->sk_v6_daddr;
4e3fd7a0 238 fl6.saddr = saddr ? *saddr : np->saddr;
4c9483b2
DM
239 fl6.flowi6_oif = sk->sk_bound_dev_if;
240 fl6.flowi6_mark = sk->sk_mark;
1958b856
DM
241 fl6.fl6_dport = usin->sin6_port;
242 fl6.fl6_sport = inet->inet_sport;
e2d118a1 243 fl6.flowi6_uid = sk->sk_uid;
1da177e4 244
1e1d04e6 245 opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
45f6fad8 246 final_p = fl6_update_dst(&fl6, opt, &final);
1da177e4 247
4c9483b2 248 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
beb8d13b 249
0e0d44ab 250 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
68d0c6d3
DM
251 if (IS_ERR(dst)) {
252 err = PTR_ERR(dst);
1da177e4 253 goto failure;
14e50e57 254 }
1da177e4 255
63159f29 256 if (!saddr) {
4c9483b2 257 saddr = &fl6.saddr;
efe4208f 258 sk->sk_v6_rcv_saddr = *saddr;
1da177e4
LT
259 }
260
261 /* set the source address */
4e3fd7a0 262 np->saddr = *saddr;
c720c7e8 263 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
1da177e4 264
f83ef8c0 265 sk->sk_gso_type = SKB_GSO_TCPV6;
6bd4f355 266 ip6_dst_store(sk, dst, NULL, NULL);
1da177e4 267
d83d8461 268 icsk->icsk_ext_hdr_len = 0;
45f6fad8
ED
269 if (opt)
270 icsk->icsk_ext_hdr_len = opt->opt_flen +
271 opt->opt_nflen;
1da177e4
LT
272
273 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
274
c720c7e8 275 inet->inet_dport = usin->sin6_port;
1da177e4
LT
276
277 tcp_set_state(sk, TCP_SYN_SENT);
1946e672 278 err = inet6_hash_connect(tcp_death_row, sk);
1da177e4
LT
279 if (err)
280 goto late_failure;
281
877d1f62 282 sk_set_txhash(sk);
9e7ceb06 283
00355fa5 284 if (likely(!tp->repair)) {
a30aad50
AK
285 seq = secure_tcpv6_seq_and_tsoff(np->saddr.s6_addr32,
286 sk->sk_v6_daddr.s6_addr32,
287 inet->inet_sport,
288 inet->inet_dport,
289 &tp->tsoffset);
00355fa5
AK
290 if (!tp->write_seq)
291 tp->write_seq = seq;
292 }
1da177e4 293
19f6d3f3
WW
294 if (tcp_fastopen_defer_connect(sk, &err))
295 return err;
296 if (err)
297 goto late_failure;
298
1da177e4
LT
299 err = tcp_connect(sk);
300 if (err)
301 goto late_failure;
302
303 return 0;
304
305late_failure:
306 tcp_set_state(sk, TCP_CLOSE);
1da177e4 307failure:
c720c7e8 308 inet->inet_dport = 0;
1da177e4
LT
309 sk->sk_route_caps = 0;
310 return err;
311}
312
563d34d0
ED
313static void tcp_v6_mtu_reduced(struct sock *sk)
314{
315 struct dst_entry *dst;
316
317 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
318 return;
319
320 dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
321 if (!dst)
322 return;
323
324 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
325 tcp_sync_mss(sk, dst_mtu(dst));
326 tcp_simple_retransmit(sk);
327 }
328}
329
1da177e4 330static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
d5fdd6ba 331 u8 type, u8 code, int offset, __be32 info)
1da177e4 332{
4c99aa40 333 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
505cbfc5 334 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
2215089b
ED
335 struct net *net = dev_net(skb->dev);
336 struct request_sock *fastopen;
1da177e4 337 struct ipv6_pinfo *np;
1ab1457c 338 struct tcp_sock *tp;
0a672f74 339 __u32 seq, snd_una;
2215089b 340 struct sock *sk;
9cf74903 341 bool fatal;
2215089b 342 int err;
1da177e4 343
2215089b
ED
344 sk = __inet6_lookup_established(net, &tcp_hashinfo,
345 &hdr->daddr, th->dest,
346 &hdr->saddr, ntohs(th->source),
347 skb->dev->ifindex);
1da177e4 348
2215089b 349 if (!sk) {
a16292a0
ED
350 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
351 ICMP6_MIB_INERRORS);
1da177e4
LT
352 return;
353 }
354
355 if (sk->sk_state == TCP_TIME_WAIT) {
9469c7b4 356 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
357 return;
358 }
2215089b 359 seq = ntohl(th->seq);
9cf74903 360 fatal = icmpv6_err_convert(type, code, &err);
2215089b 361 if (sk->sk_state == TCP_NEW_SYN_RECV)
9cf74903 362 return tcp_req_err(sk, seq, fatal);
1da177e4
LT
363
364 bh_lock_sock(sk);
563d34d0 365 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
02a1d6e7 366 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
1da177e4
LT
367
368 if (sk->sk_state == TCP_CLOSE)
369 goto out;
370
e802af9c 371 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
02a1d6e7 372 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
e802af9c
SH
373 goto out;
374 }
375
1da177e4 376 tp = tcp_sk(sk);
0a672f74
YC
377 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
378 fastopen = tp->fastopen_rsk;
379 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
1da177e4 380 if (sk->sk_state != TCP_LISTEN &&
0a672f74 381 !between(seq, snd_una, tp->snd_nxt)) {
02a1d6e7 382 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
1da177e4
LT
383 goto out;
384 }
385
386 np = inet6_sk(sk);
387
ec18d9a2 388 if (type == NDISC_REDIRECT) {
45caeaa5
JM
389 if (!sock_owned_by_user(sk)) {
390 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
ec18d9a2 391
45caeaa5
JM
392 if (dst)
393 dst->ops->redirect(dst, sk, skb);
394 }
50a75a89 395 goto out;
ec18d9a2
DM
396 }
397
1da177e4 398 if (type == ICMPV6_PKT_TOOBIG) {
0d4f0608
ED
399 /* We are not interested in TCP_LISTEN and open_requests
400 * (SYN-ACKs send out by Linux are always <576bytes so
401 * they should go through unfragmented).
402 */
403 if (sk->sk_state == TCP_LISTEN)
404 goto out;
405
93b36cf3
HFS
406 if (!ip6_sk_accept_pmtu(sk))
407 goto out;
408
563d34d0
ED
409 tp->mtu_info = ntohl(info);
410 if (!sock_owned_by_user(sk))
411 tcp_v6_mtu_reduced(sk);
d013ef2a 412 else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
7aa5470c 413 &sk->sk_tsq_flags))
d013ef2a 414 sock_hold(sk);
1da177e4
LT
415 goto out;
416 }
417
1da177e4 418
60236fdd 419 /* Might be for an request_sock */
1da177e4 420 switch (sk->sk_state) {
1da177e4 421 case TCP_SYN_SENT:
0a672f74
YC
422 case TCP_SYN_RECV:
423 /* Only in fast or simultaneous open. If a fast open socket is
424 * is already accepted it is treated as a connected one below.
425 */
63159f29 426 if (fastopen && !fastopen->sk)
0a672f74
YC
427 break;
428
1da177e4 429 if (!sock_owned_by_user(sk)) {
1da177e4
LT
430 sk->sk_err = err;
431 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
432
433 tcp_done(sk);
434 } else
435 sk->sk_err_soft = err;
436 goto out;
437 }
438
439 if (!sock_owned_by_user(sk) && np->recverr) {
440 sk->sk_err = err;
441 sk->sk_error_report(sk);
442 } else
443 sk->sk_err_soft = err;
444
445out:
446 bh_unlock_sock(sk);
447 sock_put(sk);
448}
449
450
0f935dbe 451static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
d6274bd8 452 struct flowi *fl,
3840a06e 453 struct request_sock *req,
ca6fb065 454 struct tcp_fastopen_cookie *foc,
b3d05147 455 enum tcp_synack_type synack_type)
1da177e4 456{
634fb979 457 struct inet_request_sock *ireq = inet_rsk(req);
1da177e4 458 struct ipv6_pinfo *np = inet6_sk(sk);
56ac42bc 459 struct ipv6_txoptions *opt;
d6274bd8 460 struct flowi6 *fl6 = &fl->u.ip6;
4c99aa40 461 struct sk_buff *skb;
9494218f 462 int err = -ENOMEM;
1da177e4 463
9f10d3f6 464 /* First, grab a route. */
f76b33c3
ED
465 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
466 IPPROTO_TCP)) == NULL)
fd80eb94 467 goto done;
9494218f 468
b3d05147 469 skb = tcp_make_synack(sk, dst, req, foc, synack_type);
9494218f 470
1da177e4 471 if (skb) {
634fb979
ED
472 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
473 &ireq->ir_v6_rmt_addr);
1da177e4 474
634fb979 475 fl6->daddr = ireq->ir_v6_rmt_addr;
53b24b8f 476 if (np->repflow && ireq->pktopts)
df3687ff
FF
477 fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
478
3e4006f0 479 rcu_read_lock();
56ac42bc
HD
480 opt = ireq->ipv6_opt;
481 if (!opt)
482 opt = rcu_dereference(np->opt);
92e55f41 483 err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
3e4006f0 484 rcu_read_unlock();
b9df3cb8 485 err = net_xmit_eval(err);
1da177e4
LT
486 }
487
488done:
1da177e4
LT
489 return err;
490}
491
72659ecc 492
60236fdd 493static void tcp_v6_reqsk_destructor(struct request_sock *req)
1da177e4 494{
56ac42bc 495 kfree(inet_rsk(req)->ipv6_opt);
634fb979 496 kfree_skb(inet_rsk(req)->pktopts);
1da177e4
LT
497}
498
cfb6eeb4 499#ifdef CONFIG_TCP_MD5SIG
b83e3deb 500static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
b71d1d42 501 const struct in6_addr *addr)
cfb6eeb4 502{
a915da9b 503 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
cfb6eeb4
YH
504}
505
b83e3deb 506static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
fd3a154a 507 const struct sock *addr_sk)
cfb6eeb4 508{
efe4208f 509 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
cfb6eeb4
YH
510}
511
4aa956d8
WY
512static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
513 int optlen)
cfb6eeb4
YH
514{
515 struct tcp_md5sig cmd;
516 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
cfb6eeb4
YH
517
518 if (optlen < sizeof(cmd))
519 return -EINVAL;
520
521 if (copy_from_user(&cmd, optval, sizeof(cmd)))
522 return -EFAULT;
523
524 if (sin6->sin6_family != AF_INET6)
525 return -EINVAL;
526
527 if (!cmd.tcpm_keylen) {
e773e4fa 528 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
a915da9b
ED
529 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
530 AF_INET);
531 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
532 AF_INET6);
cfb6eeb4
YH
533 }
534
535 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
536 return -EINVAL;
537
a915da9b
ED
538 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
539 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
540 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
cfb6eeb4 541
a915da9b
ED
542 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
543 AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
cfb6eeb4
YH
544}
545
19689e38
ED
546static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
547 const struct in6_addr *daddr,
548 const struct in6_addr *saddr,
549 const struct tcphdr *th, int nbytes)
cfb6eeb4 550{
cfb6eeb4 551 struct tcp6_pseudohdr *bp;
49a72dfb 552 struct scatterlist sg;
19689e38 553 struct tcphdr *_th;
8d26d76d 554
19689e38 555 bp = hp->scratch;
cfb6eeb4 556 /* 1. TCP pseudo-header (RFC2460) */
4e3fd7a0
AD
557 bp->saddr = *saddr;
558 bp->daddr = *daddr;
49a72dfb 559 bp->protocol = cpu_to_be32(IPPROTO_TCP);
00b1304c 560 bp->len = cpu_to_be32(nbytes);
cfb6eeb4 561
19689e38
ED
562 _th = (struct tcphdr *)(bp + 1);
563 memcpy(_th, th, sizeof(*th));
564 _th->check = 0;
565
566 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
567 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
568 sizeof(*bp) + sizeof(*th));
cf80e0e4 569 return crypto_ahash_update(hp->md5_req);
49a72dfb 570}
c7da57a1 571
19689e38 572static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
b71d1d42 573 const struct in6_addr *daddr, struct in6_addr *saddr,
318cf7aa 574 const struct tcphdr *th)
49a72dfb
AL
575{
576 struct tcp_md5sig_pool *hp;
cf80e0e4 577 struct ahash_request *req;
49a72dfb
AL
578
579 hp = tcp_get_md5sig_pool();
580 if (!hp)
581 goto clear_hash_noput;
cf80e0e4 582 req = hp->md5_req;
49a72dfb 583
cf80e0e4 584 if (crypto_ahash_init(req))
49a72dfb 585 goto clear_hash;
19689e38 586 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
49a72dfb
AL
587 goto clear_hash;
588 if (tcp_md5_hash_key(hp, key))
589 goto clear_hash;
cf80e0e4
HX
590 ahash_request_set_crypt(req, NULL, md5_hash, 0);
591 if (crypto_ahash_final(req))
cfb6eeb4 592 goto clear_hash;
cfb6eeb4 593
cfb6eeb4 594 tcp_put_md5sig_pool();
cfb6eeb4 595 return 0;
49a72dfb 596
cfb6eeb4
YH
597clear_hash:
598 tcp_put_md5sig_pool();
599clear_hash_noput:
600 memset(md5_hash, 0, 16);
49a72dfb 601 return 1;
cfb6eeb4
YH
602}
603
39f8e58e
ED
604static int tcp_v6_md5_hash_skb(char *md5_hash,
605 const struct tcp_md5sig_key *key,
318cf7aa 606 const struct sock *sk,
318cf7aa 607 const struct sk_buff *skb)
cfb6eeb4 608{
b71d1d42 609 const struct in6_addr *saddr, *daddr;
49a72dfb 610 struct tcp_md5sig_pool *hp;
cf80e0e4 611 struct ahash_request *req;
318cf7aa 612 const struct tcphdr *th = tcp_hdr(skb);
cfb6eeb4 613
39f8e58e
ED
614 if (sk) { /* valid for establish/request sockets */
615 saddr = &sk->sk_v6_rcv_saddr;
efe4208f 616 daddr = &sk->sk_v6_daddr;
49a72dfb 617 } else {
b71d1d42 618 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
49a72dfb
AL
619 saddr = &ip6h->saddr;
620 daddr = &ip6h->daddr;
cfb6eeb4 621 }
49a72dfb
AL
622
623 hp = tcp_get_md5sig_pool();
624 if (!hp)
625 goto clear_hash_noput;
cf80e0e4 626 req = hp->md5_req;
49a72dfb 627
cf80e0e4 628 if (crypto_ahash_init(req))
49a72dfb
AL
629 goto clear_hash;
630
19689e38 631 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
49a72dfb
AL
632 goto clear_hash;
633 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
634 goto clear_hash;
635 if (tcp_md5_hash_key(hp, key))
636 goto clear_hash;
cf80e0e4
HX
637 ahash_request_set_crypt(req, NULL, md5_hash, 0);
638 if (crypto_ahash_final(req))
49a72dfb
AL
639 goto clear_hash;
640
641 tcp_put_md5sig_pool();
642 return 0;
643
644clear_hash:
645 tcp_put_md5sig_pool();
646clear_hash_noput:
647 memset(md5_hash, 0, 16);
648 return 1;
cfb6eeb4
YH
649}
650
ba8e275a
ED
651#endif
652
653static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
654 const struct sk_buff *skb)
cfb6eeb4 655{
ba8e275a 656#ifdef CONFIG_TCP_MD5SIG
cf533ea5 657 const __u8 *hash_location = NULL;
cfb6eeb4 658 struct tcp_md5sig_key *hash_expected;
b71d1d42 659 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
318cf7aa 660 const struct tcphdr *th = tcp_hdr(skb);
cfb6eeb4 661 int genhash;
cfb6eeb4
YH
662 u8 newhash[16];
663
664 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
7d5d5525 665 hash_location = tcp_parse_md5sig_option(th);
cfb6eeb4 666
785957d3
DM
667 /* We've parsed the options - do we have a hash? */
668 if (!hash_expected && !hash_location)
ff74e23f 669 return false;
785957d3
DM
670
671 if (hash_expected && !hash_location) {
c10d9310 672 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
ff74e23f 673 return true;
cfb6eeb4
YH
674 }
675
785957d3 676 if (!hash_expected && hash_location) {
c10d9310 677 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
ff74e23f 678 return true;
cfb6eeb4
YH
679 }
680
681 /* check the signature */
49a72dfb
AL
682 genhash = tcp_v6_md5_hash_skb(newhash,
683 hash_expected,
39f8e58e 684 NULL, skb);
49a72dfb 685
cfb6eeb4 686 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
72145a68 687 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
e87cc472
JP
688 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
689 genhash ? "failed" : "mismatch",
690 &ip6h->saddr, ntohs(th->source),
691 &ip6h->daddr, ntohs(th->dest));
ff74e23f 692 return true;
cfb6eeb4 693 }
ba8e275a 694#endif
ff74e23f 695 return false;
cfb6eeb4 696}
cfb6eeb4 697
b40cf18e
ED
698static void tcp_v6_init_req(struct request_sock *req,
699 const struct sock *sk_listener,
16bea70a
OP
700 struct sk_buff *skb)
701{
702 struct inet_request_sock *ireq = inet_rsk(req);
b40cf18e 703 const struct ipv6_pinfo *np = inet6_sk(sk_listener);
16bea70a
OP
704
705 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
706 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
707
16bea70a 708 /* So that link locals have meaning */
b40cf18e 709 if (!sk_listener->sk_bound_dev_if &&
16bea70a 710 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
870c3151 711 ireq->ir_iif = tcp_v6_iif(skb);
16bea70a 712
04317daf 713 if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
b40cf18e 714 (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
a224772d 715 np->rxopt.bits.rxinfo ||
16bea70a
OP
716 np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
717 np->rxopt.bits.rxohlim || np->repflow)) {
718 atomic_inc(&skb->users);
719 ireq->pktopts = skb;
720 }
721}
722
f964629e
ED
723static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
724 struct flowi *fl,
d94e0417
OP
725 const struct request_sock *req,
726 bool *strict)
727{
728 if (strict)
729 *strict = true;
f76b33c3 730 return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
d94e0417
OP
731}
732
c6aefafb 733struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
1da177e4 734 .family = AF_INET6,
2e6599cb 735 .obj_size = sizeof(struct tcp6_request_sock),
5db92c99 736 .rtx_syn_ack = tcp_rtx_synack,
60236fdd
ACM
737 .send_ack = tcp_v6_reqsk_send_ack,
738 .destructor = tcp_v6_reqsk_destructor,
72659ecc 739 .send_reset = tcp_v6_send_reset,
4aa956d8 740 .syn_ack_timeout = tcp_syn_ack_timeout,
1da177e4
LT
741};
742
b2e4b3de 743static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
2aec4a29
OP
744 .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
745 sizeof(struct ipv6hdr),
16bea70a 746#ifdef CONFIG_TCP_MD5SIG
fd3a154a 747 .req_md5_lookup = tcp_v6_md5_lookup,
e3afe7b7 748 .calc_md5_hash = tcp_v6_md5_hash_skb,
b6332e6c 749#endif
16bea70a 750 .init_req = tcp_v6_init_req,
fb7b37a7
OP
751#ifdef CONFIG_SYN_COOKIES
752 .cookie_init_seq = cookie_v6_init_sequence,
753#endif
d94e0417 754 .route_req = tcp_v6_route_req,
a30aad50 755 .init_seq_tsoff = tcp_v6_init_seq_and_tsoff,
d6274bd8 756 .send_synack = tcp_v6_send_synack,
16bea70a 757};
cfb6eeb4 758
a00e7444 759static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
0f85feae
ED
760 u32 ack, u32 win, u32 tsval, u32 tsecr,
761 int oif, struct tcp_md5sig_key *key, int rst,
5119bd16 762 u8 tclass, __be32 label)
1da177e4 763{
cf533ea5
ED
764 const struct tcphdr *th = tcp_hdr(skb);
765 struct tcphdr *t1;
1da177e4 766 struct sk_buff *buff;
4c9483b2 767 struct flowi6 fl6;
0f85feae 768 struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
e5047992 769 struct sock *ctl_sk = net->ipv6.tcp_sk;
77c676da 770 unsigned int tot_len = sizeof(struct tcphdr);
adf30907 771 struct dst_entry *dst;
81ada62d 772 __be32 *topt;
1da177e4 773
ee684b6f 774 if (tsecr)
626e264d 775 tot_len += TCPOLEN_TSTAMP_ALIGNED;
cfb6eeb4 776#ifdef CONFIG_TCP_MD5SIG
cfb6eeb4
YH
777 if (key)
778 tot_len += TCPOLEN_MD5SIG_ALIGNED;
779#endif
780
cfb6eeb4 781 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1da177e4 782 GFP_ATOMIC);
63159f29 783 if (!buff)
1ab1457c 784 return;
1da177e4 785
cfb6eeb4 786 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1da177e4 787
cfb6eeb4 788 t1 = (struct tcphdr *) skb_push(buff, tot_len);
6651ffc8 789 skb_reset_transport_header(buff);
1da177e4
LT
790
791 /* Swap the send and the receive. */
792 memset(t1, 0, sizeof(*t1));
793 t1->dest = th->source;
794 t1->source = th->dest;
cfb6eeb4 795 t1->doff = tot_len / 4;
626e264d
IJ
796 t1->seq = htonl(seq);
797 t1->ack_seq = htonl(ack);
798 t1->ack = !rst || !th->ack;
799 t1->rst = rst;
800 t1->window = htons(win);
1da177e4 801
81ada62d
IJ
802 topt = (__be32 *)(t1 + 1);
803
ee684b6f 804 if (tsecr) {
626e264d
IJ
805 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
806 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
ee684b6f
AV
807 *topt++ = htonl(tsval);
808 *topt++ = htonl(tsecr);
626e264d
IJ
809 }
810
cfb6eeb4
YH
811#ifdef CONFIG_TCP_MD5SIG
812 if (key) {
81ada62d
IJ
813 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
814 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
815 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
78e645cb
IJ
816 &ipv6_hdr(skb)->saddr,
817 &ipv6_hdr(skb)->daddr, t1);
cfb6eeb4
YH
818 }
819#endif
820
4c9483b2 821 memset(&fl6, 0, sizeof(fl6));
4e3fd7a0
AD
822 fl6.daddr = ipv6_hdr(skb)->saddr;
823 fl6.saddr = ipv6_hdr(skb)->daddr;
1d13a96c 824 fl6.flowlabel = label;
1da177e4 825
e5700aff
DM
826 buff->ip_summed = CHECKSUM_PARTIAL;
827 buff->csum = 0;
828
4c9483b2 829 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
1da177e4 830
4c9483b2 831 fl6.flowi6_proto = IPPROTO_TCP;
a36dbdb2 832 if (rt6_need_strict(&fl6.daddr) && !oif)
870c3151 833 fl6.flowi6_oif = tcp_v6_iif(skb);
9b6c14d5
DA
834 else {
835 if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
836 oif = skb->skb_iif;
837
838 fl6.flowi6_oif = oif;
839 }
1d2f7b2d 840
e110861f 841 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
1958b856
DM
842 fl6.fl6_dport = t1->dest;
843 fl6.fl6_sport = t1->source;
e2d118a1 844 fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
4c9483b2 845 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
1da177e4 846
c20121ae
DL
847 /* Pass a socket to ip6_dst_lookup either it is for RST
848 * Underlying function will use this to retrieve the network
849 * namespace
850 */
0e0d44ab 851 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
68d0c6d3
DM
852 if (!IS_ERR(dst)) {
853 skb_dst_set(buff, dst);
92e55f41 854 ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
c10d9310 855 TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
68d0c6d3 856 if (rst)
c10d9310 857 TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
68d0c6d3 858 return;
1da177e4
LT
859 }
860
861 kfree_skb(buff);
862}
863
a00e7444 864static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
1da177e4 865{
cf533ea5 866 const struct tcphdr *th = tcp_hdr(skb);
626e264d 867 u32 seq = 0, ack_seq = 0;
fa3e5b4e 868 struct tcp_md5sig_key *key = NULL;
658ddaaf
SL
869#ifdef CONFIG_TCP_MD5SIG
870 const __u8 *hash_location = NULL;
871 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
872 unsigned char newhash[16];
873 int genhash;
874 struct sock *sk1 = NULL;
875#endif
9c76a114 876 int oif;
1da177e4 877
626e264d 878 if (th->rst)
1da177e4
LT
879 return;
880
c3658e8d
ED
881 /* If sk not NULL, it means we did a successful lookup and incoming
882 * route had to be correct. prequeue might have dropped our dst.
883 */
884 if (!sk && !ipv6_unicast_destination(skb))
626e264d 885 return;
1da177e4 886
cfb6eeb4 887#ifdef CONFIG_TCP_MD5SIG
3b24d854 888 rcu_read_lock();
658ddaaf 889 hash_location = tcp_parse_md5sig_option(th);
271c3b9b 890 if (sk && sk_fullsock(sk)) {
e46787f0
FW
891 key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
892 } else if (hash_location) {
658ddaaf
SL
893 /*
894 * active side is lost. Try to find listening socket through
895 * source port, and then find md5 key through listening socket.
896 * we are not loose security here:
897 * Incoming packet is checked with md5 hash with finding key,
898 * no RST generated if md5 hash doesn't match.
899 */
900 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
a583636a
CG
901 &tcp_hashinfo, NULL, 0,
902 &ipv6h->saddr,
5ba24953 903 th->source, &ipv6h->daddr,
870c3151 904 ntohs(th->source), tcp_v6_iif(skb));
658ddaaf 905 if (!sk1)
3b24d854 906 goto out;
658ddaaf 907
658ddaaf
SL
908 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
909 if (!key)
3b24d854 910 goto out;
658ddaaf 911
39f8e58e 912 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
658ddaaf 913 if (genhash || memcmp(hash_location, newhash, 16) != 0)
3b24d854 914 goto out;
658ddaaf 915 }
cfb6eeb4
YH
916#endif
917
626e264d
IJ
918 if (th->ack)
919 seq = ntohl(th->ack_seq);
920 else
921 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
922 (th->doff << 2);
1da177e4 923
9c76a114 924 oif = sk ? sk->sk_bound_dev_if : 0;
0f85feae 925 tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
658ddaaf
SL
926
927#ifdef CONFIG_TCP_MD5SIG
3b24d854
ED
928out:
929 rcu_read_unlock();
658ddaaf 930#endif
626e264d 931}
1da177e4 932
a00e7444 933static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
0f85feae 934 u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
1d13a96c 935 struct tcp_md5sig_key *key, u8 tclass,
5119bd16 936 __be32 label)
626e264d 937{
0f85feae
ED
938 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
939 tclass, label);
1da177e4
LT
940}
941
942static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
943{
8feaf0c0 944 struct inet_timewait_sock *tw = inet_twsk(sk);
cfb6eeb4 945 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1da177e4 946
0f85feae 947 tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
8feaf0c0 948 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
ee684b6f 949 tcp_time_stamp + tcptw->tw_ts_offset,
9c76a114 950 tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
21858cd0 951 tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
1da177e4 952
8feaf0c0 953 inet_twsk_put(tw);
1da177e4
LT
954}
955
a00e7444 956static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
6edafaaf 957 struct request_sock *req)
1da177e4 958{
3a19ce0e
DL
959 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
960 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
961 */
20a2b49f
ED
962 /* RFC 7323 2.3
963 * The window field (SEG.WND) of every outgoing segment, with the
964 * exception of <SYN> segments, MUST be right-shifted by
965 * Rcv.Wind.Shift bits:
966 */
0f85feae 967 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
3a19ce0e 968 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
20a2b49f
ED
969 tcp_rsk(req)->rcv_nxt,
970 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
95a22cae
FW
971 tcp_time_stamp + tcp_rsk(req)->ts_off,
972 req->ts_recent, sk->sk_bound_dev_if,
1d13a96c
FF
973 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
974 0, 0);
1da177e4
LT
975}
976
977
/* SYN-cookie validation hook for the listen path.
 *
 * For a non-SYN segment arriving on a listener, try to reconstruct a
 * child socket from an embedded syncookie; otherwise (or when
 * CONFIG_SYN_COOKIES is off) hand back @sk unchanged.  May return NULL
 * when cookie validation fails.
 */
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	if (!tcp_hdr(skb)->syn)
		return cookie_v6_check(sk, skb);
#endif
	return sk;
}
988
1da177e4
LT
989static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
990{
1da177e4
LT
991 if (skb->protocol == htons(ETH_P_IP))
992 return tcp_v4_conn_request(sk, skb);
993
994 if (!ipv6_unicast_destination(skb))
1ab1457c 995 goto drop;
1da177e4 996
1fb6f159
OP
997 return tcp_conn_request(&tcp6_request_sock_ops,
998 &tcp_request_sock_ipv6_ops, sk, skb);
1da177e4
LT
999
1000drop:
9caad864 1001 tcp_listendrop(sk);
1da177e4
LT
1002 return 0; /* don't send reset */
1003}
1004
ebf6c9cb
ED
/* Move the saved IPv6 control-block data back to its IP6CB() position.
 *
 * We need to move header back to the beginning if xfrm6_policy_check()
 * and tcp_v6_fill_cb() are going to be called again.
 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
 */
static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}
1014
/* Create the child socket once the 3-way handshake completes.
 *
 * @sk:         the listener
 * @skb:        the final ACK (or SYN for Fast Open)
 * @req:        the request sock that matched
 * @dst:        optional pre-computed route (may be NULL)
 * @req_unhash: request to unhash from the ehash table
 * @own_req:    set true if we won the race to insert the child
 *
 * Returns the new child socket, or NULL on failure (accept queue full,
 * no route, allocation failure, or port inheritance failure).
 * Runs without the socket lock; RCU protects np->opt access.
 */
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 * v6 mapped: an IPv4 peer connected to a v6-only-off
		 * listener.  Build a v4 child, then graft v6 state on it.
		 */
		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		/* Child talks IPv4 on the wire: switch af_ops and rcv path */
		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		/* Pointers copied by the memcpy above must not be shared
		 * with the listener; reset them on the child.
		 */
		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = tcp_v6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt	  = NULL;
	newnp->mcast_oif  = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more one thing there: reattach optmem
	   to newsk.
	 */
	opt = ireq->ipv6_opt;
	if (!opt)
		opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

	/* v4 address fields are meaningless on a pure v6 child; mark them */
	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_mask(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_mask(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions) {
				tcp_v6_restore_cb(newnp->pktoptions);
				skb_set_owner_r(newnp->pktoptions, newsk);
			}
		}
	}

	return newsk;

out_overflow:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	tcp_listendrop(sk);
	return NULL;
}
1220
/* The socket must have it's spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 *
 * Returns 0 on success; the skb is consumed on every path.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (tcp_filter(sk, skb))
		goto discard;

	/*
	 * socket locking is here for SMP purposes as backlog rcv
	 * is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		/* Drop the cached rx route if the packet came in on a
		 * different interface or the route is no longer valid.
		 */
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			tcp_v6_restore_cb(opt_skb);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	/* free whatever xchg() handed back (possibly NULL) */
	kfree_skb(opt_skb);
	return 0;
}
1361
2dc49d16
ND
/* Populate TCP_SKB_CB() from the TCP header, preserving the IPv6
 * control block.
 *
 * This is tricky: we move IP6CB at its correct location into
 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
 * _decode_session6() uses IP6CB().
 * barrier() makes sure compiler won't play aliasing games.
 */
static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	/* end_seq counts SYN and FIN flags as one sequence unit each */
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
}
1383
/* Main IPv6 TCP receive entry point (registered as tcpv6_protocol.handler).
 *
 * Validates the header and checksum, looks up the owning socket, then
 * dispatches: request socks (TCP_NEW_SYN_RECV), time-wait socks,
 * listeners and full sockets each take their own path.  @refcounted
 * tracks whether the lookup took a reference on @sk so the exit paths
 * know whether to sock_put().
 */
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	bool refcounted;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 * Count it even if it's bad.
	 */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr)/4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	/* pskb_may_pull() may have reallocated; refetch the headers */
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
				th->source, th->dest, inet6_iif(skb),
				&refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		tcp_v6_fill_cb(skb, hdr, th);
		if (tcp_v6_inbound_md5_hash(sk, skb)) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		/* Listener went away (or changed state) under us: the
		 * request is stale, drop it and redo the lookup.
		 */
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		refcounted = true;
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			/* retransmitted SYN: fall through to the listener */
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	/* tcp_filter() may have trimmed/reallocated the skb */
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (tcp_add_backlog(sk, skb)) {
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	if (refcounted)
		sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		/* A new connection reuses the time-wait pair: find the
		 * listener and restart processing against it.
		 */
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    skb, __tcp_hdrlen(th),
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb));
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			refcounted = false;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_restore_cb(skb);
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}
1575
c7109986
ED
/* Early demux: before routing, try to match the packet to an
 * established socket and steal its cached rx dst, avoiding a full
 * route lookup on the hot path.  Best effort only - bails silently on
 * any mismatch.
 */
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			/* only reuse the dst if it is still valid and was
			 * cached for the same incoming interface
			 */
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
1613
ccb7c410
DM
/* Time-wait socket ops: object size plus unique/destructor callbacks
 * for IPv6 TCP time-wait sockets.
 */
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
1619
/* Address-family ops for native IPv6 TCP sockets. */
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};
1639
#ifdef CONFIG_TCP_MD5SIG
/* MD5 signature ops for native IPv6 TCP sockets. */
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
cfb6eeb4 1647
1da177e4
LT
/*
 *	TCP over IPv4 via INET6 API
 *
 * Address-family ops installed on an AF_INET6 socket once it is known
 * to carry a v4-mapped connection: transmit/header handling comes from
 * IPv4, while setsockopt/getsockopt and sockaddr conversion stay IPv6.
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
1669
#ifdef CONFIG_TCP_MD5SIG
/* MD5 signature ops for v4-mapped sockets: hash over the IPv4
 * pseudo-header, but keep the IPv6 setsockopt key parser.
 */
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
cfb6eeb4 1677
1da177e4
LT
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 *
 * Per-socket init for AF_INET6 TCP: generic TCP init plus the IPv6
 * af_ops (and MD5 ops when configured).  Always returns 0.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}
1695
/* Tear down an AF_INET6 TCP socket: generic TCP destruction first,
 * then release the IPv6-specific state (options, flow labels, ...).
 */
static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
1701
952a10be 1702#ifdef CONFIG_PROC_FS
1da177e4 1703/* Proc filesystem TCPv6 sock list dumping. */
/* Emit one /proc/net/tcp6 row for a pending open request (SYN_RECV).
 *
 * @i is the row index printed in the first column.  Fields that do not
 * apply to a request sock (queues, inode, ...) are printed as 0.
 */
static void get_openreq6(struct seq_file *seq,
			 const struct request_sock *req, int i)
{
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	/* timer may already have fired; clamp to zero for display */
	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq),
				    sock_i_uid(req->rsk_listener)),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
1735
/* Emit one /proc/net/tcp6 row for a full TCP socket.
 *
 * Reads the socket locklessly, so derived values (queue sizes, timers)
 * are snapshots that may be slightly stale or transiently inconsistent.
 */
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	int rx_queue;
	int state;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	/* Classify the pending timer for the "tr" column:
	 * 1 = retransmit/loss-probe, 4 = zero-window probe,
	 * 2 = keepalive, 0 = none.
	 */
	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = sk_state_load(sp);
	if (state == TCP_LISTEN)
		rx_queue = sp->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   state,
		   tp->write_seq - tp->snd_una,
		   rx_queue,
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   state == TCP_LISTEN ?
			fastopenq->max_qlen :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}
1806
/* Emit one /proc/net/tcp6 row for a TIME_WAIT socket.
 *
 * Most per-connection fields are meaningless here and are printed as
 * constants; the timer column (always class 3) shows the remaining
 * time-wait interval.
 */
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
1831
1da177e4
LT
1832static int tcp6_seq_show(struct seq_file *seq, void *v)
1833{
1834 struct tcp_iter_state *st;
05dbc7b5 1835 struct sock *sk = v;
1da177e4
LT
1836
1837 if (v == SEQ_START_TOKEN) {
1838 seq_puts(seq,
1839 " sl "
1840 "local_address "
1841 "remote_address "
1842 "st tx_queue rx_queue tr tm->when retrnsmt"
1843 " uid timeout inode\n");
1844 goto out;
1845 }
1846 st = seq->private;
1847
079096f1
ED
1848 if (sk->sk_state == TCP_TIME_WAIT)
1849 get_timewait6_sock(seq, v, st->num);
1850 else if (sk->sk_state == TCP_NEW_SYN_RECV)
aa3a0c8c 1851 get_openreq6(seq, v, st->num);
079096f1
ED
1852 else
1853 get_tcp6_sock(seq, v, st->num);
1da177e4
LT
1854out:
1855 return 0;
1856}
1857
73cb88ec
AV
/* file_operations backing the /proc/net/tcp6 seq_file. */
static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};
1865
/* Registration descriptor for the "tcp6" proc entry: iterates AF_INET6
 * sockets and renders each row via tcp6_seq_show().
 */
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};
1874
/* Register /proc/net/tcp6 for @net.  Returns 0 or a negative errno
 * from tcp_proc_register().
 */
int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}
1879
/* Remove /proc/net/tcp6 for @net; counterpart of tcp6_proc_init(). */
void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
1884#endif
1885
/* struct proto for AF_INET6/SOCK_STREAM sockets: binds the generic TCP
 * implementation to the IPv6-specific entry points defined in this file
 * and shares the global TCP memory accounting and hash tables with IPv4.
 */
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
1930
/*
 * IPv6 layer handler for protocol number IPPROTO_TCP: delivers incoming
 * segments to tcp_v6_rcv() and ICMPv6 errors to tcp_v6_err().
 * NOPOLICY/FINAL flags relate to xfrm policy checks and demux finality —
 * see their definitions for exact semantics.
 */
static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
1937
1da177e4
LT
/*
 * Protocol switch entry gluing socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP)
 * to tcpv6_prot and the generic inet6 stream ops.  PERMANENT: cannot be
 * unregistered by modules; ICSK: uses the inet connection-sock framework.
 */
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
1946
/*
 * Per-namespace setup: create the kernel-internal control socket used for
 * sending resets/ACKs on behalf of this namespace (stored in
 * net->ipv6.tcp_sk).  Returns 0 or a negative errno.
 */
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}
1952
/* Per-namespace teardown: release the control socket created in init. */
static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}
1957
/*
 * Batched namespace teardown: flush all IPv6 timewait sockets from the
 * global hash once per batch of dying namespaces, rather than per netns.
 */
static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, AF_INET6);
}
1962
/* Per-network-namespace lifecycle hooks for TCPv6. */
static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
1968
7f4e4868 1969int __init tcpv6_init(void)
1da177e4 1970{
7f4e4868
DL
1971 int ret;
1972
3336288a
VY
1973 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1974 if (ret)
c6b641a4 1975 goto out;
3336288a 1976
1da177e4 1977 /* register inet6 protocol */
7f4e4868
DL
1978 ret = inet6_register_protosw(&tcpv6_protosw);
1979 if (ret)
1980 goto out_tcpv6_protocol;
1981
93ec926b 1982 ret = register_pernet_subsys(&tcpv6_net_ops);
7f4e4868
DL
1983 if (ret)
1984 goto out_tcpv6_protosw;
1985out:
1986 return ret;
ae0f7d5f 1987
7f4e4868
DL
1988out_tcpv6_protosw:
1989 inet6_unregister_protosw(&tcpv6_protosw);
3336288a
VY
1990out_tcpv6_protocol:
1991 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
7f4e4868
DL
1992 goto out;
1993}
1994
/* Undo tcpv6_init(): unregister in the reverse order of registration. */
void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}