]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - net/ipv6/tcp_ipv6.c
Merge remote-tracking branches 'regulator/topic/anatop', 'regulator/topic/arizona...
[mirror_ubuntu-bionic-kernel.git] / net / ipv6 / tcp_ipv6.c
CommitLineData
1da177e4
LT
1/*
2 * TCP over IPv6
1ab1457c 3 * Linux INET6 implementation
1da177e4
LT
4 *
5 * Authors:
1ab1457c 6 * Pedro Roque <roque@di.fc.ul.pt>
1da177e4 7 *
1ab1457c 8 * Based on:
1da177e4
LT
9 * linux/net/ipv4/tcp.c
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
12 *
13 * Fixes:
14 * Hideaki YOSHIFUJI : sin6_scope_id support
15 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
16 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
17 * a single port at the same time.
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
eb4dea58 26#include <linux/bottom_half.h>
1da177e4 27#include <linux/module.h>
1da177e4
LT
28#include <linux/errno.h>
29#include <linux/types.h>
30#include <linux/socket.h>
31#include <linux/sockios.h>
32#include <linux/net.h>
33#include <linux/jiffies.h>
34#include <linux/in.h>
35#include <linux/in6.h>
36#include <linux/netdevice.h>
37#include <linux/init.h>
38#include <linux/jhash.h>
39#include <linux/ipsec.h>
40#include <linux/times.h>
5a0e3ad6 41#include <linux/slab.h>
4aa956d8 42#include <linux/uaccess.h>
1da177e4
LT
43#include <linux/ipv6.h>
44#include <linux/icmpv6.h>
45#include <linux/random.h>
46
47#include <net/tcp.h>
48#include <net/ndisc.h>
5324a040 49#include <net/inet6_hashtables.h>
8129765a 50#include <net/inet6_connection_sock.h>
1da177e4
LT
51#include <net/ipv6.h>
52#include <net/transp_v6.h>
53#include <net/addrconf.h>
54#include <net/ip6_route.h>
55#include <net/ip6_checksum.h>
56#include <net/inet_ecn.h>
57#include <net/protocol.h>
58#include <net/xfrm.h>
1da177e4
LT
59#include <net/snmp.h>
60#include <net/dsfield.h>
6d6ee43e 61#include <net/timewait_sock.h>
3d58b5fa 62#include <net/inet_common.h>
6e5714ea 63#include <net/secure_seq.h>
076bb0c8 64#include <net/busy_poll.h>
1da177e4 65
1da177e4
LT
66#include <linux/proc_fs.h>
67#include <linux/seq_file.h>
68
cf80e0e4 69#include <crypto/hash.h>
cfb6eeb4
YH
70#include <linux/scatterlist.h>
71
a00e7444
ED
72static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
73static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
6edafaaf 74 struct request_sock *req);
1da177e4
LT
75
76static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
1da177e4 77
3b401a81
SH
78static const struct inet_connection_sock_af_ops ipv6_mapped;
79static const struct inet_connection_sock_af_ops ipv6_specific;
a928630a 80#ifdef CONFIG_TCP_MD5SIG
b2e4b3de
SH
81static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
82static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
9501f972 83#else
51723935 84static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
b71d1d42 85 const struct in6_addr *addr)
9501f972
YH
86{
87 return NULL;
88}
a928630a 89#endif
1da177e4 90
fae6ef87
NC
91static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
92{
93 struct dst_entry *dst = skb_dst(skb);
fae6ef87 94
5037e9ef 95 if (dst && dst_hold_safe(dst)) {
ca777eff
ED
96 const struct rt6_info *rt = (const struct rt6_info *)dst;
97
ca777eff
ED
98 sk->sk_rx_dst = dst;
99 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
b197df4f 100 inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
ca777eff 101 }
fae6ef87
NC
102}
103
95a22cae 104static u32 tcp_v6_init_sequence(const struct sk_buff *skb, u32 *tsoff)
1da177e4 105{
0660e03f
ACM
106 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
107 ipv6_hdr(skb)->saddr.s6_addr32,
aa8223c7 108 tcp_hdr(skb)->dest,
95a22cae 109 tcp_hdr(skb)->source, tsoff);
1da177e4
LT
110}
111
1ab1457c 112static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
1da177e4
LT
113 int addr_len)
114{
115 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
1ab1457c 116 struct inet_sock *inet = inet_sk(sk);
d83d8461 117 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
118 struct ipv6_pinfo *np = inet6_sk(sk);
119 struct tcp_sock *tp = tcp_sk(sk);
20c59de2 120 struct in6_addr *saddr = NULL, *final_p, final;
45f6fad8 121 struct ipv6_txoptions *opt;
4c9483b2 122 struct flowi6 fl6;
1da177e4
LT
123 struct dst_entry *dst;
124 int addr_type;
00355fa5 125 u32 seq;
1da177e4 126 int err;
1946e672 127 struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
1da177e4 128
1ab1457c 129 if (addr_len < SIN6_LEN_RFC2133)
1da177e4
LT
130 return -EINVAL;
131
1ab1457c 132 if (usin->sin6_family != AF_INET6)
a02cec21 133 return -EAFNOSUPPORT;
1da177e4 134
4c9483b2 135 memset(&fl6, 0, sizeof(fl6));
1da177e4
LT
136
137 if (np->sndflow) {
4c9483b2
DM
138 fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
139 IP6_ECN_flow_init(fl6.flowlabel);
140 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
1da177e4 141 struct ip6_flowlabel *flowlabel;
4c9483b2 142 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
63159f29 143 if (!flowlabel)
1da177e4 144 return -EINVAL;
1da177e4
LT
145 fl6_sock_release(flowlabel);
146 }
147 }
148
149 /*
1ab1457c
YH
150 * connect() to INADDR_ANY means loopback (BSD'ism).
151 */
152
052d2369
JL
153 if (ipv6_addr_any(&usin->sin6_addr)) {
154 if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
155 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
156 &usin->sin6_addr);
157 else
158 usin->sin6_addr = in6addr_loopback;
159 }
1da177e4
LT
160
161 addr_type = ipv6_addr_type(&usin->sin6_addr);
162
4c99aa40 163 if (addr_type & IPV6_ADDR_MULTICAST)
1da177e4
LT
164 return -ENETUNREACH;
165
166 if (addr_type&IPV6_ADDR_LINKLOCAL) {
167 if (addr_len >= sizeof(struct sockaddr_in6) &&
168 usin->sin6_scope_id) {
169 /* If interface is set while binding, indices
170 * must coincide.
171 */
172 if (sk->sk_bound_dev_if &&
173 sk->sk_bound_dev_if != usin->sin6_scope_id)
174 return -EINVAL;
175
176 sk->sk_bound_dev_if = usin->sin6_scope_id;
177 }
178
179 /* Connect to link-local address requires an interface */
180 if (!sk->sk_bound_dev_if)
181 return -EINVAL;
182 }
183
184 if (tp->rx_opt.ts_recent_stamp &&
efe4208f 185 !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
1da177e4
LT
186 tp->rx_opt.ts_recent = 0;
187 tp->rx_opt.ts_recent_stamp = 0;
188 tp->write_seq = 0;
189 }
190
efe4208f 191 sk->sk_v6_daddr = usin->sin6_addr;
4c9483b2 192 np->flow_label = fl6.flowlabel;
1da177e4
LT
193
194 /*
195 * TCP over IPv4
196 */
197
052d2369 198 if (addr_type & IPV6_ADDR_MAPPED) {
d83d8461 199 u32 exthdrlen = icsk->icsk_ext_hdr_len;
1da177e4
LT
200 struct sockaddr_in sin;
201
202 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
203
204 if (__ipv6_only_sock(sk))
205 return -ENETUNREACH;
206
207 sin.sin_family = AF_INET;
208 sin.sin_port = usin->sin6_port;
209 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
210
d83d8461 211 icsk->icsk_af_ops = &ipv6_mapped;
1da177e4 212 sk->sk_backlog_rcv = tcp_v4_do_rcv;
cfb6eeb4
YH
213#ifdef CONFIG_TCP_MD5SIG
214 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
215#endif
1da177e4
LT
216
217 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
218
219 if (err) {
d83d8461
ACM
220 icsk->icsk_ext_hdr_len = exthdrlen;
221 icsk->icsk_af_ops = &ipv6_specific;
1da177e4 222 sk->sk_backlog_rcv = tcp_v6_do_rcv;
cfb6eeb4
YH
223#ifdef CONFIG_TCP_MD5SIG
224 tp->af_specific = &tcp_sock_ipv6_specific;
225#endif
1da177e4 226 goto failure;
1da177e4 227 }
d1e559d0 228 np->saddr = sk->sk_v6_rcv_saddr;
1da177e4
LT
229
230 return err;
231 }
232
efe4208f
ED
233 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
234 saddr = &sk->sk_v6_rcv_saddr;
1da177e4 235
4c9483b2 236 fl6.flowi6_proto = IPPROTO_TCP;
efe4208f 237 fl6.daddr = sk->sk_v6_daddr;
4e3fd7a0 238 fl6.saddr = saddr ? *saddr : np->saddr;
4c9483b2
DM
239 fl6.flowi6_oif = sk->sk_bound_dev_if;
240 fl6.flowi6_mark = sk->sk_mark;
1958b856
DM
241 fl6.fl6_dport = usin->sin6_port;
242 fl6.fl6_sport = inet->inet_sport;
e2d118a1 243 fl6.flowi6_uid = sk->sk_uid;
1da177e4 244
1e1d04e6 245 opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
45f6fad8 246 final_p = fl6_update_dst(&fl6, opt, &final);
1da177e4 247
4c9483b2 248 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
beb8d13b 249
0e0d44ab 250 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
68d0c6d3
DM
251 if (IS_ERR(dst)) {
252 err = PTR_ERR(dst);
1da177e4 253 goto failure;
14e50e57 254 }
1da177e4 255
63159f29 256 if (!saddr) {
4c9483b2 257 saddr = &fl6.saddr;
efe4208f 258 sk->sk_v6_rcv_saddr = *saddr;
1da177e4
LT
259 }
260
261 /* set the source address */
4e3fd7a0 262 np->saddr = *saddr;
c720c7e8 263 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
1da177e4 264
f83ef8c0 265 sk->sk_gso_type = SKB_GSO_TCPV6;
6bd4f355 266 ip6_dst_store(sk, dst, NULL, NULL);
1da177e4 267
1946e672 268 if (tcp_death_row->sysctl_tw_recycle &&
493f377d 269 !tp->rx_opt.ts_recent_stamp &&
fd0273d7 270 ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
81166dd6 271 tcp_fetch_timewait_stamp(sk, dst);
493f377d 272
d83d8461 273 icsk->icsk_ext_hdr_len = 0;
45f6fad8
ED
274 if (opt)
275 icsk->icsk_ext_hdr_len = opt->opt_flen +
276 opt->opt_nflen;
1da177e4
LT
277
278 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
279
c720c7e8 280 inet->inet_dport = usin->sin6_port;
1da177e4
LT
281
282 tcp_set_state(sk, TCP_SYN_SENT);
1946e672 283 err = inet6_hash_connect(tcp_death_row, sk);
1da177e4
LT
284 if (err)
285 goto late_failure;
286
877d1f62 287 sk_set_txhash(sk);
9e7ceb06 288
00355fa5
AK
289 if (likely(!tp->repair)) {
290 seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
291 sk->sk_v6_daddr.s6_addr32,
292 inet->inet_sport,
293 inet->inet_dport,
294 &tp->tsoffset);
295 if (!tp->write_seq)
296 tp->write_seq = seq;
297 }
1da177e4 298
19f6d3f3
WW
299 if (tcp_fastopen_defer_connect(sk, &err))
300 return err;
301 if (err)
302 goto late_failure;
303
1da177e4
LT
304 err = tcp_connect(sk);
305 if (err)
306 goto late_failure;
307
308 return 0;
309
310late_failure:
311 tcp_set_state(sk, TCP_CLOSE);
1da177e4 312failure:
c720c7e8 313 inet->inet_dport = 0;
1da177e4
LT
314 sk->sk_route_caps = 0;
315 return err;
316}
317
563d34d0
ED
318static void tcp_v6_mtu_reduced(struct sock *sk)
319{
320 struct dst_entry *dst;
321
322 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
323 return;
324
325 dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
326 if (!dst)
327 return;
328
329 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
330 tcp_sync_mss(sk, dst_mtu(dst));
331 tcp_simple_retransmit(sk);
332 }
333}
334
1da177e4 335static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
d5fdd6ba 336 u8 type, u8 code, int offset, __be32 info)
1da177e4 337{
4c99aa40 338 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
505cbfc5 339 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
2215089b
ED
340 struct net *net = dev_net(skb->dev);
341 struct request_sock *fastopen;
1da177e4 342 struct ipv6_pinfo *np;
1ab1457c 343 struct tcp_sock *tp;
0a672f74 344 __u32 seq, snd_una;
2215089b 345 struct sock *sk;
9cf74903 346 bool fatal;
2215089b 347 int err;
1da177e4 348
2215089b
ED
349 sk = __inet6_lookup_established(net, &tcp_hashinfo,
350 &hdr->daddr, th->dest,
351 &hdr->saddr, ntohs(th->source),
352 skb->dev->ifindex);
1da177e4 353
2215089b 354 if (!sk) {
a16292a0
ED
355 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
356 ICMP6_MIB_INERRORS);
1da177e4
LT
357 return;
358 }
359
360 if (sk->sk_state == TCP_TIME_WAIT) {
9469c7b4 361 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
362 return;
363 }
2215089b 364 seq = ntohl(th->seq);
9cf74903 365 fatal = icmpv6_err_convert(type, code, &err);
2215089b 366 if (sk->sk_state == TCP_NEW_SYN_RECV)
9cf74903 367 return tcp_req_err(sk, seq, fatal);
1da177e4
LT
368
369 bh_lock_sock(sk);
563d34d0 370 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
02a1d6e7 371 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
1da177e4
LT
372
373 if (sk->sk_state == TCP_CLOSE)
374 goto out;
375
e802af9c 376 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
02a1d6e7 377 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
e802af9c
SH
378 goto out;
379 }
380
1da177e4 381 tp = tcp_sk(sk);
0a672f74
YC
382 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
383 fastopen = tp->fastopen_rsk;
384 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
1da177e4 385 if (sk->sk_state != TCP_LISTEN &&
0a672f74 386 !between(seq, snd_una, tp->snd_nxt)) {
02a1d6e7 387 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
1da177e4
LT
388 goto out;
389 }
390
391 np = inet6_sk(sk);
392
ec18d9a2 393 if (type == NDISC_REDIRECT) {
45caeaa5
JM
394 if (!sock_owned_by_user(sk)) {
395 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
ec18d9a2 396
45caeaa5
JM
397 if (dst)
398 dst->ops->redirect(dst, sk, skb);
399 }
50a75a89 400 goto out;
ec18d9a2
DM
401 }
402
1da177e4 403 if (type == ICMPV6_PKT_TOOBIG) {
0d4f0608
ED
404 /* We are not interested in TCP_LISTEN and open_requests
405 * (SYN-ACKs send out by Linux are always <576bytes so
406 * they should go through unfragmented).
407 */
408 if (sk->sk_state == TCP_LISTEN)
409 goto out;
410
93b36cf3
HFS
411 if (!ip6_sk_accept_pmtu(sk))
412 goto out;
413
563d34d0
ED
414 tp->mtu_info = ntohl(info);
415 if (!sock_owned_by_user(sk))
416 tcp_v6_mtu_reduced(sk);
d013ef2a 417 else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
7aa5470c 418 &sk->sk_tsq_flags))
d013ef2a 419 sock_hold(sk);
1da177e4
LT
420 goto out;
421 }
422
1da177e4 423
60236fdd 424 /* Might be for an request_sock */
1da177e4 425 switch (sk->sk_state) {
1da177e4 426 case TCP_SYN_SENT:
0a672f74
YC
427 case TCP_SYN_RECV:
428 /* Only in fast or simultaneous open. If a fast open socket is
429 * is already accepted it is treated as a connected one below.
430 */
63159f29 431 if (fastopen && !fastopen->sk)
0a672f74
YC
432 break;
433
1da177e4 434 if (!sock_owned_by_user(sk)) {
1da177e4
LT
435 sk->sk_err = err;
436 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
437
438 tcp_done(sk);
439 } else
440 sk->sk_err_soft = err;
441 goto out;
442 }
443
444 if (!sock_owned_by_user(sk) && np->recverr) {
445 sk->sk_err = err;
446 sk->sk_error_report(sk);
447 } else
448 sk->sk_err_soft = err;
449
450out:
451 bh_unlock_sock(sk);
452 sock_put(sk);
453}
454
455
0f935dbe 456static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
d6274bd8 457 struct flowi *fl,
3840a06e 458 struct request_sock *req,
ca6fb065 459 struct tcp_fastopen_cookie *foc,
b3d05147 460 enum tcp_synack_type synack_type)
1da177e4 461{
634fb979 462 struct inet_request_sock *ireq = inet_rsk(req);
1da177e4 463 struct ipv6_pinfo *np = inet6_sk(sk);
56ac42bc 464 struct ipv6_txoptions *opt;
d6274bd8 465 struct flowi6 *fl6 = &fl->u.ip6;
4c99aa40 466 struct sk_buff *skb;
9494218f 467 int err = -ENOMEM;
1da177e4 468
9f10d3f6 469 /* First, grab a route. */
f76b33c3
ED
470 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
471 IPPROTO_TCP)) == NULL)
fd80eb94 472 goto done;
9494218f 473
b3d05147 474 skb = tcp_make_synack(sk, dst, req, foc, synack_type);
9494218f 475
1da177e4 476 if (skb) {
634fb979
ED
477 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
478 &ireq->ir_v6_rmt_addr);
1da177e4 479
634fb979 480 fl6->daddr = ireq->ir_v6_rmt_addr;
53b24b8f 481 if (np->repflow && ireq->pktopts)
df3687ff
FF
482 fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
483
3e4006f0 484 rcu_read_lock();
56ac42bc
HD
485 opt = ireq->ipv6_opt;
486 if (!opt)
487 opt = rcu_dereference(np->opt);
92e55f41 488 err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
3e4006f0 489 rcu_read_unlock();
b9df3cb8 490 err = net_xmit_eval(err);
1da177e4
LT
491 }
492
493done:
1da177e4
LT
494 return err;
495}
496
72659ecc 497
60236fdd 498static void tcp_v6_reqsk_destructor(struct request_sock *req)
1da177e4 499{
56ac42bc 500 kfree(inet_rsk(req)->ipv6_opt);
634fb979 501 kfree_skb(inet_rsk(req)->pktopts);
1da177e4
LT
502}
503
cfb6eeb4 504#ifdef CONFIG_TCP_MD5SIG
b83e3deb 505static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
b71d1d42 506 const struct in6_addr *addr)
cfb6eeb4 507{
a915da9b 508 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
cfb6eeb4
YH
509}
510
b83e3deb 511static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
fd3a154a 512 const struct sock *addr_sk)
cfb6eeb4 513{
efe4208f 514 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
cfb6eeb4
YH
515}
516
4aa956d8
WY
517static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
518 int optlen)
cfb6eeb4
YH
519{
520 struct tcp_md5sig cmd;
521 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
cfb6eeb4
YH
522
523 if (optlen < sizeof(cmd))
524 return -EINVAL;
525
526 if (copy_from_user(&cmd, optval, sizeof(cmd)))
527 return -EFAULT;
528
529 if (sin6->sin6_family != AF_INET6)
530 return -EINVAL;
531
532 if (!cmd.tcpm_keylen) {
e773e4fa 533 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
a915da9b
ED
534 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
535 AF_INET);
536 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
537 AF_INET6);
cfb6eeb4
YH
538 }
539
540 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
541 return -EINVAL;
542
a915da9b
ED
543 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
544 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
545 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
cfb6eeb4 546
a915da9b
ED
547 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
548 AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
cfb6eeb4
YH
549}
550
19689e38
ED
551static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
552 const struct in6_addr *daddr,
553 const struct in6_addr *saddr,
554 const struct tcphdr *th, int nbytes)
cfb6eeb4 555{
cfb6eeb4 556 struct tcp6_pseudohdr *bp;
49a72dfb 557 struct scatterlist sg;
19689e38 558 struct tcphdr *_th;
8d26d76d 559
19689e38 560 bp = hp->scratch;
cfb6eeb4 561 /* 1. TCP pseudo-header (RFC2460) */
4e3fd7a0
AD
562 bp->saddr = *saddr;
563 bp->daddr = *daddr;
49a72dfb 564 bp->protocol = cpu_to_be32(IPPROTO_TCP);
00b1304c 565 bp->len = cpu_to_be32(nbytes);
cfb6eeb4 566
19689e38
ED
567 _th = (struct tcphdr *)(bp + 1);
568 memcpy(_th, th, sizeof(*th));
569 _th->check = 0;
570
571 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
572 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
573 sizeof(*bp) + sizeof(*th));
cf80e0e4 574 return crypto_ahash_update(hp->md5_req);
49a72dfb 575}
c7da57a1 576
19689e38 577static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
b71d1d42 578 const struct in6_addr *daddr, struct in6_addr *saddr,
318cf7aa 579 const struct tcphdr *th)
49a72dfb
AL
580{
581 struct tcp_md5sig_pool *hp;
cf80e0e4 582 struct ahash_request *req;
49a72dfb
AL
583
584 hp = tcp_get_md5sig_pool();
585 if (!hp)
586 goto clear_hash_noput;
cf80e0e4 587 req = hp->md5_req;
49a72dfb 588
cf80e0e4 589 if (crypto_ahash_init(req))
49a72dfb 590 goto clear_hash;
19689e38 591 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
49a72dfb
AL
592 goto clear_hash;
593 if (tcp_md5_hash_key(hp, key))
594 goto clear_hash;
cf80e0e4
HX
595 ahash_request_set_crypt(req, NULL, md5_hash, 0);
596 if (crypto_ahash_final(req))
cfb6eeb4 597 goto clear_hash;
cfb6eeb4 598
cfb6eeb4 599 tcp_put_md5sig_pool();
cfb6eeb4 600 return 0;
49a72dfb 601
cfb6eeb4
YH
602clear_hash:
603 tcp_put_md5sig_pool();
604clear_hash_noput:
605 memset(md5_hash, 0, 16);
49a72dfb 606 return 1;
cfb6eeb4
YH
607}
608
39f8e58e
ED
609static int tcp_v6_md5_hash_skb(char *md5_hash,
610 const struct tcp_md5sig_key *key,
318cf7aa 611 const struct sock *sk,
318cf7aa 612 const struct sk_buff *skb)
cfb6eeb4 613{
b71d1d42 614 const struct in6_addr *saddr, *daddr;
49a72dfb 615 struct tcp_md5sig_pool *hp;
cf80e0e4 616 struct ahash_request *req;
318cf7aa 617 const struct tcphdr *th = tcp_hdr(skb);
cfb6eeb4 618
39f8e58e
ED
619 if (sk) { /* valid for establish/request sockets */
620 saddr = &sk->sk_v6_rcv_saddr;
efe4208f 621 daddr = &sk->sk_v6_daddr;
49a72dfb 622 } else {
b71d1d42 623 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
49a72dfb
AL
624 saddr = &ip6h->saddr;
625 daddr = &ip6h->daddr;
cfb6eeb4 626 }
49a72dfb
AL
627
628 hp = tcp_get_md5sig_pool();
629 if (!hp)
630 goto clear_hash_noput;
cf80e0e4 631 req = hp->md5_req;
49a72dfb 632
cf80e0e4 633 if (crypto_ahash_init(req))
49a72dfb
AL
634 goto clear_hash;
635
19689e38 636 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
49a72dfb
AL
637 goto clear_hash;
638 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
639 goto clear_hash;
640 if (tcp_md5_hash_key(hp, key))
641 goto clear_hash;
cf80e0e4
HX
642 ahash_request_set_crypt(req, NULL, md5_hash, 0);
643 if (crypto_ahash_final(req))
49a72dfb
AL
644 goto clear_hash;
645
646 tcp_put_md5sig_pool();
647 return 0;
648
649clear_hash:
650 tcp_put_md5sig_pool();
651clear_hash_noput:
652 memset(md5_hash, 0, 16);
653 return 1;
cfb6eeb4
YH
654}
655
ba8e275a
ED
656#endif
657
658static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
659 const struct sk_buff *skb)
cfb6eeb4 660{
ba8e275a 661#ifdef CONFIG_TCP_MD5SIG
cf533ea5 662 const __u8 *hash_location = NULL;
cfb6eeb4 663 struct tcp_md5sig_key *hash_expected;
b71d1d42 664 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
318cf7aa 665 const struct tcphdr *th = tcp_hdr(skb);
cfb6eeb4 666 int genhash;
cfb6eeb4
YH
667 u8 newhash[16];
668
669 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
7d5d5525 670 hash_location = tcp_parse_md5sig_option(th);
cfb6eeb4 671
785957d3
DM
672 /* We've parsed the options - do we have a hash? */
673 if (!hash_expected && !hash_location)
ff74e23f 674 return false;
785957d3
DM
675
676 if (hash_expected && !hash_location) {
c10d9310 677 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
ff74e23f 678 return true;
cfb6eeb4
YH
679 }
680
785957d3 681 if (!hash_expected && hash_location) {
c10d9310 682 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
ff74e23f 683 return true;
cfb6eeb4
YH
684 }
685
686 /* check the signature */
49a72dfb
AL
687 genhash = tcp_v6_md5_hash_skb(newhash,
688 hash_expected,
39f8e58e 689 NULL, skb);
49a72dfb 690
cfb6eeb4 691 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
72145a68 692 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
e87cc472
JP
693 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
694 genhash ? "failed" : "mismatch",
695 &ip6h->saddr, ntohs(th->source),
696 &ip6h->daddr, ntohs(th->dest));
ff74e23f 697 return true;
cfb6eeb4 698 }
ba8e275a 699#endif
ff74e23f 700 return false;
cfb6eeb4 701}
cfb6eeb4 702
b40cf18e
ED
703static void tcp_v6_init_req(struct request_sock *req,
704 const struct sock *sk_listener,
16bea70a
OP
705 struct sk_buff *skb)
706{
707 struct inet_request_sock *ireq = inet_rsk(req);
b40cf18e 708 const struct ipv6_pinfo *np = inet6_sk(sk_listener);
16bea70a
OP
709
710 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
711 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
712
16bea70a 713 /* So that link locals have meaning */
b40cf18e 714 if (!sk_listener->sk_bound_dev_if &&
16bea70a 715 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
870c3151 716 ireq->ir_iif = tcp_v6_iif(skb);
16bea70a 717
04317daf 718 if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
b40cf18e 719 (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
a224772d 720 np->rxopt.bits.rxinfo ||
16bea70a
OP
721 np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
722 np->rxopt.bits.rxohlim || np->repflow)) {
723 atomic_inc(&skb->users);
724 ireq->pktopts = skb;
725 }
726}
727
f964629e
ED
728static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
729 struct flowi *fl,
d94e0417
OP
730 const struct request_sock *req,
731 bool *strict)
732{
733 if (strict)
734 *strict = true;
f76b33c3 735 return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
d94e0417
OP
736}
737
c6aefafb 738struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
1da177e4 739 .family = AF_INET6,
2e6599cb 740 .obj_size = sizeof(struct tcp6_request_sock),
5db92c99 741 .rtx_syn_ack = tcp_rtx_synack,
60236fdd
ACM
742 .send_ack = tcp_v6_reqsk_send_ack,
743 .destructor = tcp_v6_reqsk_destructor,
72659ecc 744 .send_reset = tcp_v6_send_reset,
4aa956d8 745 .syn_ack_timeout = tcp_syn_ack_timeout,
1da177e4
LT
746};
747
b2e4b3de 748static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
2aec4a29
OP
749 .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
750 sizeof(struct ipv6hdr),
16bea70a 751#ifdef CONFIG_TCP_MD5SIG
fd3a154a 752 .req_md5_lookup = tcp_v6_md5_lookup,
e3afe7b7 753 .calc_md5_hash = tcp_v6_md5_hash_skb,
b6332e6c 754#endif
16bea70a 755 .init_req = tcp_v6_init_req,
fb7b37a7
OP
756#ifdef CONFIG_SYN_COOKIES
757 .cookie_init_seq = cookie_v6_init_sequence,
758#endif
d94e0417 759 .route_req = tcp_v6_route_req,
936b8bdb 760 .init_seq = tcp_v6_init_sequence,
d6274bd8 761 .send_synack = tcp_v6_send_synack,
16bea70a 762};
cfb6eeb4 763
a00e7444 764static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
0f85feae
ED
765 u32 ack, u32 win, u32 tsval, u32 tsecr,
766 int oif, struct tcp_md5sig_key *key, int rst,
5119bd16 767 u8 tclass, __be32 label)
1da177e4 768{
cf533ea5
ED
769 const struct tcphdr *th = tcp_hdr(skb);
770 struct tcphdr *t1;
1da177e4 771 struct sk_buff *buff;
4c9483b2 772 struct flowi6 fl6;
0f85feae 773 struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
e5047992 774 struct sock *ctl_sk = net->ipv6.tcp_sk;
77c676da 775 unsigned int tot_len = sizeof(struct tcphdr);
adf30907 776 struct dst_entry *dst;
81ada62d 777 __be32 *topt;
1da177e4 778
ee684b6f 779 if (tsecr)
626e264d 780 tot_len += TCPOLEN_TSTAMP_ALIGNED;
cfb6eeb4 781#ifdef CONFIG_TCP_MD5SIG
cfb6eeb4
YH
782 if (key)
783 tot_len += TCPOLEN_MD5SIG_ALIGNED;
784#endif
785
cfb6eeb4 786 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1da177e4 787 GFP_ATOMIC);
63159f29 788 if (!buff)
1ab1457c 789 return;
1da177e4 790
cfb6eeb4 791 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1da177e4 792
cfb6eeb4 793 t1 = (struct tcphdr *) skb_push(buff, tot_len);
6651ffc8 794 skb_reset_transport_header(buff);
1da177e4
LT
795
796 /* Swap the send and the receive. */
797 memset(t1, 0, sizeof(*t1));
798 t1->dest = th->source;
799 t1->source = th->dest;
cfb6eeb4 800 t1->doff = tot_len / 4;
626e264d
IJ
801 t1->seq = htonl(seq);
802 t1->ack_seq = htonl(ack);
803 t1->ack = !rst || !th->ack;
804 t1->rst = rst;
805 t1->window = htons(win);
1da177e4 806
81ada62d
IJ
807 topt = (__be32 *)(t1 + 1);
808
ee684b6f 809 if (tsecr) {
626e264d
IJ
810 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
811 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
ee684b6f
AV
812 *topt++ = htonl(tsval);
813 *topt++ = htonl(tsecr);
626e264d
IJ
814 }
815
cfb6eeb4
YH
816#ifdef CONFIG_TCP_MD5SIG
817 if (key) {
81ada62d
IJ
818 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
819 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
820 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
78e645cb
IJ
821 &ipv6_hdr(skb)->saddr,
822 &ipv6_hdr(skb)->daddr, t1);
cfb6eeb4
YH
823 }
824#endif
825
4c9483b2 826 memset(&fl6, 0, sizeof(fl6));
4e3fd7a0
AD
827 fl6.daddr = ipv6_hdr(skb)->saddr;
828 fl6.saddr = ipv6_hdr(skb)->daddr;
1d13a96c 829 fl6.flowlabel = label;
1da177e4 830
e5700aff
DM
831 buff->ip_summed = CHECKSUM_PARTIAL;
832 buff->csum = 0;
833
4c9483b2 834 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
1da177e4 835
4c9483b2 836 fl6.flowi6_proto = IPPROTO_TCP;
a36dbdb2 837 if (rt6_need_strict(&fl6.daddr) && !oif)
870c3151 838 fl6.flowi6_oif = tcp_v6_iif(skb);
9b6c14d5
DA
839 else {
840 if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
841 oif = skb->skb_iif;
842
843 fl6.flowi6_oif = oif;
844 }
1d2f7b2d 845
e110861f 846 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
1958b856
DM
847 fl6.fl6_dport = t1->dest;
848 fl6.fl6_sport = t1->source;
e2d118a1 849 fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
4c9483b2 850 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
1da177e4 851
c20121ae
DL
852 /* Pass a socket to ip6_dst_lookup either it is for RST
853 * Underlying function will use this to retrieve the network
854 * namespace
855 */
0e0d44ab 856 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
68d0c6d3
DM
857 if (!IS_ERR(dst)) {
858 skb_dst_set(buff, dst);
92e55f41 859 ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
c10d9310 860 TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
68d0c6d3 861 if (rst)
c10d9310 862 TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
68d0c6d3 863 return;
1da177e4
LT
864 }
865
866 kfree_skb(buff);
867}
868
a00e7444 869static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
1da177e4 870{
cf533ea5 871 const struct tcphdr *th = tcp_hdr(skb);
626e264d 872 u32 seq = 0, ack_seq = 0;
fa3e5b4e 873 struct tcp_md5sig_key *key = NULL;
658ddaaf
SL
874#ifdef CONFIG_TCP_MD5SIG
875 const __u8 *hash_location = NULL;
876 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
877 unsigned char newhash[16];
878 int genhash;
879 struct sock *sk1 = NULL;
880#endif
9c76a114 881 int oif;
1da177e4 882
626e264d 883 if (th->rst)
1da177e4
LT
884 return;
885
c3658e8d
ED
886 /* If sk not NULL, it means we did a successful lookup and incoming
887 * route had to be correct. prequeue might have dropped our dst.
888 */
889 if (!sk && !ipv6_unicast_destination(skb))
626e264d 890 return;
1da177e4 891
cfb6eeb4 892#ifdef CONFIG_TCP_MD5SIG
3b24d854 893 rcu_read_lock();
658ddaaf 894 hash_location = tcp_parse_md5sig_option(th);
271c3b9b 895 if (sk && sk_fullsock(sk)) {
e46787f0
FW
896 key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
897 } else if (hash_location) {
658ddaaf
SL
898 /*
899 * active side is lost. Try to find listening socket through
900 * source port, and then find md5 key through listening socket.
901 * we are not loose security here:
902 * Incoming packet is checked with md5 hash with finding key,
903 * no RST generated if md5 hash doesn't match.
904 */
905 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
a583636a
CG
906 &tcp_hashinfo, NULL, 0,
907 &ipv6h->saddr,
5ba24953 908 th->source, &ipv6h->daddr,
870c3151 909 ntohs(th->source), tcp_v6_iif(skb));
658ddaaf 910 if (!sk1)
3b24d854 911 goto out;
658ddaaf 912
658ddaaf
SL
913 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
914 if (!key)
3b24d854 915 goto out;
658ddaaf 916
39f8e58e 917 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
658ddaaf 918 if (genhash || memcmp(hash_location, newhash, 16) != 0)
3b24d854 919 goto out;
658ddaaf 920 }
cfb6eeb4
YH
921#endif
922
626e264d
IJ
923 if (th->ack)
924 seq = ntohl(th->ack_seq);
925 else
926 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
927 (th->doff << 2);
1da177e4 928
9c76a114 929 oif = sk ? sk->sk_bound_dev_if : 0;
0f85feae 930 tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
658ddaaf
SL
931
932#ifdef CONFIG_TCP_MD5SIG
3b24d854
ED
933out:
934 rcu_read_unlock();
658ddaaf 935#endif
626e264d 936}
1da177e4 937
a00e7444 938static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
0f85feae 939 u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
1d13a96c 940 struct tcp_md5sig_key *key, u8 tclass,
5119bd16 941 __be32 label)
626e264d 942{
0f85feae
ED
943 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
944 tclass, label);
1da177e4
LT
945}
946
947static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
948{
8feaf0c0 949 struct inet_timewait_sock *tw = inet_twsk(sk);
cfb6eeb4 950 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1da177e4 951
0f85feae 952 tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
8feaf0c0 953 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
ee684b6f 954 tcp_time_stamp + tcptw->tw_ts_offset,
9c76a114 955 tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
21858cd0 956 tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
1da177e4 957
8feaf0c0 958 inet_twsk_put(tw);
1da177e4
LT
959}
960
a00e7444 961static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
6edafaaf 962 struct request_sock *req)
1da177e4 963{
3a19ce0e
DL
964 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
965 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
966 */
20a2b49f
ED
967 /* RFC 7323 2.3
968 * The window field (SEG.WND) of every outgoing segment, with the
969 * exception of <SYN> segments, MUST be right-shifted by
970 * Rcv.Wind.Shift bits:
971 */
0f85feae 972 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
3a19ce0e 973 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
20a2b49f
ED
974 tcp_rsk(req)->rcv_nxt,
975 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
95a22cae
FW
976 tcp_time_stamp + tcp_rsk(req)->ts_off,
977 req->ts_recent, sk->sk_bound_dev_if,
1d13a96c
FF
978 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
979 0, 0);
1da177e4
LT
980}
981
982
079096f1 983static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
1da177e4 984{
079096f1 985#ifdef CONFIG_SYN_COOKIES
aa8223c7 986 const struct tcphdr *th = tcp_hdr(skb);
1da177e4 987
af9b4738 988 if (!th->syn)
c6aefafb 989 sk = cookie_v6_check(sk, skb);
1da177e4
LT
990#endif
991 return sk;
992}
993
1da177e4
LT
994static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
995{
1da177e4
LT
996 if (skb->protocol == htons(ETH_P_IP))
997 return tcp_v4_conn_request(sk, skb);
998
999 if (!ipv6_unicast_destination(skb))
1ab1457c 1000 goto drop;
1da177e4 1001
1fb6f159
OP
1002 return tcp_conn_request(&tcp6_request_sock_ops,
1003 &tcp_request_sock_ipv6_ops, sk, skb);
1da177e4
LT
1004
1005drop:
9caad864 1006 tcp_listendrop(sk);
1da177e4
LT
1007 return 0; /* don't send reset */
1008}
1009
ebf6c9cb
ED
1010static void tcp_v6_restore_cb(struct sk_buff *skb)
1011{
1012 /* We need to move header back to the beginning if xfrm6_policy_check()
1013 * and tcp_v6_fill_cb() are going to be called again.
1014 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1015 */
1016 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1017 sizeof(struct inet6_skb_parm));
1018}
1019
0c27171e 1020static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
4c99aa40 1021 struct request_sock *req,
5e0724d0
ED
1022 struct dst_entry *dst,
1023 struct request_sock *req_unhash,
1024 bool *own_req)
1da177e4 1025{
634fb979 1026 struct inet_request_sock *ireq;
0c27171e
ED
1027 struct ipv6_pinfo *newnp;
1028 const struct ipv6_pinfo *np = inet6_sk(sk);
45f6fad8 1029 struct ipv6_txoptions *opt;
1da177e4
LT
1030 struct tcp6_sock *newtcp6sk;
1031 struct inet_sock *newinet;
1032 struct tcp_sock *newtp;
1033 struct sock *newsk;
cfb6eeb4
YH
1034#ifdef CONFIG_TCP_MD5SIG
1035 struct tcp_md5sig_key *key;
1036#endif
3840a06e 1037 struct flowi6 fl6;
1da177e4
LT
1038
1039 if (skb->protocol == htons(ETH_P_IP)) {
1040 /*
1041 * v6 mapped
1042 */
1043
5e0724d0
ED
1044 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1045 req_unhash, own_req);
1da177e4 1046
63159f29 1047 if (!newsk)
1da177e4
LT
1048 return NULL;
1049
1050 newtcp6sk = (struct tcp6_sock *)newsk;
1051 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1052
1053 newinet = inet_sk(newsk);
1054 newnp = inet6_sk(newsk);
1055 newtp = tcp_sk(newsk);
1056
1057 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1058
d1e559d0 1059 newnp->saddr = newsk->sk_v6_rcv_saddr;
1da177e4 1060
8292a17a 1061 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1da177e4 1062 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
cfb6eeb4
YH
1063#ifdef CONFIG_TCP_MD5SIG
1064 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1065#endif
1066
676a1184
YZ
1067 newnp->ipv6_ac_list = NULL;
1068 newnp->ipv6_fl_list = NULL;
1da177e4
LT
1069 newnp->pktoptions = NULL;
1070 newnp->opt = NULL;
870c3151 1071 newnp->mcast_oif = tcp_v6_iif(skb);
0660e03f 1072 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1397ed35 1073 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
df3687ff
FF
1074 if (np->repflow)
1075 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1da177e4 1076
e6848976
ACM
1077 /*
1078 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1079 * here, tcp_create_openreq_child now does this for us, see the comment in
1080 * that function for the gory details. -acme
1da177e4 1081 */
1da177e4
LT
1082
1083 /* It is tricky place. Until this moment IPv4 tcp
8292a17a 1084 worked with IPv6 icsk.icsk_af_ops.
1da177e4
LT
1085 Sync it now.
1086 */
d83d8461 1087 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1da177e4
LT
1088
1089 return newsk;
1090 }
1091
634fb979 1092 ireq = inet_rsk(req);
1da177e4
LT
1093
1094 if (sk_acceptq_is_full(sk))
1095 goto out_overflow;
1096
493f377d 1097 if (!dst) {
f76b33c3 1098 dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
493f377d 1099 if (!dst)
1da177e4 1100 goto out;
1ab1457c 1101 }
1da177e4
LT
1102
1103 newsk = tcp_create_openreq_child(sk, req, skb);
63159f29 1104 if (!newsk)
093d2823 1105 goto out_nonewsk;
1da177e4 1106
e6848976
ACM
1107 /*
1108 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1109 * count here, tcp_create_openreq_child now does this for us, see the
1110 * comment in that function for the gory details. -acme
1111 */
1da177e4 1112
59eed279 1113 newsk->sk_gso_type = SKB_GSO_TCPV6;
6bd4f355 1114 ip6_dst_store(newsk, dst, NULL, NULL);
fae6ef87 1115 inet6_sk_rx_dst_set(newsk, skb);
1da177e4
LT
1116
1117 newtcp6sk = (struct tcp6_sock *)newsk;
1118 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1119
1120 newtp = tcp_sk(newsk);
1121 newinet = inet_sk(newsk);
1122 newnp = inet6_sk(newsk);
1123
1124 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1125
634fb979
ED
1126 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1127 newnp->saddr = ireq->ir_v6_loc_addr;
1128 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1129 newsk->sk_bound_dev_if = ireq->ir_iif;
1da177e4 1130
1ab1457c 1131 /* Now IPv6 options...
1da177e4
LT
1132
1133 First: no IPv4 options.
1134 */
f6d8bd05 1135 newinet->inet_opt = NULL;
676a1184 1136 newnp->ipv6_ac_list = NULL;
d35690be 1137 newnp->ipv6_fl_list = NULL;
1da177e4
LT
1138
1139 /* Clone RX bits */
1140 newnp->rxopt.all = np->rxopt.all;
1141
1da177e4 1142 newnp->pktoptions = NULL;
1da177e4 1143 newnp->opt = NULL;
870c3151 1144 newnp->mcast_oif = tcp_v6_iif(skb);
0660e03f 1145 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1397ed35 1146 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
df3687ff
FF
1147 if (np->repflow)
1148 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1da177e4
LT
1149
1150 /* Clone native IPv6 options from listening socket (if any)
1151
1152 Yes, keeping reference count would be much more clever,
1153 but we make one more one thing there: reattach optmem
1154 to newsk.
1155 */
56ac42bc
HD
1156 opt = ireq->ipv6_opt;
1157 if (!opt)
1158 opt = rcu_dereference(np->opt);
45f6fad8
ED
1159 if (opt) {
1160 opt = ipv6_dup_options(newsk, opt);
1161 RCU_INIT_POINTER(newnp->opt, opt);
1162 }
d83d8461 1163 inet_csk(newsk)->icsk_ext_hdr_len = 0;
45f6fad8
ED
1164 if (opt)
1165 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1166 opt->opt_flen;
1da177e4 1167
81164413
DB
1168 tcp_ca_openreq_child(newsk, dst);
1169
1da177e4 1170 tcp_sync_mss(newsk, dst_mtu(dst));
3541f9e8 1171 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
d135c522 1172
1da177e4
LT
1173 tcp_initialize_rcv_mss(newsk);
1174
c720c7e8
ED
1175 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1176 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1da177e4 1177
cfb6eeb4
YH
1178#ifdef CONFIG_TCP_MD5SIG
1179 /* Copy over the MD5 key from the original socket */
4aa956d8 1180 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
53b24b8f 1181 if (key) {
cfb6eeb4
YH
1182 /* We're using one, so create a matching key
1183 * on the newsk structure. If we fail to get
1184 * memory, then we end up not copying the key
1185 * across. Shucks.
1186 */
efe4208f 1187 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
99a1dec7 1188 AF_INET6, key->key, key->keylen,
7450aaf6 1189 sk_gfp_mask(sk, GFP_ATOMIC));
cfb6eeb4
YH
1190 }
1191#endif
1192
093d2823 1193 if (__inet_inherit_port(sk, newsk) < 0) {
e337e24d
CP
1194 inet_csk_prepare_forced_close(newsk);
1195 tcp_done(newsk);
093d2823
BS
1196 goto out;
1197 }
5e0724d0 1198 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
805c4bc0 1199 if (*own_req) {
49a496c9 1200 tcp_move_syn(newtp, req);
805c4bc0
ED
1201
1202 /* Clone pktoptions received with SYN, if we own the req */
1203 if (ireq->pktopts) {
1204 newnp->pktoptions = skb_clone(ireq->pktopts,
7450aaf6 1205 sk_gfp_mask(sk, GFP_ATOMIC));
805c4bc0
ED
1206 consume_skb(ireq->pktopts);
1207 ireq->pktopts = NULL;
ebf6c9cb
ED
1208 if (newnp->pktoptions) {
1209 tcp_v6_restore_cb(newnp->pktoptions);
805c4bc0 1210 skb_set_owner_r(newnp->pktoptions, newsk);
ebf6c9cb 1211 }
805c4bc0 1212 }
ce105008 1213 }
1da177e4
LT
1214
1215 return newsk;
1216
1217out_overflow:
02a1d6e7 1218 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
093d2823 1219out_nonewsk:
1da177e4 1220 dst_release(dst);
093d2823 1221out:
9caad864 1222 tcp_listendrop(sk);
1da177e4
LT
1223 return NULL;
1224}
1225
1da177e4 1226/* The socket must have it's spinlock held when we get
e994b2f0 1227 * here, unless it is a TCP_LISTEN socket.
1da177e4
LT
1228 *
1229 * We have a potential double-lock case here, so even when
1230 * doing backlog processing we use the BH locking scheme.
1231 * This is because we cannot sleep with the original spinlock
1232 * held.
1233 */
1234static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1235{
1236 struct ipv6_pinfo *np = inet6_sk(sk);
1237 struct tcp_sock *tp;
1238 struct sk_buff *opt_skb = NULL;
1239
1240 /* Imagine: socket is IPv6. IPv4 packet arrives,
1241 goes to IPv4 receive handler and backlogged.
1242 From backlog it always goes here. Kerboom...
1243 Fortunately, tcp_rcv_established and rcv_established
1244 handle them correctly, but it is not case with
1245 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1246 */
1247
1248 if (skb->protocol == htons(ETH_P_IP))
1249 return tcp_v4_do_rcv(sk, skb);
1250
ac6e7800 1251 if (tcp_filter(sk, skb))
1da177e4
LT
1252 goto discard;
1253
1254 /*
1255 * socket locking is here for SMP purposes as backlog rcv
1256 * is currently called with bh processing disabled.
1257 */
1258
1259 /* Do Stevens' IPV6_PKTOPTIONS.
1260
1261 Yes, guys, it is the only place in our code, where we
1262 may make it not affecting IPv4.
1263 The rest of code is protocol independent,
1264 and I do not like idea to uglify IPv4.
1265
1266 Actually, all the idea behind IPV6_PKTOPTIONS
1267 looks not very well thought. For now we latch
1268 options, received in the last packet, enqueued
1269 by tcp. Feel free to propose better solution.
1ab1457c 1270 --ANK (980728)
1da177e4
LT
1271 */
1272 if (np->rxopt.all)
7450aaf6 1273 opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
1da177e4
LT
1274
1275 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
5d299f3d
ED
1276 struct dst_entry *dst = sk->sk_rx_dst;
1277
bdeab991 1278 sock_rps_save_rxhash(sk, skb);
3d97379a 1279 sk_mark_napi_id(sk, skb);
5d299f3d
ED
1280 if (dst) {
1281 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1282 dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1283 dst_release(dst);
1284 sk->sk_rx_dst = NULL;
1285 }
1286 }
1287
c995ae22 1288 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1da177e4
LT
1289 if (opt_skb)
1290 goto ipv6_pktoptions;
1291 return 0;
1292 }
1293
12e25e10 1294 if (tcp_checksum_complete(skb))
1da177e4
LT
1295 goto csum_err;
1296
1ab1457c 1297 if (sk->sk_state == TCP_LISTEN) {
079096f1
ED
1298 struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1299
1da177e4
LT
1300 if (!nsk)
1301 goto discard;
1302
4c99aa40 1303 if (nsk != sk) {
bdeab991 1304 sock_rps_save_rxhash(nsk, skb);
38cb5245 1305 sk_mark_napi_id(nsk, skb);
1da177e4
LT
1306 if (tcp_child_process(sk, nsk, skb))
1307 goto reset;
1308 if (opt_skb)
1309 __kfree_skb(opt_skb);
1310 return 0;
1311 }
47482f13 1312 } else
bdeab991 1313 sock_rps_save_rxhash(sk, skb);
1da177e4 1314
72ab4a86 1315 if (tcp_rcv_state_process(sk, skb))
1da177e4 1316 goto reset;
1da177e4
LT
1317 if (opt_skb)
1318 goto ipv6_pktoptions;
1319 return 0;
1320
1321reset:
cfb6eeb4 1322 tcp_v6_send_reset(sk, skb);
1da177e4
LT
1323discard:
1324 if (opt_skb)
1325 __kfree_skb(opt_skb);
1326 kfree_skb(skb);
1327 return 0;
1328csum_err:
c10d9310
ED
1329 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1330 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1da177e4
LT
1331 goto discard;
1332
1333
1334ipv6_pktoptions:
1335 /* Do you ask, what is it?
1336
1337 1. skb was enqueued by tcp.
1338 2. skb is added to tail of read queue, rather than out of order.
1339 3. socket is not in passive state.
1340 4. Finally, it really contains options, which user wants to receive.
1341 */
1342 tp = tcp_sk(sk);
1343 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1344 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
333fad53 1345 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
870c3151 1346 np->mcast_oif = tcp_v6_iif(opt_skb);
333fad53 1347 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
0660e03f 1348 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
82e9f105 1349 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1397ed35 1350 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
df3687ff
FF
1351 if (np->repflow)
1352 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
a224772d 1353 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1da177e4 1354 skb_set_owner_r(opt_skb, sk);
8ce48623 1355 tcp_v6_restore_cb(opt_skb);
1da177e4
LT
1356 opt_skb = xchg(&np->pktoptions, opt_skb);
1357 } else {
1358 __kfree_skb(opt_skb);
1359 opt_skb = xchg(&np->pktoptions, NULL);
1360 }
1361 }
1362
800d55f1 1363 kfree_skb(opt_skb);
1da177e4
LT
1364 return 0;
1365}
1366
2dc49d16
ND
1367static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1368 const struct tcphdr *th)
1369{
1370 /* This is tricky: we move IP6CB at its correct location into
1371 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1372 * _decode_session6() uses IP6CB().
1373 * barrier() makes sure compiler won't play aliasing games.
1374 */
1375 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1376 sizeof(struct inet6_skb_parm));
1377 barrier();
1378
1379 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1380 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1381 skb->len - th->doff*4);
1382 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1383 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1384 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1385 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1386 TCP_SKB_CB(skb)->sacked = 0;
1387}
1388
e5bbef20 1389static int tcp_v6_rcv(struct sk_buff *skb)
1da177e4 1390{
cf533ea5 1391 const struct tcphdr *th;
b71d1d42 1392 const struct ipv6hdr *hdr;
3b24d854 1393 bool refcounted;
1da177e4
LT
1394 struct sock *sk;
1395 int ret;
a86b1e30 1396 struct net *net = dev_net(skb->dev);
1da177e4
LT
1397
1398 if (skb->pkt_type != PACKET_HOST)
1399 goto discard_it;
1400
1401 /*
1402 * Count it even if it's bad.
1403 */
90bbcc60 1404 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1da177e4
LT
1405
1406 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1407 goto discard_it;
1408
ea1627c2 1409 th = (const struct tcphdr *)skb->data;
1da177e4 1410
ea1627c2 1411 if (unlikely(th->doff < sizeof(struct tcphdr)/4))
1da177e4
LT
1412 goto bad_packet;
1413 if (!pskb_may_pull(skb, th->doff*4))
1414 goto discard_it;
1415
e4f45b7f 1416 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
6a5dc9e5 1417 goto csum_error;
1da177e4 1418
ea1627c2 1419 th = (const struct tcphdr *)skb->data;
e802af9c 1420 hdr = ipv6_hdr(skb);
1da177e4 1421
4bdc3d66 1422lookup:
a583636a 1423 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
3b24d854
ED
1424 th->source, th->dest, inet6_iif(skb),
1425 &refcounted);
1da177e4
LT
1426 if (!sk)
1427 goto no_tcp_socket;
1428
1429process:
1430 if (sk->sk_state == TCP_TIME_WAIT)
1431 goto do_time_wait;
1432
079096f1
ED
1433 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1434 struct request_sock *req = inet_reqsk(sk);
7716682c 1435 struct sock *nsk;
079096f1
ED
1436
1437 sk = req->rsk_listener;
1438 tcp_v6_fill_cb(skb, hdr, th);
1439 if (tcp_v6_inbound_md5_hash(sk, skb)) {
e65c332d 1440 sk_drops_add(sk, skb);
079096f1
ED
1441 reqsk_put(req);
1442 goto discard_it;
1443 }
7716682c 1444 if (unlikely(sk->sk_state != TCP_LISTEN)) {
f03f2e15 1445 inet_csk_reqsk_queue_drop_and_put(sk, req);
4bdc3d66
ED
1446 goto lookup;
1447 }
7716682c 1448 sock_hold(sk);
3b24d854 1449 refcounted = true;
7716682c 1450 nsk = tcp_check_req(sk, skb, req, false);
079096f1
ED
1451 if (!nsk) {
1452 reqsk_put(req);
7716682c 1453 goto discard_and_relse;
079096f1
ED
1454 }
1455 if (nsk == sk) {
079096f1
ED
1456 reqsk_put(req);
1457 tcp_v6_restore_cb(skb);
1458 } else if (tcp_child_process(sk, nsk, skb)) {
1459 tcp_v6_send_reset(nsk, skb);
7716682c 1460 goto discard_and_relse;
079096f1 1461 } else {
7716682c 1462 sock_put(sk);
079096f1
ED
1463 return 0;
1464 }
1465 }
e802af9c 1466 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
02a1d6e7 1467 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
e802af9c
SH
1468 goto discard_and_relse;
1469 }
1470
1da177e4
LT
1471 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1472 goto discard_and_relse;
1473
2dc49d16
ND
1474 tcp_v6_fill_cb(skb, hdr, th);
1475
9ea88a15
DP
1476 if (tcp_v6_inbound_md5_hash(sk, skb))
1477 goto discard_and_relse;
9ea88a15 1478
ac6e7800 1479 if (tcp_filter(sk, skb))
1da177e4 1480 goto discard_and_relse;
ac6e7800
ED
1481 th = (const struct tcphdr *)skb->data;
1482 hdr = ipv6_hdr(skb);
1da177e4
LT
1483
1484 skb->dev = NULL;
1485
e994b2f0
ED
1486 if (sk->sk_state == TCP_LISTEN) {
1487 ret = tcp_v6_do_rcv(sk, skb);
1488 goto put_and_return;
1489 }
1490
1491 sk_incoming_cpu_update(sk);
1492
293b9c42 1493 bh_lock_sock_nested(sk);
a44d6eac 1494 tcp_segs_in(tcp_sk(sk), skb);
1da177e4
LT
1495 ret = 0;
1496 if (!sock_owned_by_user(sk)) {
7bced397 1497 if (!tcp_prequeue(sk, skb))
1ab1457c 1498 ret = tcp_v6_do_rcv(sk, skb);
c9c33212 1499 } else if (tcp_add_backlog(sk, skb)) {
6b03a53a
ZY
1500 goto discard_and_relse;
1501 }
1da177e4
LT
1502 bh_unlock_sock(sk);
1503
e994b2f0 1504put_and_return:
3b24d854
ED
1505 if (refcounted)
1506 sock_put(sk);
1da177e4
LT
1507 return ret ? -1 : 0;
1508
1509no_tcp_socket:
1510 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1511 goto discard_it;
1512
2dc49d16
ND
1513 tcp_v6_fill_cb(skb, hdr, th);
1514
12e25e10 1515 if (tcp_checksum_complete(skb)) {
6a5dc9e5 1516csum_error:
90bbcc60 1517 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1da177e4 1518bad_packet:
90bbcc60 1519 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1da177e4 1520 } else {
cfb6eeb4 1521 tcp_v6_send_reset(NULL, skb);
1da177e4
LT
1522 }
1523
1524discard_it:
1da177e4
LT
1525 kfree_skb(skb);
1526 return 0;
1527
1528discard_and_relse:
532182cd 1529 sk_drops_add(sk, skb);
3b24d854
ED
1530 if (refcounted)
1531 sock_put(sk);
1da177e4
LT
1532 goto discard_it;
1533
1534do_time_wait:
1535 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
9469c7b4 1536 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1537 goto discard_it;
1538 }
1539
2dc49d16
ND
1540 tcp_v6_fill_cb(skb, hdr, th);
1541
6a5dc9e5
ED
1542 if (tcp_checksum_complete(skb)) {
1543 inet_twsk_put(inet_twsk(sk));
1544 goto csum_error;
1da177e4
LT
1545 }
1546
9469c7b4 1547 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1da177e4
LT
1548 case TCP_TW_SYN:
1549 {
1550 struct sock *sk2;
1551
c346dca1 1552 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
a583636a 1553 skb, __tcp_hdrlen(th),
5ba24953 1554 &ipv6_hdr(skb)->saddr, th->source,
0660e03f 1555 &ipv6_hdr(skb)->daddr,
870c3151 1556 ntohs(th->dest), tcp_v6_iif(skb));
53b24b8f 1557 if (sk2) {
295ff7ed 1558 struct inet_timewait_sock *tw = inet_twsk(sk);
dbe7faa4 1559 inet_twsk_deschedule_put(tw);
1da177e4 1560 sk = sk2;
4ad19de8 1561 tcp_v6_restore_cb(skb);
3b24d854 1562 refcounted = false;
1da177e4
LT
1563 goto process;
1564 }
1565 /* Fall through to ACK */
1566 }
1567 case TCP_TW_ACK:
1568 tcp_v6_timewait_ack(sk, skb);
1569 break;
1570 case TCP_TW_RST:
4ad19de8 1571 tcp_v6_restore_cb(skb);
271c3b9b
FW
1572 tcp_v6_send_reset(sk, skb);
1573 inet_twsk_deschedule_put(inet_twsk(sk));
1574 goto discard_it;
4aa956d8
WY
1575 case TCP_TW_SUCCESS:
1576 ;
1da177e4
LT
1577 }
1578 goto discard_it;
1579}
1580
c7109986
ED
1581static void tcp_v6_early_demux(struct sk_buff *skb)
1582{
1583 const struct ipv6hdr *hdr;
1584 const struct tcphdr *th;
1585 struct sock *sk;
1586
1587 if (skb->pkt_type != PACKET_HOST)
1588 return;
1589
1590 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1591 return;
1592
1593 hdr = ipv6_hdr(skb);
1594 th = tcp_hdr(skb);
1595
1596 if (th->doff < sizeof(struct tcphdr) / 4)
1597 return;
1598
870c3151 1599 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
c7109986
ED
1600 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1601 &hdr->saddr, th->source,
1602 &hdr->daddr, ntohs(th->dest),
1603 inet6_iif(skb));
1604 if (sk) {
1605 skb->sk = sk;
1606 skb->destructor = sock_edemux;
f7e4eb03 1607 if (sk_fullsock(sk)) {
d0c294c5 1608 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
f3f12135 1609
c7109986 1610 if (dst)
5d299f3d 1611 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
c7109986 1612 if (dst &&
f3f12135 1613 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
c7109986
ED
1614 skb_dst_set_noref(skb, dst);
1615 }
1616 }
1617}
1618
ccb7c410
DM
1619static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1620 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1621 .twsk_unique = tcp_twsk_unique,
4aa956d8 1622 .twsk_destructor = tcp_twsk_destructor,
ccb7c410
DM
1623};
1624
3b401a81 1625static const struct inet_connection_sock_af_ops ipv6_specific = {
543d9cfe
ACM
1626 .queue_xmit = inet6_csk_xmit,
1627 .send_check = tcp_v6_send_check,
1628 .rebuild_header = inet6_sk_rebuild_header,
5d299f3d 1629 .sk_rx_dst_set = inet6_sk_rx_dst_set,
543d9cfe
ACM
1630 .conn_request = tcp_v6_conn_request,
1631 .syn_recv_sock = tcp_v6_syn_recv_sock,
543d9cfe 1632 .net_header_len = sizeof(struct ipv6hdr),
67469601 1633 .net_frag_header_len = sizeof(struct frag_hdr),
543d9cfe
ACM
1634 .setsockopt = ipv6_setsockopt,
1635 .getsockopt = ipv6_getsockopt,
1636 .addr2sockaddr = inet6_csk_addr2sockaddr,
1637 .sockaddr_len = sizeof(struct sockaddr_in6),
3fdadf7d 1638#ifdef CONFIG_COMPAT
543d9cfe
ACM
1639 .compat_setsockopt = compat_ipv6_setsockopt,
1640 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1641#endif
4fab9071 1642 .mtu_reduced = tcp_v6_mtu_reduced,
1da177e4
LT
1643};
1644
cfb6eeb4 1645#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1646static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
cfb6eeb4 1647 .md5_lookup = tcp_v6_md5_lookup,
49a72dfb 1648 .calc_md5_hash = tcp_v6_md5_hash_skb,
cfb6eeb4 1649 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1650};
a928630a 1651#endif
cfb6eeb4 1652
1da177e4
LT
1653/*
1654 * TCP over IPv4 via INET6 API
1655 */
3b401a81 1656static const struct inet_connection_sock_af_ops ipv6_mapped = {
543d9cfe
ACM
1657 .queue_xmit = ip_queue_xmit,
1658 .send_check = tcp_v4_send_check,
1659 .rebuild_header = inet_sk_rebuild_header,
63d02d15 1660 .sk_rx_dst_set = inet_sk_rx_dst_set,
543d9cfe
ACM
1661 .conn_request = tcp_v6_conn_request,
1662 .syn_recv_sock = tcp_v6_syn_recv_sock,
543d9cfe
ACM
1663 .net_header_len = sizeof(struct iphdr),
1664 .setsockopt = ipv6_setsockopt,
1665 .getsockopt = ipv6_getsockopt,
1666 .addr2sockaddr = inet6_csk_addr2sockaddr,
1667 .sockaddr_len = sizeof(struct sockaddr_in6),
3fdadf7d 1668#ifdef CONFIG_COMPAT
543d9cfe
ACM
1669 .compat_setsockopt = compat_ipv6_setsockopt,
1670 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1671#endif
4fab9071 1672 .mtu_reduced = tcp_v4_mtu_reduced,
1da177e4
LT
1673};
1674
cfb6eeb4 1675#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1676static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
cfb6eeb4 1677 .md5_lookup = tcp_v4_md5_lookup,
49a72dfb 1678 .calc_md5_hash = tcp_v4_md5_hash_skb,
cfb6eeb4 1679 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1680};
a928630a 1681#endif
cfb6eeb4 1682
1da177e4
LT
1683/* NOTE: A lot of things set to zero explicitly by call to
1684 * sk_alloc() so need not be done here.
1685 */
1686static int tcp_v6_init_sock(struct sock *sk)
1687{
6687e988 1688 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4 1689
900f65d3 1690 tcp_init_sock(sk);
1da177e4 1691
8292a17a 1692 icsk->icsk_af_ops = &ipv6_specific;
1da177e4 1693
cfb6eeb4 1694#ifdef CONFIG_TCP_MD5SIG
ac807fa8 1695 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
cfb6eeb4
YH
1696#endif
1697
1da177e4
LT
1698 return 0;
1699}
1700
7d06b2e0 1701static void tcp_v6_destroy_sock(struct sock *sk)
1da177e4 1702{
1da177e4 1703 tcp_v4_destroy_sock(sk);
7d06b2e0 1704 inet6_destroy_sock(sk);
1da177e4
LT
1705}
1706
952a10be 1707#ifdef CONFIG_PROC_FS
1da177e4 1708/* Proc filesystem TCPv6 sock list dumping. */
1ab1457c 1709static void get_openreq6(struct seq_file *seq,
aa3a0c8c 1710 const struct request_sock *req, int i)
1da177e4 1711{
fa76ce73 1712 long ttd = req->rsk_timer.expires - jiffies;
634fb979
ED
1713 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1714 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1da177e4
LT
1715
1716 if (ttd < 0)
1717 ttd = 0;
1718
1da177e4
LT
1719 seq_printf(seq,
1720 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
d14c5ab6 1721 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1da177e4
LT
1722 i,
1723 src->s6_addr32[0], src->s6_addr32[1],
1724 src->s6_addr32[2], src->s6_addr32[3],
b44084c2 1725 inet_rsk(req)->ir_num,
1da177e4
LT
1726 dest->s6_addr32[0], dest->s6_addr32[1],
1727 dest->s6_addr32[2], dest->s6_addr32[3],
634fb979 1728 ntohs(inet_rsk(req)->ir_rmt_port),
1da177e4 1729 TCP_SYN_RECV,
4c99aa40 1730 0, 0, /* could print option size, but that is AF-dependent. */
1ab1457c
YH
1731 1, /* timers active (only the expire timer) */
1732 jiffies_to_clock_t(ttd),
e6c022a4 1733 req->num_timeout,
aa3a0c8c
ED
1734 from_kuid_munged(seq_user_ns(seq),
1735 sock_i_uid(req->rsk_listener)),
1ab1457c 1736 0, /* non-standard timer */
1da177e4
LT
1737 0, /* open_requests have no inode */
1738 0, req);
1739}
1740
1741static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1742{
b71d1d42 1743 const struct in6_addr *dest, *src;
1da177e4
LT
1744 __u16 destp, srcp;
1745 int timer_active;
1746 unsigned long timer_expires;
cf533ea5
ED
1747 const struct inet_sock *inet = inet_sk(sp);
1748 const struct tcp_sock *tp = tcp_sk(sp);
463c84b9 1749 const struct inet_connection_sock *icsk = inet_csk(sp);
0536fcc0 1750 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
00fd38d9
ED
1751 int rx_queue;
1752 int state;
1da177e4 1753
efe4208f
ED
1754 dest = &sp->sk_v6_daddr;
1755 src = &sp->sk_v6_rcv_saddr;
c720c7e8
ED
1756 destp = ntohs(inet->inet_dport);
1757 srcp = ntohs(inet->inet_sport);
463c84b9 1758
ce3cf4ec 1759 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
57dde7f7 1760 icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
ce3cf4ec 1761 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
1da177e4 1762 timer_active = 1;
463c84b9
ACM
1763 timer_expires = icsk->icsk_timeout;
1764 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1da177e4 1765 timer_active = 4;
463c84b9 1766 timer_expires = icsk->icsk_timeout;
1da177e4
LT
1767 } else if (timer_pending(&sp->sk_timer)) {
1768 timer_active = 2;
1769 timer_expires = sp->sk_timer.expires;
1770 } else {
1771 timer_active = 0;
1772 timer_expires = jiffies;
1773 }
1774
00fd38d9
ED
1775 state = sk_state_load(sp);
1776 if (state == TCP_LISTEN)
1777 rx_queue = sp->sk_ack_backlog;
1778 else
1779 /* Because we don't lock the socket,
1780 * we might find a transient negative value.
1781 */
1782 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1783
1da177e4
LT
1784 seq_printf(seq,
1785 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
d14c5ab6 1786 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1da177e4
LT
1787 i,
1788 src->s6_addr32[0], src->s6_addr32[1],
1789 src->s6_addr32[2], src->s6_addr32[3], srcp,
1790 dest->s6_addr32[0], dest->s6_addr32[1],
1791 dest->s6_addr32[2], dest->s6_addr32[3], destp,
00fd38d9
ED
1792 state,
1793 tp->write_seq - tp->snd_una,
1794 rx_queue,
1da177e4 1795 timer_active,
a399a805 1796 jiffies_delta_to_clock_t(timer_expires - jiffies),
463c84b9 1797 icsk->icsk_retransmits,
a7cb5a49 1798 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
6687e988 1799 icsk->icsk_probes_out,
1da177e4
LT
1800 sock_i_ino(sp),
1801 atomic_read(&sp->sk_refcnt), sp,
7be87351
SH
1802 jiffies_to_clock_t(icsk->icsk_rto),
1803 jiffies_to_clock_t(icsk->icsk_ack.ato),
4c99aa40 1804 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
0b6a05c1 1805 tp->snd_cwnd,
00fd38d9 1806 state == TCP_LISTEN ?
0536fcc0 1807 fastopenq->max_qlen :
0a672f74 1808 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1da177e4
LT
1809 );
1810}
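/*
 * Editor's note -- illustrative sketch.  The "st" column printed above
 * is the numeric socket state; the names below follow the standard
 * TCP_* values (TCP_ESTABLISHED == 1 ... TCP_CLOSING == 11).  Request
 * sockets are reported as SYN_RECV by get_openreq6() above, and
 * timewait sockets print tw_substate via get_timewait6_sock() below.
 */
static const char *const demo_tcp6_state_names[] = {
	[1]  = "ESTABLISHED", [2]  = "SYN_SENT",   [3]  = "SYN_RECV",
	[4]  = "FIN_WAIT1",   [5]  = "FIN_WAIT2",  [6]  = "TIME_WAIT",
	[7]  = "CLOSE",       [8]  = "CLOSE_WAIT", [9]  = "LAST_ACK",
	[10] = "LISTEN",      [11] = "CLOSING",
};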
1811
1ab1457c 1812static void get_timewait6_sock(struct seq_file *seq,
8feaf0c0 1813 struct inet_timewait_sock *tw, int i)
1da177e4 1814{
789f558c 1815 long delta = tw->tw_timer.expires - jiffies;
b71d1d42 1816 const struct in6_addr *dest, *src;
1da177e4 1817 __u16 destp, srcp;
1da177e4 1818
efe4208f
ED
1819 dest = &tw->tw_v6_daddr;
1820 src = &tw->tw_v6_rcv_saddr;
1da177e4
LT
1821 destp = ntohs(tw->tw_dport);
1822 srcp = ntohs(tw->tw_sport);
1823
1824 seq_printf(seq,
1825 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
71338aa7 1826 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1da177e4
LT
1827 i,
1828 src->s6_addr32[0], src->s6_addr32[1],
1829 src->s6_addr32[2], src->s6_addr32[3], srcp,
1830 dest->s6_addr32[0], dest->s6_addr32[1],
1831 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1832 tw->tw_substate, 0, 0,
a399a805 1833 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1da177e4
LT
1834 atomic_read(&tw->tw_refcnt), tw);
1835}
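/*
 * Editor's note -- illustrative userspace sketch.  The "tm->when"
 * column written with jiffies_delta_to_clock_t() above is expressed in
 * USER_HZ clock ticks; dividing by sysconf(_SC_CLK_TCK) converts it to
 * the seconds remaining before the timewait socket expires.
 */
#include <unistd.h>

static double demo_ticks_to_seconds(unsigned long ticks)
{
	return (double)ticks / (double)sysconf(_SC_CLK_TCK);
}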
1836
1da177e4
LT
1837static int tcp6_seq_show(struct seq_file *seq, void *v)
1838{
1839 struct tcp_iter_state *st;
05dbc7b5 1840 struct sock *sk = v;
1da177e4
LT
1841
1842 if (v == SEQ_START_TOKEN) {
1843 seq_puts(seq,
1844 " sl "
1845 "local_address "
1846 "remote_address "
1847 "st tx_queue rx_queue tr tm->when retrnsmt"
1848 " uid timeout inode\n");
1849 goto out;
1850 }
1851 st = seq->private;
1852
079096f1
ED
1853 if (sk->sk_state == TCP_TIME_WAIT)
1854 get_timewait6_sock(seq, v, st->num);
1855 else if (sk->sk_state == TCP_NEW_SYN_RECV)
aa3a0c8c 1856 get_openreq6(seq, v, st->num);
079096f1
ED
1857 else
1858 get_tcp6_sock(seq, v, st->num);
1da177e4
LT
1859out:
1860 return 0;
1861}
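/*
 * Editor's note -- illustrative userspace sketch for consuming the
 * seq_file output produced above.  Each address column is four
 * 8-digit hex groups -- one per 32-bit word of the IPv6 address, each
 * word reinterpreted in the kernel's native byte order -- followed by
 * ":port" in hex.  Copying every parsed word back with memcpy()
 * therefore reconstructs the address, provided the parser runs on the
 * machine that produced the file.  The "demo_" name is invented.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <netinet/in.h>

static int demo_parse_tcp6_endpoint(const char *col, struct in6_addr *addr,
				    unsigned int *port)
{
	char word[9];
	int i;

	if (strlen(col) < 37)	/* 32 hex digits + ':' + 4 hex digits */
		return -1;
	for (i = 0; i < 4; i++) {
		uint32_t w;

		memcpy(word, col + 8 * i, 8);
		word[8] = '\0';
		w = (uint32_t)strtoul(word, NULL, 16);
		memcpy(&addr->s6_addr[4 * i], &w, sizeof(w));
	}
	return sscanf(col + 33, "%x", port) == 1 ? 0 : -1;
}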
1862
73cb88ec
AV
1863static const struct file_operations tcp6_afinfo_seq_fops = {
1864 .owner = THIS_MODULE,
1865 .open = tcp_seq_open,
1866 .read = seq_read,
1867 .llseek = seq_lseek,
1868 .release = seq_release_net
1869};
1870
1da177e4 1871static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1da177e4
LT
1872 .name = "tcp6",
1873 .family = AF_INET6,
73cb88ec 1874 .seq_fops = &tcp6_afinfo_seq_fops,
9427c4b3
DL
1875 .seq_ops = {
1876 .show = tcp6_seq_show,
1877 },
1da177e4
LT
1878};
1879
2c8c1e72 1880int __net_init tcp6_proc_init(struct net *net)
1da177e4 1881{
6f8b13bc 1882 return tcp_proc_register(net, &tcp6_seq_afinfo);
1da177e4
LT
1883}
1884
6f8b13bc 1885void tcp6_proc_exit(struct net *net)
1da177e4 1886{
6f8b13bc 1887 tcp_proc_unregister(net, &tcp6_seq_afinfo);
1da177e4
LT
1888}
1889#endif
1890
1891struct proto tcpv6_prot = {
1892 .name = "TCPv6",
1893 .owner = THIS_MODULE,
1894 .close = tcp_close,
1895 .connect = tcp_v6_connect,
1896 .disconnect = tcp_disconnect,
463c84b9 1897 .accept = inet_csk_accept,
1da177e4
LT
1898 .ioctl = tcp_ioctl,
1899 .init = tcp_v6_init_sock,
1900 .destroy = tcp_v6_destroy_sock,
1901 .shutdown = tcp_shutdown,
1902 .setsockopt = tcp_setsockopt,
1903 .getsockopt = tcp_getsockopt,
4b9d07a4 1904 .keepalive = tcp_set_keepalive,
1da177e4 1905 .recvmsg = tcp_recvmsg,
7ba42910
CG
1906 .sendmsg = tcp_sendmsg,
1907 .sendpage = tcp_sendpage,
1da177e4 1908 .backlog_rcv = tcp_v6_do_rcv,
46d3ceab 1909 .release_cb = tcp_release_cb,
496611d7 1910 .hash = inet6_hash,
ab1e0a13
ACM
1911 .unhash = inet_unhash,
1912 .get_port = inet_csk_get_port,
1da177e4 1913 .enter_memory_pressure = tcp_enter_memory_pressure,
c9bee3b7 1914 .stream_memory_free = tcp_stream_memory_free,
1da177e4
LT
1915 .sockets_allocated = &tcp_sockets_allocated,
1916 .memory_allocated = &tcp_memory_allocated,
1917 .memory_pressure = &tcp_memory_pressure,
0a5578cf 1918 .orphan_count = &tcp_orphan_count,
a4fe34bf 1919 .sysctl_mem = sysctl_tcp_mem,
1da177e4
LT
1920 .sysctl_wmem = sysctl_tcp_wmem,
1921 .sysctl_rmem = sysctl_tcp_rmem,
1922 .max_header = MAX_TCP_HEADER,
1923 .obj_size = sizeof(struct tcp6_sock),
3ab5aee7 1924 .slab_flags = SLAB_DESTROY_BY_RCU,
6d6ee43e 1925 .twsk_prot = &tcp6_timewait_sock_ops,
60236fdd 1926 .rsk_prot = &tcp6_request_sock_ops,
39d8cda7 1927 .h.hashinfo = &tcp_hashinfo,
7ba42910 1928 .no_autobind = true,
543d9cfe
ACM
1929#ifdef CONFIG_COMPAT
1930 .compat_setsockopt = compat_tcp_setsockopt,
1931 .compat_getsockopt = compat_tcp_getsockopt,
d1a4c0b3 1932#endif
c1e64e29 1933 .diag_destroy = tcp_abort,
1da177e4
LT
1934};
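/*
 * Editor's note -- illustrative userspace sketch.  The .keepalive hook
 * above (tcp_set_keepalive) is reached when userspace toggles
 * SO_KEEPALIVE on the socket, e.g.:
 */
#include <sys/socket.h>

static int demo_enable_keepalive(int fd)
{
	int one = 1;

	return setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &one, sizeof(one));
}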
1935
41135cc8 1936static const struct inet6_protocol tcpv6_protocol = {
c7109986 1937 .early_demux = tcp_v6_early_demux,
1da177e4
LT
1938 .handler = tcp_v6_rcv,
1939 .err_handler = tcp_v6_err,
1940 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1941};
1942
1da177e4
LT
1943static struct inet_protosw tcpv6_protosw = {
1944 .type = SOCK_STREAM,
1945 .protocol = IPPROTO_TCP,
1946 .prot = &tcpv6_prot,
1947 .ops = &inet6_stream_ops,
d83d8461
ACM
1948 .flags = INET_PROTOSW_PERMANENT |
1949 INET_PROTOSW_ICSK,
1da177e4
LT
1950};
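/*
 * Editor's note -- illustrative userspace sketch.  Once the protosw
 * entry above has been registered (see tcpv6_init() below), an
 * ordinary socket(AF_INET6, SOCK_STREAM, ...) call is backed by
 * tcpv6_prot and inet6_stream_ops.
 */
#include <sys/socket.h>
#include <netinet/in.h>

static int demo_open_tcp6_socket(void)
{
	return socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP);
}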
1951
2c8c1e72 1952static int __net_init tcpv6_net_init(struct net *net)
93ec926b 1953{
5677242f
DL
1954 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1955 SOCK_RAW, IPPROTO_TCP, net);
93ec926b
DL
1956}
1957
2c8c1e72 1958static void __net_exit tcpv6_net_exit(struct net *net)
93ec926b 1959{
5677242f 1960 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
b099ce26
EB
1961}
1962
2c8c1e72 1963static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
b099ce26 1964{
1946e672 1965 inet_twsk_purge(&tcp_hashinfo, AF_INET6);
93ec926b
DL
1966}
1967
1968static struct pernet_operations tcpv6_net_ops = {
b099ce26
EB
1969 .init = tcpv6_net_init,
1970 .exit = tcpv6_net_exit,
1971 .exit_batch = tcpv6_net_exit_batch,
93ec926b
DL
1972};
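/*
 * Editor's note -- illustrative userspace sketch.  The pernet
 * operations above run once per network namespace: creating a new
 * netns (CAP_SYS_ADMIN required) triggers tcpv6_net_init() for it, and
 * tearing the namespace down runs the exit/exit_batch hooks.
 */
#define _GNU_SOURCE
#include <sched.h>

static int demo_new_netns(void)
{
	return unshare(CLONE_NEWNET);	/* per-net .init hooks run for the new netns */
}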
1973
7f4e4868 1974int __init tcpv6_init(void)
1da177e4 1975{
7f4e4868
DL
1976 int ret;
1977
3336288a
VY
1978 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1979 if (ret)
c6b641a4 1980 goto out;
3336288a 1981
1da177e4 1982 /* register inet6 protocol */
7f4e4868
DL
1983 ret = inet6_register_protosw(&tcpv6_protosw);
1984 if (ret)
1985 goto out_tcpv6_protocol;
1986
93ec926b 1987 ret = register_pernet_subsys(&tcpv6_net_ops);
7f4e4868
DL
1988 if (ret)
1989 goto out_tcpv6_protosw;
1990out:
1991 return ret;
ae0f7d5f 1992
7f4e4868
DL
1993out_tcpv6_protosw:
1994 inet6_unregister_protosw(&tcpv6_protosw);
3336288a
VY
1995out_tcpv6_protocol:
1996 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
7f4e4868
DL
1997 goto out;
1998}
1999
09f7709f 2000void tcpv6_exit(void)
7f4e4868 2001{
93ec926b 2002 unregister_pernet_subsys(&tcpv6_net_ops);
7f4e4868
DL
2003 inet6_unregister_protosw(&tcpv6_protosw);
2004 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1da177e4 2005}