/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}

static u32 tcp_v6_init_seq(const struct sk_buff *skb)
{
	return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
				ipv6_hdr(skb)->saddr.s6_addr32,
				tcp_hdr(skb)->dest,
				tcp_hdr(skb)->source);
}

static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
				   ipv6_hdr(skb)->saddr.s6_addr32);
}

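/* Establish an active TCP connection over IPv6. Handles flow labels,
 * link-local scope checks and v4-mapped destinations (which are handed
 * off to tcp_v4_connect()), then routes the flow and starts the SYN.
 */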
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr)) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
					       &usin->sin6_addr);
		else
			usin->sin6_addr = in6addr_loopback;
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type & IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sk->sk_uid;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
							 sk->sk_v6_daddr.s6_addr32,
							 inet->inet_sport,
							 inet->inet_dport);
		tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
						   np->saddr.s6_addr32,
						   sk->sk_v6_daddr.s6_addr32);
	}

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto late_failure;

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

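/* Apply a deferred ICMPv6 Packet Too Big notification: refresh the cached
 * route, lower the path MTU and retransmit outstanding data if needed.
 */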
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

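/* ICMPv6 error handler: look up the socket the error refers to and
 * propagate redirects, PMTU changes and fatal errors to it.
 */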
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex, inet6_sdif(skb));

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		if (!sock_owned_by_user(sk)) {
			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			if (dst)
				dst->ops->redirect(dst, sk, skb);
		}
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always <576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &sk->sk_tsq_flags))
			sock_hold(sk);
		goto out;
	}

	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

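/* Send a SYN-ACK for a pending connection request, routing it with the
 * request's IPv6 flow information and options.
 */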
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->ipv6_opt);
	kfree_skb(inet_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
				 char __user *optval, int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 prefixlen;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
					prefixlen > 32))
			return -EINVAL;
	} else {
		prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
	}

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET, prefixlen);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6, prefixlen);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, prefixlen, cmd.tcpm_key,
				      cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, prefixlen, cmd.tcpm_key,
			      cmd.tcpm_keylen, GFP_KERNEL);
}

static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

#endif

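/* Verify the TCP-MD5 signature option of an incoming segment against the
 * key configured for the peer address; returns true if the segment must
 * be dropped.
 */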
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}

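/* Fill in the IPv6-specific fields of a new request sock from the incoming
 * SYN, and keep a reference to the skb if IPv6 packet options were requested.
 */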
static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		refcount_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req)
{
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_seq,
	.init_ts_off	=	tcp_v6_init_ts_off,
	.send_synack	=	tcp_v6_send_synack,
};

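/* Build and transmit a bare ACK or RST segment (carrying no socket data) in
 * response to an incoming skb, optionally with timestamps and an MD5 option.
 */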
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, __be32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup even when this is for a RST;
	 * the underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

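/* Send a RST in response to an incoming skb, unless the segment itself
 * carries RST or the destination is not a unicast address.
 */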
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not lose security here:
		 * the incoming packet is checked against the md5 hash of the
		 * key we find, and no RST is generated if the hash doesn't
		 * match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, NULL, 0,
					    &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), tcp_v6_iif(skb),
					    tcp_v6_sdif(skb));
		if (!sk1)
			goto out;

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto out;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}

static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    __be32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}

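/* When syncookies are enabled, validate a non-SYN segment received on a
 * listener and, if the cookie checks out, return the newly created child
 * socket.
 */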
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}

static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}

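/* Create the child socket for an accepted connection request, including the
 * v6-mapped case where the child is effectively an IPv4 socket.
 */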
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_mc_list = NULL;
		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = tcp_v6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment the IPv4 tcp code
		   worked with the IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_mc_list = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt = NULL;
	newnp->mcast_oif = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much more clever,
	   but we do one more thing here: reattach optmem
	   to newsk.
	 */
	opt = ireq->ipv6_opt;
	if (!opt)
		opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, 128, key->key, key->keylen,
			       sk_gfp_mask(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_mask(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions) {
				tcp_v6_restore_cb(newnp->pktoptions);
				skb_set_owner_r(newnp->pktoptions, newsk);
			}
		}
	}

	return newsk;

out_overflow:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	tcp_listendrop(sk);
	return NULL;
}

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, the whole idea behind IPV6_PKTOPTIONS
	   looks not very well thought out. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb));
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			tcp_v6_restore_cb(opt_skb);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
}

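/* Main IPv6 TCP receive routine: validate the segment, look up the owning
 * socket (including request and time-wait sockets) and hand the skb to the
 * state machine or the socket backlog.
 */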
static int tcp_v6_rcv(struct sk_buff *skb)
{
	int sdif = inet6_sdif(skb);
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	bool refcounted;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr)/4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
				th->source, th->dest, inet6_iif(skb), sdif,
				&refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		tcp_v6_fill_cb(skb, hdr, th);
		if (tcp_v6_inbound_md5_hash(sk, skb)) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		refcounted = true;
		if (tcp_filter(sk, skb))
			goto discard_and_relse;
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		ret = tcp_v6_do_rcv(sk, skb);
	} else if (tcp_add_backlog(sk, skb)) {
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	if (refcounted)
		sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    skb, __tcp_hdrlen(th),
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb),
					    sdif);
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			refcounted = false;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_restore_cb(skb);
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}

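/* Early demux: look up an established socket at IP receive time so the
 * cached socket and route can be attached to the skb before the full TCP
 * receive path runs.
 */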
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb), inet6_sdif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	= tcp_v6_md5_lookup,
	.calc_md5_hash	= tcp_v6_md5_hash_skb,
	.md5_parse	= tcp_v6_parse_md5_keys,
};
#endif

1da177e4
LT
/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
        .queue_xmit        = ip_queue_xmit,
        .send_check        = tcp_v4_send_check,
        .rebuild_header    = inet_sk_rebuild_header,
        .sk_rx_dst_set     = inet_sk_rx_dst_set,
        .conn_request      = tcp_v6_conn_request,
        .syn_recv_sock     = tcp_v6_syn_recv_sock,
        .net_header_len    = sizeof(struct iphdr),
        .setsockopt        = ipv6_setsockopt,
        .getsockopt        = ipv6_getsockopt,
        .addr2sockaddr     = inet6_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ipv6_setsockopt,
        .compat_getsockopt = compat_ipv6_getsockopt,
#endif
        .mtu_reduced       = tcp_v4_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
        .md5_lookup    = tcp_v4_md5_lookup,
        .calc_md5_hash = tcp_v4_md5_hash_skb,
        .md5_parse     = tcp_v6_parse_md5_keys,
};
#endif

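/*
 * Illustrative userspace sketch (not part of the original file): how the
 * ipv6_mapped ops above come into play.  Connecting an AF_INET6 TCP socket
 * to a v4-mapped address (::ffff:a.b.c.d) makes tcp_v6_connect(), earlier
 * in this file, switch the socket's icsk_af_ops to ipv6_mapped so the IPv4
 * output paths are used.  Address and port below are arbitrary.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int connect_v4_mapped(void)
{
        struct sockaddr_in6 dst;
        int fd = socket(AF_INET6, SOCK_STREAM, 0);

        if (fd < 0)
                return -1;

        memset(&dst, 0, sizeof(dst));
        dst.sin6_family = AF_INET6;
        dst.sin6_port = htons(80);      /* arbitrary port */
        /* v4-mapped form of 192.0.2.1 (TEST-NET-1, arbitrary) */
        if (inet_pton(AF_INET6, "::ffff:192.0.2.1", &dst.sin6_addr) != 1 ||
            connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0) {
                close(fd);
                return -1;
        }
        return fd;
}
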
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        tcp_init_sock(sk);

        icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
        tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

        return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
        tcp_v4_destroy_sock(sk);
        inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
                         const struct request_sock *req, int i)
{
        long ttd = req->rsk_timer.expires - jiffies;
        const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
        const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

        if (ttd < 0)
                ttd = 0;

        seq_printf(seq,
                   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
                   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
                   i,
                   src->s6_addr32[0], src->s6_addr32[1],
                   src->s6_addr32[2], src->s6_addr32[3],
                   inet_rsk(req)->ir_num,
                   dest->s6_addr32[0], dest->s6_addr32[1],
                   dest->s6_addr32[2], dest->s6_addr32[3],
                   ntohs(inet_rsk(req)->ir_rmt_port),
                   TCP_SYN_RECV,
                   0, 0, /* could print option size, but that is af dependent. */
                   1,   /* timers active (only the expire timer) */
                   jiffies_to_clock_t(ttd),
                   req->num_timeout,
                   from_kuid_munged(seq_user_ns(seq),
                                    sock_i_uid(req->rsk_listener)),
                   0,  /* non standard timer */
                   0, /* open_requests have no inode */
                   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
        const struct in6_addr *dest, *src;
        __u16 destp, srcp;
        int timer_active;
        unsigned long timer_expires;
        const struct inet_sock *inet = inet_sk(sp);
        const struct tcp_sock *tp = tcp_sk(sp);
        const struct inet_connection_sock *icsk = inet_csk(sp);
        const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
        int rx_queue;
        int state;

        dest  = &sp->sk_v6_daddr;
        src   = &sp->sk_v6_rcv_saddr;
        destp = ntohs(inet->inet_dport);
        srcp  = ntohs(inet->inet_sport);

        if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
            icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
            icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
                timer_active  = 1;
                timer_expires = icsk->icsk_timeout;
        } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
                timer_active  = 4;
                timer_expires = icsk->icsk_timeout;
        } else if (timer_pending(&sp->sk_timer)) {
                timer_active  = 2;
                timer_expires = sp->sk_timer.expires;
        } else {
                timer_active  = 0;
                timer_expires = jiffies;
        }

        state = sk_state_load(sp);
        if (state == TCP_LISTEN)
                rx_queue = sp->sk_ack_backlog;
        else
                /* Because we don't lock the socket,
                 * we might find a transient negative value.
                 */
                rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

        seq_printf(seq,
                   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
                   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
                   i,
                   src->s6_addr32[0], src->s6_addr32[1],
                   src->s6_addr32[2], src->s6_addr32[3], srcp,
                   dest->s6_addr32[0], dest->s6_addr32[1],
                   dest->s6_addr32[2], dest->s6_addr32[3], destp,
                   state,
                   tp->write_seq - tp->snd_una,
                   rx_queue,
                   timer_active,
                   jiffies_delta_to_clock_t(timer_expires - jiffies),
                   icsk->icsk_retransmits,
                   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
                   icsk->icsk_probes_out,
                   sock_i_ino(sp),
                   refcount_read(&sp->sk_refcnt), sp,
                   jiffies_to_clock_t(icsk->icsk_rto),
                   jiffies_to_clock_t(icsk->icsk_ack.ato),
                   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
                   tp->snd_cwnd,
                   state == TCP_LISTEN ?
                        fastopenq->max_qlen :
                        (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
                   );
}

static void get_timewait6_sock(struct seq_file *seq,
                               struct inet_timewait_sock *tw, int i)
{
        long delta = tw->tw_timer.expires - jiffies;
        const struct in6_addr *dest, *src;
        __u16 destp, srcp;

        dest  = &tw->tw_v6_daddr;
        src   = &tw->tw_v6_rcv_saddr;
        destp = ntohs(tw->tw_dport);
        srcp  = ntohs(tw->tw_sport);

        seq_printf(seq,
                   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
                   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
                   i,
                   src->s6_addr32[0], src->s6_addr32[1],
                   src->s6_addr32[2], src->s6_addr32[3], srcp,
                   dest->s6_addr32[0], dest->s6_addr32[1],
                   dest->s6_addr32[2], dest->s6_addr32[3], destp,
                   tw->tw_substate, 0, 0,
                   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
                   refcount_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
        struct tcp_iter_state *st;
        struct sock *sk = v;

        if (v == SEQ_START_TOKEN) {
                seq_puts(seq,
                         "  sl  "
                         "local_address                         "
                         "remote_address                        "
                         "st tx_queue rx_queue tr tm->when retrnsmt"
                         "   uid  timeout inode\n");
                goto out;
        }
        st = seq->private;

        if (sk->sk_state == TCP_TIME_WAIT)
                get_timewait6_sock(seq, v, st->num);
        else if (sk->sk_state == TCP_NEW_SYN_RECV)
                get_openreq6(seq, v, st->num);
        else
                get_tcp6_sock(seq, v, st->num);
out:
        return 0;
}

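/*
 * Illustrative userspace sketch (not part of the original file): reading the
 * /proc/net/tcp6 rows produced by tcp6_seq_show()/get_tcp6_sock() above.
 * Only slot, local address, local port and state are decoded.  Note that the
 * addresses are printed with %08X straight from s6_addr32[], so each 32-bit
 * group appears in host byte order rather than the textual IPv6 order on
 * little-endian machines.  Buffer size is arbitrary.
 */
#include <stdio.h>

static void dump_tcp6_sockets(void)
{
        char line[512];
        FILE *f = fopen("/proc/net/tcp6", "r");

        if (!f)
                return;

        /* Skip the header row printed for SEQ_START_TOKEN. */
        if (!fgets(line, sizeof(line), f)) {
                fclose(f);
                return;
        }

        while (fgets(line, sizeof(line), f)) {
                unsigned int sl, local[4], port, state;

                /* "%4d: %08X%08X%08X%08X:%04X ..." from get_tcp6_sock() */
                if (sscanf(line, "%u: %8x%8x%8x%8x:%4x %*32[0-9A-Fa-f]:%*4x %2x",
                           &sl, &local[0], &local[1], &local[2], &local[3],
                           &port, &state) == 7)
                        printf("slot %u local %08x%08x%08x%08x port %u state %#x\n",
                               sl, local[0], local[1], local[2], local[3],
                               port, state);
        }
        fclose(f);
}
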
static const struct file_operations tcp6_afinfo_seq_fops = {
        .owner   = THIS_MODULE,
        .open    = tcp_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
        .name     = "tcp6",
        .family   = AF_INET6,
        .seq_fops = &tcp6_afinfo_seq_fops,
        .seq_ops  = {
                .show = tcp6_seq_show,
        },
};

int __net_init tcp6_proc_init(struct net *net)
{
        return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
        tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

struct proto tcpv6_prot = {
        .name                   = "TCPv6",
        .owner                  = THIS_MODULE,
        .close                  = tcp_close,
        .connect                = tcp_v6_connect,
        .disconnect             = tcp_disconnect,
        .accept                 = inet_csk_accept,
        .ioctl                  = tcp_ioctl,
        .init                   = tcp_v6_init_sock,
        .destroy                = tcp_v6_destroy_sock,
        .shutdown               = tcp_shutdown,
        .setsockopt             = tcp_setsockopt,
        .getsockopt             = tcp_getsockopt,
        .keepalive              = tcp_set_keepalive,
        .recvmsg                = tcp_recvmsg,
        .sendmsg                = tcp_sendmsg,
        .sendpage               = tcp_sendpage,
        .backlog_rcv            = tcp_v6_do_rcv,
        .release_cb             = tcp_release_cb,
        .hash                   = inet6_hash,
        .unhash                 = inet_unhash,
        .get_port               = inet_csk_get_port,
        .enter_memory_pressure  = tcp_enter_memory_pressure,
        .leave_memory_pressure  = tcp_leave_memory_pressure,
        .stream_memory_free     = tcp_stream_memory_free,
        .sockets_allocated      = &tcp_sockets_allocated,
        .memory_allocated       = &tcp_memory_allocated,
        .memory_pressure        = &tcp_memory_pressure,
        .orphan_count           = &tcp_orphan_count,
        .sysctl_mem             = sysctl_tcp_mem,
        .sysctl_wmem            = sysctl_tcp_wmem,
        .sysctl_rmem            = sysctl_tcp_rmem,
        .max_header             = MAX_TCP_HEADER,
        .obj_size               = sizeof(struct tcp6_sock),
        .slab_flags             = SLAB_TYPESAFE_BY_RCU,
        .twsk_prot              = &tcp6_timewait_sock_ops,
        .rsk_prot               = &tcp6_request_sock_ops,
        .h.hashinfo             = &tcp_hashinfo,
        .no_autobind            = true,
#ifdef CONFIG_COMPAT
        .compat_setsockopt      = compat_tcp_setsockopt,
        .compat_getsockopt      = compat_tcp_getsockopt,
#endif
        .diag_destroy           = tcp_abort,
};

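/*
 * Illustrative userspace sketch (not part of the original file): the
 * .keepalive hook above (tcp_set_keepalive) is reached from SO_KEEPALIVE
 * handling in the generic socket layer; the TCP_KEEP* options below are
 * handled by tcp_setsockopt() instead and only tune the timers.  The values
 * chosen are arbitrary.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int enable_keepalive(int fd)
{
        int on = 1, idle = 60, intvl = 10, cnt = 5;

        if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) < 0)
                return -1;

        setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
        setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
        setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
        return 0;
}
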
static struct inet6_protocol tcpv6_protocol = {
        .early_demux         = tcp_v6_early_demux,
        .early_demux_handler = tcp_v6_early_demux,
        .handler             = tcp_v6_rcv,
        .err_handler         = tcp_v6_err,
        .flags               = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
        .type     = SOCK_STREAM,
        .protocol = IPPROTO_TCP,
        .prot     = &tcpv6_prot,
        .ops      = &inet6_stream_ops,
        .flags    = INET_PROTOSW_PERMANENT |
                    INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
        return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
                                    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
        inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
        inet_twsk_purge(&tcp_hashinfo, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
        .init       = tcpv6_net_init,
        .exit       = tcpv6_net_exit,
        .exit_batch = tcpv6_net_exit_batch,
};

int __init tcpv6_init(void)
{
        int ret;

        ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
        if (ret)
                goto out;

        /* register inet6 protocol */
        ret = inet6_register_protosw(&tcpv6_protosw);
        if (ret)
                goto out_tcpv6_protocol;

        ret = register_pernet_subsys(&tcpv6_net_ops);
        if (ret)
                goto out_tcpv6_protosw;
out:
        return ret;

out_tcpv6_protosw:
        inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
        inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
        goto out;
}

void tcpv6_exit(void)
{
        unregister_pernet_subsys(&tcpv6_net_ops);
        inet6_unregister_protosw(&tcpv6_protosw);
        inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}