]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - net/ipv6/tcp_ipv6.c
KVM: SVM: Move spec control call after restore of GS
[mirror_ubuntu-artful-kernel.git] / net / ipv6 / tcp_ipv6.c
CommitLineData
1da177e4
LT
1/*
2 * TCP over IPv6
1ab1457c 3 * Linux INET6 implementation
1da177e4
LT
4 *
5 * Authors:
1ab1457c 6 * Pedro Roque <roque@di.fc.ul.pt>
1da177e4 7 *
1ab1457c 8 * Based on:
1da177e4
LT
9 * linux/net/ipv4/tcp.c
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
12 *
13 * Fixes:
14 * Hideaki YOSHIFUJI : sin6_scope_id support
15 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
16 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
17 * a single port at the same time.
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
eb4dea58 26#include <linux/bottom_half.h>
1da177e4 27#include <linux/module.h>
1da177e4
LT
28#include <linux/errno.h>
29#include <linux/types.h>
30#include <linux/socket.h>
31#include <linux/sockios.h>
32#include <linux/net.h>
33#include <linux/jiffies.h>
34#include <linux/in.h>
35#include <linux/in6.h>
36#include <linux/netdevice.h>
37#include <linux/init.h>
38#include <linux/jhash.h>
39#include <linux/ipsec.h>
40#include <linux/times.h>
5a0e3ad6 41#include <linux/slab.h>
4aa956d8 42#include <linux/uaccess.h>
1da177e4
LT
43#include <linux/ipv6.h>
44#include <linux/icmpv6.h>
45#include <linux/random.h>
46
47#include <net/tcp.h>
48#include <net/ndisc.h>
5324a040 49#include <net/inet6_hashtables.h>
8129765a 50#include <net/inet6_connection_sock.h>
1da177e4
LT
51#include <net/ipv6.h>
52#include <net/transp_v6.h>
53#include <net/addrconf.h>
54#include <net/ip6_route.h>
55#include <net/ip6_checksum.h>
56#include <net/inet_ecn.h>
57#include <net/protocol.h>
58#include <net/xfrm.h>
1da177e4
LT
59#include <net/snmp.h>
60#include <net/dsfield.h>
6d6ee43e 61#include <net/timewait_sock.h>
3d58b5fa 62#include <net/inet_common.h>
6e5714ea 63#include <net/secure_seq.h>
076bb0c8 64#include <net/busy_poll.h>
1da177e4 65
1da177e4
LT
66#include <linux/proc_fs.h>
67#include <linux/seq_file.h>
68
cf80e0e4 69#include <crypto/hash.h>
cfb6eeb4
YH
70#include <linux/scatterlist.h>
71
a00e7444
ED
72static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
73static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
6edafaaf 74 struct request_sock *req);
1da177e4
LT
75
76static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
1da177e4 77
3b401a81
SH
78static const struct inet_connection_sock_af_ops ipv6_mapped;
79static const struct inet_connection_sock_af_ops ipv6_specific;
a928630a 80#ifdef CONFIG_TCP_MD5SIG
b2e4b3de
SH
81static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
82static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
9501f972 83#else
51723935 84static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
b71d1d42 85 const struct in6_addr *addr)
9501f972
YH
86{
87 return NULL;
88}
a928630a 89#endif
1da177e4 90
fae6ef87
NC
91static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
92{
93 struct dst_entry *dst = skb_dst(skb);
fae6ef87 94
5037e9ef 95 if (dst && dst_hold_safe(dst)) {
ca777eff
ED
96 const struct rt6_info *rt = (const struct rt6_info *)dst;
97
ca777eff
ED
98 sk->sk_rx_dst = dst;
99 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
b197df4f 100 inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
ca777eff 101 }
fae6ef87
NC
102}
103
84b114b9 104static u32 tcp_v6_init_seq(const struct sk_buff *skb)
1da177e4 105{
84b114b9
ED
106 return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
107 ipv6_hdr(skb)->saddr.s6_addr32,
108 tcp_hdr(skb)->dest,
109 tcp_hdr(skb)->source);
110}
111
5d2ed052 112static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
84b114b9 113{
5d2ed052 114 return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
84b114b9 115 ipv6_hdr(skb)->saddr.s6_addr32);
1da177e4
LT
116}
117
1ab1457c 118static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
1da177e4
LT
119 int addr_len)
120{
121 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
1ab1457c 122 struct inet_sock *inet = inet_sk(sk);
d83d8461 123 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
124 struct ipv6_pinfo *np = inet6_sk(sk);
125 struct tcp_sock *tp = tcp_sk(sk);
20c59de2 126 struct in6_addr *saddr = NULL, *final_p, final;
45f6fad8 127 struct ipv6_txoptions *opt;
4c9483b2 128 struct flowi6 fl6;
1da177e4
LT
129 struct dst_entry *dst;
130 int addr_type;
131 int err;
1946e672 132 struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
1da177e4 133
1ab1457c 134 if (addr_len < SIN6_LEN_RFC2133)
1da177e4
LT
135 return -EINVAL;
136
1ab1457c 137 if (usin->sin6_family != AF_INET6)
a02cec21 138 return -EAFNOSUPPORT;
1da177e4 139
4c9483b2 140 memset(&fl6, 0, sizeof(fl6));
1da177e4
LT
141
142 if (np->sndflow) {
4c9483b2
DM
143 fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
144 IP6_ECN_flow_init(fl6.flowlabel);
145 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
1da177e4 146 struct ip6_flowlabel *flowlabel;
4c9483b2 147 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
63159f29 148 if (!flowlabel)
1da177e4 149 return -EINVAL;
1da177e4
LT
150 fl6_sock_release(flowlabel);
151 }
152 }
153
154 /*
1ab1457c
YH
155 * connect() to INADDR_ANY means loopback (BSD'ism).
156 */
157
052d2369
JL
158 if (ipv6_addr_any(&usin->sin6_addr)) {
159 if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
160 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
161 &usin->sin6_addr);
162 else
163 usin->sin6_addr = in6addr_loopback;
164 }
1da177e4
LT
165
166 addr_type = ipv6_addr_type(&usin->sin6_addr);
167
4c99aa40 168 if (addr_type & IPV6_ADDR_MULTICAST)
1da177e4
LT
169 return -ENETUNREACH;
170
171 if (addr_type&IPV6_ADDR_LINKLOCAL) {
172 if (addr_len >= sizeof(struct sockaddr_in6) &&
173 usin->sin6_scope_id) {
174 /* If interface is set while binding, indices
175 * must coincide.
176 */
177 if (sk->sk_bound_dev_if &&
178 sk->sk_bound_dev_if != usin->sin6_scope_id)
179 return -EINVAL;
180
181 sk->sk_bound_dev_if = usin->sin6_scope_id;
182 }
183
184 /* Connect to link-local address requires an interface */
185 if (!sk->sk_bound_dev_if)
186 return -EINVAL;
187 }
188
189 if (tp->rx_opt.ts_recent_stamp &&
efe4208f 190 !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
1da177e4
LT
191 tp->rx_opt.ts_recent = 0;
192 tp->rx_opt.ts_recent_stamp = 0;
193 tp->write_seq = 0;
194 }
195
efe4208f 196 sk->sk_v6_daddr = usin->sin6_addr;
4c9483b2 197 np->flow_label = fl6.flowlabel;
1da177e4
LT
198
199 /*
200 * TCP over IPv4
201 */
202
052d2369 203 if (addr_type & IPV6_ADDR_MAPPED) {
d83d8461 204 u32 exthdrlen = icsk->icsk_ext_hdr_len;
1da177e4
LT
205 struct sockaddr_in sin;
206
207 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
208
209 if (__ipv6_only_sock(sk))
210 return -ENETUNREACH;
211
212 sin.sin_family = AF_INET;
213 sin.sin_port = usin->sin6_port;
214 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
215
d83d8461 216 icsk->icsk_af_ops = &ipv6_mapped;
1da177e4 217 sk->sk_backlog_rcv = tcp_v4_do_rcv;
cfb6eeb4
YH
218#ifdef CONFIG_TCP_MD5SIG
219 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
220#endif
1da177e4
LT
221
222 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
223
224 if (err) {
d83d8461
ACM
225 icsk->icsk_ext_hdr_len = exthdrlen;
226 icsk->icsk_af_ops = &ipv6_specific;
1da177e4 227 sk->sk_backlog_rcv = tcp_v6_do_rcv;
cfb6eeb4
YH
228#ifdef CONFIG_TCP_MD5SIG
229 tp->af_specific = &tcp_sock_ipv6_specific;
230#endif
1da177e4 231 goto failure;
1da177e4 232 }
d1e559d0 233 np->saddr = sk->sk_v6_rcv_saddr;
1da177e4
LT
234
235 return err;
236 }
237
efe4208f
ED
238 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
239 saddr = &sk->sk_v6_rcv_saddr;
1da177e4 240
4c9483b2 241 fl6.flowi6_proto = IPPROTO_TCP;
efe4208f 242 fl6.daddr = sk->sk_v6_daddr;
4e3fd7a0 243 fl6.saddr = saddr ? *saddr : np->saddr;
4c9483b2
DM
244 fl6.flowi6_oif = sk->sk_bound_dev_if;
245 fl6.flowi6_mark = sk->sk_mark;
1958b856
DM
246 fl6.fl6_dport = usin->sin6_port;
247 fl6.fl6_sport = inet->inet_sport;
e2d118a1 248 fl6.flowi6_uid = sk->sk_uid;
1da177e4 249
1e1d04e6 250 opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
45f6fad8 251 final_p = fl6_update_dst(&fl6, opt, &final);
1da177e4 252
4c9483b2 253 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
beb8d13b 254
0e0d44ab 255 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
68d0c6d3
DM
256 if (IS_ERR(dst)) {
257 err = PTR_ERR(dst);
1da177e4 258 goto failure;
14e50e57 259 }
1da177e4 260
63159f29 261 if (!saddr) {
4c9483b2 262 saddr = &fl6.saddr;
efe4208f 263 sk->sk_v6_rcv_saddr = *saddr;
1da177e4
LT
264 }
265
266 /* set the source address */
4e3fd7a0 267 np->saddr = *saddr;
c720c7e8 268 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
1da177e4 269
f83ef8c0 270 sk->sk_gso_type = SKB_GSO_TCPV6;
6bd4f355 271 ip6_dst_store(sk, dst, NULL, NULL);
1da177e4 272
d83d8461 273 icsk->icsk_ext_hdr_len = 0;
45f6fad8
ED
274 if (opt)
275 icsk->icsk_ext_hdr_len = opt->opt_flen +
276 opt->opt_nflen;
1da177e4
LT
277
278 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
279
c720c7e8 280 inet->inet_dport = usin->sin6_port;
1da177e4
LT
281
282 tcp_set_state(sk, TCP_SYN_SENT);
1946e672 283 err = inet6_hash_connect(tcp_death_row, sk);
1da177e4
LT
284 if (err)
285 goto late_failure;
286
877d1f62 287 sk_set_txhash(sk);
9e7ceb06 288
00355fa5 289 if (likely(!tp->repair)) {
00355fa5 290 if (!tp->write_seq)
84b114b9
ED
291 tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
292 sk->sk_v6_daddr.s6_addr32,
293 inet->inet_sport,
294 inet->inet_dport);
5d2ed052
ED
295 tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
296 np->saddr.s6_addr32,
84b114b9 297 sk->sk_v6_daddr.s6_addr32);
00355fa5 298 }
1da177e4 299
19f6d3f3
WW
300 if (tcp_fastopen_defer_connect(sk, &err))
301 return err;
302 if (err)
303 goto late_failure;
304
1da177e4
LT
305 err = tcp_connect(sk);
306 if (err)
307 goto late_failure;
308
309 return 0;
310
311late_failure:
312 tcp_set_state(sk, TCP_CLOSE);
1da177e4 313failure:
c720c7e8 314 inet->inet_dport = 0;
1da177e4
LT
315 sk->sk_route_caps = 0;
316 return err;
317}
318
563d34d0
ED
319static void tcp_v6_mtu_reduced(struct sock *sk)
320{
321 struct dst_entry *dst;
322
323 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
324 return;
325
326 dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
327 if (!dst)
328 return;
329
330 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
331 tcp_sync_mss(sk, dst_mtu(dst));
332 tcp_simple_retransmit(sk);
333 }
334}
335
1da177e4 336static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
d5fdd6ba 337 u8 type, u8 code, int offset, __be32 info)
1da177e4 338{
4c99aa40 339 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
505cbfc5 340 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
2215089b
ED
341 struct net *net = dev_net(skb->dev);
342 struct request_sock *fastopen;
1da177e4 343 struct ipv6_pinfo *np;
1ab1457c 344 struct tcp_sock *tp;
0a672f74 345 __u32 seq, snd_una;
2215089b 346 struct sock *sk;
9cf74903 347 bool fatal;
2215089b 348 int err;
1da177e4 349
2215089b
ED
350 sk = __inet6_lookup_established(net, &tcp_hashinfo,
351 &hdr->daddr, th->dest,
352 &hdr->saddr, ntohs(th->source),
353 skb->dev->ifindex);
1da177e4 354
2215089b 355 if (!sk) {
a16292a0
ED
356 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
357 ICMP6_MIB_INERRORS);
1da177e4
LT
358 return;
359 }
360
361 if (sk->sk_state == TCP_TIME_WAIT) {
9469c7b4 362 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
363 return;
364 }
2215089b 365 seq = ntohl(th->seq);
9cf74903 366 fatal = icmpv6_err_convert(type, code, &err);
2215089b 367 if (sk->sk_state == TCP_NEW_SYN_RECV)
9cf74903 368 return tcp_req_err(sk, seq, fatal);
1da177e4
LT
369
370 bh_lock_sock(sk);
563d34d0 371 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
02a1d6e7 372 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
1da177e4
LT
373
374 if (sk->sk_state == TCP_CLOSE)
375 goto out;
376
e802af9c 377 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
02a1d6e7 378 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
e802af9c
SH
379 goto out;
380 }
381
1da177e4 382 tp = tcp_sk(sk);
0a672f74
YC
383 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
384 fastopen = tp->fastopen_rsk;
385 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
1da177e4 386 if (sk->sk_state != TCP_LISTEN &&
0a672f74 387 !between(seq, snd_una, tp->snd_nxt)) {
02a1d6e7 388 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
1da177e4
LT
389 goto out;
390 }
391
392 np = inet6_sk(sk);
393
ec18d9a2 394 if (type == NDISC_REDIRECT) {
45caeaa5
JM
395 if (!sock_owned_by_user(sk)) {
396 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
ec18d9a2 397
45caeaa5
JM
398 if (dst)
399 dst->ops->redirect(dst, sk, skb);
400 }
50a75a89 401 goto out;
ec18d9a2
DM
402 }
403
1da177e4 404 if (type == ICMPV6_PKT_TOOBIG) {
0d4f0608
ED
405 /* We are not interested in TCP_LISTEN and open_requests
406 * (SYN-ACKs send out by Linux are always <576bytes so
407 * they should go through unfragmented).
408 */
409 if (sk->sk_state == TCP_LISTEN)
410 goto out;
411
93b36cf3
HFS
412 if (!ip6_sk_accept_pmtu(sk))
413 goto out;
414
563d34d0
ED
415 tp->mtu_info = ntohl(info);
416 if (!sock_owned_by_user(sk))
417 tcp_v6_mtu_reduced(sk);
d013ef2a 418 else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
7aa5470c 419 &sk->sk_tsq_flags))
d013ef2a 420 sock_hold(sk);
1da177e4
LT
421 goto out;
422 }
423
1da177e4 424
60236fdd 425 /* Might be for an request_sock */
1da177e4 426 switch (sk->sk_state) {
1da177e4 427 case TCP_SYN_SENT:
0a672f74
YC
428 case TCP_SYN_RECV:
429 /* Only in fast or simultaneous open. If a fast open socket is
430 * is already accepted it is treated as a connected one below.
431 */
63159f29 432 if (fastopen && !fastopen->sk)
0a672f74
YC
433 break;
434
1da177e4 435 if (!sock_owned_by_user(sk)) {
1da177e4
LT
436 sk->sk_err = err;
437 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
438
439 tcp_done(sk);
440 } else
441 sk->sk_err_soft = err;
442 goto out;
443 }
444
445 if (!sock_owned_by_user(sk) && np->recverr) {
446 sk->sk_err = err;
447 sk->sk_error_report(sk);
448 } else
449 sk->sk_err_soft = err;
450
451out:
452 bh_unlock_sock(sk);
453 sock_put(sk);
454}
455
456
0f935dbe 457static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
d6274bd8 458 struct flowi *fl,
3840a06e 459 struct request_sock *req,
ca6fb065 460 struct tcp_fastopen_cookie *foc,
b3d05147 461 enum tcp_synack_type synack_type)
1da177e4 462{
634fb979 463 struct inet_request_sock *ireq = inet_rsk(req);
1da177e4 464 struct ipv6_pinfo *np = inet6_sk(sk);
56ac42bc 465 struct ipv6_txoptions *opt;
d6274bd8 466 struct flowi6 *fl6 = &fl->u.ip6;
4c99aa40 467 struct sk_buff *skb;
9494218f 468 int err = -ENOMEM;
1da177e4 469
9f10d3f6 470 /* First, grab a route. */
f76b33c3
ED
471 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
472 IPPROTO_TCP)) == NULL)
fd80eb94 473 goto done;
9494218f 474
b3d05147 475 skb = tcp_make_synack(sk, dst, req, foc, synack_type);
9494218f 476
1da177e4 477 if (skb) {
634fb979
ED
478 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
479 &ireq->ir_v6_rmt_addr);
1da177e4 480
634fb979 481 fl6->daddr = ireq->ir_v6_rmt_addr;
53b24b8f 482 if (np->repflow && ireq->pktopts)
df3687ff
FF
483 fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
484
3e4006f0 485 rcu_read_lock();
56ac42bc
HD
486 opt = ireq->ipv6_opt;
487 if (!opt)
488 opt = rcu_dereference(np->opt);
92e55f41 489 err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
3e4006f0 490 rcu_read_unlock();
b9df3cb8 491 err = net_xmit_eval(err);
1da177e4
LT
492 }
493
494done:
1da177e4
LT
495 return err;
496}
497
72659ecc 498
60236fdd 499static void tcp_v6_reqsk_destructor(struct request_sock *req)
1da177e4 500{
56ac42bc 501 kfree(inet_rsk(req)->ipv6_opt);
634fb979 502 kfree_skb(inet_rsk(req)->pktopts);
1da177e4
LT
503}
504
cfb6eeb4 505#ifdef CONFIG_TCP_MD5SIG
b83e3deb 506static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
b71d1d42 507 const struct in6_addr *addr)
cfb6eeb4 508{
a915da9b 509 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
cfb6eeb4
YH
510}
511
b83e3deb 512static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
fd3a154a 513 const struct sock *addr_sk)
cfb6eeb4 514{
efe4208f 515 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
cfb6eeb4
YH
516}
517
8917a777
ID
518static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
519 char __user *optval, int optlen)
cfb6eeb4
YH
520{
521 struct tcp_md5sig cmd;
522 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
8917a777 523 u8 prefixlen;
cfb6eeb4
YH
524
525 if (optlen < sizeof(cmd))
526 return -EINVAL;
527
528 if (copy_from_user(&cmd, optval, sizeof(cmd)))
529 return -EFAULT;
530
531 if (sin6->sin6_family != AF_INET6)
532 return -EINVAL;
533
8917a777
ID
534 if (optname == TCP_MD5SIG_EXT &&
535 cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
536 prefixlen = cmd.tcpm_prefixlen;
537 if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
538 prefixlen > 32))
539 return -EINVAL;
540 } else {
541 prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
542 }
543
cfb6eeb4 544 if (!cmd.tcpm_keylen) {
e773e4fa 545 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
a915da9b 546 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
8917a777 547 AF_INET, prefixlen);
a915da9b 548 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
8917a777 549 AF_INET6, prefixlen);
cfb6eeb4
YH
550 }
551
552 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
553 return -EINVAL;
554
a915da9b
ED
555 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
556 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
8917a777 557 AF_INET, prefixlen, cmd.tcpm_key,
6797318e 558 cmd.tcpm_keylen, GFP_KERNEL);
cfb6eeb4 559
a915da9b 560 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
8917a777
ID
561 AF_INET6, prefixlen, cmd.tcpm_key,
562 cmd.tcpm_keylen, GFP_KERNEL);
cfb6eeb4
YH
563}
564
19689e38
ED
565static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
566 const struct in6_addr *daddr,
567 const struct in6_addr *saddr,
568 const struct tcphdr *th, int nbytes)
cfb6eeb4 569{
cfb6eeb4 570 struct tcp6_pseudohdr *bp;
49a72dfb 571 struct scatterlist sg;
19689e38 572 struct tcphdr *_th;
8d26d76d 573
19689e38 574 bp = hp->scratch;
cfb6eeb4 575 /* 1. TCP pseudo-header (RFC2460) */
4e3fd7a0
AD
576 bp->saddr = *saddr;
577 bp->daddr = *daddr;
49a72dfb 578 bp->protocol = cpu_to_be32(IPPROTO_TCP);
00b1304c 579 bp->len = cpu_to_be32(nbytes);
cfb6eeb4 580
19689e38
ED
581 _th = (struct tcphdr *)(bp + 1);
582 memcpy(_th, th, sizeof(*th));
583 _th->check = 0;
584
585 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
586 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
587 sizeof(*bp) + sizeof(*th));
cf80e0e4 588 return crypto_ahash_update(hp->md5_req);
49a72dfb 589}
c7da57a1 590
19689e38 591static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
b71d1d42 592 const struct in6_addr *daddr, struct in6_addr *saddr,
318cf7aa 593 const struct tcphdr *th)
49a72dfb
AL
594{
595 struct tcp_md5sig_pool *hp;
cf80e0e4 596 struct ahash_request *req;
49a72dfb
AL
597
598 hp = tcp_get_md5sig_pool();
599 if (!hp)
600 goto clear_hash_noput;
cf80e0e4 601 req = hp->md5_req;
49a72dfb 602
cf80e0e4 603 if (crypto_ahash_init(req))
49a72dfb 604 goto clear_hash;
19689e38 605 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
49a72dfb
AL
606 goto clear_hash;
607 if (tcp_md5_hash_key(hp, key))
608 goto clear_hash;
cf80e0e4
HX
609 ahash_request_set_crypt(req, NULL, md5_hash, 0);
610 if (crypto_ahash_final(req))
cfb6eeb4 611 goto clear_hash;
cfb6eeb4 612
cfb6eeb4 613 tcp_put_md5sig_pool();
cfb6eeb4 614 return 0;
49a72dfb 615
cfb6eeb4
YH
616clear_hash:
617 tcp_put_md5sig_pool();
618clear_hash_noput:
619 memset(md5_hash, 0, 16);
49a72dfb 620 return 1;
cfb6eeb4
YH
621}
622
39f8e58e
ED
623static int tcp_v6_md5_hash_skb(char *md5_hash,
624 const struct tcp_md5sig_key *key,
318cf7aa 625 const struct sock *sk,
318cf7aa 626 const struct sk_buff *skb)
cfb6eeb4 627{
b71d1d42 628 const struct in6_addr *saddr, *daddr;
49a72dfb 629 struct tcp_md5sig_pool *hp;
cf80e0e4 630 struct ahash_request *req;
318cf7aa 631 const struct tcphdr *th = tcp_hdr(skb);
cfb6eeb4 632
39f8e58e
ED
633 if (sk) { /* valid for establish/request sockets */
634 saddr = &sk->sk_v6_rcv_saddr;
efe4208f 635 daddr = &sk->sk_v6_daddr;
49a72dfb 636 } else {
b71d1d42 637 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
49a72dfb
AL
638 saddr = &ip6h->saddr;
639 daddr = &ip6h->daddr;
cfb6eeb4 640 }
49a72dfb
AL
641
642 hp = tcp_get_md5sig_pool();
643 if (!hp)
644 goto clear_hash_noput;
cf80e0e4 645 req = hp->md5_req;
49a72dfb 646
cf80e0e4 647 if (crypto_ahash_init(req))
49a72dfb
AL
648 goto clear_hash;
649
19689e38 650 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
49a72dfb
AL
651 goto clear_hash;
652 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
653 goto clear_hash;
654 if (tcp_md5_hash_key(hp, key))
655 goto clear_hash;
cf80e0e4
HX
656 ahash_request_set_crypt(req, NULL, md5_hash, 0);
657 if (crypto_ahash_final(req))
49a72dfb
AL
658 goto clear_hash;
659
660 tcp_put_md5sig_pool();
661 return 0;
662
663clear_hash:
664 tcp_put_md5sig_pool();
665clear_hash_noput:
666 memset(md5_hash, 0, 16);
667 return 1;
cfb6eeb4
YH
668}
669
ba8e275a
ED
670#endif
671
672static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
673 const struct sk_buff *skb)
cfb6eeb4 674{
ba8e275a 675#ifdef CONFIG_TCP_MD5SIG
cf533ea5 676 const __u8 *hash_location = NULL;
cfb6eeb4 677 struct tcp_md5sig_key *hash_expected;
b71d1d42 678 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
318cf7aa 679 const struct tcphdr *th = tcp_hdr(skb);
cfb6eeb4 680 int genhash;
cfb6eeb4
YH
681 u8 newhash[16];
682
683 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
7d5d5525 684 hash_location = tcp_parse_md5sig_option(th);
cfb6eeb4 685
785957d3
DM
686 /* We've parsed the options - do we have a hash? */
687 if (!hash_expected && !hash_location)
ff74e23f 688 return false;
785957d3
DM
689
690 if (hash_expected && !hash_location) {
c10d9310 691 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
ff74e23f 692 return true;
cfb6eeb4
YH
693 }
694
785957d3 695 if (!hash_expected && hash_location) {
c10d9310 696 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
ff74e23f 697 return true;
cfb6eeb4
YH
698 }
699
700 /* check the signature */
49a72dfb
AL
701 genhash = tcp_v6_md5_hash_skb(newhash,
702 hash_expected,
39f8e58e 703 NULL, skb);
49a72dfb 704
cfb6eeb4 705 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
72145a68 706 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
e87cc472
JP
707 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
708 genhash ? "failed" : "mismatch",
709 &ip6h->saddr, ntohs(th->source),
710 &ip6h->daddr, ntohs(th->dest));
ff74e23f 711 return true;
cfb6eeb4 712 }
ba8e275a 713#endif
ff74e23f 714 return false;
cfb6eeb4 715}
cfb6eeb4 716
b40cf18e
ED
717static void tcp_v6_init_req(struct request_sock *req,
718 const struct sock *sk_listener,
16bea70a
OP
719 struct sk_buff *skb)
720{
721 struct inet_request_sock *ireq = inet_rsk(req);
b40cf18e 722 const struct ipv6_pinfo *np = inet6_sk(sk_listener);
16bea70a
OP
723
724 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
725 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
726
16bea70a 727 /* So that link locals have meaning */
b40cf18e 728 if (!sk_listener->sk_bound_dev_if &&
16bea70a 729 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
870c3151 730 ireq->ir_iif = tcp_v6_iif(skb);
16bea70a 731
04317daf 732 if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
b40cf18e 733 (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
a224772d 734 np->rxopt.bits.rxinfo ||
16bea70a
OP
735 np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
736 np->rxopt.bits.rxohlim || np->repflow)) {
63354797 737 refcount_inc(&skb->users);
16bea70a
OP
738 ireq->pktopts = skb;
739 }
740}
741
f964629e
ED
742static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
743 struct flowi *fl,
4396e461 744 const struct request_sock *req)
d94e0417 745{
f76b33c3 746 return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
d94e0417
OP
747}
748
c6aefafb 749struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
1da177e4 750 .family = AF_INET6,
2e6599cb 751 .obj_size = sizeof(struct tcp6_request_sock),
5db92c99 752 .rtx_syn_ack = tcp_rtx_synack,
60236fdd
ACM
753 .send_ack = tcp_v6_reqsk_send_ack,
754 .destructor = tcp_v6_reqsk_destructor,
72659ecc 755 .send_reset = tcp_v6_send_reset,
4aa956d8 756 .syn_ack_timeout = tcp_syn_ack_timeout,
1da177e4
LT
757};
758
b2e4b3de 759static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
2aec4a29
OP
760 .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
761 sizeof(struct ipv6hdr),
16bea70a 762#ifdef CONFIG_TCP_MD5SIG
fd3a154a 763 .req_md5_lookup = tcp_v6_md5_lookup,
e3afe7b7 764 .calc_md5_hash = tcp_v6_md5_hash_skb,
b6332e6c 765#endif
16bea70a 766 .init_req = tcp_v6_init_req,
fb7b37a7
OP
767#ifdef CONFIG_SYN_COOKIES
768 .cookie_init_seq = cookie_v6_init_sequence,
769#endif
d94e0417 770 .route_req = tcp_v6_route_req,
84b114b9
ED
771 .init_seq = tcp_v6_init_seq,
772 .init_ts_off = tcp_v6_init_ts_off,
d6274bd8 773 .send_synack = tcp_v6_send_synack,
16bea70a 774};
cfb6eeb4 775
a00e7444 776static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
0f85feae
ED
777 u32 ack, u32 win, u32 tsval, u32 tsecr,
778 int oif, struct tcp_md5sig_key *key, int rst,
5119bd16 779 u8 tclass, __be32 label)
1da177e4 780{
cf533ea5
ED
781 const struct tcphdr *th = tcp_hdr(skb);
782 struct tcphdr *t1;
1da177e4 783 struct sk_buff *buff;
4c9483b2 784 struct flowi6 fl6;
0f85feae 785 struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
e5047992 786 struct sock *ctl_sk = net->ipv6.tcp_sk;
77c676da 787 unsigned int tot_len = sizeof(struct tcphdr);
adf30907 788 struct dst_entry *dst;
81ada62d 789 __be32 *topt;
1da177e4 790
ee684b6f 791 if (tsecr)
626e264d 792 tot_len += TCPOLEN_TSTAMP_ALIGNED;
cfb6eeb4 793#ifdef CONFIG_TCP_MD5SIG
cfb6eeb4
YH
794 if (key)
795 tot_len += TCPOLEN_MD5SIG_ALIGNED;
796#endif
797
cfb6eeb4 798 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1da177e4 799 GFP_ATOMIC);
63159f29 800 if (!buff)
1ab1457c 801 return;
1da177e4 802
cfb6eeb4 803 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1da177e4 804
d58ff351 805 t1 = skb_push(buff, tot_len);
6651ffc8 806 skb_reset_transport_header(buff);
1da177e4
LT
807
808 /* Swap the send and the receive. */
809 memset(t1, 0, sizeof(*t1));
810 t1->dest = th->source;
811 t1->source = th->dest;
cfb6eeb4 812 t1->doff = tot_len / 4;
626e264d
IJ
813 t1->seq = htonl(seq);
814 t1->ack_seq = htonl(ack);
815 t1->ack = !rst || !th->ack;
816 t1->rst = rst;
817 t1->window = htons(win);
1da177e4 818
81ada62d
IJ
819 topt = (__be32 *)(t1 + 1);
820
ee684b6f 821 if (tsecr) {
626e264d
IJ
822 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
823 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
ee684b6f
AV
824 *topt++ = htonl(tsval);
825 *topt++ = htonl(tsecr);
626e264d
IJ
826 }
827
cfb6eeb4
YH
828#ifdef CONFIG_TCP_MD5SIG
829 if (key) {
81ada62d
IJ
830 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
831 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
832 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
78e645cb
IJ
833 &ipv6_hdr(skb)->saddr,
834 &ipv6_hdr(skb)->daddr, t1);
cfb6eeb4
YH
835 }
836#endif
837
4c9483b2 838 memset(&fl6, 0, sizeof(fl6));
4e3fd7a0
AD
839 fl6.daddr = ipv6_hdr(skb)->saddr;
840 fl6.saddr = ipv6_hdr(skb)->daddr;
1d13a96c 841 fl6.flowlabel = label;
1da177e4 842
e5700aff
DM
843 buff->ip_summed = CHECKSUM_PARTIAL;
844 buff->csum = 0;
845
4c9483b2 846 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
1da177e4 847
4c9483b2 848 fl6.flowi6_proto = IPPROTO_TCP;
a36dbdb2 849 if (rt6_need_strict(&fl6.daddr) && !oif)
870c3151 850 fl6.flowi6_oif = tcp_v6_iif(skb);
9b6c14d5
DA
851 else {
852 if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
853 oif = skb->skb_iif;
854
855 fl6.flowi6_oif = oif;
856 }
1d2f7b2d 857
e110861f 858 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
1958b856
DM
859 fl6.fl6_dport = t1->dest;
860 fl6.fl6_sport = t1->source;
e2d118a1 861 fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
4c9483b2 862 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
1da177e4 863
c20121ae
DL
864 /* Pass a socket to ip6_dst_lookup either it is for RST
865 * Underlying function will use this to retrieve the network
866 * namespace
867 */
0e0d44ab 868 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
68d0c6d3
DM
869 if (!IS_ERR(dst)) {
870 skb_dst_set(buff, dst);
92e55f41 871 ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
c10d9310 872 TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
68d0c6d3 873 if (rst)
c10d9310 874 TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
68d0c6d3 875 return;
1da177e4
LT
876 }
877
878 kfree_skb(buff);
879}
880
a00e7444 881static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
1da177e4 882{
cf533ea5 883 const struct tcphdr *th = tcp_hdr(skb);
626e264d 884 u32 seq = 0, ack_seq = 0;
fa3e5b4e 885 struct tcp_md5sig_key *key = NULL;
658ddaaf
SL
886#ifdef CONFIG_TCP_MD5SIG
887 const __u8 *hash_location = NULL;
888 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
889 unsigned char newhash[16];
890 int genhash;
891 struct sock *sk1 = NULL;
892#endif
9c76a114 893 int oif;
1da177e4 894
626e264d 895 if (th->rst)
1da177e4
LT
896 return;
897
c3658e8d
ED
898 /* If sk not NULL, it means we did a successful lookup and incoming
899 * route had to be correct. prequeue might have dropped our dst.
900 */
901 if (!sk && !ipv6_unicast_destination(skb))
626e264d 902 return;
1da177e4 903
cfb6eeb4 904#ifdef CONFIG_TCP_MD5SIG
3b24d854 905 rcu_read_lock();
658ddaaf 906 hash_location = tcp_parse_md5sig_option(th);
271c3b9b 907 if (sk && sk_fullsock(sk)) {
e46787f0
FW
908 key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
909 } else if (hash_location) {
658ddaaf
SL
910 /*
911 * active side is lost. Try to find listening socket through
912 * source port, and then find md5 key through listening socket.
913 * we are not loose security here:
914 * Incoming packet is checked with md5 hash with finding key,
915 * no RST generated if md5 hash doesn't match.
916 */
917 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
a583636a
CG
918 &tcp_hashinfo, NULL, 0,
919 &ipv6h->saddr,
5ba24953 920 th->source, &ipv6h->daddr,
870c3151 921 ntohs(th->source), tcp_v6_iif(skb));
658ddaaf 922 if (!sk1)
3b24d854 923 goto out;
658ddaaf 924
658ddaaf
SL
925 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
926 if (!key)
3b24d854 927 goto out;
658ddaaf 928
39f8e58e 929 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
658ddaaf 930 if (genhash || memcmp(hash_location, newhash, 16) != 0)
3b24d854 931 goto out;
658ddaaf 932 }
cfb6eeb4
YH
933#endif
934
626e264d
IJ
935 if (th->ack)
936 seq = ntohl(th->ack_seq);
937 else
938 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
939 (th->doff << 2);
1da177e4 940
9c76a114 941 oif = sk ? sk->sk_bound_dev_if : 0;
0f85feae 942 tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
658ddaaf
SL
943
944#ifdef CONFIG_TCP_MD5SIG
3b24d854
ED
945out:
946 rcu_read_unlock();
658ddaaf 947#endif
626e264d 948}
1da177e4 949
a00e7444 950static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
0f85feae 951 u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
1d13a96c 952 struct tcp_md5sig_key *key, u8 tclass,
5119bd16 953 __be32 label)
626e264d 954{
0f85feae
ED
955 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
956 tclass, label);
1da177e4
LT
957}
958
959static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
960{
8feaf0c0 961 struct inet_timewait_sock *tw = inet_twsk(sk);
cfb6eeb4 962 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1da177e4 963
0f85feae 964 tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
8feaf0c0 965 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
9a568de4 966 tcp_time_stamp_raw() + tcptw->tw_ts_offset,
9c76a114 967 tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
21858cd0 968 tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
1da177e4 969
8feaf0c0 970 inet_twsk_put(tw);
1da177e4
LT
971}
972
a00e7444 973static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
6edafaaf 974 struct request_sock *req)
1da177e4 975{
3a19ce0e
DL
976 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
977 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
978 */
20a2b49f
ED
979 /* RFC 7323 2.3
980 * The window field (SEG.WND) of every outgoing segment, with the
981 * exception of <SYN> segments, MUST be right-shifted by
982 * Rcv.Wind.Shift bits:
983 */
0f85feae 984 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
3a19ce0e 985 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
20a2b49f
ED
986 tcp_rsk(req)->rcv_nxt,
987 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
9a568de4 988 tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
95a22cae 989 req->ts_recent, sk->sk_bound_dev_if,
1d13a96c
FF
990 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
991 0, 0);
1da177e4
LT
992}
993
994
079096f1 995static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
1da177e4 996{
079096f1 997#ifdef CONFIG_SYN_COOKIES
aa8223c7 998 const struct tcphdr *th = tcp_hdr(skb);
1da177e4 999
af9b4738 1000 if (!th->syn)
c6aefafb 1001 sk = cookie_v6_check(sk, skb);
1da177e4
LT
1002#endif
1003 return sk;
1004}
1005
1da177e4
LT
1006static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1007{
1da177e4
LT
1008 if (skb->protocol == htons(ETH_P_IP))
1009 return tcp_v4_conn_request(sk, skb);
1010
1011 if (!ipv6_unicast_destination(skb))
1ab1457c 1012 goto drop;
1da177e4 1013
1fb6f159
OP
1014 return tcp_conn_request(&tcp6_request_sock_ops,
1015 &tcp_request_sock_ipv6_ops, sk, skb);
1da177e4
LT
1016
1017drop:
9caad864 1018 tcp_listendrop(sk);
1da177e4
LT
1019 return 0; /* don't send reset */
1020}
1021
ebf6c9cb
ED
1022static void tcp_v6_restore_cb(struct sk_buff *skb)
1023{
1024 /* We need to move header back to the beginning if xfrm6_policy_check()
1025 * and tcp_v6_fill_cb() are going to be called again.
1026 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1027 */
1028 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1029 sizeof(struct inet6_skb_parm));
1030}
1031
0c27171e 1032static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
4c99aa40 1033 struct request_sock *req,
5e0724d0
ED
1034 struct dst_entry *dst,
1035 struct request_sock *req_unhash,
1036 bool *own_req)
1da177e4 1037{
634fb979 1038 struct inet_request_sock *ireq;
0c27171e
ED
1039 struct ipv6_pinfo *newnp;
1040 const struct ipv6_pinfo *np = inet6_sk(sk);
45f6fad8 1041 struct ipv6_txoptions *opt;
1da177e4
LT
1042 struct tcp6_sock *newtcp6sk;
1043 struct inet_sock *newinet;
1044 struct tcp_sock *newtp;
1045 struct sock *newsk;
cfb6eeb4
YH
1046#ifdef CONFIG_TCP_MD5SIG
1047 struct tcp_md5sig_key *key;
1048#endif
3840a06e 1049 struct flowi6 fl6;
1da177e4
LT
1050
1051 if (skb->protocol == htons(ETH_P_IP)) {
1052 /*
1053 * v6 mapped
1054 */
1055
5e0724d0
ED
1056 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1057 req_unhash, own_req);
1da177e4 1058
63159f29 1059 if (!newsk)
1da177e4
LT
1060 return NULL;
1061
1062 newtcp6sk = (struct tcp6_sock *)newsk;
1063 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1064
1065 newinet = inet_sk(newsk);
1066 newnp = inet6_sk(newsk);
1067 newtp = tcp_sk(newsk);
1068
1069 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1070
d1e559d0 1071 newnp->saddr = newsk->sk_v6_rcv_saddr;
1da177e4 1072
8292a17a 1073 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1da177e4 1074 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
cfb6eeb4
YH
1075#ifdef CONFIG_TCP_MD5SIG
1076 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1077#endif
1078
83eaddab 1079 newnp->ipv6_mc_list = NULL;
676a1184
YZ
1080 newnp->ipv6_ac_list = NULL;
1081 newnp->ipv6_fl_list = NULL;
1da177e4
LT
1082 newnp->pktoptions = NULL;
1083 newnp->opt = NULL;
870c3151 1084 newnp->mcast_oif = tcp_v6_iif(skb);
0660e03f 1085 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1397ed35 1086 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
df3687ff
FF
1087 if (np->repflow)
1088 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1da177e4 1089
e6848976
ACM
1090 /*
1091 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1092 * here, tcp_create_openreq_child now does this for us, see the comment in
1093 * that function for the gory details. -acme
1da177e4 1094 */
1da177e4
LT
1095
1096 /* It is tricky place. Until this moment IPv4 tcp
8292a17a 1097 worked with IPv6 icsk.icsk_af_ops.
1da177e4
LT
1098 Sync it now.
1099 */
d83d8461 1100 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1da177e4
LT
1101
1102 return newsk;
1103 }
1104
634fb979 1105 ireq = inet_rsk(req);
1da177e4
LT
1106
1107 if (sk_acceptq_is_full(sk))
1108 goto out_overflow;
1109
493f377d 1110 if (!dst) {
f76b33c3 1111 dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
493f377d 1112 if (!dst)
1da177e4 1113 goto out;
1ab1457c 1114 }
1da177e4
LT
1115
1116 newsk = tcp_create_openreq_child(sk, req, skb);
63159f29 1117 if (!newsk)
093d2823 1118 goto out_nonewsk;
1da177e4 1119
e6848976
ACM
1120 /*
1121 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1122 * count here, tcp_create_openreq_child now does this for us, see the
1123 * comment in that function for the gory details. -acme
1124 */
1da177e4 1125
59eed279 1126 newsk->sk_gso_type = SKB_GSO_TCPV6;
6bd4f355 1127 ip6_dst_store(newsk, dst, NULL, NULL);
fae6ef87 1128 inet6_sk_rx_dst_set(newsk, skb);
1da177e4
LT
1129
1130 newtcp6sk = (struct tcp6_sock *)newsk;
1131 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1132
1133 newtp = tcp_sk(newsk);
1134 newinet = inet_sk(newsk);
1135 newnp = inet6_sk(newsk);
1136
1137 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1138
634fb979
ED
1139 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1140 newnp->saddr = ireq->ir_v6_loc_addr;
1141 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1142 newsk->sk_bound_dev_if = ireq->ir_iif;
1da177e4 1143
1ab1457c 1144 /* Now IPv6 options...
1da177e4
LT
1145
1146 First: no IPv4 options.
1147 */
f6d8bd05 1148 newinet->inet_opt = NULL;
83eaddab 1149 newnp->ipv6_mc_list = NULL;
676a1184 1150 newnp->ipv6_ac_list = NULL;
d35690be 1151 newnp->ipv6_fl_list = NULL;
1da177e4
LT
1152
1153 /* Clone RX bits */
1154 newnp->rxopt.all = np->rxopt.all;
1155
1da177e4 1156 newnp->pktoptions = NULL;
1da177e4 1157 newnp->opt = NULL;
870c3151 1158 newnp->mcast_oif = tcp_v6_iif(skb);
0660e03f 1159 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1397ed35 1160 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
df3687ff
FF
1161 if (np->repflow)
1162 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1da177e4
LT
1163
1164 /* Clone native IPv6 options from listening socket (if any)
1165
1166 Yes, keeping reference count would be much more clever,
1167 but we make one more one thing there: reattach optmem
1168 to newsk.
1169 */
56ac42bc
HD
1170 opt = ireq->ipv6_opt;
1171 if (!opt)
1172 opt = rcu_dereference(np->opt);
45f6fad8
ED
1173 if (opt) {
1174 opt = ipv6_dup_options(newsk, opt);
1175 RCU_INIT_POINTER(newnp->opt, opt);
1176 }
d83d8461 1177 inet_csk(newsk)->icsk_ext_hdr_len = 0;
45f6fad8
ED
1178 if (opt)
1179 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1180 opt->opt_flen;
1da177e4 1181
81164413
DB
1182 tcp_ca_openreq_child(newsk, dst);
1183
1da177e4 1184 tcp_sync_mss(newsk, dst_mtu(dst));
3541f9e8 1185 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
d135c522 1186
1da177e4
LT
1187 tcp_initialize_rcv_mss(newsk);
1188
c720c7e8
ED
1189 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1190 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1da177e4 1191
cfb6eeb4
YH
1192#ifdef CONFIG_TCP_MD5SIG
1193 /* Copy over the MD5 key from the original socket */
4aa956d8 1194 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
53b24b8f 1195 if (key) {
cfb6eeb4
YH
1196 /* We're using one, so create a matching key
1197 * on the newsk structure. If we fail to get
1198 * memory, then we end up not copying the key
1199 * across. Shucks.
1200 */
efe4208f 1201 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
6797318e 1202 AF_INET6, 128, key->key, key->keylen,
7450aaf6 1203 sk_gfp_mask(sk, GFP_ATOMIC));
cfb6eeb4
YH
1204 }
1205#endif
1206
093d2823 1207 if (__inet_inherit_port(sk, newsk) < 0) {
e337e24d
CP
1208 inet_csk_prepare_forced_close(newsk);
1209 tcp_done(newsk);
093d2823
BS
1210 goto out;
1211 }
5e0724d0 1212 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
805c4bc0 1213 if (*own_req) {
49a496c9 1214 tcp_move_syn(newtp, req);
805c4bc0
ED
1215
1216 /* Clone pktoptions received with SYN, if we own the req */
1217 if (ireq->pktopts) {
1218 newnp->pktoptions = skb_clone(ireq->pktopts,
7450aaf6 1219 sk_gfp_mask(sk, GFP_ATOMIC));
805c4bc0
ED
1220 consume_skb(ireq->pktopts);
1221 ireq->pktopts = NULL;
ebf6c9cb
ED
1222 if (newnp->pktoptions) {
1223 tcp_v6_restore_cb(newnp->pktoptions);
805c4bc0 1224 skb_set_owner_r(newnp->pktoptions, newsk);
ebf6c9cb 1225 }
805c4bc0 1226 }
ce105008 1227 }
1da177e4
LT
1228
1229 return newsk;
1230
1231out_overflow:
02a1d6e7 1232 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
093d2823 1233out_nonewsk:
1da177e4 1234 dst_release(dst);
093d2823 1235out:
9caad864 1236 tcp_listendrop(sk);
1da177e4
LT
1237 return NULL;
1238}
1239
1da177e4 1240/* The socket must have it's spinlock held when we get
e994b2f0 1241 * here, unless it is a TCP_LISTEN socket.
1da177e4
LT
1242 *
1243 * We have a potential double-lock case here, so even when
1244 * doing backlog processing we use the BH locking scheme.
1245 * This is because we cannot sleep with the original spinlock
1246 * held.
1247 */
1248static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1249{
1250 struct ipv6_pinfo *np = inet6_sk(sk);
1251 struct tcp_sock *tp;
1252 struct sk_buff *opt_skb = NULL;
1253
1254 /* Imagine: socket is IPv6. IPv4 packet arrives,
1255 goes to IPv4 receive handler and backlogged.
1256 From backlog it always goes here. Kerboom...
1257 Fortunately, tcp_rcv_established and rcv_established
1258 handle them correctly, but it is not case with
1259 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1260 */
1261
1262 if (skb->protocol == htons(ETH_P_IP))
1263 return tcp_v4_do_rcv(sk, skb);
1264
1da177e4
LT
1265 /*
1266 * socket locking is here for SMP purposes as backlog rcv
1267 * is currently called with bh processing disabled.
1268 */
1269
1270 /* Do Stevens' IPV6_PKTOPTIONS.
1271
1272 Yes, guys, it is the only place in our code, where we
1273 may make it not affecting IPv4.
1274 The rest of code is protocol independent,
1275 and I do not like idea to uglify IPv4.
1276
1277 Actually, all the idea behind IPV6_PKTOPTIONS
1278 looks not very well thought. For now we latch
1279 options, received in the last packet, enqueued
1280 by tcp. Feel free to propose better solution.
1ab1457c 1281 --ANK (980728)
1da177e4
LT
1282 */
1283 if (np->rxopt.all)
7450aaf6 1284 opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
1da177e4
LT
1285
1286 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
5d299f3d
ED
1287 struct dst_entry *dst = sk->sk_rx_dst;
1288
bdeab991 1289 sock_rps_save_rxhash(sk, skb);
3d97379a 1290 sk_mark_napi_id(sk, skb);
5d299f3d
ED
1291 if (dst) {
1292 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1293 dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1294 dst_release(dst);
1295 sk->sk_rx_dst = NULL;
1296 }
1297 }
1298
c995ae22 1299 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1da177e4
LT
1300 if (opt_skb)
1301 goto ipv6_pktoptions;
1302 return 0;
1303 }
1304
12e25e10 1305 if (tcp_checksum_complete(skb))
1da177e4
LT
1306 goto csum_err;
1307
1ab1457c 1308 if (sk->sk_state == TCP_LISTEN) {
079096f1
ED
1309 struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1310
1da177e4
LT
1311 if (!nsk)
1312 goto discard;
1313
4c99aa40 1314 if (nsk != sk) {
1da177e4
LT
1315 if (tcp_child_process(sk, nsk, skb))
1316 goto reset;
1317 if (opt_skb)
1318 __kfree_skb(opt_skb);
1319 return 0;
1320 }
47482f13 1321 } else
bdeab991 1322 sock_rps_save_rxhash(sk, skb);
1da177e4 1323
72ab4a86 1324 if (tcp_rcv_state_process(sk, skb))
1da177e4 1325 goto reset;
1da177e4
LT
1326 if (opt_skb)
1327 goto ipv6_pktoptions;
1328 return 0;
1329
1330reset:
cfb6eeb4 1331 tcp_v6_send_reset(sk, skb);
1da177e4
LT
1332discard:
1333 if (opt_skb)
1334 __kfree_skb(opt_skb);
1335 kfree_skb(skb);
1336 return 0;
1337csum_err:
c10d9310
ED
1338 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1339 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1da177e4
LT
1340 goto discard;
1341
1342
1343ipv6_pktoptions:
1344 /* Do you ask, what is it?
1345
1346 1. skb was enqueued by tcp.
1347 2. skb is added to tail of read queue, rather than out of order.
1348 3. socket is not in passive state.
1349 4. Finally, it really contains options, which user wants to receive.
1350 */
1351 tp = tcp_sk(sk);
1352 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1353 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
333fad53 1354 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
870c3151 1355 np->mcast_oif = tcp_v6_iif(opt_skb);
333fad53 1356 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
0660e03f 1357 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
82e9f105 1358 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1397ed35 1359 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
df3687ff
FF
1360 if (np->repflow)
1361 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
a224772d 1362 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1da177e4 1363 skb_set_owner_r(opt_skb, sk);
8ce48623 1364 tcp_v6_restore_cb(opt_skb);
1da177e4
LT
1365 opt_skb = xchg(&np->pktoptions, opt_skb);
1366 } else {
1367 __kfree_skb(opt_skb);
1368 opt_skb = xchg(&np->pktoptions, NULL);
1369 }
1370 }
1371
800d55f1 1372 kfree_skb(opt_skb);
1da177e4
LT
1373 return 0;
1374}
1375
2dc49d16
ND
1376static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1377 const struct tcphdr *th)
1378{
1379 /* This is tricky: we move IP6CB at its correct location into
1380 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1381 * _decode_session6() uses IP6CB().
1382 * barrier() makes sure compiler won't play aliasing games.
1383 */
1384 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1385 sizeof(struct inet6_skb_parm));
1386 barrier();
1387
1388 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1389 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1390 skb->len - th->doff*4);
1391 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1392 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1393 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1394 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1395 TCP_SKB_CB(skb)->sacked = 0;
1396}
1397
e5bbef20 1398static int tcp_v6_rcv(struct sk_buff *skb)
1da177e4 1399{
cf533ea5 1400 const struct tcphdr *th;
b71d1d42 1401 const struct ipv6hdr *hdr;
3b24d854 1402 bool refcounted;
1da177e4
LT
1403 struct sock *sk;
1404 int ret;
a86b1e30 1405 struct net *net = dev_net(skb->dev);
1da177e4
LT
1406
1407 if (skb->pkt_type != PACKET_HOST)
1408 goto discard_it;
1409
1410 /*
1411 * Count it even if it's bad.
1412 */
90bbcc60 1413 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1da177e4
LT
1414
1415 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1416 goto discard_it;
1417
ea1627c2 1418 th = (const struct tcphdr *)skb->data;
1da177e4 1419
ea1627c2 1420 if (unlikely(th->doff < sizeof(struct tcphdr)/4))
1da177e4
LT
1421 goto bad_packet;
1422 if (!pskb_may_pull(skb, th->doff*4))
1423 goto discard_it;
1424
e4f45b7f 1425 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
6a5dc9e5 1426 goto csum_error;
1da177e4 1427
ea1627c2 1428 th = (const struct tcphdr *)skb->data;
e802af9c 1429 hdr = ipv6_hdr(skb);
1da177e4 1430
4bdc3d66 1431lookup:
a583636a 1432 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
3b24d854
ED
1433 th->source, th->dest, inet6_iif(skb),
1434 &refcounted);
1da177e4
LT
1435 if (!sk)
1436 goto no_tcp_socket;
1437
1438process:
1439 if (sk->sk_state == TCP_TIME_WAIT)
1440 goto do_time_wait;
1441
079096f1
ED
1442 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1443 struct request_sock *req = inet_reqsk(sk);
7716682c 1444 struct sock *nsk;
079096f1
ED
1445
1446 sk = req->rsk_listener;
1447 tcp_v6_fill_cb(skb, hdr, th);
1448 if (tcp_v6_inbound_md5_hash(sk, skb)) {
e65c332d 1449 sk_drops_add(sk, skb);
079096f1
ED
1450 reqsk_put(req);
1451 goto discard_it;
1452 }
7716682c 1453 if (unlikely(sk->sk_state != TCP_LISTEN)) {
f03f2e15 1454 inet_csk_reqsk_queue_drop_and_put(sk, req);
4bdc3d66
ED
1455 goto lookup;
1456 }
7716682c 1457 sock_hold(sk);
3b24d854 1458 refcounted = true;
fcc323f5
ED
1459 nsk = NULL;
1460 if (!tcp_filter(sk, skb))
1461 nsk = tcp_check_req(sk, skb, req, false);
079096f1
ED
1462 if (!nsk) {
1463 reqsk_put(req);
7716682c 1464 goto discard_and_relse;
079096f1
ED
1465 }
1466 if (nsk == sk) {
079096f1
ED
1467 reqsk_put(req);
1468 tcp_v6_restore_cb(skb);
1469 } else if (tcp_child_process(sk, nsk, skb)) {
1470 tcp_v6_send_reset(nsk, skb);
7716682c 1471 goto discard_and_relse;
079096f1 1472 } else {
7716682c 1473 sock_put(sk);
079096f1
ED
1474 return 0;
1475 }
1476 }
e802af9c 1477 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
02a1d6e7 1478 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
e802af9c
SH
1479 goto discard_and_relse;
1480 }
1481
1da177e4
LT
1482 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1483 goto discard_and_relse;
1484
2dc49d16
ND
1485 tcp_v6_fill_cb(skb, hdr, th);
1486
9ea88a15
DP
1487 if (tcp_v6_inbound_md5_hash(sk, skb))
1488 goto discard_and_relse;
9ea88a15 1489
ac6e7800 1490 if (tcp_filter(sk, skb))
1da177e4 1491 goto discard_and_relse;
ac6e7800
ED
1492 th = (const struct tcphdr *)skb->data;
1493 hdr = ipv6_hdr(skb);
1da177e4
LT
1494
1495 skb->dev = NULL;
1496
e994b2f0
ED
1497 if (sk->sk_state == TCP_LISTEN) {
1498 ret = tcp_v6_do_rcv(sk, skb);
1499 goto put_and_return;
1500 }
1501
1502 sk_incoming_cpu_update(sk);
1503
293b9c42 1504 bh_lock_sock_nested(sk);
a44d6eac 1505 tcp_segs_in(tcp_sk(sk), skb);
1da177e4
LT
1506 ret = 0;
1507 if (!sock_owned_by_user(sk)) {
7bced397 1508 if (!tcp_prequeue(sk, skb))
1ab1457c 1509 ret = tcp_v6_do_rcv(sk, skb);
c9c33212 1510 } else if (tcp_add_backlog(sk, skb)) {
6b03a53a
ZY
1511 goto discard_and_relse;
1512 }
1da177e4
LT
1513 bh_unlock_sock(sk);
1514
e994b2f0 1515put_and_return:
3b24d854
ED
1516 if (refcounted)
1517 sock_put(sk);
1da177e4
LT
1518 return ret ? -1 : 0;
1519
1520no_tcp_socket:
1521 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1522 goto discard_it;
1523
2dc49d16
ND
1524 tcp_v6_fill_cb(skb, hdr, th);
1525
12e25e10 1526 if (tcp_checksum_complete(skb)) {
6a5dc9e5 1527csum_error:
90bbcc60 1528 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1da177e4 1529bad_packet:
90bbcc60 1530 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1da177e4 1531 } else {
cfb6eeb4 1532 tcp_v6_send_reset(NULL, skb);
1da177e4
LT
1533 }
1534
1535discard_it:
1da177e4
LT
1536 kfree_skb(skb);
1537 return 0;
1538
1539discard_and_relse:
532182cd 1540 sk_drops_add(sk, skb);
3b24d854
ED
1541 if (refcounted)
1542 sock_put(sk);
1da177e4
LT
1543 goto discard_it;
1544
1545do_time_wait:
1546 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
9469c7b4 1547 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1548 goto discard_it;
1549 }
1550
2dc49d16
ND
1551 tcp_v6_fill_cb(skb, hdr, th);
1552
6a5dc9e5
ED
1553 if (tcp_checksum_complete(skb)) {
1554 inet_twsk_put(inet_twsk(sk));
1555 goto csum_error;
1da177e4
LT
1556 }
1557
9469c7b4 1558 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1da177e4
LT
1559 case TCP_TW_SYN:
1560 {
1561 struct sock *sk2;
1562
c346dca1 1563 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
a583636a 1564 skb, __tcp_hdrlen(th),
5ba24953 1565 &ipv6_hdr(skb)->saddr, th->source,
0660e03f 1566 &ipv6_hdr(skb)->daddr,
870c3151 1567 ntohs(th->dest), tcp_v6_iif(skb));
53b24b8f 1568 if (sk2) {
295ff7ed 1569 struct inet_timewait_sock *tw = inet_twsk(sk);
dbe7faa4 1570 inet_twsk_deschedule_put(tw);
1da177e4 1571 sk = sk2;
4ad19de8 1572 tcp_v6_restore_cb(skb);
3b24d854 1573 refcounted = false;
1da177e4
LT
1574 goto process;
1575 }
1576 /* Fall through to ACK */
1577 }
1578 case TCP_TW_ACK:
1579 tcp_v6_timewait_ack(sk, skb);
1580 break;
1581 case TCP_TW_RST:
4ad19de8 1582 tcp_v6_restore_cb(skb);
271c3b9b
FW
1583 tcp_v6_send_reset(sk, skb);
1584 inet_twsk_deschedule_put(inet_twsk(sk));
1585 goto discard_it;
4aa956d8
WY
1586 case TCP_TW_SUCCESS:
1587 ;
1da177e4
LT
1588 }
1589 goto discard_it;
1590}
1591
c7109986
ED
1592static void tcp_v6_early_demux(struct sk_buff *skb)
1593{
1594 const struct ipv6hdr *hdr;
1595 const struct tcphdr *th;
1596 struct sock *sk;
1597
1598 if (skb->pkt_type != PACKET_HOST)
1599 return;
1600
1601 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1602 return;
1603
1604 hdr = ipv6_hdr(skb);
1605 th = tcp_hdr(skb);
1606
1607 if (th->doff < sizeof(struct tcphdr) / 4)
1608 return;
1609
870c3151 1610 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
c7109986
ED
1611 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1612 &hdr->saddr, th->source,
1613 &hdr->daddr, ntohs(th->dest),
1614 inet6_iif(skb));
1615 if (sk) {
1616 skb->sk = sk;
1617 skb->destructor = sock_edemux;
f7e4eb03 1618 if (sk_fullsock(sk)) {
d0c294c5 1619 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
f3f12135 1620
c7109986 1621 if (dst)
5d299f3d 1622 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
c7109986 1623 if (dst &&
f3f12135 1624 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
c7109986
ED
1625 skb_dst_set_noref(skb, dst);
1626 }
1627 }
1628}
1629
ccb7c410
DM
1630static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1631 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1632 .twsk_unique = tcp_twsk_unique,
4aa956d8 1633 .twsk_destructor = tcp_twsk_destructor,
ccb7c410
DM
1634};
1635
3b401a81 1636static const struct inet_connection_sock_af_ops ipv6_specific = {
543d9cfe
ACM
1637 .queue_xmit = inet6_csk_xmit,
1638 .send_check = tcp_v6_send_check,
1639 .rebuild_header = inet6_sk_rebuild_header,
5d299f3d 1640 .sk_rx_dst_set = inet6_sk_rx_dst_set,
543d9cfe
ACM
1641 .conn_request = tcp_v6_conn_request,
1642 .syn_recv_sock = tcp_v6_syn_recv_sock,
543d9cfe 1643 .net_header_len = sizeof(struct ipv6hdr),
67469601 1644 .net_frag_header_len = sizeof(struct frag_hdr),
543d9cfe
ACM
1645 .setsockopt = ipv6_setsockopt,
1646 .getsockopt = ipv6_getsockopt,
1647 .addr2sockaddr = inet6_csk_addr2sockaddr,
1648 .sockaddr_len = sizeof(struct sockaddr_in6),
3fdadf7d 1649#ifdef CONFIG_COMPAT
543d9cfe
ACM
1650 .compat_setsockopt = compat_ipv6_setsockopt,
1651 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1652#endif
4fab9071 1653 .mtu_reduced = tcp_v6_mtu_reduced,
1da177e4
LT
1654};
1655
cfb6eeb4 1656#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1657static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
cfb6eeb4 1658 .md5_lookup = tcp_v6_md5_lookup,
49a72dfb 1659 .calc_md5_hash = tcp_v6_md5_hash_skb,
cfb6eeb4 1660 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1661};
a928630a 1662#endif
cfb6eeb4 1663
1da177e4
LT
1664/*
1665 * TCP over IPv4 via INET6 API
1666 */
3b401a81 1667static const struct inet_connection_sock_af_ops ipv6_mapped = {
543d9cfe
ACM
1668 .queue_xmit = ip_queue_xmit,
1669 .send_check = tcp_v4_send_check,
1670 .rebuild_header = inet_sk_rebuild_header,
63d02d15 1671 .sk_rx_dst_set = inet_sk_rx_dst_set,
543d9cfe
ACM
1672 .conn_request = tcp_v6_conn_request,
1673 .syn_recv_sock = tcp_v6_syn_recv_sock,
543d9cfe
ACM
1674 .net_header_len = sizeof(struct iphdr),
1675 .setsockopt = ipv6_setsockopt,
1676 .getsockopt = ipv6_getsockopt,
1677 .addr2sockaddr = inet6_csk_addr2sockaddr,
1678 .sockaddr_len = sizeof(struct sockaddr_in6),
3fdadf7d 1679#ifdef CONFIG_COMPAT
543d9cfe
ACM
1680 .compat_setsockopt = compat_ipv6_setsockopt,
1681 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1682#endif
4fab9071 1683 .mtu_reduced = tcp_v4_mtu_reduced,
1da177e4
LT
1684};
1685
cfb6eeb4 1686#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1687static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
cfb6eeb4 1688 .md5_lookup = tcp_v4_md5_lookup,
49a72dfb 1689 .calc_md5_hash = tcp_v4_md5_hash_skb,
cfb6eeb4 1690 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1691};
a928630a 1692#endif
cfb6eeb4 1693
1da177e4
LT
1694/* NOTE: A lot of things are set to zero explicitly by the call to
 1695 * sk_alloc(), so they need not be done here.
1696 */
1697static int tcp_v6_init_sock(struct sock *sk)
1698{
6687e988 1699 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4 1700
900f65d3 1701 tcp_init_sock(sk);
1da177e4 1702
8292a17a 1703 icsk->icsk_af_ops = &ipv6_specific;
1da177e4 1704
cfb6eeb4 1705#ifdef CONFIG_TCP_MD5SIG
ac807fa8 1706 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
cfb6eeb4
YH
1707#endif
1708
1da177e4
LT
1709 return 0;
1710}
1711
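/* Teardown runs the shared IPv4/TCP destructor for the TCP state and then
 * inet6_destroy_sock() for the IPv6-level socket state (options, queued
 * packet-option skbs).
 */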
7d06b2e0 1712static void tcp_v6_destroy_sock(struct sock *sk)
1da177e4 1713{
1da177e4 1714 tcp_v4_destroy_sock(sk);
7d06b2e0 1715 inet6_destroy_sock(sk);
1da177e4
LT
1716}
1717
952a10be 1718#ifdef CONFIG_PROC_FS
1da177e4 1719/* Proc filesystem TCPv6 sock list dumping. */
1ab1457c 1720static void get_openreq6(struct seq_file *seq,
aa3a0c8c 1721 const struct request_sock *req, int i)
1da177e4 1722{
fa76ce73 1723 long ttd = req->rsk_timer.expires - jiffies;
634fb979
ED
1724 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1725 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1da177e4
LT
1726
1727 if (ttd < 0)
1728 ttd = 0;
1729
1da177e4
LT
1730 seq_printf(seq,
1731 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
d14c5ab6 1732 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1da177e4
LT
1733 i,
1734 src->s6_addr32[0], src->s6_addr32[1],
1735 src->s6_addr32[2], src->s6_addr32[3],
b44084c2 1736 inet_rsk(req)->ir_num,
1da177e4
LT
1737 dest->s6_addr32[0], dest->s6_addr32[1],
1738 dest->s6_addr32[2], dest->s6_addr32[3],
634fb979 1739 ntohs(inet_rsk(req)->ir_rmt_port),
1da177e4 1740 TCP_SYN_RECV,
4c99aa40 1741		   0, 0, /* could print option size, but that is AF-dependent. */
1ab1457c
YH
1742 1, /* timers active (only the expire timer) */
1743 jiffies_to_clock_t(ttd),
e6c022a4 1744 req->num_timeout,
aa3a0c8c
ED
1745 from_kuid_munged(seq_user_ns(seq),
1746 sock_i_uid(req->rsk_listener)),
1ab1457c 1747		   0, /* non-standard timer */
1da177e4
LT
1748 0, /* open_requests have no inode */
1749 0, req);
1750}
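/* Open requests are always reported in TCP_SYN_RECV, with the tm->when column
 * taken from the time left on rsk_timer and the retransmit column from
 * req->num_timeout; they have no inode, so 0 is printed there.
 */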
1751
1752static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1753{
b71d1d42 1754 const struct in6_addr *dest, *src;
1da177e4
LT
1755 __u16 destp, srcp;
1756 int timer_active;
1757 unsigned long timer_expires;
cf533ea5
ED
1758 const struct inet_sock *inet = inet_sk(sp);
1759 const struct tcp_sock *tp = tcp_sk(sp);
463c84b9 1760 const struct inet_connection_sock *icsk = inet_csk(sp);
0536fcc0 1761 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
00fd38d9
ED
1762 int rx_queue;
1763 int state;
1da177e4 1764
efe4208f
ED
1765 dest = &sp->sk_v6_daddr;
1766 src = &sp->sk_v6_rcv_saddr;
c720c7e8
ED
1767 destp = ntohs(inet->inet_dport);
1768 srcp = ntohs(inet->inet_sport);
463c84b9 1769
ce3cf4ec 1770 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
57dde7f7 1771 icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
ce3cf4ec 1772 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
1da177e4 1773 timer_active = 1;
463c84b9
ACM
1774 timer_expires = icsk->icsk_timeout;
1775 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1da177e4 1776 timer_active = 4;
463c84b9 1777 timer_expires = icsk->icsk_timeout;
1da177e4
LT
1778 } else if (timer_pending(&sp->sk_timer)) {
1779 timer_active = 2;
1780 timer_expires = sp->sk_timer.expires;
1781 } else {
1782 timer_active = 0;
1783 timer_expires = jiffies;
1784 }
1785
00fd38d9
ED
1786 state = sk_state_load(sp);
1787 if (state == TCP_LISTEN)
1788 rx_queue = sp->sk_ack_backlog;
1789 else
1790 /* Because we don't lock the socket,
1791 * we might find a transient negative value.
1792 */
1793 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1794
1da177e4
LT
1795 seq_printf(seq,
1796 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
d14c5ab6 1797 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1da177e4
LT
1798 i,
1799 src->s6_addr32[0], src->s6_addr32[1],
1800 src->s6_addr32[2], src->s6_addr32[3], srcp,
1801 dest->s6_addr32[0], dest->s6_addr32[1],
1802 dest->s6_addr32[2], dest->s6_addr32[3], destp,
00fd38d9
ED
1803 state,
1804 tp->write_seq - tp->snd_una,
1805 rx_queue,
1da177e4 1806 timer_active,
a399a805 1807 jiffies_delta_to_clock_t(timer_expires - jiffies),
463c84b9 1808 icsk->icsk_retransmits,
a7cb5a49 1809 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
6687e988 1810 icsk->icsk_probes_out,
1da177e4 1811 sock_i_ino(sp),
41c6d650 1812 refcount_read(&sp->sk_refcnt), sp,
7be87351
SH
1813 jiffies_to_clock_t(icsk->icsk_rto),
1814 jiffies_to_clock_t(icsk->icsk_ack.ato),
4c99aa40 1815 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
0b6a05c1 1816 tp->snd_cwnd,
00fd38d9 1817 state == TCP_LISTEN ?
0536fcc0 1818 fastopenq->max_qlen :
0a672f74 1819 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1da177e4
LT
1820 );
1821}
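/* Column semantics: tx_queue is write_seq - snd_una (bytes queued but not yet
 * acknowledged); rx_queue is the accept backlog for listeners and
 * rcv_nxt - copied_seq otherwise, clamped at zero because the socket is
 * sampled without its lock held.
 */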
1822
1ab1457c 1823static void get_timewait6_sock(struct seq_file *seq,
8feaf0c0 1824 struct inet_timewait_sock *tw, int i)
1da177e4 1825{
789f558c 1826 long delta = tw->tw_timer.expires - jiffies;
b71d1d42 1827 const struct in6_addr *dest, *src;
1da177e4 1828 __u16 destp, srcp;
1da177e4 1829
efe4208f
ED
1830 dest = &tw->tw_v6_daddr;
1831 src = &tw->tw_v6_rcv_saddr;
1da177e4
LT
1832 destp = ntohs(tw->tw_dport);
1833 srcp = ntohs(tw->tw_sport);
1834
1835 seq_printf(seq,
1836 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
71338aa7 1837 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1da177e4
LT
1838 i,
1839 src->s6_addr32[0], src->s6_addr32[1],
1840 src->s6_addr32[2], src->s6_addr32[3], srcp,
1841 dest->s6_addr32[0], dest->s6_addr32[1],
1842 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1843 tw->tw_substate, 0, 0,
a399a805 1844 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
41c6d650 1845 refcount_read(&tw->tw_refcnt), tw);
1da177e4
LT
1846}
1847
1da177e4
LT
1848static int tcp6_seq_show(struct seq_file *seq, void *v)
1849{
1850 struct tcp_iter_state *st;
05dbc7b5 1851 struct sock *sk = v;
1da177e4
LT
1852
1853 if (v == SEQ_START_TOKEN) {
1854 seq_puts(seq,
1855 " sl "
1856 "local_address "
1857 "remote_address "
1858 "st tx_queue rx_queue tr tm->when retrnsmt"
1859 " uid timeout inode\n");
1860 goto out;
1861 }
1862 st = seq->private;
1863
079096f1
ED
1864 if (sk->sk_state == TCP_TIME_WAIT)
1865 get_timewait6_sock(seq, v, st->num);
1866 else if (sk->sk_state == TCP_NEW_SYN_RECV)
aa3a0c8c 1867 get_openreq6(seq, v, st->num);
079096f1
ED
1868 else
1869 get_tcp6_sock(seq, v, st->num);
1da177e4
LT
1870out:
1871 return 0;
1872}
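/* A minimal userspace sketch of consuming the lines emitted by
 * tcp6_seq_show() above: read /proc/net/tcp6, skip the header and print the
 * hex-encoded endpoints plus the state column.  Illustrative only (hence the
 * #if 0); it is not part of this file or of the kernel build.
 */
#if 0
#include <stdio.h>

int main(void)
{
	char line[512], local[64], remote[64];
	unsigned int state;
	FILE *f = fopen("/proc/net/tcp6", "r");

	if (!f)
		return 1;
	if (!fgets(line, sizeof(line), f)) {	/* header line */
		fclose(f);
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (sscanf(line, "%*d: %63s %63s %x",
			   local, remote, &state) == 3)
			printf("%s -> %s state %02X\n", local, remote, state);
	fclose(f);
	return 0;
}
#endif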
1873
73cb88ec
AV
1874static const struct file_operations tcp6_afinfo_seq_fops = {
1875 .owner = THIS_MODULE,
1876 .open = tcp_seq_open,
1877 .read = seq_read,
1878 .llseek = seq_lseek,
1879 .release = seq_release_net
1880};
1881
1da177e4 1882static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1da177e4
LT
1883 .name = "tcp6",
1884 .family = AF_INET6,
73cb88ec 1885 .seq_fops = &tcp6_afinfo_seq_fops,
9427c4b3
DL
1886 .seq_ops = {
1887 .show = tcp6_seq_show,
1888 },
1da177e4
LT
1889};
1890
2c8c1e72 1891int __net_init tcp6_proc_init(struct net *net)
1da177e4 1892{
6f8b13bc 1893 return tcp_proc_register(net, &tcp6_seq_afinfo);
1da177e4
LT
1894}
1895
6f8b13bc 1896void tcp6_proc_exit(struct net *net)
1da177e4 1897{
6f8b13bc 1898 tcp_proc_unregister(net, &tcp6_seq_afinfo);
1da177e4
LT
1899}
1900#endif
1901
1902struct proto tcpv6_prot = {
1903 .name = "TCPv6",
1904 .owner = THIS_MODULE,
1905 .close = tcp_close,
1906 .connect = tcp_v6_connect,
1907 .disconnect = tcp_disconnect,
463c84b9 1908 .accept = inet_csk_accept,
1da177e4
LT
1909 .ioctl = tcp_ioctl,
1910 .init = tcp_v6_init_sock,
1911 .destroy = tcp_v6_destroy_sock,
1912 .shutdown = tcp_shutdown,
1913 .setsockopt = tcp_setsockopt,
1914 .getsockopt = tcp_getsockopt,
4b9d07a4 1915 .keepalive = tcp_set_keepalive,
1da177e4 1916 .recvmsg = tcp_recvmsg,
7ba42910
CG
1917 .sendmsg = tcp_sendmsg,
1918 .sendpage = tcp_sendpage,
1da177e4 1919 .backlog_rcv = tcp_v6_do_rcv,
46d3ceab 1920 .release_cb = tcp_release_cb,
496611d7 1921 .hash = inet6_hash,
ab1e0a13
ACM
1922 .unhash = inet_unhash,
1923 .get_port = inet_csk_get_port,
1da177e4 1924 .enter_memory_pressure = tcp_enter_memory_pressure,
06044751 1925 .leave_memory_pressure = tcp_leave_memory_pressure,
c9bee3b7 1926 .stream_memory_free = tcp_stream_memory_free,
1da177e4
LT
1927 .sockets_allocated = &tcp_sockets_allocated,
1928 .memory_allocated = &tcp_memory_allocated,
1929 .memory_pressure = &tcp_memory_pressure,
0a5578cf 1930 .orphan_count = &tcp_orphan_count,
a4fe34bf 1931 .sysctl_mem = sysctl_tcp_mem,
1da177e4
LT
1932 .sysctl_wmem = sysctl_tcp_wmem,
1933 .sysctl_rmem = sysctl_tcp_rmem,
1934 .max_header = MAX_TCP_HEADER,
1935 .obj_size = sizeof(struct tcp6_sock),
5f0d5a3a 1936 .slab_flags = SLAB_TYPESAFE_BY_RCU,
6d6ee43e 1937 .twsk_prot = &tcp6_timewait_sock_ops,
60236fdd 1938 .rsk_prot = &tcp6_request_sock_ops,
39d8cda7 1939 .h.hashinfo = &tcp_hashinfo,
7ba42910 1940 .no_autobind = true,
543d9cfe
ACM
1941#ifdef CONFIG_COMPAT
1942 .compat_setsockopt = compat_tcp_setsockopt,
1943 .compat_getsockopt = compat_tcp_getsockopt,
d1a4c0b3 1944#endif
c1e64e29 1945 .diag_destroy = tcp_abort,
1da177e4
LT
1946};
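/* tcpv6_prot shares the established/bind hash tables (tcp_hashinfo), memory
 * accounting and sysctl limits with IPv4 TCP; only the connect/init/destroy/
 * backlog hooks and the request/timewait sock ops differ.  It is plugged into
 * the socket layer through tcpv6_protosw below.
 */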
1947
dddb64bc 1948static struct inet6_protocol tcpv6_protocol = {
c7109986 1949 .early_demux = tcp_v6_early_demux,
dddb64bc 1950 .early_demux_handler = tcp_v6_early_demux,
1da177e4
LT
1951 .handler = tcp_v6_rcv,
1952 .err_handler = tcp_v6_err,
1953 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1954};
1955
1da177e4
LT
1956static struct inet_protosw tcpv6_protosw = {
1957 .type = SOCK_STREAM,
1958 .protocol = IPPROTO_TCP,
1959 .prot = &tcpv6_prot,
1960 .ops = &inet6_stream_ops,
d83d8461
ACM
1961 .flags = INET_PROTOSW_PERMANENT |
1962 INET_PROTOSW_ICSK,
1da177e4
LT
1963};
1964
2c8c1e72 1965static int __net_init tcpv6_net_init(struct net *net)
93ec926b 1966{
5677242f
DL
1967 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1968 SOCK_RAW, IPPROTO_TCP, net);
93ec926b
DL
1969}
1970
2c8c1e72 1971static void __net_exit tcpv6_net_exit(struct net *net)
93ec926b 1972{
5677242f 1973 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
b099ce26
EB
1974}
1975
2c8c1e72 1976static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
b099ce26 1977{
1946e672 1978 inet_twsk_purge(&tcp_hashinfo, AF_INET6);
93ec926b
DL
1979}
1980
1981static struct pernet_operations tcpv6_net_ops = {
b099ce26
EB
1982 .init = tcpv6_net_init,
1983 .exit = tcpv6_net_exit,
1984 .exit_batch = tcpv6_net_exit_batch,
93ec926b
DL
1985};
1986
7f4e4868 1987int __init tcpv6_init(void)
1da177e4 1988{
7f4e4868
DL
1989 int ret;
1990
3336288a
VY
1991 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1992 if (ret)
c6b641a4 1993 goto out;
3336288a 1994
1da177e4 1995 /* register inet6 protocol */
7f4e4868
DL
1996 ret = inet6_register_protosw(&tcpv6_protosw);
1997 if (ret)
1998 goto out_tcpv6_protocol;
1999
93ec926b 2000 ret = register_pernet_subsys(&tcpv6_net_ops);
7f4e4868
DL
2001 if (ret)
2002 goto out_tcpv6_protosw;
2003out:
2004 return ret;
ae0f7d5f 2005
7f4e4868
DL
2006out_tcpv6_protosw:
2007 inet6_unregister_protosw(&tcpv6_protosw);
3336288a
VY
2008out_tcpv6_protocol:
2009 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
7f4e4868
DL
2010 goto out;
2011}
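/* Registration order matters: the protocol handler first, then the protosw
 * entry, then the pernet ops; the error labels unwind in the reverse order so
 * a failure at any step leaves nothing partially registered.
 */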
2012
09f7709f 2013void tcpv6_exit(void)
7f4e4868 2014{
93ec926b 2015 unregister_pernet_subsys(&tcpv6_net_ops);
7f4e4868
DL
2016 inet6_unregister_protosw(&tcpv6_protosw);
2017 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1da177e4 2018}