2874c5fd 1// SPDX-License-Identifier: GPL-2.0-or-later
1da177e4
LT
2/*
3 * TCP over IPv6
1ab1457c 4 * Linux INET6 implementation
1da177e4
LT
5 *
6 * Authors:
1ab1457c 7 * Pedro Roque <roque@di.fc.ul.pt>
1da177e4 8 *
1ab1457c 9 * Based on:
1da177e4
LT
10 * linux/net/ipv4/tcp.c
11 * linux/net/ipv4/tcp_input.c
12 * linux/net/ipv4/tcp_output.c
13 *
14 * Fixes:
15 * Hideaki YOSHIFUJI : sin6_scope_id support
16 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
17 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
18 * a single port at the same time.
19 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
1da177e4
LT
20 */
21
eb4dea58 22#include <linux/bottom_half.h>
1da177e4 23#include <linux/module.h>
1da177e4
LT
24#include <linux/errno.h>
25#include <linux/types.h>
26#include <linux/socket.h>
27#include <linux/sockios.h>
28#include <linux/net.h>
29#include <linux/jiffies.h>
30#include <linux/in.h>
31#include <linux/in6.h>
32#include <linux/netdevice.h>
33#include <linux/init.h>
34#include <linux/jhash.h>
35#include <linux/ipsec.h>
36#include <linux/times.h>
5a0e3ad6 37#include <linux/slab.h>
4aa956d8 38#include <linux/uaccess.h>
1da177e4
LT
39#include <linux/ipv6.h>
40#include <linux/icmpv6.h>
41#include <linux/random.h>
0e219ae4 42#include <linux/indirect_call_wrapper.h>
1da177e4
LT
43
44#include <net/tcp.h>
45#include <net/ndisc.h>
5324a040 46#include <net/inet6_hashtables.h>
8129765a 47#include <net/inet6_connection_sock.h>
1da177e4
LT
48#include <net/ipv6.h>
49#include <net/transp_v6.h>
50#include <net/addrconf.h>
51#include <net/ip6_route.h>
52#include <net/ip6_checksum.h>
53#include <net/inet_ecn.h>
54#include <net/protocol.h>
55#include <net/xfrm.h>
1da177e4
LT
56#include <net/snmp.h>
57#include <net/dsfield.h>
6d6ee43e 58#include <net/timewait_sock.h>
3d58b5fa 59#include <net/inet_common.h>
6e5714ea 60#include <net/secure_seq.h>
076bb0c8 61#include <net/busy_poll.h>
1da177e4 62
1da177e4
LT
63#include <linux/proc_fs.h>
64#include <linux/seq_file.h>
65
cf80e0e4 66#include <crypto/hash.h>
cfb6eeb4
YH
67#include <linux/scatterlist.h>
68
c24b14c4
SL
69#include <trace/events/tcp.h>
70
a00e7444
ED
71static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
72static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
6edafaaf 73 struct request_sock *req);
1da177e4
LT
74
75static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
1da177e4 76
3b401a81
SH
77static const struct inet_connection_sock_af_ops ipv6_mapped;
78static const struct inet_connection_sock_af_ops ipv6_specific;
a928630a 79#ifdef CONFIG_TCP_MD5SIG
b2e4b3de
SH
80static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
81static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
9501f972 82#else
51723935 83static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
b71d1d42 84 const struct in6_addr *addr)
9501f972
YH
85{
86 return NULL;
87}
a928630a 88#endif
1da177e4 89
93a77c11
ED
90/* Helper returning the inet6 address from a given tcp socket.
91 * It can be used in TCP stack instead of inet6_sk(sk).
92 * This avoids a dereference and allows compiler optimizations.
f5d54767 93 * It is a specialized version of inet6_sk_generic().
93a77c11
ED
94 */
95static struct ipv6_pinfo *tcp_inet6_sk(const struct sock *sk)
96{
f5d54767 97 unsigned int offset = sizeof(struct tcp6_sock) - sizeof(struct ipv6_pinfo);
93a77c11 98
f5d54767 99 return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
93a77c11
ED
100}
101
fae6ef87
NC
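/* Cache the validated input route on the socket so the established fast
 * path can reuse it, remembering the ingress ifindex and a route cookie
 * for later revalidation.
 */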
102static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
103{
104 struct dst_entry *dst = skb_dst(skb);
fae6ef87 105
5037e9ef 106 if (dst && dst_hold_safe(dst)) {
ca777eff
ED
107 const struct rt6_info *rt = (const struct rt6_info *)dst;
108
ca777eff
ED
109 sk->sk_rx_dst = dst;
110 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
93a77c11 111 tcp_inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
ca777eff 112 }
fae6ef87
NC
113}
114
84b114b9 115static u32 tcp_v6_init_seq(const struct sk_buff *skb)
1da177e4 116{
84b114b9
ED
117 return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
118 ipv6_hdr(skb)->saddr.s6_addr32,
119 tcp_hdr(skb)->dest,
120 tcp_hdr(skb)->source);
121}
122
5d2ed052 123static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
84b114b9 124{
5d2ed052 125 return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
84b114b9 126 ipv6_hdr(skb)->saddr.s6_addr32);
1da177e4
LT
127}
128
d74bad4e
AI
129static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
130 int addr_len)
131{
132 /* This check is replicated from tcp_v6_connect() and intended to
133 * prevent the BPF program called below from accessing bytes that are
134 * outside of the bounds specified by the user in addr_len.
135 */
136 if (addr_len < SIN6_LEN_RFC2133)
137 return -EINVAL;
138
139 sock_owned_by_me(sk);
140
141 return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr);
142}
143
1ab1457c 144static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
1da177e4
LT
145 int addr_len)
146{
147 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
1ab1457c 148 struct inet_sock *inet = inet_sk(sk);
d83d8461 149 struct inet_connection_sock *icsk = inet_csk(sk);
93a77c11 150 struct ipv6_pinfo *np = tcp_inet6_sk(sk);
1da177e4 151 struct tcp_sock *tp = tcp_sk(sk);
20c59de2 152 struct in6_addr *saddr = NULL, *final_p, final;
45f6fad8 153 struct ipv6_txoptions *opt;
4c9483b2 154 struct flowi6 fl6;
1da177e4
LT
155 struct dst_entry *dst;
156 int addr_type;
157 int err;
1946e672 158 struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
1da177e4 159
1ab1457c 160 if (addr_len < SIN6_LEN_RFC2133)
1da177e4
LT
161 return -EINVAL;
162
1ab1457c 163 if (usin->sin6_family != AF_INET6)
a02cec21 164 return -EAFNOSUPPORT;
1da177e4 165
4c9483b2 166 memset(&fl6, 0, sizeof(fl6));
1da177e4
LT
167
168 if (np->sndflow) {
4c9483b2
DM
169 fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
170 IP6_ECN_flow_init(fl6.flowlabel);
171 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
1da177e4 172 struct ip6_flowlabel *flowlabel;
4c9483b2 173 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
59c820b2 174 if (IS_ERR(flowlabel))
1da177e4 175 return -EINVAL;
1da177e4
LT
176 fl6_sock_release(flowlabel);
177 }
178 }
179
180 /*
1ab1457c
YH
181 * connect() to INADDR_ANY means loopback (BSD'ism).
182 */
183
052d2369
JL
184 if (ipv6_addr_any(&usin->sin6_addr)) {
185 if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
186 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
187 &usin->sin6_addr);
188 else
189 usin->sin6_addr = in6addr_loopback;
190 }
1da177e4
LT
191
192 addr_type = ipv6_addr_type(&usin->sin6_addr);
193
4c99aa40 194 if (addr_type & IPV6_ADDR_MULTICAST)
1da177e4
LT
195 return -ENETUNREACH;
196
197 if (addr_type&IPV6_ADDR_LINKLOCAL) {
198 if (addr_len >= sizeof(struct sockaddr_in6) &&
199 usin->sin6_scope_id) {
200 /* If interface is set while binding, indices
201 * must coincide.
202 */
54dc3e33 203 if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
1da177e4
LT
204 return -EINVAL;
205
206 sk->sk_bound_dev_if = usin->sin6_scope_id;
207 }
208
209 /* Connect to link-local address requires an interface */
210 if (!sk->sk_bound_dev_if)
211 return -EINVAL;
212 }
213
214 if (tp->rx_opt.ts_recent_stamp &&
efe4208f 215 !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
1da177e4
LT
216 tp->rx_opt.ts_recent = 0;
217 tp->rx_opt.ts_recent_stamp = 0;
0f317464 218 WRITE_ONCE(tp->write_seq, 0);
1da177e4
LT
219 }
220
efe4208f 221 sk->sk_v6_daddr = usin->sin6_addr;
4c9483b2 222 np->flow_label = fl6.flowlabel;
1da177e4
LT
223
224 /*
225 * TCP over IPv4
226 */
227
052d2369 228 if (addr_type & IPV6_ADDR_MAPPED) {
d83d8461 229 u32 exthdrlen = icsk->icsk_ext_hdr_len;
1da177e4
LT
230 struct sockaddr_in sin;
231
1da177e4
LT
232 if (__ipv6_only_sock(sk))
233 return -ENETUNREACH;
234
235 sin.sin_family = AF_INET;
236 sin.sin_port = usin->sin6_port;
237 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
238
d83d8461 239 icsk->icsk_af_ops = &ipv6_mapped;
1da177e4 240 sk->sk_backlog_rcv = tcp_v4_do_rcv;
cfb6eeb4
YH
241#ifdef CONFIG_TCP_MD5SIG
242 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
243#endif
1da177e4
LT
244
245 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
246
247 if (err) {
d83d8461
ACM
248 icsk->icsk_ext_hdr_len = exthdrlen;
249 icsk->icsk_af_ops = &ipv6_specific;
1da177e4 250 sk->sk_backlog_rcv = tcp_v6_do_rcv;
cfb6eeb4
YH
251#ifdef CONFIG_TCP_MD5SIG
252 tp->af_specific = &tcp_sock_ipv6_specific;
253#endif
1da177e4 254 goto failure;
1da177e4 255 }
d1e559d0 256 np->saddr = sk->sk_v6_rcv_saddr;
1da177e4
LT
257
258 return err;
259 }
260
efe4208f
ED
261 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
262 saddr = &sk->sk_v6_rcv_saddr;
1da177e4 263
4c9483b2 264 fl6.flowi6_proto = IPPROTO_TCP;
efe4208f 265 fl6.daddr = sk->sk_v6_daddr;
4e3fd7a0 266 fl6.saddr = saddr ? *saddr : np->saddr;
4c9483b2
DM
267 fl6.flowi6_oif = sk->sk_bound_dev_if;
268 fl6.flowi6_mark = sk->sk_mark;
1958b856
DM
269 fl6.fl6_dport = usin->sin6_port;
270 fl6.fl6_sport = inet->inet_sport;
e2d118a1 271 fl6.flowi6_uid = sk->sk_uid;
1da177e4 272
1e1d04e6 273 opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
45f6fad8 274 final_p = fl6_update_dst(&fl6, opt, &final);
1da177e4 275
4c9483b2 276 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
beb8d13b 277
c4e85f73 278 dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
68d0c6d3
DM
279 if (IS_ERR(dst)) {
280 err = PTR_ERR(dst);
1da177e4 281 goto failure;
14e50e57 282 }
1da177e4 283
63159f29 284 if (!saddr) {
4c9483b2 285 saddr = &fl6.saddr;
efe4208f 286 sk->sk_v6_rcv_saddr = *saddr;
1da177e4
LT
287 }
288
289 /* set the source address */
4e3fd7a0 290 np->saddr = *saddr;
c720c7e8 291 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
1da177e4 292
f83ef8c0 293 sk->sk_gso_type = SKB_GSO_TCPV6;
6bd4f355 294 ip6_dst_store(sk, dst, NULL, NULL);
1da177e4 295
d83d8461 296 icsk->icsk_ext_hdr_len = 0;
45f6fad8
ED
297 if (opt)
298 icsk->icsk_ext_hdr_len = opt->opt_flen +
299 opt->opt_nflen;
1da177e4
LT
300
301 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
302
c720c7e8 303 inet->inet_dport = usin->sin6_port;
1da177e4
LT
304
305 tcp_set_state(sk, TCP_SYN_SENT);
1946e672 306 err = inet6_hash_connect(tcp_death_row, sk);
1da177e4
LT
307 if (err)
308 goto late_failure;
309
877d1f62 310 sk_set_txhash(sk);
9e7ceb06 311
00355fa5 312 if (likely(!tp->repair)) {
00355fa5 313 if (!tp->write_seq)
0f317464
ED
314 WRITE_ONCE(tp->write_seq,
315 secure_tcpv6_seq(np->saddr.s6_addr32,
316 sk->sk_v6_daddr.s6_addr32,
317 inet->inet_sport,
318 inet->inet_dport));
5d2ed052
ED
319 tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
320 np->saddr.s6_addr32,
84b114b9 321 sk->sk_v6_daddr.s6_addr32);
00355fa5 322 }
1da177e4 323
19f6d3f3
WW
324 if (tcp_fastopen_defer_connect(sk, &err))
325 return err;
326 if (err)
327 goto late_failure;
328
1da177e4
LT
329 err = tcp_connect(sk);
330 if (err)
331 goto late_failure;
332
333 return 0;
334
335late_failure:
336 tcp_set_state(sk, TCP_CLOSE);
1da177e4 337failure:
c720c7e8 338 inet->inet_dport = 0;
1da177e4
LT
339 sk->sk_route_caps = 0;
340 return err;
341}
342
563d34d0
ED
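/* Handle an ICMPV6_PKT_TOOBIG notification (run directly or deferred from
 * tcp_v6_err()): re-check the route MTU recorded in tp->mtu_info, lower
 * the MSS if the cached path MTU is now too large, and retransmit so
 * smaller segments go out.
 */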
343static void tcp_v6_mtu_reduced(struct sock *sk)
344{
345 struct dst_entry *dst;
346
347 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
348 return;
349
350 dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
351 if (!dst)
352 return;
353
354 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
355 tcp_sync_mss(sk, dst_mtu(dst));
356 tcp_simple_retransmit(sk);
357 }
358}
359
32bbd879 360static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
d5fdd6ba 361 u8 type, u8 code, int offset, __be32 info)
1da177e4 362{
4c99aa40 363 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
505cbfc5 364 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
2215089b
ED
365 struct net *net = dev_net(skb->dev);
366 struct request_sock *fastopen;
1da177e4 367 struct ipv6_pinfo *np;
1ab1457c 368 struct tcp_sock *tp;
0a672f74 369 __u32 seq, snd_una;
2215089b 370 struct sock *sk;
9cf74903 371 bool fatal;
2215089b 372 int err;
1da177e4 373
2215089b
ED
374 sk = __inet6_lookup_established(net, &tcp_hashinfo,
375 &hdr->daddr, th->dest,
376 &hdr->saddr, ntohs(th->source),
4297a0ef 377 skb->dev->ifindex, inet6_sdif(skb));
1da177e4 378
2215089b 379 if (!sk) {
a16292a0
ED
380 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
381 ICMP6_MIB_INERRORS);
32bbd879 382 return -ENOENT;
1da177e4
LT
383 }
384
385 if (sk->sk_state == TCP_TIME_WAIT) {
9469c7b4 386 inet_twsk_put(inet_twsk(sk));
32bbd879 387 return 0;
1da177e4 388 }
2215089b 389 seq = ntohl(th->seq);
9cf74903 390 fatal = icmpv6_err_convert(type, code, &err);
32bbd879
SB
391 if (sk->sk_state == TCP_NEW_SYN_RECV) {
392 tcp_req_err(sk, seq, fatal);
393 return 0;
394 }
1da177e4
LT
395
396 bh_lock_sock(sk);
563d34d0 397 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
02a1d6e7 398 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
1da177e4
LT
399
400 if (sk->sk_state == TCP_CLOSE)
401 goto out;
402
93a77c11 403 if (ipv6_hdr(skb)->hop_limit < tcp_inet6_sk(sk)->min_hopcount) {
02a1d6e7 404 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
e802af9c
SH
405 goto out;
406 }
407
1da177e4 408 tp = tcp_sk(sk);
0a672f74 409 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
d983ea6f 410 fastopen = rcu_dereference(tp->fastopen_rsk);
0a672f74 411 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
1da177e4 412 if (sk->sk_state != TCP_LISTEN &&
0a672f74 413 !between(seq, snd_una, tp->snd_nxt)) {
02a1d6e7 414 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
1da177e4
LT
415 goto out;
416 }
417
93a77c11 418 np = tcp_inet6_sk(sk);
1da177e4 419
ec18d9a2 420 if (type == NDISC_REDIRECT) {
45caeaa5
JM
421 if (!sock_owned_by_user(sk)) {
422 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
ec18d9a2 423
45caeaa5
JM
424 if (dst)
425 dst->ops->redirect(dst, sk, skb);
426 }
50a75a89 427 goto out;
ec18d9a2
DM
428 }
429
1da177e4 430 if (type == ICMPV6_PKT_TOOBIG) {
0d4f0608
ED
431 /* We are not interested in TCP_LISTEN and open_requests
432 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
433 * they should go through unfragmented).
434 */
435 if (sk->sk_state == TCP_LISTEN)
436 goto out;
437
93b36cf3
HFS
438 if (!ip6_sk_accept_pmtu(sk))
439 goto out;
440
563d34d0
ED
441 tp->mtu_info = ntohl(info);
442 if (!sock_owned_by_user(sk))
443 tcp_v6_mtu_reduced(sk);
d013ef2a 444 else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
7aa5470c 445 &sk->sk_tsq_flags))
d013ef2a 446 sock_hold(sk);
1da177e4
LT
447 goto out;
448 }
449
1da177e4 450
60236fdd 451 /* Might be for a request_sock */
1da177e4 452 switch (sk->sk_state) {
1da177e4 453 case TCP_SYN_SENT:
0a672f74
YC
454 case TCP_SYN_RECV:
455 /* Only in fast or simultaneous open. If a fast open socket is
456 * already accepted it is treated as a connected one below.
457 */
63159f29 458 if (fastopen && !fastopen->sk)
0a672f74
YC
459 break;
460
1da177e4 461 if (!sock_owned_by_user(sk)) {
1da177e4
LT
462 sk->sk_err = err;
463 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
464
465 tcp_done(sk);
466 } else
467 sk->sk_err_soft = err;
468 goto out;
469 }
470
471 if (!sock_owned_by_user(sk) && np->recverr) {
472 sk->sk_err = err;
473 sk->sk_error_report(sk);
474 } else
475 sk->sk_err_soft = err;
476
477out:
478 bh_unlock_sock(sk);
479 sock_put(sk);
32bbd879 480 return 0;
1da177e4
LT
481}
482
483
0f935dbe 484static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
d6274bd8 485 struct flowi *fl,
3840a06e 486 struct request_sock *req,
ca6fb065 487 struct tcp_fastopen_cookie *foc,
b3d05147 488 enum tcp_synack_type synack_type)
1da177e4 489{
634fb979 490 struct inet_request_sock *ireq = inet_rsk(req);
93a77c11 491 struct ipv6_pinfo *np = tcp_inet6_sk(sk);
56ac42bc 492 struct ipv6_txoptions *opt;
d6274bd8 493 struct flowi6 *fl6 = &fl->u.ip6;
4c99aa40 494 struct sk_buff *skb;
9494218f 495 int err = -ENOMEM;
1da177e4 496
9f10d3f6 497 /* First, grab a route. */
f76b33c3
ED
498 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
499 IPPROTO_TCP)) == NULL)
fd80eb94 500 goto done;
9494218f 501
b3d05147 502 skb = tcp_make_synack(sk, dst, req, foc, synack_type);
9494218f 503
1da177e4 504 if (skb) {
634fb979
ED
505 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
506 &ireq->ir_v6_rmt_addr);
1da177e4 507
634fb979 508 fl6->daddr = ireq->ir_v6_rmt_addr;
53b24b8f 509 if (np->repflow && ireq->pktopts)
df3687ff
FF
510 fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
511
3e4006f0 512 rcu_read_lock();
56ac42bc
HD
513 opt = ireq->ipv6_opt;
514 if (!opt)
515 opt = rcu_dereference(np->opt);
4f6570d7
ED
516 err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass,
517 sk->sk_priority);
3e4006f0 518 rcu_read_unlock();
b9df3cb8 519 err = net_xmit_eval(err);
1da177e4
LT
520 }
521
522done:
1da177e4
LT
523 return err;
524}
525
72659ecc 526
60236fdd 527static void tcp_v6_reqsk_destructor(struct request_sock *req)
1da177e4 528{
56ac42bc 529 kfree(inet_rsk(req)->ipv6_opt);
634fb979 530 kfree_skb(inet_rsk(req)->pktopts);
1da177e4
LT
531}
532
cfb6eeb4 533#ifdef CONFIG_TCP_MD5SIG
b83e3deb 534static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
b71d1d42 535 const struct in6_addr *addr)
cfb6eeb4 536{
a915da9b 537 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
cfb6eeb4
YH
538}
539
b83e3deb 540static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
fd3a154a 541 const struct sock *addr_sk)
cfb6eeb4 542{
efe4208f 543 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
cfb6eeb4
YH
544}
545
8917a777
ID
546static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
547 char __user *optval, int optlen)
cfb6eeb4
YH
548{
549 struct tcp_md5sig cmd;
550 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
8917a777 551 u8 prefixlen;
cfb6eeb4
YH
552
553 if (optlen < sizeof(cmd))
554 return -EINVAL;
555
556 if (copy_from_user(&cmd, optval, sizeof(cmd)))
557 return -EFAULT;
558
559 if (sin6->sin6_family != AF_INET6)
560 return -EINVAL;
561
8917a777
ID
562 if (optname == TCP_MD5SIG_EXT &&
563 cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
564 prefixlen = cmd.tcpm_prefixlen;
565 if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
566 prefixlen > 32))
567 return -EINVAL;
568 } else {
569 prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
570 }
571
cfb6eeb4 572 if (!cmd.tcpm_keylen) {
e773e4fa 573 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
a915da9b 574 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
8917a777 575 AF_INET, prefixlen);
a915da9b 576 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
8917a777 577 AF_INET6, prefixlen);
cfb6eeb4
YH
578 }
579
580 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
581 return -EINVAL;
582
a915da9b
ED
583 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
584 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
8917a777 585 AF_INET, prefixlen, cmd.tcpm_key,
6797318e 586 cmd.tcpm_keylen, GFP_KERNEL);
cfb6eeb4 587
a915da9b 588 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
8917a777
ID
589 AF_INET6, prefixlen, cmd.tcpm_key,
590 cmd.tcpm_keylen, GFP_KERNEL);
cfb6eeb4
YH
591}
592
19689e38
ED
593static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
594 const struct in6_addr *daddr,
595 const struct in6_addr *saddr,
596 const struct tcphdr *th, int nbytes)
cfb6eeb4 597{
cfb6eeb4 598 struct tcp6_pseudohdr *bp;
49a72dfb 599 struct scatterlist sg;
19689e38 600 struct tcphdr *_th;
8d26d76d 601
19689e38 602 bp = hp->scratch;
cfb6eeb4 603 /* 1. TCP pseudo-header (RFC2460) */
4e3fd7a0
AD
604 bp->saddr = *saddr;
605 bp->daddr = *daddr;
49a72dfb 606 bp->protocol = cpu_to_be32(IPPROTO_TCP);
00b1304c 607 bp->len = cpu_to_be32(nbytes);
cfb6eeb4 608
19689e38
ED
609 _th = (struct tcphdr *)(bp + 1);
610 memcpy(_th, th, sizeof(*th));
611 _th->check = 0;
612
613 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
614 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
615 sizeof(*bp) + sizeof(*th));
cf80e0e4 616 return crypto_ahash_update(hp->md5_req);
49a72dfb 617}
c7da57a1 618
19689e38 619static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
b71d1d42 620 const struct in6_addr *daddr, struct in6_addr *saddr,
318cf7aa 621 const struct tcphdr *th)
49a72dfb
AL
622{
623 struct tcp_md5sig_pool *hp;
cf80e0e4 624 struct ahash_request *req;
49a72dfb
AL
625
626 hp = tcp_get_md5sig_pool();
627 if (!hp)
628 goto clear_hash_noput;
cf80e0e4 629 req = hp->md5_req;
49a72dfb 630
cf80e0e4 631 if (crypto_ahash_init(req))
49a72dfb 632 goto clear_hash;
19689e38 633 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
49a72dfb
AL
634 goto clear_hash;
635 if (tcp_md5_hash_key(hp, key))
636 goto clear_hash;
cf80e0e4
HX
637 ahash_request_set_crypt(req, NULL, md5_hash, 0);
638 if (crypto_ahash_final(req))
cfb6eeb4 639 goto clear_hash;
cfb6eeb4 640
cfb6eeb4 641 tcp_put_md5sig_pool();
cfb6eeb4 642 return 0;
49a72dfb 643
cfb6eeb4
YH
644clear_hash:
645 tcp_put_md5sig_pool();
646clear_hash_noput:
647 memset(md5_hash, 0, 16);
49a72dfb 648 return 1;
cfb6eeb4
YH
649}
650
39f8e58e
ED
651static int tcp_v6_md5_hash_skb(char *md5_hash,
652 const struct tcp_md5sig_key *key,
318cf7aa 653 const struct sock *sk,
318cf7aa 654 const struct sk_buff *skb)
cfb6eeb4 655{
b71d1d42 656 const struct in6_addr *saddr, *daddr;
49a72dfb 657 struct tcp_md5sig_pool *hp;
cf80e0e4 658 struct ahash_request *req;
318cf7aa 659 const struct tcphdr *th = tcp_hdr(skb);
cfb6eeb4 660
39f8e58e
ED
661 if (sk) { /* valid for establish/request sockets */
662 saddr = &sk->sk_v6_rcv_saddr;
efe4208f 663 daddr = &sk->sk_v6_daddr;
49a72dfb 664 } else {
b71d1d42 665 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
49a72dfb
AL
666 saddr = &ip6h->saddr;
667 daddr = &ip6h->daddr;
cfb6eeb4 668 }
49a72dfb
AL
669
670 hp = tcp_get_md5sig_pool();
671 if (!hp)
672 goto clear_hash_noput;
cf80e0e4 673 req = hp->md5_req;
49a72dfb 674
cf80e0e4 675 if (crypto_ahash_init(req))
49a72dfb
AL
676 goto clear_hash;
677
19689e38 678 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
49a72dfb
AL
679 goto clear_hash;
680 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
681 goto clear_hash;
682 if (tcp_md5_hash_key(hp, key))
683 goto clear_hash;
cf80e0e4
HX
684 ahash_request_set_crypt(req, NULL, md5_hash, 0);
685 if (crypto_ahash_final(req))
49a72dfb
AL
686 goto clear_hash;
687
688 tcp_put_md5sig_pool();
689 return 0;
690
691clear_hash:
692 tcp_put_md5sig_pool();
693clear_hash_noput:
694 memset(md5_hash, 0, 16);
695 return 1;
cfb6eeb4
YH
696}
697
ba8e275a
ED
698#endif
699
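/* Check the TCP-MD5 signature of an inbound segment against the key
 * configured for the peer address, if any. Returns true when the segment
 * must be dropped: a key exists but the option is missing, an unexpected
 * option is present, or the signature does not match.
 */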
700static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
d14c77e0
DA
701 const struct sk_buff *skb,
702 int dif, int sdif)
cfb6eeb4 703{
ba8e275a 704#ifdef CONFIG_TCP_MD5SIG
cf533ea5 705 const __u8 *hash_location = NULL;
cfb6eeb4 706 struct tcp_md5sig_key *hash_expected;
b71d1d42 707 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
318cf7aa 708 const struct tcphdr *th = tcp_hdr(skb);
cfb6eeb4 709 int genhash;
cfb6eeb4
YH
710 u8 newhash[16];
711
712 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
7d5d5525 713 hash_location = tcp_parse_md5sig_option(th);
cfb6eeb4 714
785957d3
DM
715 /* We've parsed the options - do we have a hash? */
716 if (!hash_expected && !hash_location)
ff74e23f 717 return false;
785957d3
DM
718
719 if (hash_expected && !hash_location) {
c10d9310 720 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
ff74e23f 721 return true;
cfb6eeb4
YH
722 }
723
785957d3 724 if (!hash_expected && hash_location) {
c10d9310 725 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
ff74e23f 726 return true;
cfb6eeb4
YH
727 }
728
729 /* check the signature */
49a72dfb
AL
730 genhash = tcp_v6_md5_hash_skb(newhash,
731 hash_expected,
39f8e58e 732 NULL, skb);
49a72dfb 733
cfb6eeb4 734 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
72145a68 735 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
e87cc472
JP
736 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
737 genhash ? "failed" : "mismatch",
738 &ip6h->saddr, ntohs(th->source),
739 &ip6h->daddr, ntohs(th->dest));
ff74e23f 740 return true;
cfb6eeb4 741 }
ba8e275a 742#endif
ff74e23f 743 return false;
cfb6eeb4 744}
cfb6eeb4 745
b40cf18e
ED
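/* Fill in the IPv6 specific part of a new request sock from the incoming
 * SYN: copy the peer and local addresses, pin link-local traffic to the
 * ingress interface, and keep the SYN skb around when the listener has
 * asked for IPv6 packet options.
 */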
746static void tcp_v6_init_req(struct request_sock *req,
747 const struct sock *sk_listener,
16bea70a
OP
748 struct sk_buff *skb)
749{
c2027d1e 750 bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
16bea70a 751 struct inet_request_sock *ireq = inet_rsk(req);
93a77c11 752 const struct ipv6_pinfo *np = tcp_inet6_sk(sk_listener);
16bea70a
OP
753
754 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
755 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
756
16bea70a 757 /* So that link locals have meaning */
c2027d1e 758 if ((!sk_listener->sk_bound_dev_if || l3_slave) &&
16bea70a 759 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
870c3151 760 ireq->ir_iif = tcp_v6_iif(skb);
16bea70a 761
04317daf 762 if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
b40cf18e 763 (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
a224772d 764 np->rxopt.bits.rxinfo ||
16bea70a
OP
765 np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
766 np->rxopt.bits.rxohlim || np->repflow)) {
63354797 767 refcount_inc(&skb->users);
16bea70a
OP
768 ireq->pktopts = skb;
769 }
770}
771
f964629e
ED
772static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
773 struct flowi *fl,
4396e461 774 const struct request_sock *req)
d94e0417 775{
f76b33c3 776 return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
d94e0417
OP
777}
778
c6aefafb 779struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
1da177e4 780 .family = AF_INET6,
2e6599cb 781 .obj_size = sizeof(struct tcp6_request_sock),
5db92c99 782 .rtx_syn_ack = tcp_rtx_synack,
60236fdd
ACM
783 .send_ack = tcp_v6_reqsk_send_ack,
784 .destructor = tcp_v6_reqsk_destructor,
72659ecc 785 .send_reset = tcp_v6_send_reset,
4aa956d8 786 .syn_ack_timeout = tcp_syn_ack_timeout,
1da177e4
LT
787};
788
b2e4b3de 789static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
2aec4a29
OP
790 .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
791 sizeof(struct ipv6hdr),
16bea70a 792#ifdef CONFIG_TCP_MD5SIG
fd3a154a 793 .req_md5_lookup = tcp_v6_md5_lookup,
e3afe7b7 794 .calc_md5_hash = tcp_v6_md5_hash_skb,
b6332e6c 795#endif
16bea70a 796 .init_req = tcp_v6_init_req,
fb7b37a7
OP
797#ifdef CONFIG_SYN_COOKIES
798 .cookie_init_seq = cookie_v6_init_sequence,
799#endif
d94e0417 800 .route_req = tcp_v6_route_req,
84b114b9
ED
801 .init_seq = tcp_v6_init_seq,
802 .init_ts_off = tcp_v6_init_ts_off,
d6274bd8 803 .send_synack = tcp_v6_send_synack,
16bea70a 804};
cfb6eeb4 805
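/* Build and send a bare ACK or RST for @skb on the per-netns control
 * socket, optionally carrying timestamp and MD5 options, without
 * requiring a full socket for the flow.
 */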
a00e7444 806static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
0f85feae
ED
807 u32 ack, u32 win, u32 tsval, u32 tsecr,
808 int oif, struct tcp_md5sig_key *key, int rst,
e9a5dcee 809 u8 tclass, __be32 label, u32 priority)
1da177e4 810{
cf533ea5
ED
811 const struct tcphdr *th = tcp_hdr(skb);
812 struct tcphdr *t1;
1da177e4 813 struct sk_buff *buff;
4c9483b2 814 struct flowi6 fl6;
0f85feae 815 struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
e5047992 816 struct sock *ctl_sk = net->ipv6.tcp_sk;
77c676da 817 unsigned int tot_len = sizeof(struct tcphdr);
adf30907 818 struct dst_entry *dst;
81ada62d 819 __be32 *topt;
00483690 820 __u32 mark = 0;
1da177e4 821
ee684b6f 822 if (tsecr)
626e264d 823 tot_len += TCPOLEN_TSTAMP_ALIGNED;
cfb6eeb4 824#ifdef CONFIG_TCP_MD5SIG
cfb6eeb4
YH
825 if (key)
826 tot_len += TCPOLEN_MD5SIG_ALIGNED;
827#endif
828
cfb6eeb4 829 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1da177e4 830 GFP_ATOMIC);
63159f29 831 if (!buff)
1ab1457c 832 return;
1da177e4 833
cfb6eeb4 834 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1da177e4 835
d58ff351 836 t1 = skb_push(buff, tot_len);
6651ffc8 837 skb_reset_transport_header(buff);
1da177e4
LT
838
839 /* Swap the send and the receive. */
840 memset(t1, 0, sizeof(*t1));
841 t1->dest = th->source;
842 t1->source = th->dest;
cfb6eeb4 843 t1->doff = tot_len / 4;
626e264d
IJ
844 t1->seq = htonl(seq);
845 t1->ack_seq = htonl(ack);
846 t1->ack = !rst || !th->ack;
847 t1->rst = rst;
848 t1->window = htons(win);
1da177e4 849
81ada62d
IJ
850 topt = (__be32 *)(t1 + 1);
851
ee684b6f 852 if (tsecr) {
626e264d
IJ
853 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
854 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
ee684b6f
AV
855 *topt++ = htonl(tsval);
856 *topt++ = htonl(tsecr);
626e264d
IJ
857 }
858
cfb6eeb4
YH
859#ifdef CONFIG_TCP_MD5SIG
860 if (key) {
81ada62d
IJ
861 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
862 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
863 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
78e645cb
IJ
864 &ipv6_hdr(skb)->saddr,
865 &ipv6_hdr(skb)->daddr, t1);
cfb6eeb4
YH
866 }
867#endif
868
4c9483b2 869 memset(&fl6, 0, sizeof(fl6));
4e3fd7a0
AD
870 fl6.daddr = ipv6_hdr(skb)->saddr;
871 fl6.saddr = ipv6_hdr(skb)->daddr;
1d13a96c 872 fl6.flowlabel = label;
1da177e4 873
e5700aff
DM
874 buff->ip_summed = CHECKSUM_PARTIAL;
875 buff->csum = 0;
876
4c9483b2 877 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
1da177e4 878
4c9483b2 879 fl6.flowi6_proto = IPPROTO_TCP;
a36dbdb2 880 if (rt6_need_strict(&fl6.daddr) && !oif)
870c3151 881 fl6.flowi6_oif = tcp_v6_iif(skb);
9b6c14d5
DA
882 else {
883 if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
884 oif = skb->skb_iif;
885
886 fl6.flowi6_oif = oif;
887 }
1d2f7b2d 888
c67b8555
ED
889 if (sk) {
890 if (sk->sk_state == TCP_TIME_WAIT) {
891 mark = inet_twsk(sk)->tw_mark;
892 /* autoflowlabel relies on buff->hash */
893 skb_set_hash(buff, inet_twsk(sk)->tw_txhash,
894 PKT_HASH_TYPE_L4);
895 } else {
896 mark = sk->sk_mark;
897 }
d6fb396c 898 buff->tstamp = tcp_transmit_time(sk);
c67b8555 899 }
00483690 900 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark;
1958b856
DM
901 fl6.fl6_dport = t1->dest;
902 fl6.fl6_sport = t1->source;
e2d118a1 903 fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
4c9483b2 904 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
1da177e4 905
c20121ae
DL
906 /* Pass a socket to ip6_dst_lookup_flow even when it is for a RST;
907 * the underlying function will use it to retrieve the network
908 * namespace
909 */
c4e85f73 910 dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
68d0c6d3
DM
911 if (!IS_ERR(dst)) {
912 skb_dst_set(buff, dst);
4f6570d7 913 ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass,
e9a5dcee 914 priority);
c10d9310 915 TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
68d0c6d3 916 if (rst)
c10d9310 917 TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
68d0c6d3 918 return;
1da177e4
LT
919 }
920
921 kfree_skb(buff);
922}
923
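/* Send a RST in reply to an unexpected segment. With MD5 enabled and no
 * socket available, try to find the listening socket that owns the flow
 * so the reset can still be signed with the right key.
 */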
a00e7444 924static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
1da177e4 925{
cf533ea5 926 const struct tcphdr *th = tcp_hdr(skb);
323a53c4 927 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
626e264d 928 u32 seq = 0, ack_seq = 0;
fa3e5b4e 929 struct tcp_md5sig_key *key = NULL;
658ddaaf
SL
930#ifdef CONFIG_TCP_MD5SIG
931 const __u8 *hash_location = NULL;
658ddaaf
SL
932 unsigned char newhash[16];
933 int genhash;
934 struct sock *sk1 = NULL;
935#endif
323a53c4 936 __be32 label = 0;
e9a5dcee 937 u32 priority = 0;
323a53c4 938 struct net *net;
c24b14c4 939 int oif = 0;
1da177e4 940
626e264d 941 if (th->rst)
1da177e4
LT
942 return;
943
c3658e8d
ED
944 /* If sk is not NULL, it means we did a successful lookup and the incoming
945 * route had to be correct. prequeue might have dropped our dst.
946 */
947 if (!sk && !ipv6_unicast_destination(skb))
626e264d 948 return;
1da177e4 949
39209673 950 net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
cfb6eeb4 951#ifdef CONFIG_TCP_MD5SIG
3b24d854 952 rcu_read_lock();
658ddaaf 953 hash_location = tcp_parse_md5sig_option(th);
271c3b9b 954 if (sk && sk_fullsock(sk)) {
e46787f0
FW
955 key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
956 } else if (hash_location) {
d14c77e0
DA
957 int dif = tcp_v6_iif_l3_slave(skb);
958 int sdif = tcp_v6_sdif(skb);
959
658ddaaf
SL
960 /*
961 * The active side is lost. Try to find the listening socket through
962 * the source port, and then find the md5 key through that socket.
963 * We are not losing any security here:
964 * the incoming packet is checked against the md5 hash of the key we find,
965 * and no RST is generated if the md5 hash doesn't match.
966 */
323a53c4 967 sk1 = inet6_lookup_listener(net,
a583636a
CG
968 &tcp_hashinfo, NULL, 0,
969 &ipv6h->saddr,
5ba24953 970 th->source, &ipv6h->daddr,
d14c77e0 971 ntohs(th->source), dif, sdif);
658ddaaf 972 if (!sk1)
3b24d854 973 goto out;
658ddaaf 974
658ddaaf
SL
975 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
976 if (!key)
3b24d854 977 goto out;
658ddaaf 978
39f8e58e 979 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
658ddaaf 980 if (genhash || memcmp(hash_location, newhash, 16) != 0)
3b24d854 981 goto out;
658ddaaf 982 }
cfb6eeb4
YH
983#endif
984
626e264d
IJ
985 if (th->ack)
986 seq = ntohl(th->ack_seq);
987 else
988 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
989 (th->doff << 2);
1da177e4 990
c24b14c4
SL
991 if (sk) {
992 oif = sk->sk_bound_dev_if;
052e0690
ED
993 if (sk_fullsock(sk)) {
994 const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
995
5c487bb9 996 trace_tcp_send_reset(sk, skb);
052e0690
ED
997 if (np->repflow)
998 label = ip6_flowlabel(ipv6h);
e9a5dcee 999 priority = sk->sk_priority;
052e0690 1000 }
f6c0f5d2 1001 if (sk->sk_state == TCP_TIME_WAIT) {
50a8accf 1002 label = cpu_to_be32(inet_twsk(sk)->tw_flowlabel);
f6c0f5d2
ED
1003 priority = inet_twsk(sk)->tw_priority;
1004 }
323a53c4 1005 } else {
a346abe0 1006 if (net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_TCP_RESET)
323a53c4 1007 label = ip6_flowlabel(ipv6h);
c24b14c4
SL
1008 }
1009
323a53c4 1010 tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0,
e9a5dcee 1011 label, priority);
658ddaaf
SL
1012
1013#ifdef CONFIG_TCP_MD5SIG
3b24d854
ED
1014out:
1015 rcu_read_unlock();
658ddaaf 1016#endif
626e264d 1017}
1da177e4 1018
a00e7444 1019static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
0f85feae 1020 u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
1d13a96c 1021 struct tcp_md5sig_key *key, u8 tclass,
e9a5dcee 1022 __be32 label, u32 priority)
626e264d 1023{
0f85feae 1024 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
e9a5dcee 1025 tclass, label, priority);
1da177e4
LT
1026}
1027
1028static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1029{
8feaf0c0 1030 struct inet_timewait_sock *tw = inet_twsk(sk);
cfb6eeb4 1031 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1da177e4 1032
0f85feae 1033 tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
8feaf0c0 1034 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
9a568de4 1035 tcp_time_stamp_raw() + tcptw->tw_ts_offset,
9c76a114 1036 tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
f6c0f5d2 1037 tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel), tw->tw_priority);
1da177e4 1038
8feaf0c0 1039 inet_twsk_put(tw);
1da177e4
LT
1040}
1041
a00e7444 1042static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
6edafaaf 1043 struct request_sock *req)
1da177e4 1044{
3a19ce0e
DL
1045 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
1046 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
1047 */
20a2b49f
ED
1048 /* RFC 7323 2.3
1049 * The window field (SEG.WND) of every outgoing segment, with the
1050 * exception of <SYN> segments, MUST be right-shifted by
1051 * Rcv.Wind.Shift bits:
1052 */
0f85feae 1053 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
3a19ce0e 1054 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
20a2b49f
ED
1055 tcp_rsk(req)->rcv_nxt,
1056 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
9a568de4 1057 tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
95a22cae 1058 req->ts_recent, sk->sk_bound_dev_if,
30791ac4 1059 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
e9a5dcee 1060 0, 0, sk->sk_priority);
1da177e4
LT
1061}
1062
1063
079096f1 1064static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
1da177e4 1065{
079096f1 1066#ifdef CONFIG_SYN_COOKIES
aa8223c7 1067 const struct tcphdr *th = tcp_hdr(skb);
1da177e4 1068
af9b4738 1069 if (!th->syn)
c6aefafb 1070 sk = cookie_v6_check(sk, skb);
1da177e4
LT
1071#endif
1072 return sk;
1073}
1074
9349d600
PP
1075u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
1076 struct tcphdr *th, u32 *cookie)
1077{
1078 u16 mss = 0;
1079#ifdef CONFIG_SYN_COOKIES
1080 mss = tcp_get_syncookie_mss(&tcp6_request_sock_ops,
1081 &tcp_request_sock_ipv6_ops, sk, th);
1082 if (mss) {
1083 *cookie = __cookie_v6_init_sequence(iph, th, &mss);
1084 tcp_synq_overflow(sk);
1085 }
1086#endif
1087 return mss;
1088}
1089
1da177e4
LT
1090static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1091{
1da177e4
LT
1092 if (skb->protocol == htons(ETH_P_IP))
1093 return tcp_v4_conn_request(sk, skb);
1094
1095 if (!ipv6_unicast_destination(skb))
1ab1457c 1096 goto drop;
1da177e4 1097
1fb6f159
OP
1098 return tcp_conn_request(&tcp6_request_sock_ops,
1099 &tcp_request_sock_ipv6_ops, sk, skb);
1da177e4
LT
1100
1101drop:
9caad864 1102 tcp_listendrop(sk);
1da177e4
LT
1103 return 0; /* don't send reset */
1104}
1105
ebf6c9cb
ED
1106static void tcp_v6_restore_cb(struct sk_buff *skb)
1107{
1108 /* We need to move header back to the beginning if xfrm6_policy_check()
1109 * and tcp_v6_fill_cb() are going to be called again.
1110 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1111 */
1112 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1113 sizeof(struct inet6_skb_parm));
1114}
1115
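/* Create the child socket once the handshake (or Fast Open) completes.
 * A v4-mapped peer is handed to tcp_v4_syn_recv_sock() and then switched
 * to the mapped address family ops; a native IPv6 peer gets its own route
 * and a copy of the listener's IPv6 options.
 */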
0c27171e 1116static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
4c99aa40 1117 struct request_sock *req,
5e0724d0
ED
1118 struct dst_entry *dst,
1119 struct request_sock *req_unhash,
1120 bool *own_req)
1da177e4 1121{
634fb979 1122 struct inet_request_sock *ireq;
0c27171e 1123 struct ipv6_pinfo *newnp;
93a77c11 1124 const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
45f6fad8 1125 struct ipv6_txoptions *opt;
1da177e4
LT
1126 struct inet_sock *newinet;
1127 struct tcp_sock *newtp;
1128 struct sock *newsk;
cfb6eeb4
YH
1129#ifdef CONFIG_TCP_MD5SIG
1130 struct tcp_md5sig_key *key;
1131#endif
3840a06e 1132 struct flowi6 fl6;
1da177e4
LT
1133
1134 if (skb->protocol == htons(ETH_P_IP)) {
1135 /*
1136 * v6 mapped
1137 */
1138
5e0724d0
ED
1139 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1140 req_unhash, own_req);
1da177e4 1141
63159f29 1142 if (!newsk)
1da177e4
LT
1143 return NULL;
1144
93a77c11 1145 inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
1da177e4
LT
1146
1147 newinet = inet_sk(newsk);
93a77c11 1148 newnp = tcp_inet6_sk(newsk);
1da177e4
LT
1149 newtp = tcp_sk(newsk);
1150
1151 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1152
d1e559d0 1153 newnp->saddr = newsk->sk_v6_rcv_saddr;
1da177e4 1154
8292a17a 1155 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1da177e4 1156 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
cfb6eeb4
YH
1157#ifdef CONFIG_TCP_MD5SIG
1158 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1159#endif
1160
83eaddab 1161 newnp->ipv6_mc_list = NULL;
676a1184
YZ
1162 newnp->ipv6_ac_list = NULL;
1163 newnp->ipv6_fl_list = NULL;
1da177e4
LT
1164 newnp->pktoptions = NULL;
1165 newnp->opt = NULL;
89e41309
ED
1166 newnp->mcast_oif = inet_iif(skb);
1167 newnp->mcast_hops = ip_hdr(skb)->ttl;
1168 newnp->rcv_flowinfo = 0;
df3687ff 1169 if (np->repflow)
89e41309 1170 newnp->flow_label = 0;
1da177e4 1171
e6848976
ACM
1172 /*
1173 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1174 * here, tcp_create_openreq_child now does this for us, see the comment in
1175 * that function for the gory details. -acme
1da177e4 1176 */
1da177e4
LT
1177
1178 /* This is a tricky place. Until this moment IPv4 tcp
8292a17a 1179 worked with IPv6 icsk.icsk_af_ops.
1da177e4
LT
1180 Sync it now.
1181 */
d83d8461 1182 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1da177e4
LT
1183
1184 return newsk;
1185 }
1186
634fb979 1187 ireq = inet_rsk(req);
1da177e4
LT
1188
1189 if (sk_acceptq_is_full(sk))
1190 goto out_overflow;
1191
493f377d 1192 if (!dst) {
f76b33c3 1193 dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
493f377d 1194 if (!dst)
1da177e4 1195 goto out;
1ab1457c 1196 }
1da177e4
LT
1197
1198 newsk = tcp_create_openreq_child(sk, req, skb);
63159f29 1199 if (!newsk)
093d2823 1200 goto out_nonewsk;
1da177e4 1201
e6848976
ACM
1202 /*
1203 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1204 * count here, tcp_create_openreq_child now does this for us, see the
1205 * comment in that function for the gory details. -acme
1206 */
1da177e4 1207
59eed279 1208 newsk->sk_gso_type = SKB_GSO_TCPV6;
6bd4f355 1209 ip6_dst_store(newsk, dst, NULL, NULL);
fae6ef87 1210 inet6_sk_rx_dst_set(newsk, skb);
1da177e4 1211
93a77c11 1212 inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
1da177e4
LT
1213
1214 newtp = tcp_sk(newsk);
1215 newinet = inet_sk(newsk);
93a77c11 1216 newnp = tcp_inet6_sk(newsk);
1da177e4
LT
1217
1218 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1219
634fb979
ED
1220 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1221 newnp->saddr = ireq->ir_v6_loc_addr;
1222 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1223 newsk->sk_bound_dev_if = ireq->ir_iif;
1da177e4 1224
1ab1457c 1225 /* Now IPv6 options...
1da177e4
LT
1226
1227 First: no IPv4 options.
1228 */
f6d8bd05 1229 newinet->inet_opt = NULL;
83eaddab 1230 newnp->ipv6_mc_list = NULL;
676a1184 1231 newnp->ipv6_ac_list = NULL;
d35690be 1232 newnp->ipv6_fl_list = NULL;
1da177e4
LT
1233
1234 /* Clone RX bits */
1235 newnp->rxopt.all = np->rxopt.all;
1236
1da177e4 1237 newnp->pktoptions = NULL;
1da177e4 1238 newnp->opt = NULL;
870c3151 1239 newnp->mcast_oif = tcp_v6_iif(skb);
0660e03f 1240 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1397ed35 1241 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
df3687ff
FF
1242 if (np->repflow)
1243 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1da177e4
LT
1244
1245 /* Clone native IPv6 options from listening socket (if any)
1246
1247 Yes, keeping a reference count would be much more clever,
1248 but we do one more thing here: reattach optmem
1249 to newsk.
1250 */
56ac42bc
HD
1251 opt = ireq->ipv6_opt;
1252 if (!opt)
1253 opt = rcu_dereference(np->opt);
45f6fad8
ED
1254 if (opt) {
1255 opt = ipv6_dup_options(newsk, opt);
1256 RCU_INIT_POINTER(newnp->opt, opt);
1257 }
d83d8461 1258 inet_csk(newsk)->icsk_ext_hdr_len = 0;
45f6fad8
ED
1259 if (opt)
1260 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1261 opt->opt_flen;
1da177e4 1262
81164413
DB
1263 tcp_ca_openreq_child(newsk, dst);
1264
1da177e4 1265 tcp_sync_mss(newsk, dst_mtu(dst));
3541f9e8 1266 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
d135c522 1267
1da177e4
LT
1268 tcp_initialize_rcv_mss(newsk);
1269
c720c7e8
ED
1270 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1271 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1da177e4 1272
cfb6eeb4
YH
1273#ifdef CONFIG_TCP_MD5SIG
1274 /* Copy over the MD5 key from the original socket */
4aa956d8 1275 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
53b24b8f 1276 if (key) {
cfb6eeb4
YH
1277 /* We're using one, so create a matching key
1278 * on the newsk structure. If we fail to get
1279 * memory, then we end up not copying the key
1280 * across. Shucks.
1281 */
efe4208f 1282 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
6797318e 1283 AF_INET6, 128, key->key, key->keylen,
7450aaf6 1284 sk_gfp_mask(sk, GFP_ATOMIC));
cfb6eeb4
YH
1285 }
1286#endif
1287
093d2823 1288 if (__inet_inherit_port(sk, newsk) < 0) {
e337e24d
CP
1289 inet_csk_prepare_forced_close(newsk);
1290 tcp_done(newsk);
093d2823
BS
1291 goto out;
1292 }
5e0724d0 1293 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
805c4bc0 1294 if (*own_req) {
49a496c9 1295 tcp_move_syn(newtp, req);
805c4bc0
ED
1296
1297 /* Clone pktoptions received with SYN, if we own the req */
1298 if (ireq->pktopts) {
1299 newnp->pktoptions = skb_clone(ireq->pktopts,
7450aaf6 1300 sk_gfp_mask(sk, GFP_ATOMIC));
805c4bc0
ED
1301 consume_skb(ireq->pktopts);
1302 ireq->pktopts = NULL;
ebf6c9cb
ED
1303 if (newnp->pktoptions) {
1304 tcp_v6_restore_cb(newnp->pktoptions);
805c4bc0 1305 skb_set_owner_r(newnp->pktoptions, newsk);
ebf6c9cb 1306 }
805c4bc0 1307 }
ce105008 1308 }
1da177e4
LT
1309
1310 return newsk;
1311
1312out_overflow:
02a1d6e7 1313 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
093d2823 1314out_nonewsk:
1da177e4 1315 dst_release(dst);
093d2823 1316out:
9caad864 1317 tcp_listendrop(sk);
1da177e4
LT
1318 return NULL;
1319}
1320
1da177e4 1321/* The socket must have its spinlock held when we get
e994b2f0 1322 * here, unless it is a TCP_LISTEN socket.
1da177e4
LT
1323 *
1324 * We have a potential double-lock case here, so even when
1325 * doing backlog processing we use the BH locking scheme.
1326 * This is because we cannot sleep with the original spinlock
1327 * held.
1328 */
1329static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1330{
93a77c11 1331 struct ipv6_pinfo *np = tcp_inet6_sk(sk);
1da177e4 1332 struct sk_buff *opt_skb = NULL;
93a77c11 1333 struct tcp_sock *tp;
1da177e4
LT
1334
1335 /* Imagine: socket is IPv6. IPv4 packet arrives,
1336 goes to the IPv4 receive handler and is backlogged.
1337 From the backlog it always goes here. Kerboom...
1338 Fortunately, tcp_rcv_established and rcv_established
1339 handle them correctly, but that is not the case with
1340 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1341 */
1342
1343 if (skb->protocol == htons(ETH_P_IP))
1344 return tcp_v4_do_rcv(sk, skb);
1345
1da177e4
LT
1346 /*
1347 * socket locking is here for SMP purposes as backlog rcv
1348 * is currently called with bh processing disabled.
1349 */
1350
1351 /* Do Stevens' IPV6_PKTOPTIONS.
1352
1353 Yes, guys, it is the only place in our code where we
1354 may do this without affecting IPv4.
1355 The rest of the code is protocol independent,
1356 and I do not like the idea of uglifying IPv4.
1357
1358 Actually, the whole idea behind IPV6_PKTOPTIONS
1359 looks not very well thought out. For now we latch the
1360 options received in the last packet enqueued
1361 by tcp. Feel free to propose a better solution.
1ab1457c 1362 --ANK (980728)
1da177e4
LT
1363 */
1364 if (np->rxopt.all)
7450aaf6 1365 opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
1da177e4
LT
1366
1367 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
5d299f3d
ED
1368 struct dst_entry *dst = sk->sk_rx_dst;
1369
bdeab991 1370 sock_rps_save_rxhash(sk, skb);
3d97379a 1371 sk_mark_napi_id(sk, skb);
5d299f3d
ED
1372 if (dst) {
1373 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1374 dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1375 dst_release(dst);
1376 sk->sk_rx_dst = NULL;
1377 }
1378 }
1379
3d97d88e 1380 tcp_rcv_established(sk, skb);
1da177e4
LT
1381 if (opt_skb)
1382 goto ipv6_pktoptions;
1383 return 0;
1384 }
1385
12e25e10 1386 if (tcp_checksum_complete(skb))
1da177e4
LT
1387 goto csum_err;
1388
1ab1457c 1389 if (sk->sk_state == TCP_LISTEN) {
079096f1
ED
1390 struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1391
1da177e4
LT
1392 if (!nsk)
1393 goto discard;
1394
4c99aa40 1395 if (nsk != sk) {
1da177e4
LT
1396 if (tcp_child_process(sk, nsk, skb))
1397 goto reset;
1398 if (opt_skb)
1399 __kfree_skb(opt_skb);
1400 return 0;
1401 }
47482f13 1402 } else
bdeab991 1403 sock_rps_save_rxhash(sk, skb);
1da177e4 1404
72ab4a86 1405 if (tcp_rcv_state_process(sk, skb))
1da177e4 1406 goto reset;
1da177e4
LT
1407 if (opt_skb)
1408 goto ipv6_pktoptions;
1409 return 0;
1410
1411reset:
cfb6eeb4 1412 tcp_v6_send_reset(sk, skb);
1da177e4
LT
1413discard:
1414 if (opt_skb)
1415 __kfree_skb(opt_skb);
1416 kfree_skb(skb);
1417 return 0;
1418csum_err:
c10d9310
ED
1419 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1420 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1da177e4
LT
1421 goto discard;
1422
1423
1424ipv6_pktoptions:
1425 /* Do you ask, what is it?
1426
1427 1. skb was enqueued by tcp.
1428 2. skb is added to tail of read queue, rather than out of order.
1429 3. socket is not in passive state.
1430 4. Finally, it really contains options, which user wants to receive.
1431 */
1432 tp = tcp_sk(sk);
1433 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1434 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
333fad53 1435 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
870c3151 1436 np->mcast_oif = tcp_v6_iif(opt_skb);
333fad53 1437 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
0660e03f 1438 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
82e9f105 1439 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1397ed35 1440 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
df3687ff
FF
1441 if (np->repflow)
1442 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
a224772d 1443 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1da177e4 1444 skb_set_owner_r(opt_skb, sk);
8ce48623 1445 tcp_v6_restore_cb(opt_skb);
1da177e4
LT
1446 opt_skb = xchg(&np->pktoptions, opt_skb);
1447 } else {
1448 __kfree_skb(opt_skb);
1449 opt_skb = xchg(&np->pktoptions, NULL);
1450 }
1451 }
1452
800d55f1 1453 kfree_skb(opt_skb);
1da177e4
LT
1454 return 0;
1455}
1456
2dc49d16
ND
1457static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1458 const struct tcphdr *th)
1459{
1460 /* This is tricky: we move IP6CB to its correct location inside
1461 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1462 * _decode_session6() uses IP6CB().
1463 * barrier() makes sure compiler won't play aliasing games.
1464 */
1465 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1466 sizeof(struct inet6_skb_parm));
1467 barrier();
1468
1469 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1470 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1471 skb->len - th->doff*4);
1472 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1473 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1474 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1475 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1476 TCP_SKB_CB(skb)->sacked = 0;
98aaa913
MM
1477 TCP_SKB_CB(skb)->has_rxtstamp =
1478 skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
2dc49d16
ND
1479}
1480
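/* Main receive entry point for TCP over IPv6: validate header lengths and
 * the checksum, look up the owning socket, and dispatch to the request,
 * established, listening or time-wait handling paths.
 */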
0e219ae4 1481INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
1da177e4 1482{
8b27dae5 1483 struct sk_buff *skb_to_free;
4297a0ef 1484 int sdif = inet6_sdif(skb);
d14c77e0 1485 int dif = inet6_iif(skb);
cf533ea5 1486 const struct tcphdr *th;
b71d1d42 1487 const struct ipv6hdr *hdr;
3b24d854 1488 bool refcounted;
1da177e4
LT
1489 struct sock *sk;
1490 int ret;
a86b1e30 1491 struct net *net = dev_net(skb->dev);
1da177e4
LT
1492
1493 if (skb->pkt_type != PACKET_HOST)
1494 goto discard_it;
1495
1496 /*
1497 * Count it even if it's bad.
1498 */
90bbcc60 1499 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1da177e4
LT
1500
1501 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1502 goto discard_it;
1503
ea1627c2 1504 th = (const struct tcphdr *)skb->data;
1da177e4 1505
ea1627c2 1506 if (unlikely(th->doff < sizeof(struct tcphdr)/4))
1da177e4
LT
1507 goto bad_packet;
1508 if (!pskb_may_pull(skb, th->doff*4))
1509 goto discard_it;
1510
e4f45b7f 1511 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
6a5dc9e5 1512 goto csum_error;
1da177e4 1513
ea1627c2 1514 th = (const struct tcphdr *)skb->data;
e802af9c 1515 hdr = ipv6_hdr(skb);
1da177e4 1516
4bdc3d66 1517lookup:
a583636a 1518 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
4297a0ef 1519 th->source, th->dest, inet6_iif(skb), sdif,
3b24d854 1520 &refcounted);
1da177e4
LT
1521 if (!sk)
1522 goto no_tcp_socket;
1523
1524process:
1525 if (sk->sk_state == TCP_TIME_WAIT)
1526 goto do_time_wait;
1527
079096f1
ED
1528 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1529 struct request_sock *req = inet_reqsk(sk);
e0f9759f 1530 bool req_stolen = false;
7716682c 1531 struct sock *nsk;
079096f1
ED
1532
1533 sk = req->rsk_listener;
d14c77e0 1534 if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif)) {
e65c332d 1535 sk_drops_add(sk, skb);
079096f1
ED
1536 reqsk_put(req);
1537 goto discard_it;
1538 }
4fd44a98
FL
1539 if (tcp_checksum_complete(skb)) {
1540 reqsk_put(req);
1541 goto csum_error;
1542 }
7716682c 1543 if (unlikely(sk->sk_state != TCP_LISTEN)) {
f03f2e15 1544 inet_csk_reqsk_queue_drop_and_put(sk, req);
4bdc3d66
ED
1545 goto lookup;
1546 }
7716682c 1547 sock_hold(sk);
3b24d854 1548 refcounted = true;
1f3b359f 1549 nsk = NULL;
eeea10b8
ED
1550 if (!tcp_filter(sk, skb)) {
1551 th = (const struct tcphdr *)skb->data;
1552 hdr = ipv6_hdr(skb);
1553 tcp_v6_fill_cb(skb, hdr, th);
e0f9759f 1554 nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
eeea10b8 1555 }
079096f1
ED
1556 if (!nsk) {
1557 reqsk_put(req);
e0f9759f
ED
1558 if (req_stolen) {
1559 /* Another cpu got exclusive access to req
1560 * and created a full blown socket.
1561 * Try to feed this packet to this socket
1562 * instead of discarding it.
1563 */
1564 tcp_v6_restore_cb(skb);
1565 sock_put(sk);
1566 goto lookup;
1567 }
7716682c 1568 goto discard_and_relse;
079096f1
ED
1569 }
1570 if (nsk == sk) {
079096f1
ED
1571 reqsk_put(req);
1572 tcp_v6_restore_cb(skb);
1573 } else if (tcp_child_process(sk, nsk, skb)) {
1574 tcp_v6_send_reset(nsk, skb);
7716682c 1575 goto discard_and_relse;
079096f1 1576 } else {
7716682c 1577 sock_put(sk);
079096f1
ED
1578 return 0;
1579 }
1580 }
93a77c11 1581 if (hdr->hop_limit < tcp_inet6_sk(sk)->min_hopcount) {
02a1d6e7 1582 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
e802af9c
SH
1583 goto discard_and_relse;
1584 }
1585
1da177e4
LT
1586 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1587 goto discard_and_relse;
1588
d14c77e0 1589 if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif))
9ea88a15 1590 goto discard_and_relse;
9ea88a15 1591
ac6e7800 1592 if (tcp_filter(sk, skb))
1da177e4 1593 goto discard_and_relse;
ac6e7800
ED
1594 th = (const struct tcphdr *)skb->data;
1595 hdr = ipv6_hdr(skb);
eeea10b8 1596 tcp_v6_fill_cb(skb, hdr, th);
1da177e4
LT
1597
1598 skb->dev = NULL;
1599
e994b2f0
ED
1600 if (sk->sk_state == TCP_LISTEN) {
1601 ret = tcp_v6_do_rcv(sk, skb);
1602 goto put_and_return;
1603 }
1604
1605 sk_incoming_cpu_update(sk);
1606
293b9c42 1607 bh_lock_sock_nested(sk);
a44d6eac 1608 tcp_segs_in(tcp_sk(sk), skb);
1da177e4
LT
1609 ret = 0;
1610 if (!sock_owned_by_user(sk)) {
8b27dae5
ED
1611 skb_to_free = sk->sk_rx_skb_cache;
1612 sk->sk_rx_skb_cache = NULL;
e7942d06 1613 ret = tcp_v6_do_rcv(sk, skb);
8b27dae5
ED
1614 } else {
1615 if (tcp_add_backlog(sk, skb))
1616 goto discard_and_relse;
1617 skb_to_free = NULL;
6b03a53a 1618 }
1da177e4 1619 bh_unlock_sock(sk);
8b27dae5
ED
1620 if (skb_to_free)
1621 __kfree_skb(skb_to_free);
e994b2f0 1622put_and_return:
3b24d854
ED
1623 if (refcounted)
1624 sock_put(sk);
1da177e4
LT
1625 return ret ? -1 : 0;
1626
1627no_tcp_socket:
1628 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1629 goto discard_it;
1630
2dc49d16
ND
1631 tcp_v6_fill_cb(skb, hdr, th);
1632
12e25e10 1633 if (tcp_checksum_complete(skb)) {
6a5dc9e5 1634csum_error:
90bbcc60 1635 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1da177e4 1636bad_packet:
90bbcc60 1637 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1da177e4 1638 } else {
cfb6eeb4 1639 tcp_v6_send_reset(NULL, skb);
1da177e4
LT
1640 }
1641
1642discard_it:
1da177e4
LT
1643 kfree_skb(skb);
1644 return 0;
1645
1646discard_and_relse:
532182cd 1647 sk_drops_add(sk, skb);
3b24d854
ED
1648 if (refcounted)
1649 sock_put(sk);
1da177e4
LT
1650 goto discard_it;
1651
1652do_time_wait:
1653 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
9469c7b4 1654 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1655 goto discard_it;
1656 }
1657
2dc49d16
ND
1658 tcp_v6_fill_cb(skb, hdr, th);
1659
6a5dc9e5
ED
1660 if (tcp_checksum_complete(skb)) {
1661 inet_twsk_put(inet_twsk(sk));
1662 goto csum_error;
1da177e4
LT
1663 }
1664
9469c7b4 1665 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1da177e4
LT
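	/* A new SYN hitting a TIME_WAIT socket may be acceptable: look for a
	 * matching listener, and if one is found retire the timewait sock
	 * and process the SYN against that listener instead.
	 */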
1666 case TCP_TW_SYN:
1667 {
1668 struct sock *sk2;
1669
c346dca1 1670 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
a583636a 1671 skb, __tcp_hdrlen(th),
5ba24953 1672 &ipv6_hdr(skb)->saddr, th->source,
0660e03f 1673 &ipv6_hdr(skb)->daddr,
24b711ed
DA
1674 ntohs(th->dest),
1675 tcp_v6_iif_l3_slave(skb),
4297a0ef 1676 sdif);
53b24b8f 1677 if (sk2) {
295ff7ed 1678 struct inet_timewait_sock *tw = inet_twsk(sk);
dbe7faa4 1679 inet_twsk_deschedule_put(tw);
1da177e4 1680 sk = sk2;
4ad19de8 1681 tcp_v6_restore_cb(skb);
3b24d854 1682 refcounted = false;
1da177e4
LT
1683 goto process;
1684 }
1da177e4 1685 }
275757e6
GS
1686 /* to ACK */
1687 /* fall through */
1da177e4
LT
1688 case TCP_TW_ACK:
1689 tcp_v6_timewait_ack(sk, skb);
1690 break;
1691 case TCP_TW_RST:
271c3b9b
FW
1692 tcp_v6_send_reset(sk, skb);
1693 inet_twsk_deschedule_put(inet_twsk(sk));
1694 goto discard_it;
4aa956d8
WY
1695 case TCP_TW_SUCCESS:
1696 ;
1da177e4
LT
1697 }
1698 goto discard_it;
1699}
1700
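/* Early demux: called from the IPv6 input path before routing.  If an
 * established socket is found, the skb is attached to it (skb->sk with a
 * sock_edemux destructor) and, when the cached entry is still valid for
 * the incoming interface, the socket's cached receive route is attached
 * with skb_dst_set_noref(), sparing a routing lookup later in the path.
 */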
97ff7ffb 1701INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb)
c7109986
ED
1702{
1703 const struct ipv6hdr *hdr;
1704 const struct tcphdr *th;
1705 struct sock *sk;
1706
1707 if (skb->pkt_type != PACKET_HOST)
1708 return;
1709
1710 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1711 return;
1712
1713 hdr = ipv6_hdr(skb);
1714 th = tcp_hdr(skb);
1715
1716 if (th->doff < sizeof(struct tcphdr) / 4)
1717 return;
1718
870c3151 1719 /* Note: we use inet6_iif() here, not tcp_v6_iif() */
c7109986
ED
1720 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1721 &hdr->saddr, th->source,
1722 &hdr->daddr, ntohs(th->dest),
4297a0ef 1723 inet6_iif(skb), inet6_sdif(skb));
c7109986
ED
1724 if (sk) {
1725 skb->sk = sk;
1726 skb->destructor = sock_edemux;
f7e4eb03 1727 if (sk_fullsock(sk)) {
d0c294c5 1728 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
f3f12135 1729
c7109986 1730 if (dst)
93a77c11 1731 dst = dst_check(dst, tcp_inet6_sk(sk)->rx_dst_cookie);
c7109986 1732 if (dst &&
f3f12135 1733 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
c7109986
ED
1734 skb_dst_set_noref(skb, dst);
1735 }
1736 }
1737}
1738
ccb7c410
DM
1739static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1740 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1741 .twsk_unique = tcp_twsk_unique,
4aa956d8 1742 .twsk_destructor = tcp_twsk_destructor,
ccb7c410
DM
1743};
1744
3b401a81 1745static const struct inet_connection_sock_af_ops ipv6_specific = {
543d9cfe
ACM
1746 .queue_xmit = inet6_csk_xmit,
1747 .send_check = tcp_v6_send_check,
1748 .rebuild_header = inet6_sk_rebuild_header,
5d299f3d 1749 .sk_rx_dst_set = inet6_sk_rx_dst_set,
543d9cfe
ACM
1750 .conn_request = tcp_v6_conn_request,
1751 .syn_recv_sock = tcp_v6_syn_recv_sock,
543d9cfe 1752 .net_header_len = sizeof(struct ipv6hdr),
67469601 1753 .net_frag_header_len = sizeof(struct frag_hdr),
543d9cfe
ACM
1754 .setsockopt = ipv6_setsockopt,
1755 .getsockopt = ipv6_getsockopt,
1756 .addr2sockaddr = inet6_csk_addr2sockaddr,
1757 .sockaddr_len = sizeof(struct sockaddr_in6),
3fdadf7d 1758#ifdef CONFIG_COMPAT
543d9cfe
ACM
1759 .compat_setsockopt = compat_ipv6_setsockopt,
1760 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1761#endif
4fab9071 1762 .mtu_reduced = tcp_v6_mtu_reduced,
1da177e4
LT
1763};
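/* ipv6_specific is the af_ops table used for genuine IPv6 connections.
 * When a socket ends up talking to an IPv4-mapped IPv6 address it is
 * switched over to ipv6_mapped (below), so the IPv4 transmit and header
 * routines are used on the wire while the socket keeps its IPv6 API.
 */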
1764
cfb6eeb4 1765#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1766static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
cfb6eeb4 1767 .md5_lookup = tcp_v6_md5_lookup,
49a72dfb 1768 .calc_md5_hash = tcp_v6_md5_hash_skb,
cfb6eeb4 1769 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1770};
a928630a 1771#endif
cfb6eeb4 1772
1da177e4
LT
1773/*
1774 * TCP over IPv4 via INET6 API
1775 */
3b401a81 1776static const struct inet_connection_sock_af_ops ipv6_mapped = {
543d9cfe
ACM
1777 .queue_xmit = ip_queue_xmit,
1778 .send_check = tcp_v4_send_check,
1779 .rebuild_header = inet_sk_rebuild_header,
63d02d15 1780 .sk_rx_dst_set = inet_sk_rx_dst_set,
543d9cfe
ACM
1781 .conn_request = tcp_v6_conn_request,
1782 .syn_recv_sock = tcp_v6_syn_recv_sock,
543d9cfe
ACM
1783 .net_header_len = sizeof(struct iphdr),
1784 .setsockopt = ipv6_setsockopt,
1785 .getsockopt = ipv6_getsockopt,
1786 .addr2sockaddr = inet6_csk_addr2sockaddr,
1787 .sockaddr_len = sizeof(struct sockaddr_in6),
3fdadf7d 1788#ifdef CONFIG_COMPAT
543d9cfe
ACM
1789 .compat_setsockopt = compat_ipv6_setsockopt,
1790 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1791#endif
4fab9071 1792 .mtu_reduced = tcp_v4_mtu_reduced,
1da177e4
LT
1793};
1794
cfb6eeb4 1795#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1796static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
cfb6eeb4 1797 .md5_lookup = tcp_v4_md5_lookup,
49a72dfb 1798 .calc_md5_hash = tcp_v4_md5_hash_skb,
cfb6eeb4 1799 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1800};
a928630a 1801#endif
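/* The mapped variant uses the IPv4 MD5 lookup/hash helpers because the
 * packets on the wire are IPv4, while key configuration still goes through
 * tcp_v6_parse_md5_keys() on the IPv6 socket.
 */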
cfb6eeb4 1802
1da177e4
LT
1803/* NOTE: A lot of things set to zero explicitly by call to
1804 * sk_alloc() so need not be done here.
1805 */
1806static int tcp_v6_init_sock(struct sock *sk)
1807{
6687e988 1808 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4 1809
900f65d3 1810 tcp_init_sock(sk);
1da177e4 1811
8292a17a 1812 icsk->icsk_af_ops = &ipv6_specific;
1da177e4 1813
cfb6eeb4 1814#ifdef CONFIG_TCP_MD5SIG
ac807fa8 1815 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
cfb6eeb4
YH
1816#endif
1817
1da177e4
LT
1818 return 0;
1819}
1820
7d06b2e0 1821static void tcp_v6_destroy_sock(struct sock *sk)
1da177e4 1822{
1da177e4 1823 tcp_v4_destroy_sock(sk);
7d06b2e0 1824 inet6_destroy_sock(sk);
1da177e4
LT
1825}
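/* Most of the teardown is shared with IPv4 (tcp_v4_destroy_sock());
 * inet6_destroy_sock() then releases the IPv6-only state, such as stored
 * packet options.
 */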
1826
952a10be 1827#ifdef CONFIG_PROC_FS
1da177e4 1828/* Proc filesystem TCPv6 sock list dumping. */
1ab1457c 1829static void get_openreq6(struct seq_file *seq,
aa3a0c8c 1830 const struct request_sock *req, int i)
1da177e4 1831{
fa76ce73 1832 long ttd = req->rsk_timer.expires - jiffies;
634fb979
ED
1833 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1834 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1da177e4
LT
1835
1836 if (ttd < 0)
1837 ttd = 0;
1838
1da177e4
LT
1839 seq_printf(seq,
1840 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
d14c5ab6 1841 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1da177e4
LT
1842 i,
1843 src->s6_addr32[0], src->s6_addr32[1],
1844 src->s6_addr32[2], src->s6_addr32[3],
b44084c2 1845 inet_rsk(req)->ir_num,
1da177e4
LT
1846 dest->s6_addr32[0], dest->s6_addr32[1],
1847 dest->s6_addr32[2], dest->s6_addr32[3],
634fb979 1848 ntohs(inet_rsk(req)->ir_rmt_port),
1da177e4 1849 TCP_SYN_RECV,
4c99aa40 1850 0, 0, /* could print option size, but that is af dependent. */
1ab1457c
YH
1851 1, /* timers active (only the expire timer) */
1852 jiffies_to_clock_t(ttd),
e6c022a4 1853 req->num_timeout,
aa3a0c8c
ED
1854 from_kuid_munged(seq_user_ns(seq),
1855 sock_i_uid(req->rsk_listener)),
1ab1457c 1856 0, /* non standard timer */
1da177e4
LT
1857 0, /* open_requests have no inode */
1858 0, req);
1859}
1860
1861static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1862{
b71d1d42 1863 const struct in6_addr *dest, *src;
1da177e4
LT
1864 __u16 destp, srcp;
1865 int timer_active;
1866 unsigned long timer_expires;
cf533ea5
ED
1867 const struct inet_sock *inet = inet_sk(sp);
1868 const struct tcp_sock *tp = tcp_sk(sp);
463c84b9 1869 const struct inet_connection_sock *icsk = inet_csk(sp);
0536fcc0 1870 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
00fd38d9
ED
1871 int rx_queue;
1872 int state;
1da177e4 1873
efe4208f
ED
1874 dest = &sp->sk_v6_daddr;
1875 src = &sp->sk_v6_rcv_saddr;
c720c7e8
ED
1876 destp = ntohs(inet->inet_dport);
1877 srcp = ntohs(inet->inet_sport);
463c84b9 1878
ce3cf4ec 1879 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
57dde7f7 1880 icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
ce3cf4ec 1881 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
1da177e4 1882 timer_active = 1;
463c84b9
ACM
1883 timer_expires = icsk->icsk_timeout;
1884 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1da177e4 1885 timer_active = 4;
463c84b9 1886 timer_expires = icsk->icsk_timeout;
1da177e4
LT
1887 } else if (timer_pending(&sp->sk_timer)) {
1888 timer_active = 2;
1889 timer_expires = sp->sk_timer.expires;
1890 } else {
1891 timer_active = 0;
1892 timer_expires = jiffies;
1893 }
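 /* timer_active encodes which timer is pending for the proc output:
  * 1 retransmit/loss-probe, 2 sk_timer (keepalive), 4 zero-window probe,
  * 0 none; timer_expires is reported relative to jiffies below.
  */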
1894
986ffdfd 1895 state = inet_sk_state_load(sp);
00fd38d9 1896 if (state == TCP_LISTEN)
288efe86 1897 rx_queue = READ_ONCE(sp->sk_ack_backlog);
00fd38d9
ED
1898 else
1899 /* Because we don't lock the socket,
1900 * we might find a transient negative value.
1901 */
dba7d9b8 1902 rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
7db48e98 1903 READ_ONCE(tp->copied_seq), 0);
00fd38d9 1904
1da177e4
LT
1905 seq_printf(seq,
1906 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
d14c5ab6 1907 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1da177e4
LT
1908 i,
1909 src->s6_addr32[0], src->s6_addr32[1],
1910 src->s6_addr32[2], src->s6_addr32[3], srcp,
1911 dest->s6_addr32[0], dest->s6_addr32[1],
1912 dest->s6_addr32[2], dest->s6_addr32[3], destp,
00fd38d9 1913 state,
0f317464 1914 READ_ONCE(tp->write_seq) - tp->snd_una,
00fd38d9 1915 rx_queue,
1da177e4 1916 timer_active,
a399a805 1917 jiffies_delta_to_clock_t(timer_expires - jiffies),
463c84b9 1918 icsk->icsk_retransmits,
a7cb5a49 1919 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
6687e988 1920 icsk->icsk_probes_out,
1da177e4 1921 sock_i_ino(sp),
41c6d650 1922 refcount_read(&sp->sk_refcnt), sp,
7be87351
SH
1923 jiffies_to_clock_t(icsk->icsk_rto),
1924 jiffies_to_clock_t(icsk->icsk_ack.ato),
31954cd8 1925 (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp),
0b6a05c1 1926 tp->snd_cwnd,
00fd38d9 1927 state == TCP_LISTEN ?
0536fcc0 1928 fastopenq->max_qlen :
0a672f74 1929 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1da177e4
LT
1930 );
1931}
1932
1ab1457c 1933static void get_timewait6_sock(struct seq_file *seq,
8feaf0c0 1934 struct inet_timewait_sock *tw, int i)
1da177e4 1935{
789f558c 1936 long delta = tw->tw_timer.expires - jiffies;
b71d1d42 1937 const struct in6_addr *dest, *src;
1da177e4 1938 __u16 destp, srcp;
1da177e4 1939
efe4208f
ED
1940 dest = &tw->tw_v6_daddr;
1941 src = &tw->tw_v6_rcv_saddr;
1da177e4
LT
1942 destp = ntohs(tw->tw_dport);
1943 srcp = ntohs(tw->tw_sport);
1944
1945 seq_printf(seq,
1946 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
71338aa7 1947 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1da177e4
LT
1948 i,
1949 src->s6_addr32[0], src->s6_addr32[1],
1950 src->s6_addr32[2], src->s6_addr32[3], srcp,
1951 dest->s6_addr32[0], dest->s6_addr32[1],
1952 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1953 tw->tw_substate, 0, 0,
a399a805 1954 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
41c6d650 1955 refcount_read(&tw->tw_refcnt), tw);
1da177e4
LT
1956}
1957
1da177e4
LT
1958static int tcp6_seq_show(struct seq_file *seq, void *v)
1959{
1960 struct tcp_iter_state *st;
05dbc7b5 1961 struct sock *sk = v;
1da177e4
LT
1962
1963 if (v == SEQ_START_TOKEN) {
1964 seq_puts(seq,
1965 " sl "
1966 "local_address "
1967 "remote_address "
1968 "st tx_queue rx_queue tr tm->when retrnsmt"
1969 " uid timeout inode\n");
1970 goto out;
1971 }
1972 st = seq->private;
1973
079096f1
ED
1974 if (sk->sk_state == TCP_TIME_WAIT)
1975 get_timewait6_sock(seq, v, st->num);
1976 else if (sk->sk_state == TCP_NEW_SYN_RECV)
aa3a0c8c 1977 get_openreq6(seq, v, st->num);
079096f1
ED
1978 else
1979 get_tcp6_sock(seq, v, st->num);
1da177e4
LT
1980out:
1981 return 0;
1982}
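/* Each line of /proc/net/tcp6 is produced by one of the helpers above,
 * depending on socket type: NEW_SYN_RECV request sockets, TIME_WAIT
 * sockets, or full sockets.  Addresses are printed as four 32-bit hex
 * words taken straight from s6_addr32, and ports are printed in hex.
 */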
1983
37d849bb
CH
1984static const struct seq_operations tcp6_seq_ops = {
1985 .show = tcp6_seq_show,
1986 .start = tcp_seq_start,
1987 .next = tcp_seq_next,
1988 .stop = tcp_seq_stop,
1989};
1990
1da177e4 1991static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1da177e4 1992 .family = AF_INET6,
1da177e4
LT
1993};
1994
2c8c1e72 1995int __net_init tcp6_proc_init(struct net *net)
1da177e4 1996{
c3506372
CH
1997 if (!proc_create_net_data("tcp6", 0444, net->proc_net, &tcp6_seq_ops,
1998 sizeof(struct tcp_iter_state), &tcp6_seq_afinfo))
37d849bb
CH
1999 return -ENOMEM;
2000 return 0;
1da177e4
LT
2001}
2002
6f8b13bc 2003void tcp6_proc_exit(struct net *net)
1da177e4 2004{
37d849bb 2005 remove_proc_entry("tcp6", net->proc_net);
1da177e4
LT
2006}
2007#endif
2008
2009struct proto tcpv6_prot = {
2010 .name = "TCPv6",
2011 .owner = THIS_MODULE,
2012 .close = tcp_close,
d74bad4e 2013 .pre_connect = tcp_v6_pre_connect,
1da177e4
LT
2014 .connect = tcp_v6_connect,
2015 .disconnect = tcp_disconnect,
463c84b9 2016 .accept = inet_csk_accept,
1da177e4
LT
2017 .ioctl = tcp_ioctl,
2018 .init = tcp_v6_init_sock,
2019 .destroy = tcp_v6_destroy_sock,
2020 .shutdown = tcp_shutdown,
2021 .setsockopt = tcp_setsockopt,
2022 .getsockopt = tcp_getsockopt,
4b9d07a4 2023 .keepalive = tcp_set_keepalive,
1da177e4 2024 .recvmsg = tcp_recvmsg,
7ba42910
CG
2025 .sendmsg = tcp_sendmsg,
2026 .sendpage = tcp_sendpage,
1da177e4 2027 .backlog_rcv = tcp_v6_do_rcv,
46d3ceab 2028 .release_cb = tcp_release_cb,
496611d7 2029 .hash = inet6_hash,
ab1e0a13
ACM
2030 .unhash = inet_unhash,
2031 .get_port = inet_csk_get_port,
1da177e4 2032 .enter_memory_pressure = tcp_enter_memory_pressure,
06044751 2033 .leave_memory_pressure = tcp_leave_memory_pressure,
c9bee3b7 2034 .stream_memory_free = tcp_stream_memory_free,
1da177e4
LT
2035 .sockets_allocated = &tcp_sockets_allocated,
2036 .memory_allocated = &tcp_memory_allocated,
2037 .memory_pressure = &tcp_memory_pressure,
0a5578cf 2038 .orphan_count = &tcp_orphan_count,
a4fe34bf 2039 .sysctl_mem = sysctl_tcp_mem,
356d1833
ED
2040 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem),
2041 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem),
1da177e4
LT
2042 .max_header = MAX_TCP_HEADER,
2043 .obj_size = sizeof(struct tcp6_sock),
5f0d5a3a 2044 .slab_flags = SLAB_TYPESAFE_BY_RCU,
6d6ee43e 2045 .twsk_prot = &tcp6_timewait_sock_ops,
60236fdd 2046 .rsk_prot = &tcp6_request_sock_ops,
39d8cda7 2047 .h.hashinfo = &tcp_hashinfo,
7ba42910 2048 .no_autobind = true,
543d9cfe
ACM
2049#ifdef CONFIG_COMPAT
2050 .compat_setsockopt = compat_tcp_setsockopt,
2051 .compat_getsockopt = compat_tcp_getsockopt,
d1a4c0b3 2052#endif
c1e64e29 2053 .diag_destroy = tcp_abort,
1da177e4
LT
2054};
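/* tcpv6_prot reuses the bulk of the IPv4 TCP implementation; only the
 * hooks that touch addressing and transmission (connect, hashing,
 * backlog_rcv, init/destroy) point at IPv6-specific helpers.
 */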
2055
a8e3bb34
DA
2056/* thinking of making this const? Don't.
2057 * early_demux can change based on sysctl.
2058 */
39294c3d 2059static struct inet6_protocol tcpv6_protocol = {
c7109986 2060 .early_demux = tcp_v6_early_demux,
dddb64bc 2061 .early_demux_handler = tcp_v6_early_demux,
1da177e4
LT
2062 .handler = tcp_v6_rcv,
2063 .err_handler = tcp_v6_err,
2064 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2065};
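/* INET6_PROTO_NOPOLICY tells the IPv6 input path to skip the generic XFRM
 * policy check (tcp_v6_rcv() calls xfrm6_policy_check() itself), and
 * INET6_PROTO_FINAL marks TCP as a terminal transport protocol rather than
 * an extension header.  The early_demux hook can be switched at run time
 * via the net.ipv4.tcp_early_demux sysctl, which is why the structure is
 * not const (see the comment above).
 */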
2066
1da177e4
LT
2067static struct inet_protosw tcpv6_protosw = {
2068 .type = SOCK_STREAM,
2069 .protocol = IPPROTO_TCP,
2070 .prot = &tcpv6_prot,
2071 .ops = &inet6_stream_ops,
d83d8461
ACM
2072 .flags = INET_PROTOSW_PERMANENT |
2073 INET_PROTOSW_ICSK,
1da177e4
LT
2074};
2075
2c8c1e72 2076static int __net_init tcpv6_net_init(struct net *net)
93ec926b 2077{
5677242f
DL
2078 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2079 SOCK_RAW, IPPROTO_TCP, net);
93ec926b
DL
2080}
2081
2c8c1e72 2082static void __net_exit tcpv6_net_exit(struct net *net)
93ec926b 2083{
5677242f 2084 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
b099ce26
EB
2085}
2086
2c8c1e72 2087static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
b099ce26 2088{
1946e672 2089 inet_twsk_purge(&tcp_hashinfo, AF_INET6);
93ec926b
DL
2090}
2091
2092static struct pernet_operations tcpv6_net_ops = {
b099ce26
EB
2093 .init = tcpv6_net_init,
2094 .exit = tcpv6_net_exit,
2095 .exit_batch = tcpv6_net_exit_batch,
93ec926b
DL
2096};
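/* Per-namespace setup: tcpv6_net_init() creates the kernel control socket
 * (net->ipv6.tcp_sk) used to send RSTs and ACKs on behalf of sockets in
 * that namespace, and tcpv6_net_exit_batch() purges any remaining IPv6
 * timewait sockets when namespaces are torn down.
 */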
2097
7f4e4868 2098int __init tcpv6_init(void)
1da177e4 2099{
7f4e4868
DL
2100 int ret;
2101
3336288a
VY
2102 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2103 if (ret)
c6b641a4 2104 goto out;
3336288a 2105
1da177e4 2106 /* register inet6 protocol */
7f4e4868
DL
2107 ret = inet6_register_protosw(&tcpv6_protosw);
2108 if (ret)
2109 goto out_tcpv6_protocol;
2110
93ec926b 2111 ret = register_pernet_subsys(&tcpv6_net_ops);
7f4e4868
DL
2112 if (ret)
2113 goto out_tcpv6_protosw;
2114out:
2115 return ret;
ae0f7d5f 2116
7f4e4868
DL
2117out_tcpv6_protosw:
2118 inet6_unregister_protosw(&tcpv6_protosw);
3336288a
VY
2119out_tcpv6_protocol:
2120 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
7f4e4868
DL
2121 goto out;
2122}
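/* Errors unwind in reverse registration order: the pernet ops failure
 * drops the protosw, which in turn drops the protocol handler.
 */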
2123
09f7709f 2124void tcpv6_exit(void)
7f4e4868 2125{
93ec926b 2126 unregister_pernet_subsys(&tcpv6_net_ops);
7f4e4868
DL
2127 inet6_unregister_protosw(&tcpv6_protosw);
2128 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1da177e4 2129}