1 /*
2 * TCP over IPv6
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * Based on:
9 * linux/net/ipv4/tcp.c
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
12 *
13 * Fixes:
14 * Hideaki YOSHIFUJI : sin6_scope_id support
15 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
16 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
17 * a single port at the same time.
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
34 #include <linux/in.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42 #include <linux/uaccess.h>
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/inet_common.h>
63 #include <net/secure_seq.h>
64 #include <net/busy_poll.h>
65
66 #include <linux/proc_fs.h>
67 #include <linux/seq_file.h>
68
69 #include <crypto/hash.h>
70 #include <linux/scatterlist.h>
71
72 #include <trace/events/tcp.h>
73
74 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
75 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
76 struct request_sock *req);
77
78 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
79
80 static const struct inet_connection_sock_af_ops ipv6_mapped;
81 static const struct inet_connection_sock_af_ops ipv6_specific;
82 #ifdef CONFIG_TCP_MD5SIG
83 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
84 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
85 #else
86 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
87 const struct in6_addr *addr)
88 {
89 return NULL;
90 }
91 #endif
92
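/* Cache the inbound dst, its ifindex and the route cookie on the socket so
 * later packets on the established fast path can reuse it after validation.
 */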
93 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
94 {
95 struct dst_entry *dst = skb_dst(skb);
96
97 if (dst && dst_hold_safe(dst)) {
98 const struct rt6_info *rt = (const struct rt6_info *)dst;
99
100 sk->sk_rx_dst = dst;
101 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
102 inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
103 }
104 }
105
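/* Derive the initial sequence number for a new connection from the
 * address and port four-tuple.
 */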
106 static u32 tcp_v6_init_seq(const struct sk_buff *skb)
107 {
108 return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
109 ipv6_hdr(skb)->saddr.s6_addr32,
110 tcp_hdr(skb)->dest,
111 tcp_hdr(skb)->source);
112 }
113
114 static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
115 {
116 return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
117 ipv6_hdr(skb)->saddr.s6_addr32);
118 }
119
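/* Active open: resolve the destination, pick a source address and route,
 * then kick off the three-way handshake for an AF_INET6 TCP socket.
 */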
120 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
121 int addr_len)
122 {
123 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
124 struct inet_sock *inet = inet_sk(sk);
125 struct inet_connection_sock *icsk = inet_csk(sk);
126 struct ipv6_pinfo *np = inet6_sk(sk);
127 struct tcp_sock *tp = tcp_sk(sk);
128 struct in6_addr *saddr = NULL, *final_p, final;
129 struct ipv6_txoptions *opt;
130 struct flowi6 fl6;
131 struct dst_entry *dst;
132 int addr_type;
133 int err;
134 struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
135
136 if (addr_len < SIN6_LEN_RFC2133)
137 return -EINVAL;
138
139 if (usin->sin6_family != AF_INET6)
140 return -EAFNOSUPPORT;
141
142 memset(&fl6, 0, sizeof(fl6));
143
144 if (np->sndflow) {
145 fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
146 IP6_ECN_flow_init(fl6.flowlabel);
147 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
148 struct ip6_flowlabel *flowlabel;
149 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
150 if (!flowlabel)
151 return -EINVAL;
152 fl6_sock_release(flowlabel);
153 }
154 }
155
156 /*
157 * connect() to INADDR_ANY means loopback (BSD'ism).
158 */
159
160 if (ipv6_addr_any(&usin->sin6_addr)) {
161 if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
162 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
163 &usin->sin6_addr);
164 else
165 usin->sin6_addr = in6addr_loopback;
166 }
167
168 addr_type = ipv6_addr_type(&usin->sin6_addr);
169
170 if (addr_type & IPV6_ADDR_MULTICAST)
171 return -ENETUNREACH;
172
173 if (addr_type&IPV6_ADDR_LINKLOCAL) {
174 if (addr_len >= sizeof(struct sockaddr_in6) &&
175 usin->sin6_scope_id) {
176 /* If an interface was set while binding, the indices
177 * must coincide.
178 */
179 if (sk->sk_bound_dev_if &&
180 sk->sk_bound_dev_if != usin->sin6_scope_id)
181 return -EINVAL;
182
183 sk->sk_bound_dev_if = usin->sin6_scope_id;
184 }
185
186 /* Connecting to a link-local address requires an interface. */
187 if (!sk->sk_bound_dev_if)
188 return -EINVAL;
189 }
190
191 if (tp->rx_opt.ts_recent_stamp &&
192 !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
193 tp->rx_opt.ts_recent = 0;
194 tp->rx_opt.ts_recent_stamp = 0;
195 tp->write_seq = 0;
196 }
197
198 sk->sk_v6_daddr = usin->sin6_addr;
199 np->flow_label = fl6.flowlabel;
200
201 /*
202 * TCP over IPv4
203 */
204
205 if (addr_type & IPV6_ADDR_MAPPED) {
206 u32 exthdrlen = icsk->icsk_ext_hdr_len;
207 struct sockaddr_in sin;
208
209 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
210
211 if (__ipv6_only_sock(sk))
212 return -ENETUNREACH;
213
214 sin.sin_family = AF_INET;
215 sin.sin_port = usin->sin6_port;
216 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
217
218 icsk->icsk_af_ops = &ipv6_mapped;
219 sk->sk_backlog_rcv = tcp_v4_do_rcv;
220 #ifdef CONFIG_TCP_MD5SIG
221 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
222 #endif
223
224 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
225
226 if (err) {
227 icsk->icsk_ext_hdr_len = exthdrlen;
228 icsk->icsk_af_ops = &ipv6_specific;
229 sk->sk_backlog_rcv = tcp_v6_do_rcv;
230 #ifdef CONFIG_TCP_MD5SIG
231 tp->af_specific = &tcp_sock_ipv6_specific;
232 #endif
233 goto failure;
234 }
235 np->saddr = sk->sk_v6_rcv_saddr;
236
237 return err;
238 }
239
240 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
241 saddr = &sk->sk_v6_rcv_saddr;
242
243 fl6.flowi6_proto = IPPROTO_TCP;
244 fl6.daddr = sk->sk_v6_daddr;
245 fl6.saddr = saddr ? *saddr : np->saddr;
246 fl6.flowi6_oif = sk->sk_bound_dev_if;
247 fl6.flowi6_mark = sk->sk_mark;
248 fl6.fl6_dport = usin->sin6_port;
249 fl6.fl6_sport = inet->inet_sport;
250 fl6.flowi6_uid = sk->sk_uid;
251
252 opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
253 final_p = fl6_update_dst(&fl6, opt, &final);
254
255 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
256
257 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
258 if (IS_ERR(dst)) {
259 err = PTR_ERR(dst);
260 goto failure;
261 }
262
263 if (!saddr) {
264 saddr = &fl6.saddr;
265 sk->sk_v6_rcv_saddr = *saddr;
266 }
267
268 /* set the source address */
269 np->saddr = *saddr;
270 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
271
272 sk->sk_gso_type = SKB_GSO_TCPV6;
273 ip6_dst_store(sk, dst, NULL, NULL);
274
275 icsk->icsk_ext_hdr_len = 0;
276 if (opt)
277 icsk->icsk_ext_hdr_len = opt->opt_flen +
278 opt->opt_nflen;
279
280 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
281
282 inet->inet_dport = usin->sin6_port;
283
284 tcp_set_state(sk, TCP_SYN_SENT);
285 err = inet6_hash_connect(tcp_death_row, sk);
286 if (err)
287 goto late_failure;
288
289 sk_set_txhash(sk);
290
291 if (likely(!tp->repair)) {
292 if (!tp->write_seq)
293 tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
294 sk->sk_v6_daddr.s6_addr32,
295 inet->inet_sport,
296 inet->inet_dport);
297 tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
298 np->saddr.s6_addr32,
299 sk->sk_v6_daddr.s6_addr32);
300 }
301
302 if (tcp_fastopen_defer_connect(sk, &err))
303 return err;
304 if (err)
305 goto late_failure;
306
307 err = tcp_connect(sk);
308 if (err)
309 goto late_failure;
310
311 return 0;
312
313 late_failure:
314 tcp_set_state(sk, TCP_CLOSE);
315 failure:
316 inet->inet_dport = 0;
317 sk->sk_route_caps = 0;
318 return err;
319 }
320
321 static void tcp_v6_mtu_reduced(struct sock *sk)
322 {
323 struct dst_entry *dst;
324
325 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
326 return;
327
328 dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
329 if (!dst)
330 return;
331
332 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
333 tcp_sync_mss(sk, dst_mtu(dst));
334 tcp_simple_retransmit(sk);
335 }
336 }
337
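/* ICMPv6 error handler: locate the socket the error refers to and react to
 * redirects, Packet Too Big (PMTU) messages and hard errors.
 */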
338 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
339 u8 type, u8 code, int offset, __be32 info)
340 {
341 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
342 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
343 struct net *net = dev_net(skb->dev);
344 struct request_sock *fastopen;
345 struct ipv6_pinfo *np;
346 struct tcp_sock *tp;
347 __u32 seq, snd_una;
348 struct sock *sk;
349 bool fatal;
350 int err;
351
352 sk = __inet6_lookup_established(net, &tcp_hashinfo,
353 &hdr->daddr, th->dest,
354 &hdr->saddr, ntohs(th->source),
355 skb->dev->ifindex, inet6_sdif(skb));
356
357 if (!sk) {
358 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
359 ICMP6_MIB_INERRORS);
360 return;
361 }
362
363 if (sk->sk_state == TCP_TIME_WAIT) {
364 inet_twsk_put(inet_twsk(sk));
365 return;
366 }
367 seq = ntohl(th->seq);
368 fatal = icmpv6_err_convert(type, code, &err);
369 if (sk->sk_state == TCP_NEW_SYN_RECV)
370 return tcp_req_err(sk, seq, fatal);
371
372 bh_lock_sock(sk);
373 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
374 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
375
376 if (sk->sk_state == TCP_CLOSE)
377 goto out;
378
379 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
380 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
381 goto out;
382 }
383
384 tp = tcp_sk(sk);
385 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()). */
386 fastopen = tp->fastopen_rsk;
387 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
388 if (sk->sk_state != TCP_LISTEN &&
389 !between(seq, snd_una, tp->snd_nxt)) {
390 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
391 goto out;
392 }
393
394 np = inet6_sk(sk);
395
396 if (type == NDISC_REDIRECT) {
397 if (!sock_owned_by_user(sk)) {
398 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
399
400 if (dst)
401 dst->ops->redirect(dst, sk, skb);
402 }
403 goto out;
404 }
405
406 if (type == ICMPV6_PKT_TOOBIG) {
407 /* We are not interested in TCP_LISTEN and open_requests
408 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
409 * they should go through unfragmented).
410 */
411 if (sk->sk_state == TCP_LISTEN)
412 goto out;
413
414 if (!ip6_sk_accept_pmtu(sk))
415 goto out;
416
417 tp->mtu_info = ntohl(info);
418 if (!sock_owned_by_user(sk))
419 tcp_v6_mtu_reduced(sk);
420 else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
421 &sk->sk_tsq_flags))
422 sock_hold(sk);
423 goto out;
424 }
425
426
427 /* Might be for a request_sock */
428 switch (sk->sk_state) {
429 case TCP_SYN_SENT:
430 case TCP_SYN_RECV:
431 /* Only in fast or simultaneous open. If a fast open socket is
432 * already accepted, it is treated as a connected one below.
433 */
434 if (fastopen && !fastopen->sk)
435 break;
436
437 if (!sock_owned_by_user(sk)) {
438 sk->sk_err = err;
439 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
440
441 tcp_done(sk);
442 } else
443 sk->sk_err_soft = err;
444 goto out;
445 }
446
447 if (!sock_owned_by_user(sk) && np->recverr) {
448 sk->sk_err = err;
449 sk->sk_error_report(sk);
450 } else
451 sk->sk_err_soft = err;
452
453 out:
454 bh_unlock_sock(sk);
455 sock_put(sk);
456 }
457
458
459 static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
460 struct flowi *fl,
461 struct request_sock *req,
462 struct tcp_fastopen_cookie *foc,
463 enum tcp_synack_type synack_type)
464 {
465 struct inet_request_sock *ireq = inet_rsk(req);
466 struct ipv6_pinfo *np = inet6_sk(sk);
467 struct ipv6_txoptions *opt;
468 struct flowi6 *fl6 = &fl->u.ip6;
469 struct sk_buff *skb;
470 int err = -ENOMEM;
471
472 /* First, grab a route. */
473 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
474 IPPROTO_TCP)) == NULL)
475 goto done;
476
477 skb = tcp_make_synack(sk, dst, req, foc, synack_type);
478
479 if (skb) {
480 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
481 &ireq->ir_v6_rmt_addr);
482
483 fl6->daddr = ireq->ir_v6_rmt_addr;
484 if (np->repflow && ireq->pktopts)
485 fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
486
487 rcu_read_lock();
488 opt = ireq->ipv6_opt;
489 if (!opt)
490 opt = rcu_dereference(np->opt);
491 err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
492 rcu_read_unlock();
493 err = net_xmit_eval(err);
494 }
495
496 done:
497 return err;
498 }
499
500
501 static void tcp_v6_reqsk_destructor(struct request_sock *req)
502 {
503 kfree(inet_rsk(req)->ipv6_opt);
504 kfree_skb(inet_rsk(req)->pktopts);
505 }
506
507 #ifdef CONFIG_TCP_MD5SIG
508 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
509 const struct in6_addr *addr)
510 {
511 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
512 }
513
514 static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
515 const struct sock *addr_sk)
516 {
517 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
518 }
519
520 static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
521 char __user *optval, int optlen)
522 {
523 struct tcp_md5sig cmd;
524 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
525 u8 prefixlen;
526
527 if (optlen < sizeof(cmd))
528 return -EINVAL;
529
530 if (copy_from_user(&cmd, optval, sizeof(cmd)))
531 return -EFAULT;
532
533 if (sin6->sin6_family != AF_INET6)
534 return -EINVAL;
535
536 if (optname == TCP_MD5SIG_EXT &&
537 cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
538 prefixlen = cmd.tcpm_prefixlen;
539 if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
540 prefixlen > 32))
541 return -EINVAL;
542 } else {
543 prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
544 }
545
546 if (!cmd.tcpm_keylen) {
547 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
548 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
549 AF_INET, prefixlen);
550 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
551 AF_INET6, prefixlen);
552 }
553
554 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
555 return -EINVAL;
556
557 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
558 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
559 AF_INET, prefixlen, cmd.tcpm_key,
560 cmd.tcpm_keylen, GFP_KERNEL);
561
562 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
563 AF_INET6, prefixlen, cmd.tcpm_key,
564 cmd.tcpm_keylen, GFP_KERNEL);
565 }
566
567 static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
568 const struct in6_addr *daddr,
569 const struct in6_addr *saddr,
570 const struct tcphdr *th, int nbytes)
571 {
572 struct tcp6_pseudohdr *bp;
573 struct scatterlist sg;
574 struct tcphdr *_th;
575
576 bp = hp->scratch;
577 /* 1. TCP pseudo-header (RFC2460) */
578 bp->saddr = *saddr;
579 bp->daddr = *daddr;
580 bp->protocol = cpu_to_be32(IPPROTO_TCP);
581 bp->len = cpu_to_be32(nbytes);
582
583 _th = (struct tcphdr *)(bp + 1);
584 memcpy(_th, th, sizeof(*th));
585 _th->check = 0;
586
587 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
588 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
589 sizeof(*bp) + sizeof(*th));
590 return crypto_ahash_update(hp->md5_req);
591 }
592
593 static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
594 const struct in6_addr *daddr, struct in6_addr *saddr,
595 const struct tcphdr *th)
596 {
597 struct tcp_md5sig_pool *hp;
598 struct ahash_request *req;
599
600 hp = tcp_get_md5sig_pool();
601 if (!hp)
602 goto clear_hash_noput;
603 req = hp->md5_req;
604
605 if (crypto_ahash_init(req))
606 goto clear_hash;
607 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
608 goto clear_hash;
609 if (tcp_md5_hash_key(hp, key))
610 goto clear_hash;
611 ahash_request_set_crypt(req, NULL, md5_hash, 0);
612 if (crypto_ahash_final(req))
613 goto clear_hash;
614
615 tcp_put_md5sig_pool();
616 return 0;
617
618 clear_hash:
619 tcp_put_md5sig_pool();
620 clear_hash_noput:
621 memset(md5_hash, 0, 16);
622 return 1;
623 }
624
625 static int tcp_v6_md5_hash_skb(char *md5_hash,
626 const struct tcp_md5sig_key *key,
627 const struct sock *sk,
628 const struct sk_buff *skb)
629 {
630 const struct in6_addr *saddr, *daddr;
631 struct tcp_md5sig_pool *hp;
632 struct ahash_request *req;
633 const struct tcphdr *th = tcp_hdr(skb);
634
635 if (sk) { /* valid for establish/request sockets */
636 saddr = &sk->sk_v6_rcv_saddr;
637 daddr = &sk->sk_v6_daddr;
638 } else {
639 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
640 saddr = &ip6h->saddr;
641 daddr = &ip6h->daddr;
642 }
643
644 hp = tcp_get_md5sig_pool();
645 if (!hp)
646 goto clear_hash_noput;
647 req = hp->md5_req;
648
649 if (crypto_ahash_init(req))
650 goto clear_hash;
651
652 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
653 goto clear_hash;
654 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
655 goto clear_hash;
656 if (tcp_md5_hash_key(hp, key))
657 goto clear_hash;
658 ahash_request_set_crypt(req, NULL, md5_hash, 0);
659 if (crypto_ahash_final(req))
660 goto clear_hash;
661
662 tcp_put_md5sig_pool();
663 return 0;
664
665 clear_hash:
666 tcp_put_md5sig_pool();
667 clear_hash_noput:
668 memset(md5_hash, 0, 16);
669 return 1;
670 }
671
672 #endif
673
674 static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
675 const struct sk_buff *skb)
676 {
677 #ifdef CONFIG_TCP_MD5SIG
678 const __u8 *hash_location = NULL;
679 struct tcp_md5sig_key *hash_expected;
680 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
681 const struct tcphdr *th = tcp_hdr(skb);
682 int genhash;
683 u8 newhash[16];
684
685 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
686 hash_location = tcp_parse_md5sig_option(th);
687
688 /* We've parsed the options - do we have a hash? */
689 if (!hash_expected && !hash_location)
690 return false;
691
692 if (hash_expected && !hash_location) {
693 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
694 return true;
695 }
696
697 if (!hash_expected && hash_location) {
698 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
699 return true;
700 }
701
702 /* check the signature */
703 genhash = tcp_v6_md5_hash_skb(newhash,
704 hash_expected,
705 NULL, skb);
706
707 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
708 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
709 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
710 genhash ? "failed" : "mismatch",
711 &ip6h->saddr, ntohs(th->source),
712 &ip6h->daddr, ntohs(th->dest));
713 return true;
714 }
715 #endif
716 return false;
717 }
718
719 static void tcp_v6_init_req(struct request_sock *req,
720 const struct sock *sk_listener,
721 struct sk_buff *skb)
722 {
723 struct inet_request_sock *ireq = inet_rsk(req);
724 const struct ipv6_pinfo *np = inet6_sk(sk_listener);
725
726 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
727 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
728
729 /* So that link-local addresses have meaning */
730 if (!sk_listener->sk_bound_dev_if &&
731 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
732 ireq->ir_iif = tcp_v6_iif(skb);
733
734 if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
735 (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
736 np->rxopt.bits.rxinfo ||
737 np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
738 np->rxopt.bits.rxohlim || np->repflow)) {
739 refcount_inc(&skb->users);
740 ireq->pktopts = skb;
741 }
742 }
743
744 static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
745 struct flowi *fl,
746 const struct request_sock *req)
747 {
748 return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
749 }
750
751 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
752 .family = AF_INET6,
753 .obj_size = sizeof(struct tcp6_request_sock),
754 .rtx_syn_ack = tcp_rtx_synack,
755 .send_ack = tcp_v6_reqsk_send_ack,
756 .destructor = tcp_v6_reqsk_destructor,
757 .send_reset = tcp_v6_send_reset,
758 .syn_ack_timeout = tcp_syn_ack_timeout,
759 };
760
761 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
762 .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
763 sizeof(struct ipv6hdr),
764 #ifdef CONFIG_TCP_MD5SIG
765 .req_md5_lookup = tcp_v6_md5_lookup,
766 .calc_md5_hash = tcp_v6_md5_hash_skb,
767 #endif
768 .init_req = tcp_v6_init_req,
769 #ifdef CONFIG_SYN_COOKIES
770 .cookie_init_seq = cookie_v6_init_sequence,
771 #endif
772 .route_req = tcp_v6_route_req,
773 .init_seq = tcp_v6_init_seq,
774 .init_ts_off = tcp_v6_init_ts_off,
775 .send_synack = tcp_v6_send_synack,
776 };
777
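/* Build and send a bare ACK or RST in reply to @skb using the per-netns
 * control socket; no established socket state is required.
 */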
778 static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
779 u32 ack, u32 win, u32 tsval, u32 tsecr,
780 int oif, struct tcp_md5sig_key *key, int rst,
781 u8 tclass, __be32 label)
782 {
783 const struct tcphdr *th = tcp_hdr(skb);
784 struct tcphdr *t1;
785 struct sk_buff *buff;
786 struct flowi6 fl6;
787 struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
788 struct sock *ctl_sk = net->ipv6.tcp_sk;
789 unsigned int tot_len = sizeof(struct tcphdr);
790 struct dst_entry *dst;
791 __be32 *topt;
792
793 if (tsecr)
794 tot_len += TCPOLEN_TSTAMP_ALIGNED;
795 #ifdef CONFIG_TCP_MD5SIG
796 if (key)
797 tot_len += TCPOLEN_MD5SIG_ALIGNED;
798 #endif
799
800 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
801 GFP_ATOMIC);
802 if (!buff)
803 return;
804
805 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
806
807 t1 = skb_push(buff, tot_len);
808 skb_reset_transport_header(buff);
809
810 /* Swap the send and the receive. */
811 memset(t1, 0, sizeof(*t1));
812 t1->dest = th->source;
813 t1->source = th->dest;
814 t1->doff = tot_len / 4;
815 t1->seq = htonl(seq);
816 t1->ack_seq = htonl(ack);
817 t1->ack = !rst || !th->ack;
818 t1->rst = rst;
819 t1->window = htons(win);
820
821 topt = (__be32 *)(t1 + 1);
822
823 if (tsecr) {
824 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
825 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
826 *topt++ = htonl(tsval);
827 *topt++ = htonl(tsecr);
828 }
829
830 #ifdef CONFIG_TCP_MD5SIG
831 if (key) {
832 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
833 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
834 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
835 &ipv6_hdr(skb)->saddr,
836 &ipv6_hdr(skb)->daddr, t1);
837 }
838 #endif
839
840 memset(&fl6, 0, sizeof(fl6));
841 fl6.daddr = ipv6_hdr(skb)->saddr;
842 fl6.saddr = ipv6_hdr(skb)->daddr;
843 fl6.flowlabel = label;
844
845 buff->ip_summed = CHECKSUM_PARTIAL;
846 buff->csum = 0;
847
848 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
849
850 fl6.flowi6_proto = IPPROTO_TCP;
851 if (rt6_need_strict(&fl6.daddr) && !oif)
852 fl6.flowi6_oif = tcp_v6_iif(skb);
853 else {
854 if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
855 oif = skb->skb_iif;
856
857 fl6.flowi6_oif = oif;
858 }
859
860 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
861 fl6.fl6_dport = t1->dest;
862 fl6.fl6_sport = t1->source;
863 fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
864 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
865
866 /* Pass a socket to ip6_dst_lookup even when it is for an RST;
867 * the underlying function will use it to retrieve the network
868 * namespace.
869 */
870 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
871 if (!IS_ERR(dst)) {
872 skb_dst_set(buff, dst);
873 ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
874 TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
875 if (rst)
876 TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
877 return;
878 }
879
880 kfree_skb(buff);
881 }
882
883 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
884 {
885 const struct tcphdr *th = tcp_hdr(skb);
886 u32 seq = 0, ack_seq = 0;
887 struct tcp_md5sig_key *key = NULL;
888 #ifdef CONFIG_TCP_MD5SIG
889 const __u8 *hash_location = NULL;
890 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
891 unsigned char newhash[16];
892 int genhash;
893 struct sock *sk1 = NULL;
894 #endif
895 int oif = 0;
896
897 if (th->rst)
898 return;
899
900 /* If sk is not NULL, it means we did a successful lookup and the incoming
901 * route had to be correct. prequeue might have dropped our dst.
902 */
903 if (!sk && !ipv6_unicast_destination(skb))
904 return;
905
906 #ifdef CONFIG_TCP_MD5SIG
907 rcu_read_lock();
908 hash_location = tcp_parse_md5sig_option(th);
909 if (sk && sk_fullsock(sk)) {
910 key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
911 } else if (hash_location) {
912 /*
913 * The active side is lost. Try to find the listening socket through
914 * the source port, and then find the md5 key through that socket.
915 * We do not lose security here:
916 * the incoming packet is checked against the md5 hash of the key found;
917 * no RST is generated if the md5 hash doesn't match.
918 */
919 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
920 &tcp_hashinfo, NULL, 0,
921 &ipv6h->saddr,
922 th->source, &ipv6h->daddr,
923 ntohs(th->source),
924 tcp_v6_iif_l3_slave(skb),
925 tcp_v6_sdif(skb));
926 if (!sk1)
927 goto out;
928
929 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
930 if (!key)
931 goto out;
932
933 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
934 if (genhash || memcmp(hash_location, newhash, 16) != 0)
935 goto out;
936 }
937 #endif
938
939 if (th->ack)
940 seq = ntohl(th->ack_seq);
941 else
942 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
943 (th->doff << 2);
944
945 if (sk) {
946 oif = sk->sk_bound_dev_if;
947 if (sk_fullsock(sk))
948 trace_tcp_send_reset(sk, skb);
949 }
950
951 tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
952
953 #ifdef CONFIG_TCP_MD5SIG
954 out:
955 rcu_read_unlock();
956 #endif
957 }
958
959 static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
960 u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
961 struct tcp_md5sig_key *key, u8 tclass,
962 __be32 label)
963 {
964 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
965 tclass, label);
966 }
967
968 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
969 {
970 struct inet_timewait_sock *tw = inet_twsk(sk);
971 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
972
973 tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
974 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
975 tcp_time_stamp_raw() + tcptw->tw_ts_offset,
976 tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
977 tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
978
979 inet_twsk_put(tw);
980 }
981
982 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
983 struct request_sock *req)
984 {
985 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
986 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
987 */
988 /* RFC 7323 2.3
989 * The window field (SEG.WND) of every outgoing segment, with the
990 * exception of <SYN> segments, MUST be right-shifted by
991 * Rcv.Wind.Shift bits:
992 */
993 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
994 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
995 tcp_rsk(req)->rcv_nxt,
996 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
997 tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
998 req->ts_recent, sk->sk_bound_dev_if,
999 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
1000 0, 0);
1001 }
1002
1003
1004 static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
1005 {
1006 #ifdef CONFIG_SYN_COOKIES
1007 const struct tcphdr *th = tcp_hdr(skb);
1008
1009 if (!th->syn)
1010 sk = cookie_v6_check(sk, skb);
1011 #endif
1012 return sk;
1013 }
1014
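/* Handle an incoming connection request (SYN) on a listening socket. */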
1015 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1016 {
1017 if (skb->protocol == htons(ETH_P_IP))
1018 return tcp_v4_conn_request(sk, skb);
1019
1020 if (!ipv6_unicast_destination(skb))
1021 goto drop;
1022
1023 return tcp_conn_request(&tcp6_request_sock_ops,
1024 &tcp_request_sock_ipv6_ops, sk, skb);
1025
1026 drop:
1027 tcp_listendrop(sk);
1028 return 0; /* don't send reset */
1029 }
1030
1031 static void tcp_v6_restore_cb(struct sk_buff *skb)
1032 {
1033 /* We need to move header back to the beginning if xfrm6_policy_check()
1034 * and tcp_v6_fill_cb() are going to be called again.
1035 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1036 */
1037 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1038 sizeof(struct inet6_skb_parm));
1039 }
1040
1041 static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1042 struct request_sock *req,
1043 struct dst_entry *dst,
1044 struct request_sock *req_unhash,
1045 bool *own_req)
1046 {
1047 struct inet_request_sock *ireq;
1048 struct ipv6_pinfo *newnp;
1049 const struct ipv6_pinfo *np = inet6_sk(sk);
1050 struct ipv6_txoptions *opt;
1051 struct tcp6_sock *newtcp6sk;
1052 struct inet_sock *newinet;
1053 struct tcp_sock *newtp;
1054 struct sock *newsk;
1055 #ifdef CONFIG_TCP_MD5SIG
1056 struct tcp_md5sig_key *key;
1057 #endif
1058 struct flowi6 fl6;
1059
1060 if (skb->protocol == htons(ETH_P_IP)) {
1061 /*
1062 * v6 mapped
1063 */
1064
1065 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1066 req_unhash, own_req);
1067
1068 if (!newsk)
1069 return NULL;
1070
1071 newtcp6sk = (struct tcp6_sock *)newsk;
1072 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1073
1074 newinet = inet_sk(newsk);
1075 newnp = inet6_sk(newsk);
1076 newtp = tcp_sk(newsk);
1077
1078 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1079
1080 newnp->saddr = newsk->sk_v6_rcv_saddr;
1081
1082 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1083 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1084 #ifdef CONFIG_TCP_MD5SIG
1085 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1086 #endif
1087
1088 newnp->ipv6_mc_list = NULL;
1089 newnp->ipv6_ac_list = NULL;
1090 newnp->ipv6_fl_list = NULL;
1091 newnp->pktoptions = NULL;
1092 newnp->opt = NULL;
1093 newnp->mcast_oif = tcp_v6_iif(skb);
1094 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1095 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1096 if (np->repflow)
1097 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1098
1099 /*
1100 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1101 * here, tcp_create_openreq_child now does this for us, see the comment in
1102 * that function for the gory details. -acme
1103 */
1104
1105 /* This is a tricky place. Until this moment the IPv4 TCP code
1106 worked with the IPv6 icsk.icsk_af_ops.
1107 Sync it now.
1108 */
1109 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1110
1111 return newsk;
1112 }
1113
1114 ireq = inet_rsk(req);
1115
1116 if (sk_acceptq_is_full(sk))
1117 goto out_overflow;
1118
1119 if (!dst) {
1120 dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
1121 if (!dst)
1122 goto out;
1123 }
1124
1125 newsk = tcp_create_openreq_child(sk, req, skb);
1126 if (!newsk)
1127 goto out_nonewsk;
1128
1129 /*
1130 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1131 * count here, tcp_create_openreq_child now does this for us, see the
1132 * comment in that function for the gory details. -acme
1133 */
1134
1135 newsk->sk_gso_type = SKB_GSO_TCPV6;
1136 ip6_dst_store(newsk, dst, NULL, NULL);
1137 inet6_sk_rx_dst_set(newsk, skb);
1138
1139 newtcp6sk = (struct tcp6_sock *)newsk;
1140 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1141
1142 newtp = tcp_sk(newsk);
1143 newinet = inet_sk(newsk);
1144 newnp = inet6_sk(newsk);
1145
1146 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1147
1148 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1149 newnp->saddr = ireq->ir_v6_loc_addr;
1150 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1151 newsk->sk_bound_dev_if = ireq->ir_iif;
1152
1153 /* Now IPv6 options...
1154
1155 First: no IPv4 options.
1156 */
1157 newinet->inet_opt = NULL;
1158 newnp->ipv6_mc_list = NULL;
1159 newnp->ipv6_ac_list = NULL;
1160 newnp->ipv6_fl_list = NULL;
1161
1162 /* Clone RX bits */
1163 newnp->rxopt.all = np->rxopt.all;
1164
1165 newnp->pktoptions = NULL;
1166 newnp->opt = NULL;
1167 newnp->mcast_oif = tcp_v6_iif(skb);
1168 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1169 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1170 if (np->repflow)
1171 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1172
1173 /* Clone native IPv6 options from listening socket (if any)
1174
1175 Yes, keeping a reference count would be much more clever,
1176 but we do one more thing here: reattach optmem
1177 to newsk.
1178 */
1179 opt = ireq->ipv6_opt;
1180 if (!opt)
1181 opt = rcu_dereference(np->opt);
1182 if (opt) {
1183 opt = ipv6_dup_options(newsk, opt);
1184 RCU_INIT_POINTER(newnp->opt, opt);
1185 }
1186 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1187 if (opt)
1188 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1189 opt->opt_flen;
1190
1191 tcp_ca_openreq_child(newsk, dst);
1192
1193 tcp_sync_mss(newsk, dst_mtu(dst));
1194 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1195
1196 tcp_initialize_rcv_mss(newsk);
1197
1198 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1199 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1200
1201 #ifdef CONFIG_TCP_MD5SIG
1202 /* Copy over the MD5 key from the original socket */
1203 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
1204 if (key) {
1205 /* We're using one, so create a matching key
1206 * on the newsk structure. If we fail to get
1207 * memory, then we end up not copying the key
1208 * across. Shucks.
1209 */
1210 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1211 AF_INET6, 128, key->key, key->keylen,
1212 sk_gfp_mask(sk, GFP_ATOMIC));
1213 }
1214 #endif
1215
1216 if (__inet_inherit_port(sk, newsk) < 0) {
1217 inet_csk_prepare_forced_close(newsk);
1218 tcp_done(newsk);
1219 goto out;
1220 }
1221 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1222 if (*own_req) {
1223 tcp_move_syn(newtp, req);
1224
1225 /* Clone pktoptions received with SYN, if we own the req */
1226 if (ireq->pktopts) {
1227 newnp->pktoptions = skb_clone(ireq->pktopts,
1228 sk_gfp_mask(sk, GFP_ATOMIC));
1229 consume_skb(ireq->pktopts);
1230 ireq->pktopts = NULL;
1231 if (newnp->pktoptions) {
1232 tcp_v6_restore_cb(newnp->pktoptions);
1233 skb_set_owner_r(newnp->pktoptions, newsk);
1234 }
1235 }
1236 }
1237
1238 return newsk;
1239
1240 out_overflow:
1241 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1242 out_nonewsk:
1243 dst_release(dst);
1244 out:
1245 tcp_listendrop(sk);
1246 return NULL;
1247 }
1248
1249 /* The socket must have its spinlock held when we get
1250 * here, unless it is a TCP_LISTEN socket.
1251 *
1252 * We have a potential double-lock case here, so even when
1253 * doing backlog processing we use the BH locking scheme.
1254 * This is because we cannot sleep with the original spinlock
1255 * held.
1256 */
1257 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1258 {
1259 struct ipv6_pinfo *np = inet6_sk(sk);
1260 struct tcp_sock *tp;
1261 struct sk_buff *opt_skb = NULL;
1262
1263 /* Imagine: the socket is IPv6. An IPv4 packet arrives,
1264 goes to the IPv4 receive handler and is backlogged.
1265 From the backlog it always goes here. Kerboom...
1266 Fortunately, tcp_rcv_established and rcv_established
1267 handle them correctly, but that is not the case with
1268 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1269 */
1270
1271 if (skb->protocol == htons(ETH_P_IP))
1272 return tcp_v4_do_rcv(sk, skb);
1273
1274 /*
1275 * socket locking is here for SMP purposes as backlog rcv
1276 * is currently called with bh processing disabled.
1277 */
1278
1279 /* Do Stevens' IPV6_PKTOPTIONS.
1280
1281 Yes, guys, it is the only place in our code where we
1282 can make it not affect IPv4.
1283 The rest of the code is protocol independent,
1284 and I do not like the idea of uglifying IPv4.
1285 
1286 Actually, the whole idea behind IPV6_PKTOPTIONS
1287 looks not very well thought out. For now we latch
1288 the options received in the last packet enqueued
1289 by tcp. Feel free to propose a better solution.
1290 --ANK (980728)
1291 */
1292 if (np->rxopt.all)
1293 opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
1294
1295 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1296 struct dst_entry *dst = sk->sk_rx_dst;
1297
1298 sock_rps_save_rxhash(sk, skb);
1299 sk_mark_napi_id(sk, skb);
1300 if (dst) {
1301 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1302 dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1303 dst_release(dst);
1304 sk->sk_rx_dst = NULL;
1305 }
1306 }
1307
1308 tcp_rcv_established(sk, skb, tcp_hdr(skb));
1309 if (opt_skb)
1310 goto ipv6_pktoptions;
1311 return 0;
1312 }
1313
1314 if (tcp_checksum_complete(skb))
1315 goto csum_err;
1316
1317 if (sk->sk_state == TCP_LISTEN) {
1318 struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1319
1320 if (!nsk)
1321 goto discard;
1322
1323 if (nsk != sk) {
1324 if (tcp_child_process(sk, nsk, skb))
1325 goto reset;
1326 if (opt_skb)
1327 __kfree_skb(opt_skb);
1328 return 0;
1329 }
1330 } else
1331 sock_rps_save_rxhash(sk, skb);
1332
1333 if (tcp_rcv_state_process(sk, skb))
1334 goto reset;
1335 if (opt_skb)
1336 goto ipv6_pktoptions;
1337 return 0;
1338
1339 reset:
1340 tcp_v6_send_reset(sk, skb);
1341 discard:
1342 if (opt_skb)
1343 __kfree_skb(opt_skb);
1344 kfree_skb(skb);
1345 return 0;
1346 csum_err:
1347 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1348 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1349 goto discard;
1350
1351
1352 ipv6_pktoptions:
1353 /* You may ask, what is this?
1354
1355 1. skb was enqueued by tcp.
1356 2. skb is added to tail of read queue, rather than out of order.
1357 3. socket is not in passive state.
1358 4. Finally, it really contains options which the user wants to receive.
1359 */
1360 tp = tcp_sk(sk);
1361 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1362 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1363 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1364 np->mcast_oif = tcp_v6_iif(opt_skb);
1365 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1366 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1367 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1368 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1369 if (np->repflow)
1370 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1371 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1372 skb_set_owner_r(opt_skb, sk);
1373 tcp_v6_restore_cb(opt_skb);
1374 opt_skb = xchg(&np->pktoptions, opt_skb);
1375 } else {
1376 __kfree_skb(opt_skb);
1377 opt_skb = xchg(&np->pktoptions, NULL);
1378 }
1379 }
1380
1381 kfree_skb(opt_skb);
1382 return 0;
1383 }
1384
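/* Populate TCP_SKB_CB() from the IPv6 and TCP headers before the skb is
 * handed to the TCP state machine.
 */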
1385 static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1386 const struct tcphdr *th)
1387 {
1388 /* This is tricky: we move IP6CB at its correct location into
1389 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1390 * _decode_session6() uses IP6CB().
1391 * barrier() makes sure compiler won't play aliasing games.
1392 */
1393 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1394 sizeof(struct inet6_skb_parm));
1395 barrier();
1396
1397 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1398 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1399 skb->len - th->doff*4);
1400 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1401 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1402 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1403 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1404 TCP_SKB_CB(skb)->sacked = 0;
1405 TCP_SKB_CB(skb)->has_rxtstamp =
1406 skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1407 }
1408
1409 static int tcp_v6_rcv(struct sk_buff *skb)
1410 {
1411 int sdif = inet6_sdif(skb);
1412 const struct tcphdr *th;
1413 const struct ipv6hdr *hdr;
1414 bool refcounted;
1415 struct sock *sk;
1416 int ret;
1417 struct net *net = dev_net(skb->dev);
1418
1419 if (skb->pkt_type != PACKET_HOST)
1420 goto discard_it;
1421
1422 /*
1423 * Count it even if it's bad.
1424 */
1425 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1426
1427 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1428 goto discard_it;
1429
1430 th = (const struct tcphdr *)skb->data;
1431
1432 if (unlikely(th->doff < sizeof(struct tcphdr)/4))
1433 goto bad_packet;
1434 if (!pskb_may_pull(skb, th->doff*4))
1435 goto discard_it;
1436
1437 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1438 goto csum_error;
1439
1440 th = (const struct tcphdr *)skb->data;
1441 hdr = ipv6_hdr(skb);
1442
1443 lookup:
1444 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
1445 th->source, th->dest, inet6_iif(skb), sdif,
1446 &refcounted);
1447 if (!sk)
1448 goto no_tcp_socket;
1449
1450 process:
1451 if (sk->sk_state == TCP_TIME_WAIT)
1452 goto do_time_wait;
1453
1454 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1455 struct request_sock *req = inet_reqsk(sk);
1456 struct sock *nsk;
1457
1458 sk = req->rsk_listener;
1459 if (tcp_v6_inbound_md5_hash(sk, skb)) {
1460 sk_drops_add(sk, skb);
1461 reqsk_put(req);
1462 goto discard_it;
1463 }
1464 if (tcp_checksum_complete(skb)) {
1465 reqsk_put(req);
1466 goto csum_error;
1467 }
1468 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1469 inet_csk_reqsk_queue_drop_and_put(sk, req);
1470 goto lookup;
1471 }
1472 sock_hold(sk);
1473 refcounted = true;
1474 nsk = NULL;
1475 if (!tcp_filter(sk, skb)) {
1476 th = (const struct tcphdr *)skb->data;
1477 hdr = ipv6_hdr(skb);
1478 tcp_v6_fill_cb(skb, hdr, th);
1479 nsk = tcp_check_req(sk, skb, req, false);
1480 }
1481 if (!nsk) {
1482 reqsk_put(req);
1483 goto discard_and_relse;
1484 }
1485 if (nsk == sk) {
1486 reqsk_put(req);
1487 tcp_v6_restore_cb(skb);
1488 } else if (tcp_child_process(sk, nsk, skb)) {
1489 tcp_v6_send_reset(nsk, skb);
1490 goto discard_and_relse;
1491 } else {
1492 sock_put(sk);
1493 return 0;
1494 }
1495 }
1496 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1497 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1498 goto discard_and_relse;
1499 }
1500
1501 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1502 goto discard_and_relse;
1503
1504 if (tcp_v6_inbound_md5_hash(sk, skb))
1505 goto discard_and_relse;
1506
1507 if (tcp_filter(sk, skb))
1508 goto discard_and_relse;
1509 th = (const struct tcphdr *)skb->data;
1510 hdr = ipv6_hdr(skb);
1511 tcp_v6_fill_cb(skb, hdr, th);
1512
1513 skb->dev = NULL;
1514
1515 if (sk->sk_state == TCP_LISTEN) {
1516 ret = tcp_v6_do_rcv(sk, skb);
1517 goto put_and_return;
1518 }
1519
1520 sk_incoming_cpu_update(sk);
1521
1522 bh_lock_sock_nested(sk);
1523 tcp_segs_in(tcp_sk(sk), skb);
1524 ret = 0;
1525 if (!sock_owned_by_user(sk)) {
1526 ret = tcp_v6_do_rcv(sk, skb);
1527 } else if (tcp_add_backlog(sk, skb)) {
1528 goto discard_and_relse;
1529 }
1530 bh_unlock_sock(sk);
1531
1532 put_and_return:
1533 if (refcounted)
1534 sock_put(sk);
1535 return ret ? -1 : 0;
1536
1537 no_tcp_socket:
1538 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1539 goto discard_it;
1540
1541 tcp_v6_fill_cb(skb, hdr, th);
1542
1543 if (tcp_checksum_complete(skb)) {
1544 csum_error:
1545 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1546 bad_packet:
1547 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1548 } else {
1549 tcp_v6_send_reset(NULL, skb);
1550 }
1551
1552 discard_it:
1553 kfree_skb(skb);
1554 return 0;
1555
1556 discard_and_relse:
1557 sk_drops_add(sk, skb);
1558 if (refcounted)
1559 sock_put(sk);
1560 goto discard_it;
1561
1562 do_time_wait:
1563 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1564 inet_twsk_put(inet_twsk(sk));
1565 goto discard_it;
1566 }
1567
1568 tcp_v6_fill_cb(skb, hdr, th);
1569
1570 if (tcp_checksum_complete(skb)) {
1571 inet_twsk_put(inet_twsk(sk));
1572 goto csum_error;
1573 }
1574
1575 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1576 case TCP_TW_SYN:
1577 {
1578 struct sock *sk2;
1579
1580 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1581 skb, __tcp_hdrlen(th),
1582 &ipv6_hdr(skb)->saddr, th->source,
1583 &ipv6_hdr(skb)->daddr,
1584 ntohs(th->dest),
1585 tcp_v6_iif_l3_slave(skb),
1586 sdif);
1587 if (sk2) {
1588 struct inet_timewait_sock *tw = inet_twsk(sk);
1589 inet_twsk_deschedule_put(tw);
1590 sk = sk2;
1591 tcp_v6_restore_cb(skb);
1592 refcounted = false;
1593 goto process;
1594 }
1595 }
1596 /* to ACK */
1597 /* fall through */
1598 case TCP_TW_ACK:
1599 tcp_v6_timewait_ack(sk, skb);
1600 break;
1601 case TCP_TW_RST:
1602 tcp_v6_send_reset(sk, skb);
1603 inet_twsk_deschedule_put(inet_twsk(sk));
1604 goto discard_it;
1605 case TCP_TW_SUCCESS:
1606 ;
1607 }
1608 goto discard_it;
1609 }
1610
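/* Early demux: look up an established socket at IP receive time and attach
 * it (and, when valid, its cached dst) to the skb to shorten the receive path.
 */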
1611 static void tcp_v6_early_demux(struct sk_buff *skb)
1612 {
1613 const struct ipv6hdr *hdr;
1614 const struct tcphdr *th;
1615 struct sock *sk;
1616
1617 if (skb->pkt_type != PACKET_HOST)
1618 return;
1619
1620 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1621 return;
1622
1623 hdr = ipv6_hdr(skb);
1624 th = tcp_hdr(skb);
1625
1626 if (th->doff < sizeof(struct tcphdr) / 4)
1627 return;
1628
1629 /* Note: We use inet6_iif() here, not tcp_v6_iif() */
1630 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1631 &hdr->saddr, th->source,
1632 &hdr->daddr, ntohs(th->dest),
1633 inet6_iif(skb), inet6_sdif(skb));
1634 if (sk) {
1635 skb->sk = sk;
1636 skb->destructor = sock_edemux;
1637 if (sk_fullsock(sk)) {
1638 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1639
1640 if (dst)
1641 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1642 if (dst &&
1643 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1644 skb_dst_set_noref(skb, dst);
1645 }
1646 }
1647 }
1648
1649 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1650 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1651 .twsk_unique = tcp_twsk_unique,
1652 .twsk_destructor = tcp_twsk_destructor,
1653 };
1654
1655 static const struct inet_connection_sock_af_ops ipv6_specific = {
1656 .queue_xmit = inet6_csk_xmit,
1657 .send_check = tcp_v6_send_check,
1658 .rebuild_header = inet6_sk_rebuild_header,
1659 .sk_rx_dst_set = inet6_sk_rx_dst_set,
1660 .conn_request = tcp_v6_conn_request,
1661 .syn_recv_sock = tcp_v6_syn_recv_sock,
1662 .net_header_len = sizeof(struct ipv6hdr),
1663 .net_frag_header_len = sizeof(struct frag_hdr),
1664 .setsockopt = ipv6_setsockopt,
1665 .getsockopt = ipv6_getsockopt,
1666 .addr2sockaddr = inet6_csk_addr2sockaddr,
1667 .sockaddr_len = sizeof(struct sockaddr_in6),
1668 #ifdef CONFIG_COMPAT
1669 .compat_setsockopt = compat_ipv6_setsockopt,
1670 .compat_getsockopt = compat_ipv6_getsockopt,
1671 #endif
1672 .mtu_reduced = tcp_v6_mtu_reduced,
1673 };
1674
1675 #ifdef CONFIG_TCP_MD5SIG
1676 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1677 .md5_lookup = tcp_v6_md5_lookup,
1678 .calc_md5_hash = tcp_v6_md5_hash_skb,
1679 .md5_parse = tcp_v6_parse_md5_keys,
1680 };
1681 #endif
1682
1683 /*
1684 * TCP over IPv4 via INET6 API
1685 */
1686 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1687 .queue_xmit = ip_queue_xmit,
1688 .send_check = tcp_v4_send_check,
1689 .rebuild_header = inet_sk_rebuild_header,
1690 .sk_rx_dst_set = inet_sk_rx_dst_set,
1691 .conn_request = tcp_v6_conn_request,
1692 .syn_recv_sock = tcp_v6_syn_recv_sock,
1693 .net_header_len = sizeof(struct iphdr),
1694 .setsockopt = ipv6_setsockopt,
1695 .getsockopt = ipv6_getsockopt,
1696 .addr2sockaddr = inet6_csk_addr2sockaddr,
1697 .sockaddr_len = sizeof(struct sockaddr_in6),
1698 #ifdef CONFIG_COMPAT
1699 .compat_setsockopt = compat_ipv6_setsockopt,
1700 .compat_getsockopt = compat_ipv6_getsockopt,
1701 #endif
1702 .mtu_reduced = tcp_v4_mtu_reduced,
1703 };
1704
1705 #ifdef CONFIG_TCP_MD5SIG
1706 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1707 .md5_lookup = tcp_v4_md5_lookup,
1708 .calc_md5_hash = tcp_v4_md5_hash_skb,
1709 .md5_parse = tcp_v6_parse_md5_keys,
1710 };
1711 #endif
1712
1713 /* NOTE: A lot of things are set to zero explicitly by the call to
1714 * sk_alloc(), so they need not be done here.
1715 */
1716 static int tcp_v6_init_sock(struct sock *sk)
1717 {
1718 struct inet_connection_sock *icsk = inet_csk(sk);
1719
1720 tcp_init_sock(sk);
1721
1722 icsk->icsk_af_ops = &ipv6_specific;
1723
1724 #ifdef CONFIG_TCP_MD5SIG
1725 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1726 #endif
1727
1728 return 0;
1729 }
1730
1731 static void tcp_v6_destroy_sock(struct sock *sk)
1732 {
1733 tcp_v4_destroy_sock(sk);
1734 inet6_destroy_sock(sk);
1735 }
1736
1737 #ifdef CONFIG_PROC_FS
1738 /* Proc filesystem TCPv6 sock list dumping. */
1739 static void get_openreq6(struct seq_file *seq,
1740 const struct request_sock *req, int i)
1741 {
1742 long ttd = req->rsk_timer.expires - jiffies;
1743 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1744 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1745
1746 if (ttd < 0)
1747 ttd = 0;
1748
1749 seq_printf(seq,
1750 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1751 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1752 i,
1753 src->s6_addr32[0], src->s6_addr32[1],
1754 src->s6_addr32[2], src->s6_addr32[3],
1755 inet_rsk(req)->ir_num,
1756 dest->s6_addr32[0], dest->s6_addr32[1],
1757 dest->s6_addr32[2], dest->s6_addr32[3],
1758 ntohs(inet_rsk(req)->ir_rmt_port),
1759 TCP_SYN_RECV,
1760 0, 0, /* could print option size, but that is af dependent. */
1761 1, /* timers active (only the expire timer) */
1762 jiffies_to_clock_t(ttd),
1763 req->num_timeout,
1764 from_kuid_munged(seq_user_ns(seq),
1765 sock_i_uid(req->rsk_listener)),
1766 0, /* non standard timer */
1767 0, /* open_requests have no inode */
1768 0, req);
1769 }
1770
1771 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1772 {
1773 const struct in6_addr *dest, *src;
1774 __u16 destp, srcp;
1775 int timer_active;
1776 unsigned long timer_expires;
1777 const struct inet_sock *inet = inet_sk(sp);
1778 const struct tcp_sock *tp = tcp_sk(sp);
1779 const struct inet_connection_sock *icsk = inet_csk(sp);
1780 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
1781 int rx_queue;
1782 int state;
1783
1784 dest = &sp->sk_v6_daddr;
1785 src = &sp->sk_v6_rcv_saddr;
1786 destp = ntohs(inet->inet_dport);
1787 srcp = ntohs(inet->inet_sport);
1788
1789 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
1790 icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
1791 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
1792 timer_active = 1;
1793 timer_expires = icsk->icsk_timeout;
1794 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1795 timer_active = 4;
1796 timer_expires = icsk->icsk_timeout;
1797 } else if (timer_pending(&sp->sk_timer)) {
1798 timer_active = 2;
1799 timer_expires = sp->sk_timer.expires;
1800 } else {
1801 timer_active = 0;
1802 timer_expires = jiffies;
1803 }
1804
1805 state = sk_state_load(sp);
1806 if (state == TCP_LISTEN)
1807 rx_queue = sp->sk_ack_backlog;
1808 else
1809 /* Because we don't lock the socket,
1810 * we might find a transient negative value.
1811 */
1812 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1813
1814 seq_printf(seq,
1815 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1816 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1817 i,
1818 src->s6_addr32[0], src->s6_addr32[1],
1819 src->s6_addr32[2], src->s6_addr32[3], srcp,
1820 dest->s6_addr32[0], dest->s6_addr32[1],
1821 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1822 state,
1823 tp->write_seq - tp->snd_una,
1824 rx_queue,
1825 timer_active,
1826 jiffies_delta_to_clock_t(timer_expires - jiffies),
1827 icsk->icsk_retransmits,
1828 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1829 icsk->icsk_probes_out,
1830 sock_i_ino(sp),
1831 refcount_read(&sp->sk_refcnt), sp,
1832 jiffies_to_clock_t(icsk->icsk_rto),
1833 jiffies_to_clock_t(icsk->icsk_ack.ato),
1834 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1835 tp->snd_cwnd,
1836 state == TCP_LISTEN ?
1837 fastopenq->max_qlen :
1838 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1839 );
1840 }
1841
1842 static void get_timewait6_sock(struct seq_file *seq,
1843 struct inet_timewait_sock *tw, int i)
1844 {
1845 long delta = tw->tw_timer.expires - jiffies;
1846 const struct in6_addr *dest, *src;
1847 __u16 destp, srcp;
1848
1849 dest = &tw->tw_v6_daddr;
1850 src = &tw->tw_v6_rcv_saddr;
1851 destp = ntohs(tw->tw_dport);
1852 srcp = ntohs(tw->tw_sport);
1853
1854 seq_printf(seq,
1855 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1856 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1857 i,
1858 src->s6_addr32[0], src->s6_addr32[1],
1859 src->s6_addr32[2], src->s6_addr32[3], srcp,
1860 dest->s6_addr32[0], dest->s6_addr32[1],
1861 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1862 tw->tw_substate, 0, 0,
1863 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1864 refcount_read(&tw->tw_refcnt), tw);
1865 }
1866
1867 static int tcp6_seq_show(struct seq_file *seq, void *v)
1868 {
1869 struct tcp_iter_state *st;
1870 struct sock *sk = v;
1871
1872 if (v == SEQ_START_TOKEN) {
1873 seq_puts(seq,
1874 " sl "
1875 "local_address "
1876 "remote_address "
1877 "st tx_queue rx_queue tr tm->when retrnsmt"
1878 " uid timeout inode\n");
1879 goto out;
1880 }
1881 st = seq->private;
1882
1883 if (sk->sk_state == TCP_TIME_WAIT)
1884 get_timewait6_sock(seq, v, st->num);
1885 else if (sk->sk_state == TCP_NEW_SYN_RECV)
1886 get_openreq6(seq, v, st->num);
1887 else
1888 get_tcp6_sock(seq, v, st->num);
1889 out:
1890 return 0;
1891 }
1892
1893 static const struct file_operations tcp6_afinfo_seq_fops = {
1894 .owner = THIS_MODULE,
1895 .open = tcp_seq_open,
1896 .read = seq_read,
1897 .llseek = seq_lseek,
1898 .release = seq_release_net
1899 };
1900
1901 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1902 .name = "tcp6",
1903 .family = AF_INET6,
1904 .seq_fops = &tcp6_afinfo_seq_fops,
1905 .seq_ops = {
1906 .show = tcp6_seq_show,
1907 },
1908 };
1909
1910 int __net_init tcp6_proc_init(struct net *net)
1911 {
1912 return tcp_proc_register(net, &tcp6_seq_afinfo);
1913 }
1914
1915 void tcp6_proc_exit(struct net *net)
1916 {
1917 tcp_proc_unregister(net, &tcp6_seq_afinfo);
1918 }
1919 #endif
1920
1921 struct proto tcpv6_prot = {
1922 .name = "TCPv6",
1923 .owner = THIS_MODULE,
1924 .close = tcp_close,
1925 .connect = tcp_v6_connect,
1926 .disconnect = tcp_disconnect,
1927 .accept = inet_csk_accept,
1928 .ioctl = tcp_ioctl,
1929 .init = tcp_v6_init_sock,
1930 .destroy = tcp_v6_destroy_sock,
1931 .shutdown = tcp_shutdown,
1932 .setsockopt = tcp_setsockopt,
1933 .getsockopt = tcp_getsockopt,
1934 .keepalive = tcp_set_keepalive,
1935 .recvmsg = tcp_recvmsg,
1936 .sendmsg = tcp_sendmsg,
1937 .sendpage = tcp_sendpage,
1938 .backlog_rcv = tcp_v6_do_rcv,
1939 .release_cb = tcp_release_cb,
1940 .hash = inet6_hash,
1941 .unhash = inet_unhash,
1942 .get_port = inet_csk_get_port,
1943 .enter_memory_pressure = tcp_enter_memory_pressure,
1944 .leave_memory_pressure = tcp_leave_memory_pressure,
1945 .stream_memory_free = tcp_stream_memory_free,
1946 .sockets_allocated = &tcp_sockets_allocated,
1947 .memory_allocated = &tcp_memory_allocated,
1948 .memory_pressure = &tcp_memory_pressure,
1949 .orphan_count = &tcp_orphan_count,
1950 .sysctl_mem = sysctl_tcp_mem,
1951 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem),
1952 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem),
1953 .max_header = MAX_TCP_HEADER,
1954 .obj_size = sizeof(struct tcp6_sock),
1955 .slab_flags = SLAB_TYPESAFE_BY_RCU,
1956 .twsk_prot = &tcp6_timewait_sock_ops,
1957 .rsk_prot = &tcp6_request_sock_ops,
1958 .h.hashinfo = &tcp_hashinfo,
1959 .no_autobind = true,
1960 #ifdef CONFIG_COMPAT
1961 .compat_setsockopt = compat_tcp_setsockopt,
1962 .compat_getsockopt = compat_tcp_getsockopt,
1963 #endif
1964 .diag_destroy = tcp_abort,
1965 };
1966
1967 /* Thinking of making this const? Don't.
1968 * early_demux can change based on sysctl.
1969 */
1970 static struct inet6_protocol tcpv6_protocol = {
1971 .early_demux = tcp_v6_early_demux,
1972 .early_demux_handler = tcp_v6_early_demux,
1973 .handler = tcp_v6_rcv,
1974 .err_handler = tcp_v6_err,
1975 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1976 };
1977
1978 static struct inet_protosw tcpv6_protosw = {
1979 .type = SOCK_STREAM,
1980 .protocol = IPPROTO_TCP,
1981 .prot = &tcpv6_prot,
1982 .ops = &inet6_stream_ops,
1983 .flags = INET_PROTOSW_PERMANENT |
1984 INET_PROTOSW_ICSK,
1985 };
1986
1987 static int __net_init tcpv6_net_init(struct net *net)
1988 {
1989 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1990 SOCK_RAW, IPPROTO_TCP, net);
1991 }
1992
1993 static void __net_exit tcpv6_net_exit(struct net *net)
1994 {
1995 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
1996 }
1997
1998 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
1999 {
2000 inet_twsk_purge(&tcp_hashinfo, AF_INET6);
2001 }
2002
2003 static struct pernet_operations tcpv6_net_ops = {
2004 .init = tcpv6_net_init,
2005 .exit = tcpv6_net_exit,
2006 .exit_batch = tcpv6_net_exit_batch,
2007 };
2008
2009 int __init tcpv6_init(void)
2010 {
2011 int ret;
2012
2013 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2014 if (ret)
2015 goto out;
2016
2017 /* register inet6 protocol */
2018 ret = inet6_register_protosw(&tcpv6_protosw);
2019 if (ret)
2020 goto out_tcpv6_protocol;
2021
2022 ret = register_pernet_subsys(&tcpv6_net_ops);
2023 if (ret)
2024 goto out_tcpv6_protosw;
2025 out:
2026 return ret;
2027
2028 out_tcpv6_protosw:
2029 inet6_unregister_protosw(&tcpv6_protosw);
2030 out_tcpv6_protocol:
2031 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2032 goto out;
2033 }
2034
2035 void tcpv6_exit(void)
2036 {
2037 unregister_pernet_subsys(&tcpv6_net_ops);
2038 inet6_unregister_protosw(&tcpv6_protosw);
2039 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2040 }