1 /*
2 * TCP over IPv6
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * Based on:
9 * linux/net/ipv4/tcp.c
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
12 *
13 * Fixes:
14 * Hideaki YOSHIFUJI : sin6_scope_id support
15 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
16 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
17 * a single port at the same time.
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
34 #include <linux/in.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42 #include <linux/uaccess.h>
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/inet_common.h>
63 #include <net/secure_seq.h>
64 #include <net/tcp_memcontrol.h>
65 #include <net/busy_poll.h>
66
67 #include <linux/proc_fs.h>
68 #include <linux/seq_file.h>
69
70 #include <linux/crypto.h>
71 #include <linux/scatterlist.h>
72
73 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
74 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
75 struct request_sock *req);
76
77 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
78
79 static const struct inet_connection_sock_af_ops ipv6_mapped;
80 static const struct inet_connection_sock_af_ops ipv6_specific;
81 #ifdef CONFIG_TCP_MD5SIG
82 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
83 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
84 #else
85 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
86 const struct in6_addr *addr)
87 {
88 return NULL;
89 }
90 #endif
91
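/* Cache the inbound route on the socket: hold the dst and remember the
 * incoming interface index and route cookie so the established fast path
 * can validate and reuse it for later packets.
 */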
92 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
93 {
94 struct dst_entry *dst = skb_dst(skb);
95
96 if (dst) {
97 const struct rt6_info *rt = (const struct rt6_info *)dst;
98
99 dst_hold(dst);
100 sk->sk_rx_dst = dst;
101 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
102 inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
103 }
104 }
105
106 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
107 {
108 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
109 ipv6_hdr(skb)->saddr.s6_addr32,
110 tcp_hdr(skb)->dest,
111 tcp_hdr(skb)->source);
112 }
113
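/* Active open of an IPv6 TCP socket: validate the destination address,
 * fall back to tcp_v4_connect() for v4-mapped addresses, look up a route,
 * pick the source address, choose an initial sequence number and send the
 * SYN via tcp_connect().
 */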
114 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
115 int addr_len)
116 {
117 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
118 struct inet_sock *inet = inet_sk(sk);
119 struct inet_connection_sock *icsk = inet_csk(sk);
120 struct ipv6_pinfo *np = inet6_sk(sk);
121 struct tcp_sock *tp = tcp_sk(sk);
122 struct in6_addr *saddr = NULL, *final_p, final;
123 struct flowi6 fl6;
124 struct dst_entry *dst;
125 int addr_type;
126 int err;
127
128 if (addr_len < SIN6_LEN_RFC2133)
129 return -EINVAL;
130
131 if (usin->sin6_family != AF_INET6)
132 return -EAFNOSUPPORT;
133
134 memset(&fl6, 0, sizeof(fl6));
135
136 if (np->sndflow) {
137 fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
138 IP6_ECN_flow_init(fl6.flowlabel);
139 if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
140 struct ip6_flowlabel *flowlabel;
141 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
142 if (!flowlabel)
143 return -EINVAL;
144 fl6_sock_release(flowlabel);
145 }
146 }
147
148 /*
149 * connect() to INADDR_ANY means loopback (BSD'ism).
150 */
151
152 if (ipv6_addr_any(&usin->sin6_addr))
153 usin->sin6_addr.s6_addr[15] = 0x1;
154
155 addr_type = ipv6_addr_type(&usin->sin6_addr);
156
157 if (addr_type & IPV6_ADDR_MULTICAST)
158 return -ENETUNREACH;
159
160 if (addr_type & IPV6_ADDR_LINKLOCAL) {
161 if (addr_len >= sizeof(struct sockaddr_in6) &&
162 usin->sin6_scope_id) {
163 /* If interface is set while binding, indices
164 * must coincide.
165 */
166 if (sk->sk_bound_dev_if &&
167 sk->sk_bound_dev_if != usin->sin6_scope_id)
168 return -EINVAL;
169
170 sk->sk_bound_dev_if = usin->sin6_scope_id;
171 }
172
173 /* Connecting to a link-local address requires an interface */
174 if (!sk->sk_bound_dev_if)
175 return -EINVAL;
176 }
177
178 if (tp->rx_opt.ts_recent_stamp &&
179 !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
180 tp->rx_opt.ts_recent = 0;
181 tp->rx_opt.ts_recent_stamp = 0;
182 tp->write_seq = 0;
183 }
184
185 sk->sk_v6_daddr = usin->sin6_addr;
186 np->flow_label = fl6.flowlabel;
187
188 /*
189 * TCP over IPv4
190 */
191
192 if (addr_type == IPV6_ADDR_MAPPED) {
193 u32 exthdrlen = icsk->icsk_ext_hdr_len;
194 struct sockaddr_in sin;
195
196 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
197
198 if (__ipv6_only_sock(sk))
199 return -ENETUNREACH;
200
201 sin.sin_family = AF_INET;
202 sin.sin_port = usin->sin6_port;
203 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
204
205 icsk->icsk_af_ops = &ipv6_mapped;
206 sk->sk_backlog_rcv = tcp_v4_do_rcv;
207 #ifdef CONFIG_TCP_MD5SIG
208 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
209 #endif
210
211 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
212
213 if (err) {
214 icsk->icsk_ext_hdr_len = exthdrlen;
215 icsk->icsk_af_ops = &ipv6_specific;
216 sk->sk_backlog_rcv = tcp_v6_do_rcv;
217 #ifdef CONFIG_TCP_MD5SIG
218 tp->af_specific = &tcp_sock_ipv6_specific;
219 #endif
220 goto failure;
221 }
222 np->saddr = sk->sk_v6_rcv_saddr;
223
224 return err;
225 }
226
227 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
228 saddr = &sk->sk_v6_rcv_saddr;
229
230 fl6.flowi6_proto = IPPROTO_TCP;
231 fl6.daddr = sk->sk_v6_daddr;
232 fl6.saddr = saddr ? *saddr : np->saddr;
233 fl6.flowi6_oif = sk->sk_bound_dev_if;
234 fl6.flowi6_mark = sk->sk_mark;
235 fl6.fl6_dport = usin->sin6_port;
236 fl6.fl6_sport = inet->inet_sport;
237
238 final_p = fl6_update_dst(&fl6, np->opt, &final);
239
240 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
241
242 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
243 if (IS_ERR(dst)) {
244 err = PTR_ERR(dst);
245 goto failure;
246 }
247
248 if (!saddr) {
249 saddr = &fl6.saddr;
250 sk->sk_v6_rcv_saddr = *saddr;
251 }
252
253 /* set the source address */
254 np->saddr = *saddr;
255 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
256
257 sk->sk_gso_type = SKB_GSO_TCPV6;
258 __ip6_dst_store(sk, dst, NULL, NULL);
259
260 if (tcp_death_row.sysctl_tw_recycle &&
261 !tp->rx_opt.ts_recent_stamp &&
262 ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
263 tcp_fetch_timewait_stamp(sk, dst);
264
265 icsk->icsk_ext_hdr_len = 0;
266 if (np->opt)
267 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
268 np->opt->opt_nflen);
269
270 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
271
272 inet->inet_dport = usin->sin6_port;
273
274 tcp_set_state(sk, TCP_SYN_SENT);
275 err = inet6_hash_connect(&tcp_death_row, sk);
276 if (err)
277 goto late_failure;
278
279 sk_set_txhash(sk);
280
281 if (!tp->write_seq && likely(!tp->repair))
282 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
283 sk->sk_v6_daddr.s6_addr32,
284 inet->inet_sport,
285 inet->inet_dport);
286
287 err = tcp_connect(sk);
288 if (err)
289 goto late_failure;
290
291 return 0;
292
293 late_failure:
294 tcp_set_state(sk, TCP_CLOSE);
295 __sk_dst_reset(sk);
296 failure:
297 inet->inet_dport = 0;
298 sk->sk_route_caps = 0;
299 return err;
300 }
301
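/* Apply a smaller path MTU learned from an ICMPv6 Packet Too Big message:
 * update the cached route, resync the MSS and retransmit affected segments.
 */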
302 static void tcp_v6_mtu_reduced(struct sock *sk)
303 {
304 struct dst_entry *dst;
305
306 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
307 return;
308
309 dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
310 if (!dst)
311 return;
312
313 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
314 tcp_sync_mss(sk, dst_mtu(dst));
315 tcp_simple_retransmit(sk);
316 }
317 }
318
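/* ICMPv6 error handler for TCP: look up the socket the error refers to and
 * handle redirects, Packet Too Big notifications and hard errors, deferring
 * the work when the socket is owned by user context.
 */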
319 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
320 u8 type, u8 code, int offset, __be32 info)
321 {
322 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
323 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
324 struct net *net = dev_net(skb->dev);
325 struct request_sock *fastopen;
326 struct ipv6_pinfo *np;
327 struct tcp_sock *tp;
328 __u32 seq, snd_una;
329 struct sock *sk;
330 int err;
331
332 sk = __inet6_lookup_established(net, &tcp_hashinfo,
333 &hdr->daddr, th->dest,
334 &hdr->saddr, ntohs(th->source),
335 skb->dev->ifindex);
336
337 if (!sk) {
338 ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
339 ICMP6_MIB_INERRORS);
340 return;
341 }
342
343 if (sk->sk_state == TCP_TIME_WAIT) {
344 inet_twsk_put(inet_twsk(sk));
345 return;
346 }
347 seq = ntohl(th->seq);
348 if (sk->sk_state == TCP_NEW_SYN_RECV)
349 return tcp_req_err(sk, seq);
350
351 bh_lock_sock(sk);
352 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
353 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
354
355 if (sk->sk_state == TCP_CLOSE)
356 goto out;
357
358 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
359 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
360 goto out;
361 }
362
363 tp = tcp_sk(sk);
364 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
365 fastopen = tp->fastopen_rsk;
366 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
367 if (sk->sk_state != TCP_LISTEN &&
368 !between(seq, snd_una, tp->snd_nxt)) {
369 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
370 goto out;
371 }
372
373 np = inet6_sk(sk);
374
375 if (type == NDISC_REDIRECT) {
376 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
377
378 if (dst)
379 dst->ops->redirect(dst, sk, skb);
380 goto out;
381 }
382
383 if (type == ICMPV6_PKT_TOOBIG) {
384 /* We are not interested in TCP_LISTEN and open_requests
385 * (SYN-ACKs sent out by Linux are always < 576 bytes so
386 * they should go through unfragmented).
387 */
388 if (sk->sk_state == TCP_LISTEN)
389 goto out;
390
391 if (!ip6_sk_accept_pmtu(sk))
392 goto out;
393
394 tp->mtu_info = ntohl(info);
395 if (!sock_owned_by_user(sk))
396 tcp_v6_mtu_reduced(sk);
397 else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
398 &tp->tsq_flags))
399 sock_hold(sk);
400 goto out;
401 }
402
403 icmpv6_err_convert(type, code, &err);
404
405 /* Might be for a request_sock */
406 switch (sk->sk_state) {
407 case TCP_SYN_SENT:
408 case TCP_SYN_RECV:
409 /* Only in fast or simultaneous open. If a fast open socket is
410 * already accepted it is treated as a connected one below.
411 */
412 if (fastopen && !fastopen->sk)
413 break;
414
415 if (!sock_owned_by_user(sk)) {
416 sk->sk_err = err;
417 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
418
419 tcp_done(sk);
420 } else
421 sk->sk_err_soft = err;
422 goto out;
423 }
424
425 if (!sock_owned_by_user(sk) && np->recverr) {
426 sk->sk_err = err;
427 sk->sk_error_report(sk);
428 } else
429 sk->sk_err_soft = err;
430
431 out:
432 bh_unlock_sock(sk);
433 sock_put(sk);
434 }
435
436
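/* Build and transmit a SYN-ACK for a pending connection request, routing
 * it via inet6_csk_route_req() when no destination entry is supplied.
 */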
437 static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
438 struct flowi *fl,
439 struct request_sock *req,
440 u16 queue_mapping,
441 struct tcp_fastopen_cookie *foc)
442 {
443 struct inet_request_sock *ireq = inet_rsk(req);
444 struct ipv6_pinfo *np = inet6_sk(sk);
445 struct flowi6 *fl6 = &fl->u.ip6;
446 struct sk_buff *skb;
447 int err = -ENOMEM;
448
449 /* First, grab a route. */
450 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
451 IPPROTO_TCP)) == NULL)
452 goto done;
453
454 skb = tcp_make_synack(sk, dst, req, foc);
455
456 if (skb) {
457 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
458 &ireq->ir_v6_rmt_addr);
459
460 fl6->daddr = ireq->ir_v6_rmt_addr;
461 if (np->repflow && ireq->pktopts)
462 fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
463
464 skb_set_queue_mapping(skb, queue_mapping);
465 err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
466 err = net_xmit_eval(err);
467 }
468
469 done:
470 return err;
471 }
472
473
474 static void tcp_v6_reqsk_destructor(struct request_sock *req)
475 {
476 kfree_skb(inet_rsk(req)->pktopts);
477 }
478
479 #ifdef CONFIG_TCP_MD5SIG
480 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
481 const struct in6_addr *addr)
482 {
483 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
484 }
485
486 static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
487 const struct sock *addr_sk)
488 {
489 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
490 }
491
492 static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
493 int optlen)
494 {
495 struct tcp_md5sig cmd;
496 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
497
498 if (optlen < sizeof(cmd))
499 return -EINVAL;
500
501 if (copy_from_user(&cmd, optval, sizeof(cmd)))
502 return -EFAULT;
503
504 if (sin6->sin6_family != AF_INET6)
505 return -EINVAL;
506
507 if (!cmd.tcpm_keylen) {
508 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
509 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
510 AF_INET);
511 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
512 AF_INET6);
513 }
514
515 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
516 return -EINVAL;
517
518 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
519 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
520 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
521
522 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
523 AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
524 }
525
526 static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
527 const struct in6_addr *daddr,
528 const struct in6_addr *saddr, int nbytes)
529 {
530 struct tcp6_pseudohdr *bp;
531 struct scatterlist sg;
532
533 bp = &hp->md5_blk.ip6;
534 /* 1. TCP pseudo-header (RFC2460) */
535 bp->saddr = *saddr;
536 bp->daddr = *daddr;
537 bp->protocol = cpu_to_be32(IPPROTO_TCP);
538 bp->len = cpu_to_be32(nbytes);
539
540 sg_init_one(&sg, bp, sizeof(*bp));
541 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
542 }
543
544 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
545 const struct in6_addr *daddr, struct in6_addr *saddr,
546 const struct tcphdr *th)
547 {
548 struct tcp_md5sig_pool *hp;
549 struct hash_desc *desc;
550
551 hp = tcp_get_md5sig_pool();
552 if (!hp)
553 goto clear_hash_noput;
554 desc = &hp->md5_desc;
555
556 if (crypto_hash_init(desc))
557 goto clear_hash;
558 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
559 goto clear_hash;
560 if (tcp_md5_hash_header(hp, th))
561 goto clear_hash;
562 if (tcp_md5_hash_key(hp, key))
563 goto clear_hash;
564 if (crypto_hash_final(desc, md5_hash))
565 goto clear_hash;
566
567 tcp_put_md5sig_pool();
568 return 0;
569
570 clear_hash:
571 tcp_put_md5sig_pool();
572 clear_hash_noput:
573 memset(md5_hash, 0, 16);
574 return 1;
575 }
576
577 static int tcp_v6_md5_hash_skb(char *md5_hash,
578 const struct tcp_md5sig_key *key,
579 const struct sock *sk,
580 const struct sk_buff *skb)
581 {
582 const struct in6_addr *saddr, *daddr;
583 struct tcp_md5sig_pool *hp;
584 struct hash_desc *desc;
585 const struct tcphdr *th = tcp_hdr(skb);
586
587 if (sk) { /* valid for establish/request sockets */
588 saddr = &sk->sk_v6_rcv_saddr;
589 daddr = &sk->sk_v6_daddr;
590 } else {
591 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
592 saddr = &ip6h->saddr;
593 daddr = &ip6h->daddr;
594 }
595
596 hp = tcp_get_md5sig_pool();
597 if (!hp)
598 goto clear_hash_noput;
599 desc = &hp->md5_desc;
600
601 if (crypto_hash_init(desc))
602 goto clear_hash;
603
604 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
605 goto clear_hash;
606 if (tcp_md5_hash_header(hp, th))
607 goto clear_hash;
608 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
609 goto clear_hash;
610 if (tcp_md5_hash_key(hp, key))
611 goto clear_hash;
612 if (crypto_hash_final(desc, md5_hash))
613 goto clear_hash;
614
615 tcp_put_md5sig_pool();
616 return 0;
617
618 clear_hash:
619 tcp_put_md5sig_pool();
620 clear_hash_noput:
621 memset(md5_hash, 0, 16);
622 return 1;
623 }
624
625 #endif
626
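/* Verify the TCP MD5 signature option of an incoming segment against the
 * key configured for the peer address; returns true when the segment must
 * be dropped.
 */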
627 static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
628 const struct sk_buff *skb)
629 {
630 #ifdef CONFIG_TCP_MD5SIG
631 const __u8 *hash_location = NULL;
632 struct tcp_md5sig_key *hash_expected;
633 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
634 const struct tcphdr *th = tcp_hdr(skb);
635 int genhash;
636 u8 newhash[16];
637
638 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
639 hash_location = tcp_parse_md5sig_option(th);
640
641 /* We've parsed the options - do we have a hash? */
642 if (!hash_expected && !hash_location)
643 return false;
644
645 if (hash_expected && !hash_location) {
646 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
647 return true;
648 }
649
650 if (!hash_expected && hash_location) {
651 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
652 return true;
653 }
654
655 /* check the signature */
656 genhash = tcp_v6_md5_hash_skb(newhash,
657 hash_expected,
658 NULL, skb);
659
660 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
661 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
662 genhash ? "failed" : "mismatch",
663 &ip6h->saddr, ntohs(th->source),
664 &ip6h->daddr, ntohs(th->dest));
665 return true;
666 }
667 #endif
668 return false;
669 }
670
671 static void tcp_v6_init_req(struct request_sock *req,
672 const struct sock *sk_listener,
673 struct sk_buff *skb)
674 {
675 struct inet_request_sock *ireq = inet_rsk(req);
676 const struct ipv6_pinfo *np = inet6_sk(sk_listener);
677
678 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
679 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
680
681 /* So that link locals have meaning */
682 if (!sk_listener->sk_bound_dev_if &&
683 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
684 ireq->ir_iif = tcp_v6_iif(skb);
685
686 if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
687 (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
688 np->rxopt.bits.rxinfo ||
689 np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
690 np->rxopt.bits.rxohlim || np->repflow)) {
691 atomic_inc(&skb->users);
692 ireq->pktopts = skb;
693 }
694 }
695
696 static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
697 struct flowi *fl,
698 const struct request_sock *req,
699 bool *strict)
700 {
701 if (strict)
702 *strict = true;
703 return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
704 }
705
706 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
707 .family = AF_INET6,
708 .obj_size = sizeof(struct tcp6_request_sock),
709 .rtx_syn_ack = tcp_rtx_synack,
710 .send_ack = tcp_v6_reqsk_send_ack,
711 .destructor = tcp_v6_reqsk_destructor,
712 .send_reset = tcp_v6_send_reset,
713 .syn_ack_timeout = tcp_syn_ack_timeout,
714 };
715
716 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
717 .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
718 sizeof(struct ipv6hdr),
719 #ifdef CONFIG_TCP_MD5SIG
720 .req_md5_lookup = tcp_v6_md5_lookup,
721 .calc_md5_hash = tcp_v6_md5_hash_skb,
722 #endif
723 .init_req = tcp_v6_init_req,
724 #ifdef CONFIG_SYN_COOKIES
725 .cookie_init_seq = cookie_v6_init_sequence,
726 #endif
727 .route_req = tcp_v6_route_req,
728 .init_seq = tcp_v6_init_sequence,
729 .send_synack = tcp_v6_send_synack,
730 .queue_hash_add = inet6_csk_reqsk_queue_hash_add,
731 };
732
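/* Construct and send a bare ACK or RST on the per-netns control socket,
 * echoing the addresses of the received segment and optionally adding
 * timestamp and MD5 signature options.
 */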
733 static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
734 u32 ack, u32 win, u32 tsval, u32 tsecr,
735 int oif, struct tcp_md5sig_key *key, int rst,
736 u8 tclass, u32 label)
737 {
738 const struct tcphdr *th = tcp_hdr(skb);
739 struct tcphdr *t1;
740 struct sk_buff *buff;
741 struct flowi6 fl6;
742 struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
743 struct sock *ctl_sk = net->ipv6.tcp_sk;
744 unsigned int tot_len = sizeof(struct tcphdr);
745 struct dst_entry *dst;
746 __be32 *topt;
747
748 if (tsecr)
749 tot_len += TCPOLEN_TSTAMP_ALIGNED;
750 #ifdef CONFIG_TCP_MD5SIG
751 if (key)
752 tot_len += TCPOLEN_MD5SIG_ALIGNED;
753 #endif
754
755 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
756 GFP_ATOMIC);
757 if (!buff)
758 return;
759
760 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
761
762 t1 = (struct tcphdr *) skb_push(buff, tot_len);
763 skb_reset_transport_header(buff);
764
765 /* Swap the send and the receive. */
766 memset(t1, 0, sizeof(*t1));
767 t1->dest = th->source;
768 t1->source = th->dest;
769 t1->doff = tot_len / 4;
770 t1->seq = htonl(seq);
771 t1->ack_seq = htonl(ack);
772 t1->ack = !rst || !th->ack;
773 t1->rst = rst;
774 t1->window = htons(win);
775
776 topt = (__be32 *)(t1 + 1);
777
778 if (tsecr) {
779 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
780 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
781 *topt++ = htonl(tsval);
782 *topt++ = htonl(tsecr);
783 }
784
785 #ifdef CONFIG_TCP_MD5SIG
786 if (key) {
787 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
788 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
789 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
790 &ipv6_hdr(skb)->saddr,
791 &ipv6_hdr(skb)->daddr, t1);
792 }
793 #endif
794
795 memset(&fl6, 0, sizeof(fl6));
796 fl6.daddr = ipv6_hdr(skb)->saddr;
797 fl6.saddr = ipv6_hdr(skb)->daddr;
798 fl6.flowlabel = label;
799
800 buff->ip_summed = CHECKSUM_PARTIAL;
801 buff->csum = 0;
802
803 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
804
805 fl6.flowi6_proto = IPPROTO_TCP;
806 if (rt6_need_strict(&fl6.daddr) && !oif)
807 fl6.flowi6_oif = tcp_v6_iif(skb);
808 else
809 fl6.flowi6_oif = oif;
810 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
811 fl6.fl6_dport = t1->dest;
812 fl6.fl6_sport = t1->source;
813 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
814
815 /* Pass a socket to ip6_dst_lookup even when it is for an RST;
816 * the underlying function will use it to retrieve the network
817 * namespace.
818 */
819 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
820 if (!IS_ERR(dst)) {
821 skb_dst_set(buff, dst);
822 ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
823 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
824 if (rst)
825 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
826 return;
827 }
828
829 kfree_skb(buff);
830 }
831
832 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
833 {
834 const struct tcphdr *th = tcp_hdr(skb);
835 u32 seq = 0, ack_seq = 0;
836 struct tcp_md5sig_key *key = NULL;
837 #ifdef CONFIG_TCP_MD5SIG
838 const __u8 *hash_location = NULL;
839 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
840 unsigned char newhash[16];
841 int genhash;
842 struct sock *sk1 = NULL;
843 #endif
844 int oif;
845
846 if (th->rst)
847 return;
848
849 /* If sk is not NULL, it means we did a successful lookup and the incoming
850 * route had to be correct. prequeue might have dropped our dst.
851 */
852 if (!sk && !ipv6_unicast_destination(skb))
853 return;
854
855 #ifdef CONFIG_TCP_MD5SIG
856 hash_location = tcp_parse_md5sig_option(th);
857 if (!sk && hash_location) {
858 /*
859 * The active side is lost. Try to find the listening socket through
860 * the source port, and then find the md5 key through that socket.
861 * We do not lose any security here:
862 * the incoming packet is checked against the md5 hash of the found key;
863 * no RST is generated if the md5 hash doesn't match.
864 */
865 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
866 &tcp_hashinfo, &ipv6h->saddr,
867 th->source, &ipv6h->daddr,
868 ntohs(th->source), tcp_v6_iif(skb));
869 if (!sk1)
870 return;
871
872 rcu_read_lock();
873 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
874 if (!key)
875 goto release_sk1;
876
877 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
878 if (genhash || memcmp(hash_location, newhash, 16) != 0)
879 goto release_sk1;
880 } else {
881 key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
882 }
883 #endif
884
885 if (th->ack)
886 seq = ntohl(th->ack_seq);
887 else
888 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
889 (th->doff << 2);
890
891 oif = sk ? sk->sk_bound_dev_if : 0;
892 tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
893
894 #ifdef CONFIG_TCP_MD5SIG
895 release_sk1:
896 if (sk1) {
897 rcu_read_unlock();
898 sock_put(sk1);
899 }
900 #endif
901 }
902
903 static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
904 u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
905 struct tcp_md5sig_key *key, u8 tclass,
906 u32 label)
907 {
908 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
909 tclass, label);
910 }
911
912 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
913 {
914 struct inet_timewait_sock *tw = inet_twsk(sk);
915 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
916
917 tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
918 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
919 tcp_time_stamp + tcptw->tw_ts_offset,
920 tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
921 tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
922
923 inet_twsk_put(tw);
924 }
925
926 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
927 struct request_sock *req)
928 {
929 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
930 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
931 */
932 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
933 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
934 tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
935 tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
936 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
937 0, 0);
938 }
939
940
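/* For a segment hitting a listening socket, look for a matching pending
 * request or an already established child socket; fall back to SYN cookies
 * when enabled.
 */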
941 static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
942 {
943 const struct tcphdr *th = tcp_hdr(skb);
944 struct request_sock *req;
945 struct sock *nsk;
946
947 /* Find possible connection requests. */
948 req = inet6_csk_search_req(sk, th->source,
949 &ipv6_hdr(skb)->saddr,
950 &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
951 if (req) {
952 nsk = tcp_check_req(sk, skb, req, false);
953 if (!nsk || nsk == sk)
954 reqsk_put(req);
955 return nsk;
956 }
957 nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
958 &ipv6_hdr(skb)->saddr, th->source,
959 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
960 tcp_v6_iif(skb));
961
962 if (nsk) {
963 if (nsk->sk_state != TCP_TIME_WAIT) {
964 bh_lock_sock(nsk);
965 return nsk;
966 }
967 inet_twsk_put(inet_twsk(nsk));
968 return NULL;
969 }
970
971 #ifdef CONFIG_SYN_COOKIES
972 if (!th->syn)
973 sk = cookie_v6_check(sk, skb);
974 #endif
975 return sk;
976 }
977
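/* Handle an incoming SYN on a listening socket, delegating v4-mapped
 * traffic to tcp_v4_conn_request() and dropping non-unicast destinations.
 */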
978 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
979 {
980 if (skb->protocol == htons(ETH_P_IP))
981 return tcp_v4_conn_request(sk, skb);
982
983 if (!ipv6_unicast_destination(skb))
984 goto drop;
985
986 return tcp_conn_request(&tcp6_request_sock_ops,
987 &tcp_request_sock_ipv6_ops, sk, skb);
988
989 drop:
990 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
991 return 0; /* don't send reset */
992 }
993
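/* Create the child socket once the 3-way handshake completes, handling
 * both native IPv6 and v4-mapped connections and copying IPv6 options and
 * MD5 keys from the listener.
 */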
994 static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
995 struct request_sock *req,
996 struct dst_entry *dst)
997 {
998 struct inet_request_sock *ireq;
999 struct ipv6_pinfo *newnp;
1000 const struct ipv6_pinfo *np = inet6_sk(sk);
1001 struct tcp6_sock *newtcp6sk;
1002 struct inet_sock *newinet;
1003 struct tcp_sock *newtp;
1004 struct sock *newsk;
1005 #ifdef CONFIG_TCP_MD5SIG
1006 struct tcp_md5sig_key *key;
1007 #endif
1008 struct flowi6 fl6;
1009
1010 if (skb->protocol == htons(ETH_P_IP)) {
1011 /*
1012 * v6 mapped
1013 */
1014
1015 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1016
1017 if (!newsk)
1018 return NULL;
1019
1020 newtcp6sk = (struct tcp6_sock *)newsk;
1021 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1022
1023 newinet = inet_sk(newsk);
1024 newnp = inet6_sk(newsk);
1025 newtp = tcp_sk(newsk);
1026
1027 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1028
1029 newnp->saddr = newsk->sk_v6_rcv_saddr;
1030
1031 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1032 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1033 #ifdef CONFIG_TCP_MD5SIG
1034 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1035 #endif
1036
1037 newnp->ipv6_ac_list = NULL;
1038 newnp->ipv6_fl_list = NULL;
1039 newnp->pktoptions = NULL;
1040 newnp->opt = NULL;
1041 newnp->mcast_oif = tcp_v6_iif(skb);
1042 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1043 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1044 if (np->repflow)
1045 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1046
1047 /*
1048 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1049 * here, tcp_create_openreq_child now does this for us, see the comment in
1050 * that function for the gory details. -acme
1051 */
1052
1053 /* This is a tricky place. Until this moment the IPv4 tcp socket
1054 worked with the IPv6 icsk.icsk_af_ops.
1055 Sync it now.
1056 */
1057 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1058
1059 return newsk;
1060 }
1061
1062 ireq = inet_rsk(req);
1063
1064 if (sk_acceptq_is_full(sk))
1065 goto out_overflow;
1066
1067 if (!dst) {
1068 dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
1069 if (!dst)
1070 goto out;
1071 }
1072
1073 newsk = tcp_create_openreq_child(sk, req, skb);
1074 if (!newsk)
1075 goto out_nonewsk;
1076
1077 /*
1078 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1079 * count here, tcp_create_openreq_child now does this for us, see the
1080 * comment in that function for the gory details. -acme
1081 */
1082
1083 newsk->sk_gso_type = SKB_GSO_TCPV6;
1084 __ip6_dst_store(newsk, dst, NULL, NULL);
1085 inet6_sk_rx_dst_set(newsk, skb);
1086
1087 newtcp6sk = (struct tcp6_sock *)newsk;
1088 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1089
1090 newtp = tcp_sk(newsk);
1091 newinet = inet_sk(newsk);
1092 newnp = inet6_sk(newsk);
1093
1094 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1095
1096 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1097 newnp->saddr = ireq->ir_v6_loc_addr;
1098 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1099 newsk->sk_bound_dev_if = ireq->ir_iif;
1100
1101 /* Now IPv6 options...
1102
1103 First: no IPv4 options.
1104 */
1105 newinet->inet_opt = NULL;
1106 newnp->ipv6_ac_list = NULL;
1107 newnp->ipv6_fl_list = NULL;
1108
1109 /* Clone RX bits */
1110 newnp->rxopt.all = np->rxopt.all;
1111
1112 /* Clone pktoptions received with SYN */
1113 newnp->pktoptions = NULL;
1114 if (ireq->pktopts) {
1115 newnp->pktoptions = skb_clone(ireq->pktopts,
1116 sk_gfp_atomic(sk, GFP_ATOMIC));
1117 consume_skb(ireq->pktopts);
1118 ireq->pktopts = NULL;
1119 if (newnp->pktoptions)
1120 skb_set_owner_r(newnp->pktoptions, newsk);
1121 }
1122 newnp->opt = NULL;
1123 newnp->mcast_oif = tcp_v6_iif(skb);
1124 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1125 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1126 if (np->repflow)
1127 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1128
1129 /* Clone native IPv6 options from listening socket (if any)
1130
1131 Yes, keeping a reference count would be much more clever,
1132 but we do one more thing here: reattach optmem
1133 to newsk.
1134 */
1135 if (np->opt)
1136 newnp->opt = ipv6_dup_options(newsk, np->opt);
1137
1138 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1139 if (newnp->opt)
1140 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1141 newnp->opt->opt_flen);
1142
1143 tcp_ca_openreq_child(newsk, dst);
1144
1145 tcp_sync_mss(newsk, dst_mtu(dst));
1146 newtp->advmss = dst_metric_advmss(dst);
1147 if (tcp_sk(sk)->rx_opt.user_mss &&
1148 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1149 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1150
1151 tcp_initialize_rcv_mss(newsk);
1152
1153 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1154 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1155
1156 #ifdef CONFIG_TCP_MD5SIG
1157 /* Copy over the MD5 key from the original socket */
1158 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
1159 if (key) {
1160 /* We're using one, so create a matching key
1161 * on the newsk structure. If we fail to get
1162 * memory, then we end up not copying the key
1163 * across. Shucks.
1164 */
1165 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1166 AF_INET6, key->key, key->keylen,
1167 sk_gfp_atomic(sk, GFP_ATOMIC));
1168 }
1169 #endif
1170
1171 if (__inet_inherit_port(sk, newsk) < 0) {
1172 inet_csk_prepare_forced_close(newsk);
1173 tcp_done(newsk);
1174 goto out;
1175 }
1176 __inet_hash(newsk, NULL);
1177
1178 return newsk;
1179
1180 out_overflow:
1181 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1182 out_nonewsk:
1183 dst_release(dst);
1184 out:
1185 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1186 return NULL;
1187 }
1188
1189 /* The socket must have its spinlock held when we get
1190 * here.
1191 *
1192 * We have a potential double-lock case here, so even when
1193 * doing backlog processing we use the BH locking scheme.
1194 * This is because we cannot sleep with the original spinlock
1195 * held.
1196 */
1197 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1198 {
1199 struct ipv6_pinfo *np = inet6_sk(sk);
1200 struct tcp_sock *tp;
1201 struct sk_buff *opt_skb = NULL;
1202
1203 /* Imagine: socket is IPv6. IPv4 packet arrives,
1204 goes to the IPv4 receive handler and is backlogged.
1205 From the backlog it always goes here. Kerboom...
1206 Fortunately, tcp_rcv_established and rcv_established
1207 handle them correctly, but that is not the case with
1208 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1209 */
1210
1211 if (skb->protocol == htons(ETH_P_IP))
1212 return tcp_v4_do_rcv(sk, skb);
1213
1214 if (sk_filter(sk, skb))
1215 goto discard;
1216
1217 /*
1218 * socket locking is here for SMP purposes as backlog rcv
1219 * is currently called with bh processing disabled.
1220 */
1221
1222 /* Do Stevens' IPV6_PKTOPTIONS.
1223
1224 Yes, guys, it is the only place in our code where we
1225 can do it without affecting IPv4.
1226 The rest of the code is protocol independent,
1227 and I do not like the idea of uglifying IPv4.
1228
1229 Actually, the whole idea behind IPV6_PKTOPTIONS
1230 does not look very well thought out. For now we latch
1231 the options received in the last packet enqueued
1232 by tcp. Feel free to propose a better solution.
1233 --ANK (980728)
1234 */
1235 if (np->rxopt.all)
1236 opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));
1237
1238 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1239 struct dst_entry *dst = sk->sk_rx_dst;
1240
1241 sock_rps_save_rxhash(sk, skb);
1242 sk_mark_napi_id(sk, skb);
1243 if (dst) {
1244 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1245 dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1246 dst_release(dst);
1247 sk->sk_rx_dst = NULL;
1248 }
1249 }
1250
1251 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1252 if (opt_skb)
1253 goto ipv6_pktoptions;
1254 return 0;
1255 }
1256
1257 if (tcp_checksum_complete(skb))
1258 goto csum_err;
1259
1260 if (sk->sk_state == TCP_LISTEN) {
1261 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1262 if (!nsk)
1263 goto discard;
1264
1265 /*
1266 * Queue it on the new socket if the new socket is active,
1267 * otherwise we just short-circuit this and continue with
1268 * the new socket.
1269 */
1270 if (nsk != sk) {
1271 sock_rps_save_rxhash(nsk, skb);
1272 sk_mark_napi_id(nsk, skb);
1273 if (tcp_child_process(sk, nsk, skb))
1274 goto reset;
1275 if (opt_skb)
1276 __kfree_skb(opt_skb);
1277 return 0;
1278 }
1279 } else
1280 sock_rps_save_rxhash(sk, skb);
1281
1282 if (tcp_rcv_state_process(sk, skb))
1283 goto reset;
1284 if (opt_skb)
1285 goto ipv6_pktoptions;
1286 return 0;
1287
1288 reset:
1289 tcp_v6_send_reset(sk, skb);
1290 discard:
1291 if (opt_skb)
1292 __kfree_skb(opt_skb);
1293 kfree_skb(skb);
1294 return 0;
1295 csum_err:
1296 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1297 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1298 goto discard;
1299
1300
1301 ipv6_pktoptions:
1302 /* Do you ask, what is it?
1303
1304 1. skb was enqueued by tcp.
1305 2. skb is added to tail of read queue, rather than out of order.
1306 3. socket is not in passive state.
1307 4. Finally, it really contains options, which user wants to receive.
1308 */
1309 tp = tcp_sk(sk);
1310 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1311 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1312 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1313 np->mcast_oif = tcp_v6_iif(opt_skb);
1314 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1315 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1316 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1317 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1318 if (np->repflow)
1319 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1320 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1321 skb_set_owner_r(opt_skb, sk);
1322 opt_skb = xchg(&np->pktoptions, opt_skb);
1323 } else {
1324 __kfree_skb(opt_skb);
1325 opt_skb = xchg(&np->pktoptions, NULL);
1326 }
1327 }
1328
1329 kfree_skb(opt_skb);
1330 return 0;
1331 }
1332
1333 static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1334 const struct tcphdr *th)
1335 {
1336 /* This is tricky: we move IP6CB to its correct location inside
1337 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1338 * _decode_session6() uses IP6CB().
1339 * barrier() makes sure compiler won't play aliasing games.
1340 */
1341 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1342 sizeof(struct inet6_skb_parm));
1343 barrier();
1344
1345 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1346 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1347 skb->len - th->doff*4);
1348 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1349 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1350 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1351 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1352 TCP_SKB_CB(skb)->sacked = 0;
1353 }
1354
1355 static void tcp_v6_restore_cb(struct sk_buff *skb)
1356 {
1357 /* We need to move header back to the beginning if xfrm6_policy_check()
1358 * and tcp_v6_fill_cb() are going to be called again.
1359 */
1360 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1361 sizeof(struct inet6_skb_parm));
1362 }
1363
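/* Main IPv6 TCP receive handler: validate the header and checksum, look up
 * the owning socket and either process the segment directly, queue it to
 * the prequeue/backlog, or answer with a reset.
 */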
1364 static int tcp_v6_rcv(struct sk_buff *skb)
1365 {
1366 const struct tcphdr *th;
1367 const struct ipv6hdr *hdr;
1368 struct sock *sk;
1369 int ret;
1370 struct net *net = dev_net(skb->dev);
1371
1372 if (skb->pkt_type != PACKET_HOST)
1373 goto discard_it;
1374
1375 /*
1376 * Count it even if it's bad.
1377 */
1378 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1379
1380 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1381 goto discard_it;
1382
1383 th = tcp_hdr(skb);
1384
1385 if (th->doff < sizeof(struct tcphdr)/4)
1386 goto bad_packet;
1387 if (!pskb_may_pull(skb, th->doff*4))
1388 goto discard_it;
1389
1390 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1391 goto csum_error;
1392
1393 th = tcp_hdr(skb);
1394 hdr = ipv6_hdr(skb);
1395
1396 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
1397 inet6_iif(skb));
1398 if (!sk)
1399 goto no_tcp_socket;
1400
1401 process:
1402 if (sk->sk_state == TCP_TIME_WAIT)
1403 goto do_time_wait;
1404
1405 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1406 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1407 goto discard_and_relse;
1408 }
1409
1410 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1411 goto discard_and_relse;
1412
1413 tcp_v6_fill_cb(skb, hdr, th);
1414
1415 if (tcp_v6_inbound_md5_hash(sk, skb))
1416 goto discard_and_relse;
1417
1418 if (sk_filter(sk, skb))
1419 goto discard_and_relse;
1420
1421 sk_incoming_cpu_update(sk);
1422 skb->dev = NULL;
1423
1424 bh_lock_sock_nested(sk);
1425 tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
1426 ret = 0;
1427 if (!sock_owned_by_user(sk)) {
1428 if (!tcp_prequeue(sk, skb))
1429 ret = tcp_v6_do_rcv(sk, skb);
1430 } else if (unlikely(sk_add_backlog(sk, skb,
1431 sk->sk_rcvbuf + sk->sk_sndbuf))) {
1432 bh_unlock_sock(sk);
1433 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1434 goto discard_and_relse;
1435 }
1436 bh_unlock_sock(sk);
1437
1438 sock_put(sk);
1439 return ret ? -1 : 0;
1440
1441 no_tcp_socket:
1442 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1443 goto discard_it;
1444
1445 tcp_v6_fill_cb(skb, hdr, th);
1446
1447 if (tcp_checksum_complete(skb)) {
1448 csum_error:
1449 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1450 bad_packet:
1451 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1452 } else {
1453 tcp_v6_send_reset(NULL, skb);
1454 }
1455
1456 discard_it:
1457 kfree_skb(skb);
1458 return 0;
1459
1460 discard_and_relse:
1461 sock_put(sk);
1462 goto discard_it;
1463
1464 do_time_wait:
1465 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1466 inet_twsk_put(inet_twsk(sk));
1467 goto discard_it;
1468 }
1469
1470 tcp_v6_fill_cb(skb, hdr, th);
1471
1472 if (tcp_checksum_complete(skb)) {
1473 inet_twsk_put(inet_twsk(sk));
1474 goto csum_error;
1475 }
1476
1477 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1478 case TCP_TW_SYN:
1479 {
1480 struct sock *sk2;
1481
1482 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1483 &ipv6_hdr(skb)->saddr, th->source,
1484 &ipv6_hdr(skb)->daddr,
1485 ntohs(th->dest), tcp_v6_iif(skb));
1486 if (sk2) {
1487 struct inet_timewait_sock *tw = inet_twsk(sk);
1488 inet_twsk_deschedule_put(tw);
1489 sk = sk2;
1490 tcp_v6_restore_cb(skb);
1491 goto process;
1492 }
1493 /* Fall through to ACK */
1494 }
1495 case TCP_TW_ACK:
1496 tcp_v6_timewait_ack(sk, skb);
1497 break;
1498 case TCP_TW_RST:
1499 tcp_v6_restore_cb(skb);
1500 goto no_tcp_socket;
1501 case TCP_TW_SUCCESS:
1502 ;
1503 }
1504 goto discard_it;
1505 }
1506
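/* Early demux: match the packet to an established socket before routing so
 * the socket's cached rx dst can be used for the input path.
 */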
1507 static void tcp_v6_early_demux(struct sk_buff *skb)
1508 {
1509 const struct ipv6hdr *hdr;
1510 const struct tcphdr *th;
1511 struct sock *sk;
1512
1513 if (skb->pkt_type != PACKET_HOST)
1514 return;
1515
1516 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1517 return;
1518
1519 hdr = ipv6_hdr(skb);
1520 th = tcp_hdr(skb);
1521
1522 if (th->doff < sizeof(struct tcphdr) / 4)
1523 return;
1524
1525 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
1526 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1527 &hdr->saddr, th->source,
1528 &hdr->daddr, ntohs(th->dest),
1529 inet6_iif(skb));
1530 if (sk) {
1531 skb->sk = sk;
1532 skb->destructor = sock_edemux;
1533 if (sk_fullsock(sk)) {
1534 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1535
1536 if (dst)
1537 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1538 if (dst &&
1539 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1540 skb_dst_set_noref(skb, dst);
1541 }
1542 }
1543 }
1544
1545 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1546 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1547 .twsk_unique = tcp_twsk_unique,
1548 .twsk_destructor = tcp_twsk_destructor,
1549 };
1550
1551 static const struct inet_connection_sock_af_ops ipv6_specific = {
1552 .queue_xmit = inet6_csk_xmit,
1553 .send_check = tcp_v6_send_check,
1554 .rebuild_header = inet6_sk_rebuild_header,
1555 .sk_rx_dst_set = inet6_sk_rx_dst_set,
1556 .conn_request = tcp_v6_conn_request,
1557 .syn_recv_sock = tcp_v6_syn_recv_sock,
1558 .net_header_len = sizeof(struct ipv6hdr),
1559 .net_frag_header_len = sizeof(struct frag_hdr),
1560 .setsockopt = ipv6_setsockopt,
1561 .getsockopt = ipv6_getsockopt,
1562 .addr2sockaddr = inet6_csk_addr2sockaddr,
1563 .sockaddr_len = sizeof(struct sockaddr_in6),
1564 .bind_conflict = inet6_csk_bind_conflict,
1565 #ifdef CONFIG_COMPAT
1566 .compat_setsockopt = compat_ipv6_setsockopt,
1567 .compat_getsockopt = compat_ipv6_getsockopt,
1568 #endif
1569 .mtu_reduced = tcp_v6_mtu_reduced,
1570 };
1571
1572 #ifdef CONFIG_TCP_MD5SIG
1573 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1574 .md5_lookup = tcp_v6_md5_lookup,
1575 .calc_md5_hash = tcp_v6_md5_hash_skb,
1576 .md5_parse = tcp_v6_parse_md5_keys,
1577 };
1578 #endif
1579
1580 /*
1581 * TCP over IPv4 via INET6 API
1582 */
1583 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1584 .queue_xmit = ip_queue_xmit,
1585 .send_check = tcp_v4_send_check,
1586 .rebuild_header = inet_sk_rebuild_header,
1587 .sk_rx_dst_set = inet_sk_rx_dst_set,
1588 .conn_request = tcp_v6_conn_request,
1589 .syn_recv_sock = tcp_v6_syn_recv_sock,
1590 .net_header_len = sizeof(struct iphdr),
1591 .setsockopt = ipv6_setsockopt,
1592 .getsockopt = ipv6_getsockopt,
1593 .addr2sockaddr = inet6_csk_addr2sockaddr,
1594 .sockaddr_len = sizeof(struct sockaddr_in6),
1595 .bind_conflict = inet6_csk_bind_conflict,
1596 #ifdef CONFIG_COMPAT
1597 .compat_setsockopt = compat_ipv6_setsockopt,
1598 .compat_getsockopt = compat_ipv6_getsockopt,
1599 #endif
1600 .mtu_reduced = tcp_v4_mtu_reduced,
1601 };
1602
1603 #ifdef CONFIG_TCP_MD5SIG
1604 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1605 .md5_lookup = tcp_v4_md5_lookup,
1606 .calc_md5_hash = tcp_v4_md5_hash_skb,
1607 .md5_parse = tcp_v6_parse_md5_keys,
1608 };
1609 #endif
1610
1611 /* NOTE: A lot of things are set to zero explicitly by the call to
1612 * sk_alloc(), so they need not be done here.
1613 */
1614 static int tcp_v6_init_sock(struct sock *sk)
1615 {
1616 struct inet_connection_sock *icsk = inet_csk(sk);
1617
1618 tcp_init_sock(sk);
1619
1620 icsk->icsk_af_ops = &ipv6_specific;
1621
1622 #ifdef CONFIG_TCP_MD5SIG
1623 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1624 #endif
1625
1626 return 0;
1627 }
1628
1629 static void tcp_v6_destroy_sock(struct sock *sk)
1630 {
1631 tcp_v4_destroy_sock(sk);
1632 inet6_destroy_sock(sk);
1633 }
1634
1635 #ifdef CONFIG_PROC_FS
1636 /* Proc filesystem TCPv6 sock list dumping. */
1637 static void get_openreq6(struct seq_file *seq,
1638 const struct request_sock *req, int i)
1639 {
1640 long ttd = req->rsk_timer.expires - jiffies;
1641 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1642 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1643
1644 if (ttd < 0)
1645 ttd = 0;
1646
1647 seq_printf(seq,
1648 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1649 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1650 i,
1651 src->s6_addr32[0], src->s6_addr32[1],
1652 src->s6_addr32[2], src->s6_addr32[3],
1653 inet_rsk(req)->ir_num,
1654 dest->s6_addr32[0], dest->s6_addr32[1],
1655 dest->s6_addr32[2], dest->s6_addr32[3],
1656 ntohs(inet_rsk(req)->ir_rmt_port),
1657 TCP_SYN_RECV,
1658 0, 0, /* could print option size, but that is af dependent. */
1659 1, /* timers active (only the expire timer) */
1660 jiffies_to_clock_t(ttd),
1661 req->num_timeout,
1662 from_kuid_munged(seq_user_ns(seq),
1663 sock_i_uid(req->rsk_listener)),
1664 0, /* non standard timer */
1665 0, /* open_requests have no inode */
1666 0, req);
1667 }
1668
1669 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1670 {
1671 const struct in6_addr *dest, *src;
1672 __u16 destp, srcp;
1673 int timer_active;
1674 unsigned long timer_expires;
1675 const struct inet_sock *inet = inet_sk(sp);
1676 const struct tcp_sock *tp = tcp_sk(sp);
1677 const struct inet_connection_sock *icsk = inet_csk(sp);
1678 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
1679
1680 dest = &sp->sk_v6_daddr;
1681 src = &sp->sk_v6_rcv_saddr;
1682 destp = ntohs(inet->inet_dport);
1683 srcp = ntohs(inet->inet_sport);
1684
1685 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1686 timer_active = 1;
1687 timer_expires = icsk->icsk_timeout;
1688 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1689 timer_active = 4;
1690 timer_expires = icsk->icsk_timeout;
1691 } else if (timer_pending(&sp->sk_timer)) {
1692 timer_active = 2;
1693 timer_expires = sp->sk_timer.expires;
1694 } else {
1695 timer_active = 0;
1696 timer_expires = jiffies;
1697 }
1698
1699 seq_printf(seq,
1700 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1701 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1702 i,
1703 src->s6_addr32[0], src->s6_addr32[1],
1704 src->s6_addr32[2], src->s6_addr32[3], srcp,
1705 dest->s6_addr32[0], dest->s6_addr32[1],
1706 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1707 sp->sk_state,
1708 tp->write_seq-tp->snd_una,
1709 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1710 timer_active,
1711 jiffies_delta_to_clock_t(timer_expires - jiffies),
1712 icsk->icsk_retransmits,
1713 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1714 icsk->icsk_probes_out,
1715 sock_i_ino(sp),
1716 atomic_read(&sp->sk_refcnt), sp,
1717 jiffies_to_clock_t(icsk->icsk_rto),
1718 jiffies_to_clock_t(icsk->icsk_ack.ato),
1719 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1720 tp->snd_cwnd,
1721 sp->sk_state == TCP_LISTEN ?
1722 fastopenq->max_qlen :
1723 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1724 );
1725 }
1726
1727 static void get_timewait6_sock(struct seq_file *seq,
1728 struct inet_timewait_sock *tw, int i)
1729 {
1730 long delta = tw->tw_timer.expires - jiffies;
1731 const struct in6_addr *dest, *src;
1732 __u16 destp, srcp;
1733
1734 dest = &tw->tw_v6_daddr;
1735 src = &tw->tw_v6_rcv_saddr;
1736 destp = ntohs(tw->tw_dport);
1737 srcp = ntohs(tw->tw_sport);
1738
1739 seq_printf(seq,
1740 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1741 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1742 i,
1743 src->s6_addr32[0], src->s6_addr32[1],
1744 src->s6_addr32[2], src->s6_addr32[3], srcp,
1745 dest->s6_addr32[0], dest->s6_addr32[1],
1746 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1747 tw->tw_substate, 0, 0,
1748 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1749 atomic_read(&tw->tw_refcnt), tw);
1750 }
1751
1752 static int tcp6_seq_show(struct seq_file *seq, void *v)
1753 {
1754 struct tcp_iter_state *st;
1755 struct sock *sk = v;
1756
1757 if (v == SEQ_START_TOKEN) {
1758 seq_puts(seq,
1759 " sl "
1760 "local_address "
1761 "remote_address "
1762 "st tx_queue rx_queue tr tm->when retrnsmt"
1763 " uid timeout inode\n");
1764 goto out;
1765 }
1766 st = seq->private;
1767
1768 switch (st->state) {
1769 case TCP_SEQ_STATE_LISTENING:
1770 case TCP_SEQ_STATE_ESTABLISHED:
1771 if (sk->sk_state == TCP_TIME_WAIT)
1772 get_timewait6_sock(seq, v, st->num);
1773 else
1774 get_tcp6_sock(seq, v, st->num);
1775 break;
1776 case TCP_SEQ_STATE_OPENREQ:
1777 get_openreq6(seq, v, st->num);
1778 break;
1779 }
1780 out:
1781 return 0;
1782 }
1783
1784 static const struct file_operations tcp6_afinfo_seq_fops = {
1785 .owner = THIS_MODULE,
1786 .open = tcp_seq_open,
1787 .read = seq_read,
1788 .llseek = seq_lseek,
1789 .release = seq_release_net
1790 };
1791
1792 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1793 .name = "tcp6",
1794 .family = AF_INET6,
1795 .seq_fops = &tcp6_afinfo_seq_fops,
1796 .seq_ops = {
1797 .show = tcp6_seq_show,
1798 },
1799 };
1800
1801 int __net_init tcp6_proc_init(struct net *net)
1802 {
1803 return tcp_proc_register(net, &tcp6_seq_afinfo);
1804 }
1805
1806 void tcp6_proc_exit(struct net *net)
1807 {
1808 tcp_proc_unregister(net, &tcp6_seq_afinfo);
1809 }
1810 #endif
1811
1812 static void tcp_v6_clear_sk(struct sock *sk, int size)
1813 {
1814 struct inet_sock *inet = inet_sk(sk);
1815
1816 /* we do not want to clear pinet6 field, because of RCU lookups */
1817 sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
1818
1819 size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
1820 memset(&inet->pinet6 + 1, 0, size);
1821 }
1822
1823 struct proto tcpv6_prot = {
1824 .name = "TCPv6",
1825 .owner = THIS_MODULE,
1826 .close = tcp_close,
1827 .connect = tcp_v6_connect,
1828 .disconnect = tcp_disconnect,
1829 .accept = inet_csk_accept,
1830 .ioctl = tcp_ioctl,
1831 .init = tcp_v6_init_sock,
1832 .destroy = tcp_v6_destroy_sock,
1833 .shutdown = tcp_shutdown,
1834 .setsockopt = tcp_setsockopt,
1835 .getsockopt = tcp_getsockopt,
1836 .recvmsg = tcp_recvmsg,
1837 .sendmsg = tcp_sendmsg,
1838 .sendpage = tcp_sendpage,
1839 .backlog_rcv = tcp_v6_do_rcv,
1840 .release_cb = tcp_release_cb,
1841 .hash = inet_hash,
1842 .unhash = inet_unhash,
1843 .get_port = inet_csk_get_port,
1844 .enter_memory_pressure = tcp_enter_memory_pressure,
1845 .stream_memory_free = tcp_stream_memory_free,
1846 .sockets_allocated = &tcp_sockets_allocated,
1847 .memory_allocated = &tcp_memory_allocated,
1848 .memory_pressure = &tcp_memory_pressure,
1849 .orphan_count = &tcp_orphan_count,
1850 .sysctl_mem = sysctl_tcp_mem,
1851 .sysctl_wmem = sysctl_tcp_wmem,
1852 .sysctl_rmem = sysctl_tcp_rmem,
1853 .max_header = MAX_TCP_HEADER,
1854 .obj_size = sizeof(struct tcp6_sock),
1855 .slab_flags = SLAB_DESTROY_BY_RCU,
1856 .twsk_prot = &tcp6_timewait_sock_ops,
1857 .rsk_prot = &tcp6_request_sock_ops,
1858 .h.hashinfo = &tcp_hashinfo,
1859 .no_autobind = true,
1860 #ifdef CONFIG_COMPAT
1861 .compat_setsockopt = compat_tcp_setsockopt,
1862 .compat_getsockopt = compat_tcp_getsockopt,
1863 #endif
1864 #ifdef CONFIG_MEMCG_KMEM
1865 .proto_cgroup = tcp_proto_cgroup,
1866 #endif
1867 .clear_sk = tcp_v6_clear_sk,
1868 };
1869
1870 static const struct inet6_protocol tcpv6_protocol = {
1871 .early_demux = tcp_v6_early_demux,
1872 .handler = tcp_v6_rcv,
1873 .err_handler = tcp_v6_err,
1874 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1875 };
1876
1877 static struct inet_protosw tcpv6_protosw = {
1878 .type = SOCK_STREAM,
1879 .protocol = IPPROTO_TCP,
1880 .prot = &tcpv6_prot,
1881 .ops = &inet6_stream_ops,
1882 .flags = INET_PROTOSW_PERMANENT |
1883 INET_PROTOSW_ICSK,
1884 };
1885
1886 static int __net_init tcpv6_net_init(struct net *net)
1887 {
1888 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1889 SOCK_RAW, IPPROTO_TCP, net);
1890 }
1891
1892 static void __net_exit tcpv6_net_exit(struct net *net)
1893 {
1894 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
1895 }
1896
1897 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
1898 {
1899 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
1900 }
1901
1902 static struct pernet_operations tcpv6_net_ops = {
1903 .init = tcpv6_net_init,
1904 .exit = tcpv6_net_exit,
1905 .exit_batch = tcpv6_net_exit_batch,
1906 };
1907
1908 int __init tcpv6_init(void)
1909 {
1910 int ret;
1911
1912 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1913 if (ret)
1914 goto out;
1915
1916 /* register inet6 protocol */
1917 ret = inet6_register_protosw(&tcpv6_protosw);
1918 if (ret)
1919 goto out_tcpv6_protocol;
1920
1921 ret = register_pernet_subsys(&tcpv6_net_ops);
1922 if (ret)
1923 goto out_tcpv6_protosw;
1924 out:
1925 return ret;
1926
1927 out_tcpv6_protosw:
1928 inet6_unregister_protosw(&tcpv6_protosw);
1929 out_tcpv6_protocol:
1930 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1931 goto out;
1932 }
1933
1934 void tcpv6_exit(void)
1935 {
1936 unregister_pernet_subsys(&tcpv6_net_ops);
1937 inet6_unregister_protosw(&tcpv6_protosw);
1938 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1939 }