/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}

static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

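/* connect() entry point for AF_INET6 TCP sockets.  A v4-mapped
 * destination transparently switches the socket onto the IPv4 paths
 * (ipv6_mapped af_ops, tcp_v4_do_rcv) unless IPV6_V6ONLY is set.
 * Illustrative userspace sketch (address and port are examples only):
 *
 *	int fd = socket(AF_INET6, SOCK_STREAM, 0);
 *	struct sockaddr_in6 sin6 = { .sin6_family = AF_INET6,
 *				     .sin6_port = htons(80) };
 *	inet_pton(AF_INET6, "::ffff:192.0.2.1", &sin6.sin6_addr);
 *	connect(fd, (struct sockaddr *)&sin6, sizeof(sin6));
 */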
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

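/* ICMPv6 error handler.  Note the ICMPV6_PKT_TOOBIG case below: the new
 * MTU is stashed in tp->mtu_info, and if the socket is owned by user
 * context the PMTU update is deferred to release_cb via the
 * TCP_MTU_REDUCED_DEFERRED flag instead of being applied here in BH
 * context.
 */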
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex);

	if (!sk) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always <576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}


static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      bool attach_req)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, attach_req);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
			       np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}


static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

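/* TCP_MD5SIG setsockopt() handler.  A v4-mapped tcpm_addr keys the IPv4
 * side of a dual-stack socket, and a zero tcpm_keylen deletes the key
 * for that peer.  Illustrative userspace sketch (peer address and key
 * are examples only):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in6 *a = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *
 *	a->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &a->sin6_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */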
static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}

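/* The MD5 hash is computed over a pseudo-header that follows the
 * RFC 2460 checksum pseudo-header layout (see struct tcp6_pseudohdr in
 * <net/tcp.h>):
 *
 *	saddr (16 bytes) | daddr (16 bytes) | len (4) | protocol (4)
 */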
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

#endif

static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}

static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
};

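/* Build and send an unattached reply segment on the per-netns control
 * socket: a RST when rst != 0, otherwise a bare ACK.  The reply swaps
 * the addresses and ports of the segment it answers and may carry
 * timestamp and MD5 options, hence the tot_len accounting below.
 */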
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, u32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else
		fl6.flowi6_oif = oif;
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup_flow even when it is for a RST;
	 * the underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

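/* Send a RST in reply to an unexpected segment.  When no socket is
 * attached but the segment carries an MD5 option, the listener is
 * looked up so the RST can be signed with the matching key; an
 * unsigned RST would be dropped by an MD5-protected peer.
 */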
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and the
	 * incoming route had to be correct. prequeue might have dropped
	 * our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not loosen security here:
		 * the incoming packet is checked against the md5 hash with
		 * the found key, and no RST is generated if the md5 hash
		 * doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}

static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    u32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}


static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0; /* don't send reset */
}

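/* Create the child socket once the handshake completes.  An IPv4 SYN
 * on a dual-stack listener produces a v6-mapped child that reuses the
 * IPv4 receive paths, mirroring the mapped-address handling in
 * tcp_v6_connect() above.
 */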
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = tcp_v6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt	  = NULL;
	newnp->mcast_oif  = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much more clever, but we
	   do one more thing here: reattach optmem to newsk.
	 */
	opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_mask(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_mask(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions)
				skb_set_owner_r(newnp->pktoptions, newsk);
		}
	}

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but that is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, the whole idea behind IPV6_PKTOPTIONS
	   looks not very well thought out. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose a better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
}

static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}

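/* Main receive path.  Roughly: validate the header and checksum, look
 * the segment up in the established/listener hashes, then either
 * process it directly, let the prequeue take it, or park it on the
 * socket backlog when the owner is busy.
 */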
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
				inet6_iif(skb));
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk = NULL;

		sk = req->rsk_listener;
		tcp_v6_fill_cb(skb, hdr, th);
		if (tcp_v6_inbound_md5_hash(sk, skb)) {
			reqsk_put(req);
			goto discard_it;
		}
		if (likely(sk->sk_state == TCP_LISTEN)) {
			nsk = tcp_check_req(sk, skb, req, false);
		} else {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		if (!nsk) {
			reqsk_put(req);
			goto discard_it;
		}
		if (nsk == sk) {
			sock_hold(sk);
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_it;
		} else {
			return 0;
		}
	}
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb));
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_restore_cb(skb);
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}

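/* Early demux: look up the established socket before routing so that
 * its cached rx dst can be attached to the skb, saving a full route
 * lookup for established flows.
 */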
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things are set to zero explicitly by the call to
 *	 sk_alloc(), so they need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
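/* Output format matches /proc/net/tcp: addresses and ports in hex,
 * then state, queue sizes, timer info, retransmits, uid, timeout and
 * inode.  An illustrative (truncated) line for a socket listening on
 * port 22 (0x0016):
 *
 *   0: 00000000000000000000000000000000:0016 ... :0000 0A 00000000:00000000 ...
 */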
static void get_openreq6(struct seq_file *seq,
			 const struct request_sock *req, int i)
{
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq),
				    sock_i_uid(req->rsk_listener)),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	int rx_queue;
	int state;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = sk_state_load(sp);
	if (state == TCP_LISTEN)
		rx_queue = sp->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   state,
		   tp->write_seq - tp->snd_una,
		   rx_queue,
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   state == TCP_LISTEN ?
			fastopenq->max_qlen :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait6_sock(seq, v, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq6(seq, v, st->num);
	else
		get_tcp6_sock(seq, v, st->num);
out:
	return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}

struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
	.clear_sk		= tcp_v6_clear_sk,
	.diag_destroy		= tcp_abort,
};

static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}