/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req);

static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

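/* Cache the already validated incoming route, its ifindex and a route
 * cookie on the socket, so that the receive fast path can reuse it
 * instead of performing a fresh route lookup for every segment.
 */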
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}

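/* Derive the initial sequence number (and the timestamp offset) for a
 * new connection from the {saddr, daddr, sport, dport} tuple of the
 * incoming SYN.
 */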
static u32 tcp_v6_init_sequence(const struct sk_buff *skb, u32 *tsoff)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source, tsoff);
}

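/* Active open. Validates the destination, falls back to tcp_v4_connect()
 * for v4-mapped destinations, routes the flow, picks a local port via
 * inet6_hash_connect() and finally sends the SYN.
 */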
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sk->sk_uid;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	if (tcp_death_row->sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport,
							     &tp->tsoffset);

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto late_failure;

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

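/* Handle a queued ICMPV6_PKT_TOOBIG notification: shrink the cached
 * path MTU via the route and retransmit with the smaller MSS.
 */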
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

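/* ICMPv6 error handler. Finds the socket owning the offending segment
 * and maps the error onto it: redirects update the route, PKT_TOOBIG
 * triggers PMTU handling, everything else becomes a hard or soft error
 * depending on the connection state.
 */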
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex);

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &sk->sk_tsq_flags))
			sock_hold(sk);
		goto out;
	}

	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

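/* Build a SYN-ACK for the given request sock and transmit it, grabbing
 * a route first if the caller did not supply one.
 */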
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->ipv6_opt);
	kfree_skb(inet_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

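/* setsockopt(TCP_MD5SIG) handler: a zero key length deletes the peer's
 * key, otherwise the key is added; v4-mapped peers are stored as
 * AF_INET keys so that the IPv4 code finds them too.
 */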
static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}

static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

#endif

static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}

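/* Fill in the IPv6 specific fields of a new request sock from the
 * incoming SYN, keeping a reference to the skb when the listener has
 * asked for packet options.
 */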
static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family = AF_INET6,
	.obj_size = sizeof(struct tcp6_request_sock),
	.rtx_syn_ack = tcp_rtx_synack,
	.send_ack = tcp_v6_reqsk_send_ack,
	.destructor = tcp_v6_reqsk_destructor,
	.send_reset = tcp_v6_send_reset,
	.syn_ack_timeout = tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
		     sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup = tcp_v6_md5_lookup,
	.calc_md5_hash = tcp_v6_md5_hash_skb,
#endif
	.init_req = tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq = cookie_v6_init_sequence,
#endif
	.route_req = tcp_v6_route_req,
	.init_seq = tcp_v6_init_sequence,
	.send_synack = tcp_v6_send_synack,
};

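/* Common helper for RSTs and bare ACKs: build a reply with the address
 * and port pairs of the incoming skb swapped, optionally add timestamp
 * and MD5 options, and send it via the per-netns control socket.
 */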
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, __be32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup_flow even when it is for an RST;
	 * the underlying function uses it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

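/* Send a RST in reply to skb. A RST is never answered with a RST, and
 * when the incoming segment carries an MD5 signature the reply is only
 * sent if that signature validates against a key we know.
 */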
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	/* If sk is not NULL, it means we did a successful lookup and the
	 * incoming route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not lose security here:
		 * the incoming packet is checked against the md5 hash of the
		 * key we found; no RST is generated if the md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, NULL, 0,
					    &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			goto out;

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto out;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}

static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    __be32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp + tcp_rsk(req)->ts_off,
			req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}

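/* On a listening socket, a non-SYN segment may be the ACK completing a
 * SYN-cookie handshake; let cookie_v6_check() validate it and create
 * the child socket if the cookie is good.
 */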
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}

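/* Create the child socket for a completed handshake. v4-mapped peers
 * are handed to tcp_v4_syn_recv_sock() and only get the IPv6 bits
 * grafted on; native IPv6 peers get a fully initialised IPv6 child.
 */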
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = tcp_v6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment IPv4 tcp
		   worked with the IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt = NULL;
	newnp->mcast_oif = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much more clever,
	   but we do one more thing here: we reattach optmem
	   to newsk.
	 */
	opt = ireq->ipv6_opt;
	if (!opt)
		opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_mask(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_mask(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions)
				skb_set_owner_r(newnp->pktoptions, newsk);
		}
	}

	return newsk;

out_overflow:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	tcp_listendrop(sk);
	return NULL;
}

static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move the header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (tcp_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought out. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose a better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;

ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which the user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			tcp_v6_restore_cb(opt_skb);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
}

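/* Main receive path: validate and checksum the segment, look up the
 * owning socket, and either process the skb immediately, queue it to
 * the backlog, or run the NEW_SYN_RECV/TIME_WAIT mini-socket handling.
 */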
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	bool refcounted;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr)/4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
				th->source, th->dest, inet6_iif(skb),
				&refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		tcp_v6_fill_cb(skb, hdr, th);
		if (tcp_v6_inbound_md5_hash(sk, skb)) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		refcounted = true;
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (tcp_add_backlog(sk, skb)) {
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	if (refcounted)
		sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    skb, __tcp_hdrlen(th),
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb));
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			refcounted = false;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_restore_cb(skb);
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}

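/* Early demux: before the packet is routed, try to match it to an
 * established socket and reuse that socket's cached rx dst, saving a
 * routing lookup on the fast path.
 */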
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size = sizeof(struct tcp6_timewait_sock),
	.twsk_unique = tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit = inet6_csk_xmit,
	.send_check = tcp_v6_send_check,
	.rebuild_header = inet6_sk_rebuild_header,
	.sk_rx_dst_set = inet6_sk_rx_dst_set,
	.conn_request = tcp_v6_conn_request,
	.syn_recv_sock = tcp_v6_syn_recv_sock,
	.net_header_len = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt = ipv6_setsockopt,
	.getsockopt = ipv6_getsockopt,
	.addr2sockaddr = inet6_csk_addr2sockaddr,
	.sockaddr_len = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced = tcp_v6_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup = tcp_v6_md5_lookup,
	.calc_md5_hash = tcp_v6_md5_hash_skb,
	.md5_parse = tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit = ip_queue_xmit,
	.send_check = tcp_v4_send_check,
	.rebuild_header = inet_sk_rebuild_header,
	.sk_rx_dst_set = inet_sk_rx_dst_set,
	.conn_request = tcp_v6_conn_request,
	.syn_recv_sock = tcp_v6_syn_recv_sock,
	.net_header_len = sizeof(struct iphdr),
	.setsockopt = ipv6_setsockopt,
	.getsockopt = ipv6_getsockopt,
	.addr2sockaddr = inet6_csk_addr2sockaddr,
	.sockaddr_len = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced = tcp_v4_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup = tcp_v4_md5_lookup,
	.calc_md5_hash = tcp_v4_md5_hash_skb,
	.md5_parse = tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct request_sock *req, int i)
{
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1, /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq),
				    sock_i_uid(req->rsk_listener)),
		   0, /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	int rx_queue;
	int state;

	dest = &sp->sk_v6_daddr;
	src = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active = 1;
		timer_expires = icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active = 4;
		timer_expires = icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active = 2;
		timer_expires = sp->sk_timer.expires;
	} else {
		timer_active = 0;
		timer_expires = jiffies;
	}

	state = sk_state_load(sp);
	if (state == TCP_LISTEN)
		rx_queue = sp->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   state,
		   tp->write_seq - tp->snd_una,
		   rx_queue,
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   state == TCP_LISTEN ?
			fastopenq->max_qlen :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait6_sock(seq, v, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq6(seq, v, st->num);
	else
		get_tcp6_sock(seq, v, st->num);
out:
	return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner = THIS_MODULE,
	.open = tcp_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name = "tcp6",
	.family = AF_INET6,
	.seq_fops = &tcp6_afinfo_seq_fops,
	.seq_ops = {
		.show = tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

struct proto tcpv6_prot = {
	.name = "TCPv6",
	.owner = THIS_MODULE,
	.close = tcp_close,
	.connect = tcp_v6_connect,
	.disconnect = tcp_disconnect,
	.accept = inet_csk_accept,
	.ioctl = tcp_ioctl,
	.init = tcp_v6_init_sock,
	.destroy = tcp_v6_destroy_sock,
	.shutdown = tcp_shutdown,
	.setsockopt = tcp_setsockopt,
	.getsockopt = tcp_getsockopt,
	.keepalive = tcp_set_keepalive,
	.recvmsg = tcp_recvmsg,
	.sendmsg = tcp_sendmsg,
	.sendpage = tcp_sendpage,
	.backlog_rcv = tcp_v6_do_rcv,
	.release_cb = tcp_release_cb,
	.hash = inet6_hash,
	.unhash = inet_unhash,
	.get_port = inet_csk_get_port,
	.enter_memory_pressure = tcp_enter_memory_pressure,
	.stream_memory_free = tcp_stream_memory_free,
	.sockets_allocated = &tcp_sockets_allocated,
	.memory_allocated = &tcp_memory_allocated,
	.memory_pressure = &tcp_memory_pressure,
	.orphan_count = &tcp_orphan_count,
	.sysctl_mem = sysctl_tcp_mem,
	.sysctl_wmem = sysctl_tcp_wmem,
	.sysctl_rmem = sysctl_tcp_rmem,
	.max_header = MAX_TCP_HEADER,
	.obj_size = sizeof(struct tcp6_sock),
	.slab_flags = SLAB_DESTROY_BY_RCU,
	.twsk_prot = &tcp6_timewait_sock_ops,
	.rsk_prot = &tcp6_request_sock_ops,
	.h.hashinfo = &tcp_hashinfo,
	.no_autobind = true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_tcp_setsockopt,
	.compat_getsockopt = compat_tcp_getsockopt,
#endif
	.diag_destroy = tcp_abort,
};

static const struct inet6_protocol tcpv6_protocol = {
	.early_demux = tcp_v6_early_demux,
	.handler = tcp_v6_rcv,
	.err_handler = tcp_v6_err,
	.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type = SOCK_STREAM,
	.protocol = IPPROTO_TCP,
	.prot = &tcpv6_prot,
	.ops = &inet6_stream_ops,
	.flags = INET_PROTOSW_PERMANENT |
		 INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init = tcpv6_net_init,
	.exit = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}