net/ipv6/tcp_ipv6.c
1 /*
2 * TCP over IPv6
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * Based on:
9 * linux/net/ipv4/tcp.c
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
12 *
13 * Fixes:
14 * Hideaki YOSHIFUJI : sin6_scope_id support
15 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
16 * Alexey Kuznetsov allows both IPv4 and IPv6 sockets to bind
17 * a single port at the same time.
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
34 #include <linux/in.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42 #include <linux/uaccess.h>
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/inet_common.h>
63 #include <net/secure_seq.h>
64 #include <net/busy_poll.h>
65
66 #include <linux/proc_fs.h>
67 #include <linux/seq_file.h>
68
69 #include <crypto/hash.h>
70 #include <linux/scatterlist.h>
71
72 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
73 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
74 struct request_sock *req);
75
76 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
77
78 static const struct inet_connection_sock_af_ops ipv6_mapped;
79 static const struct inet_connection_sock_af_ops ipv6_specific;
80 #ifdef CONFIG_TCP_MD5SIG
81 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
82 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
83 #else
84 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
85 const struct in6_addr *addr)
86 {
87 return NULL;
88 }
89 #endif
90
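/* Cache the validated RX route on the socket. The ifindex and the
 * rt6 cookie are saved alongside the dst so the fast path in
 * tcp_v6_do_rcv() and the early demux can detect a stale route.
 */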
91 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
92 {
93 struct dst_entry *dst = skb_dst(skb);
94
95 if (dst && dst_hold_safe(dst)) {
96 const struct rt6_info *rt = (const struct rt6_info *)dst;
97
98 sk->sk_rx_dst = dst;
99 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
100 inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
101 }
102 }
103
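/* Derive the initial sequence number (and the per-connection timestamp
 * offset, returned via @tsoff) from the address/port 4-tuple using a
 * keyed hash.
 */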
104 static u32 tcp_v6_init_sequence(const struct sk_buff *skb, u32 *tsoff)
105 {
106 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
107 ipv6_hdr(skb)->saddr.s6_addr32,
108 tcp_hdr(skb)->dest,
109 tcp_hdr(skb)->source, tsoff);
110 }
111
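/* Active open: validate the destination, hand v4-mapped destinations
 * off to tcp_v4_connect(), route the flow, choose a source address,
 * allocate a local port via inet6_hash_connect() and finally send the
 * SYN with tcp_connect().
 */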
112 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
113 int addr_len)
114 {
115 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
116 struct inet_sock *inet = inet_sk(sk);
117 struct inet_connection_sock *icsk = inet_csk(sk);
118 struct ipv6_pinfo *np = inet6_sk(sk);
119 struct tcp_sock *tp = tcp_sk(sk);
120 struct in6_addr *saddr = NULL, *final_p, final;
121 struct ipv6_txoptions *opt;
122 struct flowi6 fl6;
123 struct dst_entry *dst;
124 int addr_type;
125 int err;
126
127 if (addr_len < SIN6_LEN_RFC2133)
128 return -EINVAL;
129
130 if (usin->sin6_family != AF_INET6)
131 return -EAFNOSUPPORT;
132
133 memset(&fl6, 0, sizeof(fl6));
134
135 if (np->sndflow) {
136 fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
137 IP6_ECN_flow_init(fl6.flowlabel);
138 if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
139 struct ip6_flowlabel *flowlabel;
140 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
141 if (!flowlabel)
142 return -EINVAL;
143 fl6_sock_release(flowlabel);
144 }
145 }
146
147 /*
148 * connect() to INADDR_ANY means loopback (BSD'ism).
149 */
150
151 if (ipv6_addr_any(&usin->sin6_addr)) {
152 if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
153 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
154 &usin->sin6_addr);
155 else
156 usin->sin6_addr = in6addr_loopback;
157 }
158
159 addr_type = ipv6_addr_type(&usin->sin6_addr);
160
161 if (addr_type & IPV6_ADDR_MULTICAST)
162 return -ENETUNREACH;
163
164 if (addr_type & IPV6_ADDR_LINKLOCAL) {
165 if (addr_len >= sizeof(struct sockaddr_in6) &&
166 usin->sin6_scope_id) {
167 /* If an interface was set while binding, the indices
168 * must coincide.
169 */
170 if (sk->sk_bound_dev_if &&
171 sk->sk_bound_dev_if != usin->sin6_scope_id)
172 return -EINVAL;
173
174 sk->sk_bound_dev_if = usin->sin6_scope_id;
175 }
176
177 /* Connecting to a link-local address requires an interface */
178 if (!sk->sk_bound_dev_if)
179 return -EINVAL;
180 }
181
182 if (tp->rx_opt.ts_recent_stamp &&
183 !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
184 tp->rx_opt.ts_recent = 0;
185 tp->rx_opt.ts_recent_stamp = 0;
186 tp->write_seq = 0;
187 }
188
189 sk->sk_v6_daddr = usin->sin6_addr;
190 np->flow_label = fl6.flowlabel;
191
192 /*
193 * TCP over IPv4
194 */
195
196 if (addr_type & IPV6_ADDR_MAPPED) {
197 u32 exthdrlen = icsk->icsk_ext_hdr_len;
198 struct sockaddr_in sin;
199
200 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
201
202 if (__ipv6_only_sock(sk))
203 return -ENETUNREACH;
204
205 sin.sin_family = AF_INET;
206 sin.sin_port = usin->sin6_port;
207 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
208
209 icsk->icsk_af_ops = &ipv6_mapped;
210 sk->sk_backlog_rcv = tcp_v4_do_rcv;
211 #ifdef CONFIG_TCP_MD5SIG
212 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
213 #endif
214
215 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
216
217 if (err) {
218 icsk->icsk_ext_hdr_len = exthdrlen;
219 icsk->icsk_af_ops = &ipv6_specific;
220 sk->sk_backlog_rcv = tcp_v6_do_rcv;
221 #ifdef CONFIG_TCP_MD5SIG
222 tp->af_specific = &tcp_sock_ipv6_specific;
223 #endif
224 goto failure;
225 }
226 np->saddr = sk->sk_v6_rcv_saddr;
227
228 return err;
229 }
230
231 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
232 saddr = &sk->sk_v6_rcv_saddr;
233
234 fl6.flowi6_proto = IPPROTO_TCP;
235 fl6.daddr = sk->sk_v6_daddr;
236 fl6.saddr = saddr ? *saddr : np->saddr;
237 fl6.flowi6_oif = sk->sk_bound_dev_if;
238 fl6.flowi6_mark = sk->sk_mark;
239 fl6.fl6_dport = usin->sin6_port;
240 fl6.fl6_sport = inet->inet_sport;
241 fl6.flowi6_uid = sk->sk_uid;
242
243 opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
244 final_p = fl6_update_dst(&fl6, opt, &final);
245
246 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
247
248 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
249 if (IS_ERR(dst)) {
250 err = PTR_ERR(dst);
251 goto failure;
252 }
253
254 if (!saddr) {
255 saddr = &fl6.saddr;
256 sk->sk_v6_rcv_saddr = *saddr;
257 }
258
259 /* set the source address */
260 np->saddr = *saddr;
261 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
262
263 sk->sk_gso_type = SKB_GSO_TCPV6;
264 ip6_dst_store(sk, dst, NULL, NULL);
265
266 if (tcp_death_row.sysctl_tw_recycle &&
267 !tp->rx_opt.ts_recent_stamp &&
268 ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
269 tcp_fetch_timewait_stamp(sk, dst);
270
271 icsk->icsk_ext_hdr_len = 0;
272 if (opt)
273 icsk->icsk_ext_hdr_len = opt->opt_flen +
274 opt->opt_nflen;
275
276 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
277
278 inet->inet_dport = usin->sin6_port;
279
280 tcp_set_state(sk, TCP_SYN_SENT);
281 err = inet6_hash_connect(&tcp_death_row, sk);
282 if (err)
283 goto late_failure;
284
285 sk_set_txhash(sk);
286
287 if (!tp->write_seq && likely(!tp->repair))
288 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
289 sk->sk_v6_daddr.s6_addr32,
290 inet->inet_sport,
291 inet->inet_dport,
292 &tp->tsoffset);
293
294 err = tcp_connect(sk);
295 if (err)
296 goto late_failure;
297
298 return 0;
299
300 late_failure:
301 tcp_set_state(sk, TCP_CLOSE);
302 __sk_dst_reset(sk);
303 failure:
304 inet->inet_dport = 0;
305 sk->sk_route_caps = 0;
306 return err;
307 }
308
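/* Invoked, possibly deferred from tcp_v6_err(), once an
 * ICMPV6_PKT_TOOBIG has updated tp->mtu_info: shrink the MSS and
 * retransmit what no longer fits the path MTU.
 */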
309 static void tcp_v6_mtu_reduced(struct sock *sk)
310 {
311 struct dst_entry *dst;
312
313 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
314 return;
315
316 dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
317 if (!dst)
318 return;
319
320 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
321 tcp_sync_mss(sk, dst_mtu(dst));
322 tcp_simple_retransmit(sk);
323 }
324 }
325
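/* ICMPv6 error handler, registered as tcpv6_protocol.err_handler.
 * Maps the ICMPv6 type/code onto the matching socket and either adjusts
 * the path MTU, follows a redirect or reports the error to the user.
 */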
326 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
327 u8 type, u8 code, int offset, __be32 info)
328 {
329 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
330 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
331 struct net *net = dev_net(skb->dev);
332 struct request_sock *fastopen;
333 struct ipv6_pinfo *np;
334 struct tcp_sock *tp;
335 __u32 seq, snd_una;
336 struct sock *sk;
337 bool fatal;
338 int err;
339
340 sk = __inet6_lookup_established(net, &tcp_hashinfo,
341 &hdr->daddr, th->dest,
342 &hdr->saddr, ntohs(th->source),
343 skb->dev->ifindex);
344
345 if (!sk) {
346 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
347 ICMP6_MIB_INERRORS);
348 return;
349 }
350
351 if (sk->sk_state == TCP_TIME_WAIT) {
352 inet_twsk_put(inet_twsk(sk));
353 return;
354 }
355 seq = ntohl(th->seq);
356 fatal = icmpv6_err_convert(type, code, &err);
357 if (sk->sk_state == TCP_NEW_SYN_RECV)
358 return tcp_req_err(sk, seq, fatal);
359
360 bh_lock_sock(sk);
361 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
362 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
363
364 if (sk->sk_state == TCP_CLOSE)
365 goto out;
366
367 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
368 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
369 goto out;
370 }
371
372 tp = tcp_sk(sk);
373 /* XXX (TFO) - tp->snd_una should be the ISN (tcp_create_openreq_child()) */
374 fastopen = tp->fastopen_rsk;
375 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
376 if (sk->sk_state != TCP_LISTEN &&
377 !between(seq, snd_una, tp->snd_nxt)) {
378 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
379 goto out;
380 }
381
382 np = inet6_sk(sk);
383
384 if (type == NDISC_REDIRECT) {
385 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
386
387 if (dst)
388 dst->ops->redirect(dst, sk, skb);
389 goto out;
390 }
391
392 if (type == ICMPV6_PKT_TOOBIG) {
393 /* We are not interested in TCP_LISTEN and open_requests
394 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
395 * they should go through unfragmented).
396 */
397 if (sk->sk_state == TCP_LISTEN)
398 goto out;
399
400 if (!ip6_sk_accept_pmtu(sk))
401 goto out;
402
403 tp->mtu_info = ntohl(info);
404 if (!sock_owned_by_user(sk))
405 tcp_v6_mtu_reduced(sk);
406 else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
407 &sk->sk_tsq_flags))
408 sock_hold(sk);
409 goto out;
410 }
411
412
413 /* Might be for a request_sock */
414 switch (sk->sk_state) {
415 case TCP_SYN_SENT:
416 case TCP_SYN_RECV:
417 /* Only in fast or simultaneous open. If a fast open socket
418 * is already accepted, it is treated as a connected one below.
419 */
420 if (fastopen && !fastopen->sk)
421 break;
422
423 if (!sock_owned_by_user(sk)) {
424 sk->sk_err = err;
425 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
426
427 tcp_done(sk);
428 } else
429 sk->sk_err_soft = err;
430 goto out;
431 }
432
433 if (!sock_owned_by_user(sk) && np->recverr) {
434 sk->sk_err = err;
435 sk->sk_error_report(sk);
436 } else
437 sk->sk_err_soft = err;
438
439 out:
440 bh_unlock_sock(sk);
441 sock_put(sk);
442 }
443
444
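/* Build a SYN-ACK for @req (routing first if no dst was supplied) and
 * transmit it with ip6_xmit(), using the listener's IPv6 options.
 */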
445 static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
446 struct flowi *fl,
447 struct request_sock *req,
448 struct tcp_fastopen_cookie *foc,
449 enum tcp_synack_type synack_type)
450 {
451 struct inet_request_sock *ireq = inet_rsk(req);
452 struct ipv6_pinfo *np = inet6_sk(sk);
453 struct ipv6_txoptions *opt;
454 struct flowi6 *fl6 = &fl->u.ip6;
455 struct sk_buff *skb;
456 int err = -ENOMEM;
457
458 /* First, grab a route. */
459 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
460 IPPROTO_TCP)) == NULL)
461 goto done;
462
463 skb = tcp_make_synack(sk, dst, req, foc, synack_type);
464
465 if (skb) {
466 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
467 &ireq->ir_v6_rmt_addr);
468
469 fl6->daddr = ireq->ir_v6_rmt_addr;
470 if (np->repflow && ireq->pktopts)
471 fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
472
473 rcu_read_lock();
474 opt = ireq->ipv6_opt;
475 if (!opt)
476 opt = rcu_dereference(np->opt);
477 err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
478 rcu_read_unlock();
479 err = net_xmit_eval(err);
480 }
481
482 done:
483 return err;
484 }
485
486
487 static void tcp_v6_reqsk_destructor(struct request_sock *req)
488 {
489 kfree(inet_rsk(req)->ipv6_opt);
490 kfree_skb(inet_rsk(req)->pktopts);
491 }
492
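/* TCP MD5 signature support (RFC 2385, used mainly to protect BGP
 * sessions): key lookup, setsockopt parsing and per-segment hashing.
 */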
493 #ifdef CONFIG_TCP_MD5SIG
494 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
495 const struct in6_addr *addr)
496 {
497 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
498 }
499
500 static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
501 const struct sock *addr_sk)
502 {
503 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
504 }
505
506 static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
507 int optlen)
508 {
509 struct tcp_md5sig cmd;
510 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
511
512 if (optlen < sizeof(cmd))
513 return -EINVAL;
514
515 if (copy_from_user(&cmd, optval, sizeof(cmd)))
516 return -EFAULT;
517
518 if (sin6->sin6_family != AF_INET6)
519 return -EINVAL;
520
521 if (!cmd.tcpm_keylen) {
522 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
523 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
524 AF_INET);
525 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
526 AF_INET6);
527 }
528
529 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
530 return -EINVAL;
531
532 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
533 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
534 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
535
536 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
537 AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
538 }
539
540 static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
541 const struct in6_addr *daddr,
542 const struct in6_addr *saddr,
543 const struct tcphdr *th, int nbytes)
544 {
545 struct tcp6_pseudohdr *bp;
546 struct scatterlist sg;
547 struct tcphdr *_th;
548
549 bp = hp->scratch;
550 /* 1. TCP pseudo-header (RFC2460) */
551 bp->saddr = *saddr;
552 bp->daddr = *daddr;
553 bp->protocol = cpu_to_be32(IPPROTO_TCP);
554 bp->len = cpu_to_be32(nbytes);
555
556 _th = (struct tcphdr *)(bp + 1);
557 memcpy(_th, th, sizeof(*th));
558 _th->check = 0;
559
560 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
561 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
562 sizeof(*bp) + sizeof(*th));
563 return crypto_ahash_update(hp->md5_req);
564 }
565
566 static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
567 const struct in6_addr *daddr, struct in6_addr *saddr,
568 const struct tcphdr *th)
569 {
570 struct tcp_md5sig_pool *hp;
571 struct ahash_request *req;
572
573 hp = tcp_get_md5sig_pool();
574 if (!hp)
575 goto clear_hash_noput;
576 req = hp->md5_req;
577
578 if (crypto_ahash_init(req))
579 goto clear_hash;
580 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
581 goto clear_hash;
582 if (tcp_md5_hash_key(hp, key))
583 goto clear_hash;
584 ahash_request_set_crypt(req, NULL, md5_hash, 0);
585 if (crypto_ahash_final(req))
586 goto clear_hash;
587
588 tcp_put_md5sig_pool();
589 return 0;
590
591 clear_hash:
592 tcp_put_md5sig_pool();
593 clear_hash_noput:
594 memset(md5_hash, 0, 16);
595 return 1;
596 }
597
598 static int tcp_v6_md5_hash_skb(char *md5_hash,
599 const struct tcp_md5sig_key *key,
600 const struct sock *sk,
601 const struct sk_buff *skb)
602 {
603 const struct in6_addr *saddr, *daddr;
604 struct tcp_md5sig_pool *hp;
605 struct ahash_request *req;
606 const struct tcphdr *th = tcp_hdr(skb);
607
608 if (sk) { /* valid for establish/request sockets */
609 saddr = &sk->sk_v6_rcv_saddr;
610 daddr = &sk->sk_v6_daddr;
611 } else {
612 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
613 saddr = &ip6h->saddr;
614 daddr = &ip6h->daddr;
615 }
616
617 hp = tcp_get_md5sig_pool();
618 if (!hp)
619 goto clear_hash_noput;
620 req = hp->md5_req;
621
622 if (crypto_ahash_init(req))
623 goto clear_hash;
624
625 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
626 goto clear_hash;
627 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
628 goto clear_hash;
629 if (tcp_md5_hash_key(hp, key))
630 goto clear_hash;
631 ahash_request_set_crypt(req, NULL, md5_hash, 0);
632 if (crypto_ahash_final(req))
633 goto clear_hash;
634
635 tcp_put_md5sig_pool();
636 return 0;
637
638 clear_hash:
639 tcp_put_md5sig_pool();
640 clear_hash_noput:
641 memset(md5_hash, 0, 16);
642 return 1;
643 }
644
645 #endif
646
647 static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
648 const struct sk_buff *skb)
649 {
650 #ifdef CONFIG_TCP_MD5SIG
651 const __u8 *hash_location = NULL;
652 struct tcp_md5sig_key *hash_expected;
653 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
654 const struct tcphdr *th = tcp_hdr(skb);
655 int genhash;
656 u8 newhash[16];
657
658 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
659 hash_location = tcp_parse_md5sig_option(th);
660
661 /* We've parsed the options - do we have a hash? */
662 if (!hash_expected && !hash_location)
663 return false;
664
665 if (hash_expected && !hash_location) {
666 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
667 return true;
668 }
669
670 if (!hash_expected && hash_location) {
671 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
672 return true;
673 }
674
675 /* check the signature */
676 genhash = tcp_v6_md5_hash_skb(newhash,
677 hash_expected,
678 NULL, skb);
679
680 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
681 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
682 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
683 genhash ? "failed" : "mismatch",
684 &ip6h->saddr, ntohs(th->source),
685 &ip6h->daddr, ntohs(th->dest));
686 return true;
687 }
688 #endif
689 return false;
690 }
691
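/* Fill the request sock from the incoming SYN: copy the addresses, pin
 * link-local peers to the ingress interface, and stash the SYN skb in
 * ireq->pktopts when the listener asked for received options.
 */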
692 static void tcp_v6_init_req(struct request_sock *req,
693 const struct sock *sk_listener,
694 struct sk_buff *skb)
695 {
696 struct inet_request_sock *ireq = inet_rsk(req);
697 const struct ipv6_pinfo *np = inet6_sk(sk_listener);
698
699 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
700 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
701
702 /* So that link-local addresses have meaning */
703 if (!sk_listener->sk_bound_dev_if &&
704 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
705 ireq->ir_iif = tcp_v6_iif(skb);
706
707 if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
708 (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
709 np->rxopt.bits.rxinfo ||
710 np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
711 np->rxopt.bits.rxohlim || np->repflow)) {
712 atomic_inc(&skb->users);
713 ireq->pktopts = skb;
714 }
715 }
716
717 static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
718 struct flowi *fl,
719 const struct request_sock *req,
720 bool *strict)
721 {
722 if (strict)
723 *strict = true;
724 return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
725 }
726
727 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
728 .family = AF_INET6,
729 .obj_size = sizeof(struct tcp6_request_sock),
730 .rtx_syn_ack = tcp_rtx_synack,
731 .send_ack = tcp_v6_reqsk_send_ack,
732 .destructor = tcp_v6_reqsk_destructor,
733 .send_reset = tcp_v6_send_reset,
734 .syn_ack_timeout = tcp_syn_ack_timeout,
735 };
736
737 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
738 .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
739 sizeof(struct ipv6hdr),
740 #ifdef CONFIG_TCP_MD5SIG
741 .req_md5_lookup = tcp_v6_md5_lookup,
742 .calc_md5_hash = tcp_v6_md5_hash_skb,
743 #endif
744 .init_req = tcp_v6_init_req,
745 #ifdef CONFIG_SYN_COOKIES
746 .cookie_init_seq = cookie_v6_init_sequence,
747 #endif
748 .route_req = tcp_v6_route_req,
749 .init_seq = tcp_v6_init_sequence,
750 .send_synack = tcp_v6_send_synack,
751 };
752
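/* Craft and send a bare ACK or RST (no full socket attached) in reply
 * to @skb, transmitting through the per-netns control socket
 * net->ipv6.tcp_sk.
 */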
753 static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
754 u32 ack, u32 win, u32 tsval, u32 tsecr,
755 int oif, struct tcp_md5sig_key *key, int rst,
756 u8 tclass, __be32 label)
757 {
758 const struct tcphdr *th = tcp_hdr(skb);
759 struct tcphdr *t1;
760 struct sk_buff *buff;
761 struct flowi6 fl6;
762 struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
763 struct sock *ctl_sk = net->ipv6.tcp_sk;
764 unsigned int tot_len = sizeof(struct tcphdr);
765 struct dst_entry *dst;
766 __be32 *topt;
767
768 if (tsecr)
769 tot_len += TCPOLEN_TSTAMP_ALIGNED;
770 #ifdef CONFIG_TCP_MD5SIG
771 if (key)
772 tot_len += TCPOLEN_MD5SIG_ALIGNED;
773 #endif
774
775 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
776 GFP_ATOMIC);
777 if (!buff)
778 return;
779
780 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
781
782 t1 = (struct tcphdr *) skb_push(buff, tot_len);
783 skb_reset_transport_header(buff);
784
785 /* Swap the send and the receive. */
786 memset(t1, 0, sizeof(*t1));
787 t1->dest = th->source;
788 t1->source = th->dest;
789 t1->doff = tot_len / 4;
790 t1->seq = htonl(seq);
791 t1->ack_seq = htonl(ack);
792 t1->ack = !rst || !th->ack;
793 t1->rst = rst;
794 t1->window = htons(win);
795
796 topt = (__be32 *)(t1 + 1);
797
798 if (tsecr) {
799 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
800 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
801 *topt++ = htonl(tsval);
802 *topt++ = htonl(tsecr);
803 }
804
805 #ifdef CONFIG_TCP_MD5SIG
806 if (key) {
807 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
808 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
809 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
810 &ipv6_hdr(skb)->saddr,
811 &ipv6_hdr(skb)->daddr, t1);
812 }
813 #endif
814
815 memset(&fl6, 0, sizeof(fl6));
816 fl6.daddr = ipv6_hdr(skb)->saddr;
817 fl6.saddr = ipv6_hdr(skb)->daddr;
818 fl6.flowlabel = label;
819
820 buff->ip_summed = CHECKSUM_PARTIAL;
821 buff->csum = 0;
822
823 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
824
825 fl6.flowi6_proto = IPPROTO_TCP;
826 if (rt6_need_strict(&fl6.daddr) && !oif)
827 fl6.flowi6_oif = tcp_v6_iif(skb);
828 else {
829 if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
830 oif = skb->skb_iif;
831
832 fl6.flowi6_oif = oif;
833 }
834
835 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
836 fl6.fl6_dport = t1->dest;
837 fl6.fl6_sport = t1->source;
838 fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
839 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
840
841 /* Pass a socket to ip6_dst_lookup_flow even when the reply is a
842 * RST; the underlying function uses it to retrieve the network
843 * namespace.
844 */
845 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
846 if (!IS_ERR(dst)) {
847 skb_dst_set(buff, dst);
848 ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
849 TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
850 if (rst)
851 TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
852 return;
853 }
854
855 kfree_skb(buff);
856 }
857
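/* Send a RST in reply to a bogus segment. Never answer a RST with a
 * RST; seq/ack_seq are chosen as RFC 793 prescribes, depending on
 * whether the offending segment carried an ACK.
 */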
858 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
859 {
860 const struct tcphdr *th = tcp_hdr(skb);
861 u32 seq = 0, ack_seq = 0;
862 struct tcp_md5sig_key *key = NULL;
863 #ifdef CONFIG_TCP_MD5SIG
864 const __u8 *hash_location = NULL;
865 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
866 unsigned char newhash[16];
867 int genhash;
868 struct sock *sk1 = NULL;
869 #endif
870 int oif;
871
872 if (th->rst)
873 return;
874
875 /* If sk is not NULL, it means we did a successful lookup and the
876 * incoming route was correct. prequeue might have dropped our dst.
877 */
878 if (!sk && !ipv6_unicast_destination(skb))
879 return;
880
881 #ifdef CONFIG_TCP_MD5SIG
882 rcu_read_lock();
883 hash_location = tcp_parse_md5sig_option(th);
884 if (sk && sk_fullsock(sk)) {
885 key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
886 } else if (hash_location) {
887 /*
888 * The active side is gone. Try to find the listening socket through
889 * the source port, and then find the md5 key through that socket.
890 * We do not lose any security here:
891 * the incoming packet is checked against the md5 hash of the key we
892 * find, and no RST is generated if the hash doesn't match.
893 */
894 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
895 &tcp_hashinfo, NULL, 0,
896 &ipv6h->saddr,
897 th->source, &ipv6h->daddr,
898 ntohs(th->source), tcp_v6_iif(skb));
899 if (!sk1)
900 goto out;
901
902 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
903 if (!key)
904 goto out;
905
906 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
907 if (genhash || memcmp(hash_location, newhash, 16) != 0)
908 goto out;
909 }
910 #endif
911
912 if (th->ack)
913 seq = ntohl(th->ack_seq);
914 else
915 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
916 (th->doff << 2);
917
918 oif = sk ? sk->sk_bound_dev_if : 0;
919 tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
920
921 #ifdef CONFIG_TCP_MD5SIG
922 out:
923 rcu_read_unlock();
924 #endif
925 }
926
927 static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
928 u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
929 struct tcp_md5sig_key *key, u8 tclass,
930 __be32 label)
931 {
932 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
933 tclass, label);
934 }
935
936 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
937 {
938 struct inet_timewait_sock *tw = inet_twsk(sk);
939 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
940
941 tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
942 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
943 tcp_time_stamp + tcptw->tw_ts_offset,
944 tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
945 tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
946
947 inet_twsk_put(tw);
948 }
949
950 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
951 struct request_sock *req)
952 {
953 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
954 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
955 */
956 /* RFC 7323 2.3
957 * The window field (SEG.WND) of every outgoing segment, with the
958 * exception of <SYN> segments, MUST be right-shifted by
959 * Rcv.Wind.Shift bits:
960 */
961 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
962 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
963 tcp_rsk(req)->rcv_nxt,
964 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
965 tcp_time_stamp + tcp_rsk(req)->ts_off,
966 req->ts_recent, sk->sk_bound_dev_if,
967 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
968 0, 0);
969 }
970
971
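/* With SYN cookies, a bare ACK (th->syn clear) arriving on a listener
 * may complete a cookie handshake; cookie_v6_check() then reconstructs
 * the request and returns the new child socket.
 */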
972 static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
973 {
974 #ifdef CONFIG_SYN_COOKIES
975 const struct tcphdr *th = tcp_hdr(skb);
976
977 if (!th->syn)
978 sk = cookie_v6_check(sk, skb);
979 #endif
980 return sk;
981 }
982
983 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
984 {
985 if (skb->protocol == htons(ETH_P_IP))
986 return tcp_v4_conn_request(sk, skb);
987
988 if (!ipv6_unicast_destination(skb))
989 goto drop;
990
991 return tcp_conn_request(&tcp6_request_sock_ops,
992 &tcp_request_sock_ipv6_ops, sk, skb);
993
994 drop:
995 tcp_listendrop(sk);
996 return 0; /* don't send reset */
997 }
998
999 static void tcp_v6_restore_cb(struct sk_buff *skb)
1000 {
1001 /* We need to move header back to the beginning if xfrm6_policy_check()
1002 * and tcp_v6_fill_cb() are going to be called again.
1003 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1004 */
1005 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1006 sizeof(struct inet6_skb_parm));
1007 }
1008
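/* Create the child socket once the handshake completes. An IPv4 packet
 * (ETH_P_IP) means a v4-mapped connection: delegate to
 * tcp_v4_syn_recv_sock() and patch the af_ops on the new socket.
 */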
1009 static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1010 struct request_sock *req,
1011 struct dst_entry *dst,
1012 struct request_sock *req_unhash,
1013 bool *own_req)
1014 {
1015 struct inet_request_sock *ireq;
1016 struct ipv6_pinfo *newnp;
1017 const struct ipv6_pinfo *np = inet6_sk(sk);
1018 struct ipv6_txoptions *opt;
1019 struct tcp6_sock *newtcp6sk;
1020 struct inet_sock *newinet;
1021 struct tcp_sock *newtp;
1022 struct sock *newsk;
1023 #ifdef CONFIG_TCP_MD5SIG
1024 struct tcp_md5sig_key *key;
1025 #endif
1026 struct flowi6 fl6;
1027
1028 if (skb->protocol == htons(ETH_P_IP)) {
1029 /*
1030 * v6 mapped
1031 */
1032
1033 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1034 req_unhash, own_req);
1035
1036 if (!newsk)
1037 return NULL;
1038
1039 newtcp6sk = (struct tcp6_sock *)newsk;
1040 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1041
1042 newinet = inet_sk(newsk);
1043 newnp = inet6_sk(newsk);
1044 newtp = tcp_sk(newsk);
1045
1046 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1047
1048 newnp->saddr = newsk->sk_v6_rcv_saddr;
1049
1050 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1051 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1052 #ifdef CONFIG_TCP_MD5SIG
1053 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1054 #endif
1055
1056 newnp->ipv6_ac_list = NULL;
1057 newnp->ipv6_fl_list = NULL;
1058 newnp->pktoptions = NULL;
1059 newnp->opt = NULL;
1060 newnp->mcast_oif = tcp_v6_iif(skb);
1061 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1062 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1063 if (np->repflow)
1064 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1065
1066 /*
1067 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1068 * here, tcp_create_openreq_child now does this for us, see the comment in
1069 * that function for the gory details. -acme
1070 */
1071
1072 /* It is a tricky place. Until this moment the IPv4 tcp code
1073 worked with IPv6 icsk.icsk_af_ops.
1074 Sync it now.
1075 */
1076 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1077
1078 return newsk;
1079 }
1080
1081 ireq = inet_rsk(req);
1082
1083 if (sk_acceptq_is_full(sk))
1084 goto out_overflow;
1085
1086 if (!dst) {
1087 dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
1088 if (!dst)
1089 goto out;
1090 }
1091
1092 newsk = tcp_create_openreq_child(sk, req, skb);
1093 if (!newsk)
1094 goto out_nonewsk;
1095
1096 /*
1097 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1098 * count here, tcp_create_openreq_child now does this for us, see the
1099 * comment in that function for the gory details. -acme
1100 */
1101
1102 newsk->sk_gso_type = SKB_GSO_TCPV6;
1103 ip6_dst_store(newsk, dst, NULL, NULL);
1104 inet6_sk_rx_dst_set(newsk, skb);
1105
1106 newtcp6sk = (struct tcp6_sock *)newsk;
1107 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1108
1109 newtp = tcp_sk(newsk);
1110 newinet = inet_sk(newsk);
1111 newnp = inet6_sk(newsk);
1112
1113 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1114
1115 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1116 newnp->saddr = ireq->ir_v6_loc_addr;
1117 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1118 newsk->sk_bound_dev_if = ireq->ir_iif;
1119
1120 /* Now IPv6 options...
1121
1122 First: no IPv4 options.
1123 */
1124 newinet->inet_opt = NULL;
1125 newnp->ipv6_ac_list = NULL;
1126 newnp->ipv6_fl_list = NULL;
1127
1128 /* Clone RX bits */
1129 newnp->rxopt.all = np->rxopt.all;
1130
1131 newnp->pktoptions = NULL;
1132 newnp->opt = NULL;
1133 newnp->mcast_oif = tcp_v6_iif(skb);
1134 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1135 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1136 if (np->repflow)
1137 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1138
1139 /* Clone native IPv6 options from listening socket (if any)
1140
1141 Yes, keeping a reference count would be much cleverer,
1142 but we do one more thing here: reattach optmem
1143 to newsk.
1144 */
1145 opt = ireq->ipv6_opt;
1146 if (!opt)
1147 opt = rcu_dereference(np->opt);
1148 if (opt) {
1149 opt = ipv6_dup_options(newsk, opt);
1150 RCU_INIT_POINTER(newnp->opt, opt);
1151 }
1152 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1153 if (opt)
1154 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1155 opt->opt_flen;
1156
1157 tcp_ca_openreq_child(newsk, dst);
1158
1159 tcp_sync_mss(newsk, dst_mtu(dst));
1160 newtp->advmss = dst_metric_advmss(dst);
1161 if (tcp_sk(sk)->rx_opt.user_mss &&
1162 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1163 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1164
1165 tcp_initialize_rcv_mss(newsk);
1166
1167 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1168 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1169
1170 #ifdef CONFIG_TCP_MD5SIG
1171 /* Copy over the MD5 key from the original socket */
1172 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
1173 if (key) {
1174 /* We're using one, so create a matching key
1175 * on the newsk structure. If we fail to get
1176 * memory, then we end up not copying the key
1177 * across. Shucks.
1178 */
1179 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1180 AF_INET6, key->key, key->keylen,
1181 sk_gfp_mask(sk, GFP_ATOMIC));
1182 }
1183 #endif
1184
1185 if (__inet_inherit_port(sk, newsk) < 0) {
1186 inet_csk_prepare_forced_close(newsk);
1187 tcp_done(newsk);
1188 goto out;
1189 }
1190 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1191 if (*own_req) {
1192 tcp_move_syn(newtp, req);
1193
1194 /* Clone pktoptions received with SYN, if we own the req */
1195 if (ireq->pktopts) {
1196 newnp->pktoptions = skb_clone(ireq->pktopts,
1197 sk_gfp_mask(sk, GFP_ATOMIC));
1198 consume_skb(ireq->pktopts);
1199 ireq->pktopts = NULL;
1200 if (newnp->pktoptions) {
1201 tcp_v6_restore_cb(newnp->pktoptions);
1202 skb_set_owner_r(newnp->pktoptions, newsk);
1203 }
1204 }
1205 }
1206
1207 return newsk;
1208
1209 out_overflow:
1210 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1211 out_nonewsk:
1212 dst_release(dst);
1213 out:
1214 tcp_listendrop(sk);
1215 return NULL;
1216 }
1217
1218 /* The socket must have its spinlock held when we get
1219 * here, unless it is a TCP_LISTEN socket.
1220 *
1221 * We have a potential double-lock case here, so even when
1222 * doing backlog processing we use the BH locking scheme.
1223 * This is because we cannot sleep with the original spinlock
1224 * held.
1225 */
1226 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1227 {
1228 struct ipv6_pinfo *np = inet6_sk(sk);
1229 struct tcp_sock *tp;
1230 struct sk_buff *opt_skb = NULL;
1231
1232 /* Imagine: socket is IPv6. IPv4 packet arrives,
1233 goes to the IPv4 receive handler and is backlogged.
1234 From the backlog it always goes here. Kerboom...
1235 Fortunately, tcp_rcv_established and rcv_established
1236 handle them correctly, but that is not the case with
1237 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1238 */
1239
1240 if (skb->protocol == htons(ETH_P_IP))
1241 return tcp_v4_do_rcv(sk, skb);
1242
1243 if (tcp_filter(sk, skb))
1244 goto discard;
1245
1246 /*
1247 * socket locking is here for SMP purposes as backlog rcv
1248 * is currently called with bh processing disabled.
1249 */
1250
1251 /* Do Stevens' IPV6_PKTOPTIONS.
1252
1253 Yes, guys, it is the only place in our code where we
1254 can do this without affecting IPv4.
1255 The rest of the code is protocol independent,
1256 and I do not like the idea of uglifying IPv4.
1257 
1258 Actually, the whole idea behind IPV6_PKTOPTIONS
1259 does not look very well thought out. For now we latch
1260 the options received in the last packet enqueued
1261 by tcp. Feel free to propose a better solution.
1262 --ANK (980728)
1263 */
1264 if (np->rxopt.all)
1265 opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
1266
1267 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1268 struct dst_entry *dst = sk->sk_rx_dst;
1269
1270 sock_rps_save_rxhash(sk, skb);
1271 sk_mark_napi_id(sk, skb);
1272 if (dst) {
1273 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1274 dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1275 dst_release(dst);
1276 sk->sk_rx_dst = NULL;
1277 }
1278 }
1279
1280 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1281 if (opt_skb)
1282 goto ipv6_pktoptions;
1283 return 0;
1284 }
1285
1286 if (tcp_checksum_complete(skb))
1287 goto csum_err;
1288
1289 if (sk->sk_state == TCP_LISTEN) {
1290 struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1291
1292 if (!nsk)
1293 goto discard;
1294
1295 if (nsk != sk) {
1296 sock_rps_save_rxhash(nsk, skb);
1297 sk_mark_napi_id(nsk, skb);
1298 if (tcp_child_process(sk, nsk, skb))
1299 goto reset;
1300 if (opt_skb)
1301 __kfree_skb(opt_skb);
1302 return 0;
1303 }
1304 } else
1305 sock_rps_save_rxhash(sk, skb);
1306
1307 if (tcp_rcv_state_process(sk, skb))
1308 goto reset;
1309 if (opt_skb)
1310 goto ipv6_pktoptions;
1311 return 0;
1312
1313 reset:
1314 tcp_v6_send_reset(sk, skb);
1315 discard:
1316 if (opt_skb)
1317 __kfree_skb(opt_skb);
1318 kfree_skb(skb);
1319 return 0;
1320 csum_err:
1321 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1322 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1323 goto discard;
1324
1325
1326 ipv6_pktoptions:
1327 /* You may ask, what is this for?
1328 
1329 1. skb was enqueued by tcp.
1330 2. skb was added to the tail of the read queue, rather than out of order.
1331 3. The socket is not in a passive state.
1332 4. Finally, it really contains options that the user wants to receive.
1333 */
1334 tp = tcp_sk(sk);
1335 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1336 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1337 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1338 np->mcast_oif = tcp_v6_iif(opt_skb);
1339 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1340 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1341 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1342 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1343 if (np->repflow)
1344 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1345 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1346 skb_set_owner_r(opt_skb, sk);
1347 tcp_v6_restore_cb(opt_skb);
1348 opt_skb = xchg(&np->pktoptions, opt_skb);
1349 } else {
1350 __kfree_skb(opt_skb);
1351 opt_skb = xchg(&np->pktoptions, NULL);
1352 }
1353 }
1354
1355 kfree_skb(opt_skb);
1356 return 0;
1357 }
1358
1359 static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1360 const struct tcphdr *th)
1361 {
1362 /* This is tricky: we move IP6CB at its correct location into
1363 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1364 * _decode_session6() uses IP6CB().
1365 * barrier() makes sure compiler won't play aliasing games.
1366 */
1367 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1368 sizeof(struct inet6_skb_parm));
1369 barrier();
1370
1371 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1372 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1373 skb->len - th->doff*4);
1374 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1375 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1376 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1377 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1378 TCP_SKB_CB(skb)->sacked = 0;
1379 }
1380
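/* Main receive entry point, registered as tcpv6_protocol.handler:
 * validate the header and checksum, look up the socket, and dispatch
 * TCP_NEW_SYN_RECV and TCP_TIME_WAIT states specially.
 */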
1381 static int tcp_v6_rcv(struct sk_buff *skb)
1382 {
1383 const struct tcphdr *th;
1384 const struct ipv6hdr *hdr;
1385 bool refcounted;
1386 struct sock *sk;
1387 int ret;
1388 struct net *net = dev_net(skb->dev);
1389
1390 if (skb->pkt_type != PACKET_HOST)
1391 goto discard_it;
1392
1393 /*
1394 * Count it even if it's bad.
1395 */
1396 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1397
1398 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1399 goto discard_it;
1400
1401 th = (const struct tcphdr *)skb->data;
1402
1403 if (unlikely(th->doff < sizeof(struct tcphdr)/4))
1404 goto bad_packet;
1405 if (!pskb_may_pull(skb, th->doff*4))
1406 goto discard_it;
1407
1408 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1409 goto csum_error;
1410
1411 th = (const struct tcphdr *)skb->data;
1412 hdr = ipv6_hdr(skb);
1413
1414 lookup:
1415 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
1416 th->source, th->dest, inet6_iif(skb),
1417 &refcounted);
1418 if (!sk)
1419 goto no_tcp_socket;
1420
1421 process:
1422 if (sk->sk_state == TCP_TIME_WAIT)
1423 goto do_time_wait;
1424
1425 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1426 struct request_sock *req = inet_reqsk(sk);
1427 struct sock *nsk;
1428
1429 sk = req->rsk_listener;
1430 tcp_v6_fill_cb(skb, hdr, th);
1431 if (tcp_v6_inbound_md5_hash(sk, skb)) {
1432 sk_drops_add(sk, skb);
1433 reqsk_put(req);
1434 goto discard_it;
1435 }
1436 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1437 inet_csk_reqsk_queue_drop_and_put(sk, req);
1438 goto lookup;
1439 }
1440 sock_hold(sk);
1441 refcounted = true;
1442 nsk = tcp_check_req(sk, skb, req, false);
1443 if (!nsk) {
1444 reqsk_put(req);
1445 goto discard_and_relse;
1446 }
1447 if (nsk == sk) {
1448 reqsk_put(req);
1449 tcp_v6_restore_cb(skb);
1450 } else if (tcp_child_process(sk, nsk, skb)) {
1451 tcp_v6_send_reset(nsk, skb);
1452 goto discard_and_relse;
1453 } else {
1454 sock_put(sk);
1455 return 0;
1456 }
1457 }
1458 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1459 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1460 goto discard_and_relse;
1461 }
1462
1463 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1464 goto discard_and_relse;
1465
1466 tcp_v6_fill_cb(skb, hdr, th);
1467
1468 if (tcp_v6_inbound_md5_hash(sk, skb))
1469 goto discard_and_relse;
1470
1471 if (tcp_filter(sk, skb))
1472 goto discard_and_relse;
1473 th = (const struct tcphdr *)skb->data;
1474 hdr = ipv6_hdr(skb);
1475
1476 skb->dev = NULL;
1477
1478 if (sk->sk_state == TCP_LISTEN) {
1479 ret = tcp_v6_do_rcv(sk, skb);
1480 goto put_and_return;
1481 }
1482
1483 sk_incoming_cpu_update(sk);
1484
1485 bh_lock_sock_nested(sk);
1486 tcp_segs_in(tcp_sk(sk), skb);
1487 ret = 0;
1488 if (!sock_owned_by_user(sk)) {
1489 if (!tcp_prequeue(sk, skb))
1490 ret = tcp_v6_do_rcv(sk, skb);
1491 } else if (tcp_add_backlog(sk, skb)) {
1492 goto discard_and_relse;
1493 }
1494 bh_unlock_sock(sk);
1495
1496 put_and_return:
1497 if (refcounted)
1498 sock_put(sk);
1499 return ret ? -1 : 0;
1500
1501 no_tcp_socket:
1502 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1503 goto discard_it;
1504
1505 tcp_v6_fill_cb(skb, hdr, th);
1506
1507 if (tcp_checksum_complete(skb)) {
1508 csum_error:
1509 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1510 bad_packet:
1511 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1512 } else {
1513 tcp_v6_send_reset(NULL, skb);
1514 }
1515
1516 discard_it:
1517 kfree_skb(skb);
1518 return 0;
1519
1520 discard_and_relse:
1521 sk_drops_add(sk, skb);
1522 if (refcounted)
1523 sock_put(sk);
1524 goto discard_it;
1525
1526 do_time_wait:
1527 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1528 inet_twsk_put(inet_twsk(sk));
1529 goto discard_it;
1530 }
1531
1532 tcp_v6_fill_cb(skb, hdr, th);
1533
1534 if (tcp_checksum_complete(skb)) {
1535 inet_twsk_put(inet_twsk(sk));
1536 goto csum_error;
1537 }
1538
1539 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1540 case TCP_TW_SYN:
1541 {
1542 struct sock *sk2;
1543
1544 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1545 skb, __tcp_hdrlen(th),
1546 &ipv6_hdr(skb)->saddr, th->source,
1547 &ipv6_hdr(skb)->daddr,
1548 ntohs(th->dest), tcp_v6_iif(skb));
1549 if (sk2) {
1550 struct inet_timewait_sock *tw = inet_twsk(sk);
1551 inet_twsk_deschedule_put(tw);
1552 sk = sk2;
1553 tcp_v6_restore_cb(skb);
1554 refcounted = false;
1555 goto process;
1556 }
1557 /* Fall through to ACK */
1558 }
1559 case TCP_TW_ACK:
1560 tcp_v6_timewait_ack(sk, skb);
1561 break;
1562 case TCP_TW_RST:
1563 tcp_v6_restore_cb(skb);
1564 tcp_v6_send_reset(sk, skb);
1565 inet_twsk_deschedule_put(inet_twsk(sk));
1566 goto discard_it;
1567 case TCP_TW_SUCCESS:
1568 ;
1569 }
1570 goto discard_it;
1571 }
1572
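/* Early demux: look up an established socket at IPv6 input, before
 * routing, so the skb can reuse the socket's cached dst.
 */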
1573 static void tcp_v6_early_demux(struct sk_buff *skb)
1574 {
1575 const struct ipv6hdr *hdr;
1576 const struct tcphdr *th;
1577 struct sock *sk;
1578
1579 if (skb->pkt_type != PACKET_HOST)
1580 return;
1581
1582 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1583 return;
1584
1585 hdr = ipv6_hdr(skb);
1586 th = tcp_hdr(skb);
1587
1588 if (th->doff < sizeof(struct tcphdr) / 4)
1589 return;
1590
1591 /* Note: We use inet6_iif() here, not tcp_v6_iif() */
1592 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1593 &hdr->saddr, th->source,
1594 &hdr->daddr, ntohs(th->dest),
1595 inet6_iif(skb));
1596 if (sk) {
1597 skb->sk = sk;
1598 skb->destructor = sock_edemux;
1599 if (sk_fullsock(sk)) {
1600 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1601
1602 if (dst)
1603 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1604 if (dst &&
1605 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1606 skb_dst_set_noref(skb, dst);
1607 }
1608 }
1609 }
1610
1611 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1612 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1613 .twsk_unique = tcp_twsk_unique,
1614 .twsk_destructor = tcp_twsk_destructor,
1615 };
1616
1617 static const struct inet_connection_sock_af_ops ipv6_specific = {
1618 .queue_xmit = inet6_csk_xmit,
1619 .send_check = tcp_v6_send_check,
1620 .rebuild_header = inet6_sk_rebuild_header,
1621 .sk_rx_dst_set = inet6_sk_rx_dst_set,
1622 .conn_request = tcp_v6_conn_request,
1623 .syn_recv_sock = tcp_v6_syn_recv_sock,
1624 .net_header_len = sizeof(struct ipv6hdr),
1625 .net_frag_header_len = sizeof(struct frag_hdr),
1626 .setsockopt = ipv6_setsockopt,
1627 .getsockopt = ipv6_getsockopt,
1628 .addr2sockaddr = inet6_csk_addr2sockaddr,
1629 .sockaddr_len = sizeof(struct sockaddr_in6),
1630 .bind_conflict = inet6_csk_bind_conflict,
1631 #ifdef CONFIG_COMPAT
1632 .compat_setsockopt = compat_ipv6_setsockopt,
1633 .compat_getsockopt = compat_ipv6_getsockopt,
1634 #endif
1635 .mtu_reduced = tcp_v6_mtu_reduced,
1636 };
1637
1638 #ifdef CONFIG_TCP_MD5SIG
1639 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1640 .md5_lookup = tcp_v6_md5_lookup,
1641 .calc_md5_hash = tcp_v6_md5_hash_skb,
1642 .md5_parse = tcp_v6_parse_md5_keys,
1643 };
1644 #endif
1645
1646 /*
1647 * TCP over IPv4 via INET6 API
1648 */
1649 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1650 .queue_xmit = ip_queue_xmit,
1651 .send_check = tcp_v4_send_check,
1652 .rebuild_header = inet_sk_rebuild_header,
1653 .sk_rx_dst_set = inet_sk_rx_dst_set,
1654 .conn_request = tcp_v6_conn_request,
1655 .syn_recv_sock = tcp_v6_syn_recv_sock,
1656 .net_header_len = sizeof(struct iphdr),
1657 .setsockopt = ipv6_setsockopt,
1658 .getsockopt = ipv6_getsockopt,
1659 .addr2sockaddr = inet6_csk_addr2sockaddr,
1660 .sockaddr_len = sizeof(struct sockaddr_in6),
1661 .bind_conflict = inet6_csk_bind_conflict,
1662 #ifdef CONFIG_COMPAT
1663 .compat_setsockopt = compat_ipv6_setsockopt,
1664 .compat_getsockopt = compat_ipv6_getsockopt,
1665 #endif
1666 .mtu_reduced = tcp_v4_mtu_reduced,
1667 };
1668
1669 #ifdef CONFIG_TCP_MD5SIG
1670 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1671 .md5_lookup = tcp_v4_md5_lookup,
1672 .calc_md5_hash = tcp_v4_md5_hash_skb,
1673 .md5_parse = tcp_v6_parse_md5_keys,
1674 };
1675 #endif
1676
1677 /* NOTE: A lot of things are set to zero explicitly by the call to
1678 * sk_alloc(), so they need not be done here.
1679 */
1680 static int tcp_v6_init_sock(struct sock *sk)
1681 {
1682 struct inet_connection_sock *icsk = inet_csk(sk);
1683
1684 tcp_init_sock(sk);
1685
1686 icsk->icsk_af_ops = &ipv6_specific;
1687
1688 #ifdef CONFIG_TCP_MD5SIG
1689 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1690 #endif
1691
1692 return 0;
1693 }
1694
1695 static void tcp_v6_destroy_sock(struct sock *sk)
1696 {
1697 tcp_v4_destroy_sock(sk);
1698 inet6_destroy_sock(sk);
1699 }
1700
1701 #ifdef CONFIG_PROC_FS
1702 /* Proc filesystem TCPv6 sock list dumping. */
1703 static void get_openreq6(struct seq_file *seq,
1704 const struct request_sock *req, int i)
1705 {
1706 long ttd = req->rsk_timer.expires - jiffies;
1707 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1708 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1709
1710 if (ttd < 0)
1711 ttd = 0;
1712
1713 seq_printf(seq,
1714 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1715 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1716 i,
1717 src->s6_addr32[0], src->s6_addr32[1],
1718 src->s6_addr32[2], src->s6_addr32[3],
1719 inet_rsk(req)->ir_num,
1720 dest->s6_addr32[0], dest->s6_addr32[1],
1721 dest->s6_addr32[2], dest->s6_addr32[3],
1722 ntohs(inet_rsk(req)->ir_rmt_port),
1723 TCP_SYN_RECV,
1724 0, 0, /* could print option size, but that is af dependent. */
1725 1, /* timers active (only the expire timer) */
1726 jiffies_to_clock_t(ttd),
1727 req->num_timeout,
1728 from_kuid_munged(seq_user_ns(seq),
1729 sock_i_uid(req->rsk_listener)),
1730 0, /* non standard timer */
1731 0, /* open_requests have no inode */
1732 0, req);
1733 }
1734
1735 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1736 {
1737 const struct in6_addr *dest, *src;
1738 __u16 destp, srcp;
1739 int timer_active;
1740 unsigned long timer_expires;
1741 const struct inet_sock *inet = inet_sk(sp);
1742 const struct tcp_sock *tp = tcp_sk(sp);
1743 const struct inet_connection_sock *icsk = inet_csk(sp);
1744 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
1745 int rx_queue;
1746 int state;
1747
1748 dest = &sp->sk_v6_daddr;
1749 src = &sp->sk_v6_rcv_saddr;
1750 destp = ntohs(inet->inet_dport);
1751 srcp = ntohs(inet->inet_sport);
1752
1753 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
1754 icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
1755 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
1756 timer_active = 1;
1757 timer_expires = icsk->icsk_timeout;
1758 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1759 timer_active = 4;
1760 timer_expires = icsk->icsk_timeout;
1761 } else if (timer_pending(&sp->sk_timer)) {
1762 timer_active = 2;
1763 timer_expires = sp->sk_timer.expires;
1764 } else {
1765 timer_active = 0;
1766 timer_expires = jiffies;
1767 }
1768
1769 state = sk_state_load(sp);
1770 if (state == TCP_LISTEN)
1771 rx_queue = sp->sk_ack_backlog;
1772 else
1773 /* Because we don't lock the socket,
1774 * we might find a transient negative value.
1775 */
1776 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1777
1778 seq_printf(seq,
1779 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1780 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1781 i,
1782 src->s6_addr32[0], src->s6_addr32[1],
1783 src->s6_addr32[2], src->s6_addr32[3], srcp,
1784 dest->s6_addr32[0], dest->s6_addr32[1],
1785 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1786 state,
1787 tp->write_seq - tp->snd_una,
1788 rx_queue,
1789 timer_active,
1790 jiffies_delta_to_clock_t(timer_expires - jiffies),
1791 icsk->icsk_retransmits,
1792 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1793 icsk->icsk_probes_out,
1794 sock_i_ino(sp),
1795 atomic_read(&sp->sk_refcnt), sp,
1796 jiffies_to_clock_t(icsk->icsk_rto),
1797 jiffies_to_clock_t(icsk->icsk_ack.ato),
1798 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1799 tp->snd_cwnd,
1800 state == TCP_LISTEN ?
1801 fastopenq->max_qlen :
1802 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1803 );
1804 }
1805
1806 static void get_timewait6_sock(struct seq_file *seq,
1807 struct inet_timewait_sock *tw, int i)
1808 {
1809 long delta = tw->tw_timer.expires - jiffies;
1810 const struct in6_addr *dest, *src;
1811 __u16 destp, srcp;
1812
1813 dest = &tw->tw_v6_daddr;
1814 src = &tw->tw_v6_rcv_saddr;
1815 destp = ntohs(tw->tw_dport);
1816 srcp = ntohs(tw->tw_sport);
1817
1818 seq_printf(seq,
1819 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1820 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1821 i,
1822 src->s6_addr32[0], src->s6_addr32[1],
1823 src->s6_addr32[2], src->s6_addr32[3], srcp,
1824 dest->s6_addr32[0], dest->s6_addr32[1],
1825 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1826 tw->tw_substate, 0, 0,
1827 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1828 atomic_read(&tw->tw_refcnt), tw);
1829 }
1830
1831 static int tcp6_seq_show(struct seq_file *seq, void *v)
1832 {
1833 struct tcp_iter_state *st;
1834 struct sock *sk = v;
1835
1836 if (v == SEQ_START_TOKEN) {
1837 seq_puts(seq,
1838 " sl "
1839 "local_address "
1840 "remote_address "
1841 "st tx_queue rx_queue tr tm->when retrnsmt"
1842 " uid timeout inode\n");
1843 goto out;
1844 }
1845 st = seq->private;
1846
1847 if (sk->sk_state == TCP_TIME_WAIT)
1848 get_timewait6_sock(seq, v, st->num);
1849 else if (sk->sk_state == TCP_NEW_SYN_RECV)
1850 get_openreq6(seq, v, st->num);
1851 else
1852 get_tcp6_sock(seq, v, st->num);
1853 out:
1854 return 0;
1855 }
1856
1857 static const struct file_operations tcp6_afinfo_seq_fops = {
1858 .owner = THIS_MODULE,
1859 .open = tcp_seq_open,
1860 .read = seq_read,
1861 .llseek = seq_lseek,
1862 .release = seq_release_net
1863 };
1864
1865 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1866 .name = "tcp6",
1867 .family = AF_INET6,
1868 .seq_fops = &tcp6_afinfo_seq_fops,
1869 .seq_ops = {
1870 .show = tcp6_seq_show,
1871 },
1872 };
1873
1874 int __net_init tcp6_proc_init(struct net *net)
1875 {
1876 return tcp_proc_register(net, &tcp6_seq_afinfo);
1877 }
1878
1879 void tcp6_proc_exit(struct net *net)
1880 {
1881 tcp_proc_unregister(net, &tcp6_seq_afinfo);
1882 }
1883 #endif
1884
1885 struct proto tcpv6_prot = {
1886 .name = "TCPv6",
1887 .owner = THIS_MODULE,
1888 .close = tcp_close,
1889 .connect = tcp_v6_connect,
1890 .disconnect = tcp_disconnect,
1891 .accept = inet_csk_accept,
1892 .ioctl = tcp_ioctl,
1893 .init = tcp_v6_init_sock,
1894 .destroy = tcp_v6_destroy_sock,
1895 .shutdown = tcp_shutdown,
1896 .setsockopt = tcp_setsockopt,
1897 .getsockopt = tcp_getsockopt,
1898 .recvmsg = tcp_recvmsg,
1899 .sendmsg = tcp_sendmsg,
1900 .sendpage = tcp_sendpage,
1901 .backlog_rcv = tcp_v6_do_rcv,
1902 .release_cb = tcp_release_cb,
1903 .hash = inet6_hash,
1904 .unhash = inet_unhash,
1905 .get_port = inet_csk_get_port,
1906 .enter_memory_pressure = tcp_enter_memory_pressure,
1907 .stream_memory_free = tcp_stream_memory_free,
1908 .sockets_allocated = &tcp_sockets_allocated,
1909 .memory_allocated = &tcp_memory_allocated,
1910 .memory_pressure = &tcp_memory_pressure,
1911 .orphan_count = &tcp_orphan_count,
1912 .sysctl_mem = sysctl_tcp_mem,
1913 .sysctl_wmem = sysctl_tcp_wmem,
1914 .sysctl_rmem = sysctl_tcp_rmem,
1915 .max_header = MAX_TCP_HEADER,
1916 .obj_size = sizeof(struct tcp6_sock),
1917 .slab_flags = SLAB_DESTROY_BY_RCU,
1918 .twsk_prot = &tcp6_timewait_sock_ops,
1919 .rsk_prot = &tcp6_request_sock_ops,
1920 .h.hashinfo = &tcp_hashinfo,
1921 .no_autobind = true,
1922 #ifdef CONFIG_COMPAT
1923 .compat_setsockopt = compat_tcp_setsockopt,
1924 .compat_getsockopt = compat_tcp_getsockopt,
1925 #endif
1926 .diag_destroy = tcp_abort,
1927 };
1928
1929 static const struct inet6_protocol tcpv6_protocol = {
1930 .early_demux = tcp_v6_early_demux,
1931 .handler = tcp_v6_rcv,
1932 .err_handler = tcp_v6_err,
1933 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1934 };
1935
1936 static struct inet_protosw tcpv6_protosw = {
1937 .type = SOCK_STREAM,
1938 .protocol = IPPROTO_TCP,
1939 .prot = &tcpv6_prot,
1940 .ops = &inet6_stream_ops,
1941 .flags = INET_PROTOSW_PERMANENT |
1942 INET_PROTOSW_ICSK,
1943 };
1944
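/* Per-netns setup: create the kernel control socket used by
 * tcp_v6_send_response() for RSTs and timewait/request ACKs.
 */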
1945 static int __net_init tcpv6_net_init(struct net *net)
1946 {
1947 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1948 SOCK_RAW, IPPROTO_TCP, net);
1949 }
1950
1951 static void __net_exit tcpv6_net_exit(struct net *net)
1952 {
1953 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
1954 }
1955
1956 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
1957 {
1958 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
1959 }
1960
1961 static struct pernet_operations tcpv6_net_ops = {
1962 .init = tcpv6_net_init,
1963 .exit = tcpv6_net_exit,
1964 .exit_batch = tcpv6_net_exit_batch,
1965 };
1966
1967 int __init tcpv6_init(void)
1968 {
1969 int ret;
1970
1971 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1972 if (ret)
1973 goto out;
1974
1975 /* register inet6 protocol */
1976 ret = inet6_register_protosw(&tcpv6_protosw);
1977 if (ret)
1978 goto out_tcpv6_protocol;
1979
1980 ret = register_pernet_subsys(&tcpv6_net_ops);
1981 if (ret)
1982 goto out_tcpv6_protosw;
1983 out:
1984 return ret;
1985
1986 out_tcpv6_protosw:
1987 inet6_unregister_protosw(&tcpv6_protosw);
1988 out_tcpv6_protocol:
1989 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1990 goto out;
1991 }
1992
1993 void tcpv6_exit(void)
1994 {
1995 unregister_pernet_subsys(&tcpv6_net_ops);
1996 inet6_unregister_protosw(&tcpv6_protosw);
1997 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1998 }