1 /*
2 * TCP over IPv6
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * Based on:
9 * linux/net/ipv4/tcp.c
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
12 *
13 * Fixes:
14 * Hideaki YOSHIFUJI : sin6_scope_id support
15 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
16 * Alexey Kuznetsov allows both IPv4 and IPv6 sockets to bind
17 * a single port at the same time.
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
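/*
 * A minimal, hedged userspace sketch of the IPV6_V6ONLY behaviour noted
 * in the Fixes list above (plain POSIX sockets; not part of this file's
 * build, names are local to the example). With the option cleared, one
 * AF_INET6 listener also accepts v4-mapped IPv4 peers; with it set, a
 * separate AF_INET socket can bind the same port.
 */
#if 0
#include <netinet/in.h>
#include <sys/socket.h>

static int listen_v6(unsigned short port, int v6only)
{
	struct sockaddr_in6 a = { .sin6_family = AF_INET6,
				  .sin6_port = htons(port),
				  .sin6_addr = IN6ADDR_ANY_INIT };
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	/* 0: also accept v4-mapped peers; 1: IPv6 traffic only */
	if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY,
		       &v6only, sizeof(v6only)) ||
	    bind(fd, (struct sockaddr *)&a, sizeof(a)) || listen(fd, 16))
		return -1;
	return fd;
}
#endif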
25
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
34 #include <linux/in.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42 #include <linux/uaccess.h>
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/inet_common.h>
63 #include <net/secure_seq.h>
64 #include <net/busy_poll.h>
65
66 #include <linux/proc_fs.h>
67 #include <linux/seq_file.h>
68
69 #include <crypto/hash.h>
70 #include <linux/scatterlist.h>
71
72 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
73 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
74 struct request_sock *req);
75
76 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
77
78 static const struct inet_connection_sock_af_ops ipv6_mapped;
79 static const struct inet_connection_sock_af_ops ipv6_specific;
80 #ifdef CONFIG_TCP_MD5SIG
81 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
82 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
83 #else
84 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
85 const struct in6_addr *addr)
86 {
87 return NULL;
88 }
89 #endif
90
91 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
92 {
93 struct dst_entry *dst = skb_dst(skb);
94
95 if (dst && dst_hold_safe(dst)) {
96 const struct rt6_info *rt = (const struct rt6_info *)dst;
97
98 sk->sk_rx_dst = dst;
99 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
100 inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
101 }
102 }
103
104 static u32 tcp_v6_init_seq(const struct sk_buff *skb)
105 {
106 return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
107 ipv6_hdr(skb)->saddr.s6_addr32,
108 tcp_hdr(skb)->dest,
109 tcp_hdr(skb)->source);
110 }
111
112 static u32 tcp_v6_init_ts_off(const struct sk_buff *skb)
113 {
114 return secure_tcpv6_ts_off(ipv6_hdr(skb)->daddr.s6_addr32,
115 ipv6_hdr(skb)->saddr.s6_addr32);
116 }
117
118 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
119 int addr_len)
120 {
121 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
122 struct inet_sock *inet = inet_sk(sk);
123 struct inet_connection_sock *icsk = inet_csk(sk);
124 struct ipv6_pinfo *np = inet6_sk(sk);
125 struct tcp_sock *tp = tcp_sk(sk);
126 struct in6_addr *saddr = NULL, *final_p, final;
127 struct ipv6_txoptions *opt;
128 struct flowi6 fl6;
129 struct dst_entry *dst;
130 int addr_type;
131 int err;
132 struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
133
134 if (addr_len < SIN6_LEN_RFC2133)
135 return -EINVAL;
136
137 if (usin->sin6_family != AF_INET6)
138 return -EAFNOSUPPORT;
139
140 memset(&fl6, 0, sizeof(fl6));
141
142 if (np->sndflow) {
143 fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
144 IP6_ECN_flow_init(fl6.flowlabel);
145 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
146 struct ip6_flowlabel *flowlabel;
147 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
148 if (!flowlabel)
149 return -EINVAL;
150 fl6_sock_release(flowlabel);
151 }
152 }
153
154 /*
155 * connect() to INADDR_ANY means loopback (BSD'ism).
156 */
157
158 if (ipv6_addr_any(&usin->sin6_addr)) {
159 if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
160 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
161 &usin->sin6_addr);
162 else
163 usin->sin6_addr = in6addr_loopback;
164 }
165
166 addr_type = ipv6_addr_type(&usin->sin6_addr);
167
168 if (addr_type & IPV6_ADDR_MULTICAST)
169 return -ENETUNREACH;
170
171 if (addr_type&IPV6_ADDR_LINKLOCAL) {
172 if (addr_len >= sizeof(struct sockaddr_in6) &&
173 usin->sin6_scope_id) {
174 /* If an interface was set while binding, the indices
175 * must coincide.
176 */
177 if (sk->sk_bound_dev_if &&
178 sk->sk_bound_dev_if != usin->sin6_scope_id)
179 return -EINVAL;
180
181 sk->sk_bound_dev_if = usin->sin6_scope_id;
182 }
183
184 /* Connecting to a link-local address requires an interface */
185 if (!sk->sk_bound_dev_if)
186 return -EINVAL;
187 }
188
189 if (tp->rx_opt.ts_recent_stamp &&
190 !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
191 tp->rx_opt.ts_recent = 0;
192 tp->rx_opt.ts_recent_stamp = 0;
193 tp->write_seq = 0;
194 }
195
196 sk->sk_v6_daddr = usin->sin6_addr;
197 np->flow_label = fl6.flowlabel;
198
199 /*
200 * TCP over IPv4
201 */
202
203 if (addr_type & IPV6_ADDR_MAPPED) {
204 u32 exthdrlen = icsk->icsk_ext_hdr_len;
205 struct sockaddr_in sin;
206
207 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
208
209 if (__ipv6_only_sock(sk))
210 return -ENETUNREACH;
211
212 sin.sin_family = AF_INET;
213 sin.sin_port = usin->sin6_port;
214 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
215
216 icsk->icsk_af_ops = &ipv6_mapped;
217 sk->sk_backlog_rcv = tcp_v4_do_rcv;
218 #ifdef CONFIG_TCP_MD5SIG
219 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
220 #endif
221
222 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
223
224 if (err) {
225 icsk->icsk_ext_hdr_len = exthdrlen;
226 icsk->icsk_af_ops = &ipv6_specific;
227 sk->sk_backlog_rcv = tcp_v6_do_rcv;
228 #ifdef CONFIG_TCP_MD5SIG
229 tp->af_specific = &tcp_sock_ipv6_specific;
230 #endif
231 goto failure;
232 }
233 np->saddr = sk->sk_v6_rcv_saddr;
234
235 return err;
236 }
237
238 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
239 saddr = &sk->sk_v6_rcv_saddr;
240
241 fl6.flowi6_proto = IPPROTO_TCP;
242 fl6.daddr = sk->sk_v6_daddr;
243 fl6.saddr = saddr ? *saddr : np->saddr;
244 fl6.flowi6_oif = sk->sk_bound_dev_if;
245 fl6.flowi6_mark = sk->sk_mark;
246 fl6.fl6_dport = usin->sin6_port;
247 fl6.fl6_sport = inet->inet_sport;
248 fl6.flowi6_uid = sk->sk_uid;
249
250 opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
251 final_p = fl6_update_dst(&fl6, opt, &final);
252
253 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
254
255 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
256 if (IS_ERR(dst)) {
257 err = PTR_ERR(dst);
258 goto failure;
259 }
260
261 if (!saddr) {
262 saddr = &fl6.saddr;
263 sk->sk_v6_rcv_saddr = *saddr;
264 }
265
266 /* set the source address */
267 np->saddr = *saddr;
268 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
269
270 sk->sk_gso_type = SKB_GSO_TCPV6;
271 ip6_dst_store(sk, dst, NULL, NULL);
272
273 icsk->icsk_ext_hdr_len = 0;
274 if (opt)
275 icsk->icsk_ext_hdr_len = opt->opt_flen +
276 opt->opt_nflen;
277
278 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
279
280 inet->inet_dport = usin->sin6_port;
281
282 tcp_set_state(sk, TCP_SYN_SENT);
283 err = inet6_hash_connect(tcp_death_row, sk);
284 if (err)
285 goto late_failure;
286
287 sk_set_txhash(sk);
288
289 if (likely(!tp->repair)) {
290 if (!tp->write_seq)
291 tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
292 sk->sk_v6_daddr.s6_addr32,
293 inet->inet_sport,
294 inet->inet_dport);
295 tp->tsoffset = secure_tcpv6_ts_off(np->saddr.s6_addr32,
296 sk->sk_v6_daddr.s6_addr32);
297 }
298
299 if (tcp_fastopen_defer_connect(sk, &err))
300 return err;
301 if (err)
302 goto late_failure;
303
304 err = tcp_connect(sk);
305 if (err)
306 goto late_failure;
307
308 return 0;
309
310 late_failure:
311 tcp_set_state(sk, TCP_CLOSE);
312 failure:
313 inet->inet_dport = 0;
314 sk->sk_route_caps = 0;
315 return err;
316 }
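/*
 * Hedged userspace sketch of the IPV6_ADDR_MAPPED branch above: an
 * AF_INET6 socket connecting to a v4-mapped address is switched over to
 * tcp_v4_connect() and the ipv6_mapped ops. The address below is from
 * the documentation range and purely illustrative.
 */
#if 0
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

static int connect_mapped(void)
{
	struct sockaddr_in6 a = { .sin6_family = AF_INET6,
				  .sin6_port = htons(80) };
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	/* ::ffff:192.0.2.1 takes the TCP-over-IPv4 path in tcp_v6_connect() */
	inet_pton(AF_INET6, "::ffff:192.0.2.1", &a.sin6_addr);
	return connect(fd, (struct sockaddr *)&a, sizeof(a));
}
#endif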
317
318 static void tcp_v6_mtu_reduced(struct sock *sk)
319 {
320 struct dst_entry *dst;
321
322 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
323 return;
324
325 dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
326 if (!dst)
327 return;
328
329 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
330 tcp_sync_mss(sk, dst_mtu(dst));
331 tcp_simple_retransmit(sk);
332 }
333 }
334
335 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
336 u8 type, u8 code, int offset, __be32 info)
337 {
338 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
339 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
340 struct net *net = dev_net(skb->dev);
341 struct request_sock *fastopen;
342 struct ipv6_pinfo *np;
343 struct tcp_sock *tp;
344 __u32 seq, snd_una;
345 struct sock *sk;
346 bool fatal;
347 int err;
348
349 sk = __inet6_lookup_established(net, &tcp_hashinfo,
350 &hdr->daddr, th->dest,
351 &hdr->saddr, ntohs(th->source),
352 skb->dev->ifindex);
353
354 if (!sk) {
355 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
356 ICMP6_MIB_INERRORS);
357 return;
358 }
359
360 if (sk->sk_state == TCP_TIME_WAIT) {
361 inet_twsk_put(inet_twsk(sk));
362 return;
363 }
364 seq = ntohl(th->seq);
365 fatal = icmpv6_err_convert(type, code, &err);
366 if (sk->sk_state == TCP_NEW_SYN_RECV)
367 return tcp_req_err(sk, seq, fatal);
368
369 bh_lock_sock(sk);
370 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
371 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
372
373 if (sk->sk_state == TCP_CLOSE)
374 goto out;
375
376 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
377 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
378 goto out;
379 }
380
381 tp = tcp_sk(sk);
382 /* XXX (TFO) - tp->snd_una should be the ISN (tcp_create_openreq_child()) */
383 fastopen = tp->fastopen_rsk;
384 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
385 if (sk->sk_state != TCP_LISTEN &&
386 !between(seq, snd_una, tp->snd_nxt)) {
387 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
388 goto out;
389 }
390
391 np = inet6_sk(sk);
392
393 if (type == NDISC_REDIRECT) {
394 if (!sock_owned_by_user(sk)) {
395 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
396
397 if (dst)
398 dst->ops->redirect(dst, sk, skb);
399 }
400 goto out;
401 }
402
403 if (type == ICMPV6_PKT_TOOBIG) {
404 /* We are not interested in TCP_LISTEN and open_requests
405 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
406 * they should go through unfragmented).
407 */
408 if (sk->sk_state == TCP_LISTEN)
409 goto out;
410
411 if (!ip6_sk_accept_pmtu(sk))
412 goto out;
413
414 tp->mtu_info = ntohl(info);
415 if (!sock_owned_by_user(sk))
416 tcp_v6_mtu_reduced(sk);
417 else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
418 &sk->sk_tsq_flags))
419 sock_hold(sk);
420 goto out;
421 }
422
423
424 /* Might be for a request_sock */
425 switch (sk->sk_state) {
426 case TCP_SYN_SENT:
427 case TCP_SYN_RECV:
428 /* Only in fast or simultaneous open. If a fast open socket is
429 * already accepted, it is treated as a connected one below.
430 */
431 if (fastopen && !fastopen->sk)
432 break;
433
434 if (!sock_owned_by_user(sk)) {
435 sk->sk_err = err;
436 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
437
438 tcp_done(sk);
439 } else
440 sk->sk_err_soft = err;
441 goto out;
442 }
443
444 if (!sock_owned_by_user(sk) && np->recverr) {
445 sk->sk_err = err;
446 sk->sk_error_report(sk);
447 } else
448 sk->sk_err_soft = err;
449
450 out:
451 bh_unlock_sock(sk);
452 sock_put(sk);
453 }
454
455
456 static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
457 struct flowi *fl,
458 struct request_sock *req,
459 struct tcp_fastopen_cookie *foc,
460 enum tcp_synack_type synack_type)
461 {
462 struct inet_request_sock *ireq = inet_rsk(req);
463 struct ipv6_pinfo *np = inet6_sk(sk);
464 struct ipv6_txoptions *opt;
465 struct flowi6 *fl6 = &fl->u.ip6;
466 struct sk_buff *skb;
467 int err = -ENOMEM;
468
469 /* First, grab a route. */
470 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
471 IPPROTO_TCP)) == NULL)
472 goto done;
473
474 skb = tcp_make_synack(sk, dst, req, foc, synack_type);
475
476 if (skb) {
477 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
478 &ireq->ir_v6_rmt_addr);
479
480 fl6->daddr = ireq->ir_v6_rmt_addr;
481 if (np->repflow && ireq->pktopts)
482 fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
483
484 rcu_read_lock();
485 opt = ireq->ipv6_opt;
486 if (!opt)
487 opt = rcu_dereference(np->opt);
488 err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
489 rcu_read_unlock();
490 err = net_xmit_eval(err);
491 }
492
493 done:
494 return err;
495 }
496
497
498 static void tcp_v6_reqsk_destructor(struct request_sock *req)
499 {
500 kfree(inet_rsk(req)->ipv6_opt);
501 kfree_skb(inet_rsk(req)->pktopts);
502 }
503
504 #ifdef CONFIG_TCP_MD5SIG
505 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
506 const struct in6_addr *addr)
507 {
508 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
509 }
510
511 static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
512 const struct sock *addr_sk)
513 {
514 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
515 }
516
517 static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
518 int optlen)
519 {
520 struct tcp_md5sig cmd;
521 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
522
523 if (optlen < sizeof(cmd))
524 return -EINVAL;
525
526 if (copy_from_user(&cmd, optval, sizeof(cmd)))
527 return -EFAULT;
528
529 if (sin6->sin6_family != AF_INET6)
530 return -EINVAL;
531
532 if (!cmd.tcpm_keylen) {
533 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
534 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
535 AF_INET);
536 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
537 AF_INET6);
538 }
539
540 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
541 return -EINVAL;
542
543 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
544 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
545 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
546
547 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
548 AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
549 }
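/*
 * Hedged sketch of the userspace side of tcp_v6_parse_md5_keys(): filling
 * struct tcp_md5sig and installing it with TCP_MD5SIG. The peer address
 * and secret are caller-supplied examples, not values this file defines.
 */
#if 0
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/tcp.h>

static int install_md5_key(int fd, const char *peer, const char *secret)
{
	struct tcp_md5sig md5 = {};
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&md5.tcpm_addr;

	sin6->sin6_family = AF_INET6;
	if (inet_pton(AF_INET6, peer, &sin6->sin6_addr) != 1)
		return -1;
	md5.tcpm_keylen = strlen(secret);  /* must be <= TCP_MD5SIG_MAXKEYLEN */
	memcpy(md5.tcpm_key, secret, md5.tcpm_keylen);
	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}
#endif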
550
551 static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
552 const struct in6_addr *daddr,
553 const struct in6_addr *saddr,
554 const struct tcphdr *th, int nbytes)
555 {
556 struct tcp6_pseudohdr *bp;
557 struct scatterlist sg;
558 struct tcphdr *_th;
559
560 bp = hp->scratch;
561 /* 1. TCP pseudo-header (RFC2460) */
562 bp->saddr = *saddr;
563 bp->daddr = *daddr;
564 bp->protocol = cpu_to_be32(IPPROTO_TCP);
565 bp->len = cpu_to_be32(nbytes);
566
567 _th = (struct tcphdr *)(bp + 1);
568 memcpy(_th, th, sizeof(*th));
569 _th->check = 0;
570
571 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
572 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
573 sizeof(*bp) + sizeof(*th));
574 return crypto_ahash_update(hp->md5_req);
575 }
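/*
 * For orientation, the scratch buffer hashed above is laid out like this
 * (a sketch mirroring struct tcp6_pseudohdr from net/tcp.h; the real
 * definition there is authoritative):
 */
#if 0
struct tcp6_pseudohdr_sketch {
	struct in6_addr	saddr;
	struct in6_addr	daddr;
	__be32		len;		/* TCP header + payload length */
	__be32		protocol;	/* IPPROTO_TCP, padded to 32 bits */
};
#endif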
576
577 static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
578 const struct in6_addr *daddr, struct in6_addr *saddr,
579 const struct tcphdr *th)
580 {
581 struct tcp_md5sig_pool *hp;
582 struct ahash_request *req;
583
584 hp = tcp_get_md5sig_pool();
585 if (!hp)
586 goto clear_hash_noput;
587 req = hp->md5_req;
588
589 if (crypto_ahash_init(req))
590 goto clear_hash;
591 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
592 goto clear_hash;
593 if (tcp_md5_hash_key(hp, key))
594 goto clear_hash;
595 ahash_request_set_crypt(req, NULL, md5_hash, 0);
596 if (crypto_ahash_final(req))
597 goto clear_hash;
598
599 tcp_put_md5sig_pool();
600 return 0;
601
602 clear_hash:
603 tcp_put_md5sig_pool();
604 clear_hash_noput:
605 memset(md5_hash, 0, 16);
606 return 1;
607 }
608
609 static int tcp_v6_md5_hash_skb(char *md5_hash,
610 const struct tcp_md5sig_key *key,
611 const struct sock *sk,
612 const struct sk_buff *skb)
613 {
614 const struct in6_addr *saddr, *daddr;
615 struct tcp_md5sig_pool *hp;
616 struct ahash_request *req;
617 const struct tcphdr *th = tcp_hdr(skb);
618
619 if (sk) { /* valid for establish/request sockets */
620 saddr = &sk->sk_v6_rcv_saddr;
621 daddr = &sk->sk_v6_daddr;
622 } else {
623 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
624 saddr = &ip6h->saddr;
625 daddr = &ip6h->daddr;
626 }
627
628 hp = tcp_get_md5sig_pool();
629 if (!hp)
630 goto clear_hash_noput;
631 req = hp->md5_req;
632
633 if (crypto_ahash_init(req))
634 goto clear_hash;
635
636 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
637 goto clear_hash;
638 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
639 goto clear_hash;
640 if (tcp_md5_hash_key(hp, key))
641 goto clear_hash;
642 ahash_request_set_crypt(req, NULL, md5_hash, 0);
643 if (crypto_ahash_final(req))
644 goto clear_hash;
645
646 tcp_put_md5sig_pool();
647 return 0;
648
649 clear_hash:
650 tcp_put_md5sig_pool();
651 clear_hash_noput:
652 memset(md5_hash, 0, 16);
653 return 1;
654 }
655
656 #endif
657
658 static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
659 const struct sk_buff *skb)
660 {
661 #ifdef CONFIG_TCP_MD5SIG
662 const __u8 *hash_location = NULL;
663 struct tcp_md5sig_key *hash_expected;
664 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
665 const struct tcphdr *th = tcp_hdr(skb);
666 int genhash;
667 u8 newhash[16];
668
669 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
670 hash_location = tcp_parse_md5sig_option(th);
671
672 /* We've parsed the options - do we have a hash? */
673 if (!hash_expected && !hash_location)
674 return false;
675
676 if (hash_expected && !hash_location) {
677 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
678 return true;
679 }
680
681 if (!hash_expected && hash_location) {
682 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
683 return true;
684 }
685
686 /* check the signature */
687 genhash = tcp_v6_md5_hash_skb(newhash,
688 hash_expected,
689 NULL, skb);
690
691 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
692 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
693 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
694 genhash ? "failed" : "mismatch",
695 &ip6h->saddr, ntohs(th->source),
696 &ip6h->daddr, ntohs(th->dest));
697 return true;
698 }
699 #endif
700 return false;
701 }
702
703 static void tcp_v6_init_req(struct request_sock *req,
704 const struct sock *sk_listener,
705 struct sk_buff *skb)
706 {
707 struct inet_request_sock *ireq = inet_rsk(req);
708 const struct ipv6_pinfo *np = inet6_sk(sk_listener);
709
710 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
711 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
712
713 /* So that link locals have meaning */
714 if (!sk_listener->sk_bound_dev_if &&
715 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
716 ireq->ir_iif = tcp_v6_iif(skb);
717
718 if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
719 (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
720 np->rxopt.bits.rxinfo ||
721 np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
722 np->rxopt.bits.rxohlim || np->repflow)) {
723 atomic_inc(&skb->users);
724 ireq->pktopts = skb;
725 }
726 }
727
728 static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
729 struct flowi *fl,
730 const struct request_sock *req)
731 {
732 return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
733 }
734
735 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
736 .family = AF_INET6,
737 .obj_size = sizeof(struct tcp6_request_sock),
738 .rtx_syn_ack = tcp_rtx_synack,
739 .send_ack = tcp_v6_reqsk_send_ack,
740 .destructor = tcp_v6_reqsk_destructor,
741 .send_reset = tcp_v6_send_reset,
742 .syn_ack_timeout = tcp_syn_ack_timeout,
743 };
744
745 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
746 .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
747 sizeof(struct ipv6hdr),
748 #ifdef CONFIG_TCP_MD5SIG
749 .req_md5_lookup = tcp_v6_md5_lookup,
750 .calc_md5_hash = tcp_v6_md5_hash_skb,
751 #endif
752 .init_req = tcp_v6_init_req,
753 #ifdef CONFIG_SYN_COOKIES
754 .cookie_init_seq = cookie_v6_init_sequence,
755 #endif
756 .route_req = tcp_v6_route_req,
757 .init_seq = tcp_v6_init_seq,
758 .init_ts_off = tcp_v6_init_ts_off,
759 .send_synack = tcp_v6_send_synack,
760 };
761
762 static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
763 u32 ack, u32 win, u32 tsval, u32 tsecr,
764 int oif, struct tcp_md5sig_key *key, int rst,
765 u8 tclass, __be32 label)
766 {
767 const struct tcphdr *th = tcp_hdr(skb);
768 struct tcphdr *t1;
769 struct sk_buff *buff;
770 struct flowi6 fl6;
771 struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
772 struct sock *ctl_sk = net->ipv6.tcp_sk;
773 unsigned int tot_len = sizeof(struct tcphdr);
774 struct dst_entry *dst;
775 __be32 *topt;
776
777 if (tsecr)
778 tot_len += TCPOLEN_TSTAMP_ALIGNED;
779 #ifdef CONFIG_TCP_MD5SIG
780 if (key)
781 tot_len += TCPOLEN_MD5SIG_ALIGNED;
782 #endif
783
784 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
785 GFP_ATOMIC);
786 if (!buff)
787 return;
788
789 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
790
791 t1 = (struct tcphdr *) skb_push(buff, tot_len);
792 skb_reset_transport_header(buff);
793
794 /* Swap the send and the receive. */
795 memset(t1, 0, sizeof(*t1));
796 t1->dest = th->source;
797 t1->source = th->dest;
798 t1->doff = tot_len / 4;
799 t1->seq = htonl(seq);
800 t1->ack_seq = htonl(ack);
801 t1->ack = !rst || !th->ack;
802 t1->rst = rst;
803 t1->window = htons(win);
804
805 topt = (__be32 *)(t1 + 1);
806
807 if (tsecr) {
808 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
809 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
810 *topt++ = htonl(tsval);
811 *topt++ = htonl(tsecr);
812 }
813
814 #ifdef CONFIG_TCP_MD5SIG
815 if (key) {
816 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
817 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
818 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
819 &ipv6_hdr(skb)->saddr,
820 &ipv6_hdr(skb)->daddr, t1);
821 }
822 #endif
823
824 memset(&fl6, 0, sizeof(fl6));
825 fl6.daddr = ipv6_hdr(skb)->saddr;
826 fl6.saddr = ipv6_hdr(skb)->daddr;
827 fl6.flowlabel = label;
828
829 buff->ip_summed = CHECKSUM_PARTIAL;
830 buff->csum = 0;
831
832 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
833
834 fl6.flowi6_proto = IPPROTO_TCP;
835 if (rt6_need_strict(&fl6.daddr) && !oif)
836 fl6.flowi6_oif = tcp_v6_iif(skb);
837 else {
838 if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
839 oif = skb->skb_iif;
840
841 fl6.flowi6_oif = oif;
842 }
843
844 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
845 fl6.fl6_dport = t1->dest;
846 fl6.fl6_sport = t1->source;
847 fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
848 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
849
850 /* Pass a socket to ip6_dst_lookup even if it is for an RST;
851 * the underlying function will use it to retrieve the network
852 * namespace.
853 */
854 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
855 if (!IS_ERR(dst)) {
856 skb_dst_set(buff, dst);
857 ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
858 TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
859 if (rst)
860 TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
861 return;
862 }
863
864 kfree_skb(buff);
865 }
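/*
 * Worked example of the tot_len/doff arithmetic above: a reply carrying
 * both options is 20 bytes of base header plus TCPOLEN_TSTAMP_ALIGNED (12)
 * plus TCPOLEN_MD5SIG_ALIGNED (20), i.e. tot_len == 52 and t1->doff == 13.
 */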
866
867 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
868 {
869 const struct tcphdr *th = tcp_hdr(skb);
870 u32 seq = 0, ack_seq = 0;
871 struct tcp_md5sig_key *key = NULL;
872 #ifdef CONFIG_TCP_MD5SIG
873 const __u8 *hash_location = NULL;
874 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
875 unsigned char newhash[16];
876 int genhash;
877 struct sock *sk1 = NULL;
878 #endif
879 int oif;
880
881 if (th->rst)
882 return;
883
884 /* If sk is not NULL, it means we did a successful lookup and the incoming
885 * route had to be correct. The prequeue might have dropped our dst.
886 */
887 if (!sk && !ipv6_unicast_destination(skb))
888 return;
889
890 #ifdef CONFIG_TCP_MD5SIG
891 rcu_read_lock();
892 hash_location = tcp_parse_md5sig_option(th);
893 if (sk && sk_fullsock(sk)) {
894 key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
895 } else if (hash_location) {
896 /*
897 * The active side is gone. Try to find the listening socket via the
898 * source port, and then find the md5 key through that socket.
899 * We do not lose security here:
900 * the incoming packet is checked against the key we find, and
901 * no RST is generated if the md5 hash doesn't match.
902 */
903 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
904 &tcp_hashinfo, NULL, 0,
905 &ipv6h->saddr,
906 th->source, &ipv6h->daddr,
907 ntohs(th->source), tcp_v6_iif(skb));
908 if (!sk1)
909 goto out;
910
911 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
912 if (!key)
913 goto out;
914
915 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
916 if (genhash || memcmp(hash_location, newhash, 16) != 0)
917 goto out;
918 }
919 #endif
920
921 if (th->ack)
922 seq = ntohl(th->ack_seq);
923 else
924 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
925 (th->doff << 2);
926
927 oif = sk ? sk->sk_bound_dev_if : 0;
928 tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
929
930 #ifdef CONFIG_TCP_MD5SIG
931 out:
932 rcu_read_unlock();
933 #endif
934 }
935
936 static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
937 u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
938 struct tcp_md5sig_key *key, u8 tclass,
939 __be32 label)
940 {
941 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
942 tclass, label);
943 }
944
945 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
946 {
947 struct inet_timewait_sock *tw = inet_twsk(sk);
948 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
949
950 tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
951 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
952 tcp_time_stamp + tcptw->tw_ts_offset,
953 tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
954 tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
955
956 inet_twsk_put(tw);
957 }
958
959 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
960 struct request_sock *req)
961 {
962 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
963 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
964 */
965 /* RFC 7323 2.3
966 * The window field (SEG.WND) of every outgoing segment, with the
967 * exception of <SYN> segments, MUST be right-shifted by
968 * Rcv.Wind.Shift bits:
969 */
970 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
971 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
972 tcp_rsk(req)->rcv_nxt,
973 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
974 tcp_time_stamp + tcp_rsk(req)->ts_off,
975 req->ts_recent, sk->sk_bound_dev_if,
976 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
977 0, 0);
978 }
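/*
 * A small sketch of the RFC 7323 shift applied above (helper name is
 * local to this example): with req->rsk_rcv_wnd == 262144 and
 * rcv_wscale == 7, the advertised SEG.WND is 262144 >> 7 == 2048, and
 * the peer scales it back up on receipt.
 */
#if 0
static u16 advertised_wnd(u32 rcv_wnd, u8 rcv_wscale)
{
	return (u16)(rcv_wnd >> rcv_wscale);	/* e.g. 262144 >> 7 == 2048 */
}
#endif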
979
980
981 static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
982 {
983 #ifdef CONFIG_SYN_COOKIES
984 const struct tcphdr *th = tcp_hdr(skb);
985
986 if (!th->syn)
987 sk = cookie_v6_check(sk, skb);
988 #endif
989 return sk;
990 }
991
992 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
993 {
994 if (skb->protocol == htons(ETH_P_IP))
995 return tcp_v4_conn_request(sk, skb);
996
997 if (!ipv6_unicast_destination(skb))
998 goto drop;
999
1000 return tcp_conn_request(&tcp6_request_sock_ops,
1001 &tcp_request_sock_ipv6_ops, sk, skb);
1002
1003 drop:
1004 tcp_listendrop(sk);
1005 return 0; /* don't send reset */
1006 }
1007
1008 static void tcp_v6_restore_cb(struct sk_buff *skb)
1009 {
1010 /* We need to move header back to the beginning if xfrm6_policy_check()
1011 * and tcp_v6_fill_cb() are going to be called again.
1012 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1013 */
1014 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1015 sizeof(struct inet6_skb_parm));
1016 }
1017
1018 static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1019 struct request_sock *req,
1020 struct dst_entry *dst,
1021 struct request_sock *req_unhash,
1022 bool *own_req)
1023 {
1024 struct inet_request_sock *ireq;
1025 struct ipv6_pinfo *newnp;
1026 const struct ipv6_pinfo *np = inet6_sk(sk);
1027 struct ipv6_txoptions *opt;
1028 struct tcp6_sock *newtcp6sk;
1029 struct inet_sock *newinet;
1030 struct tcp_sock *newtp;
1031 struct sock *newsk;
1032 #ifdef CONFIG_TCP_MD5SIG
1033 struct tcp_md5sig_key *key;
1034 #endif
1035 struct flowi6 fl6;
1036
1037 if (skb->protocol == htons(ETH_P_IP)) {
1038 /*
1039 * v6 mapped
1040 */
1041
1042 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1043 req_unhash, own_req);
1044
1045 if (!newsk)
1046 return NULL;
1047
1048 newtcp6sk = (struct tcp6_sock *)newsk;
1049 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1050
1051 newinet = inet_sk(newsk);
1052 newnp = inet6_sk(newsk);
1053 newtp = tcp_sk(newsk);
1054
1055 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1056
1057 newnp->saddr = newsk->sk_v6_rcv_saddr;
1058
1059 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1060 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1061 #ifdef CONFIG_TCP_MD5SIG
1062 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1063 #endif
1064
1065 newnp->ipv6_ac_list = NULL;
1066 newnp->ipv6_fl_list = NULL;
1067 newnp->pktoptions = NULL;
1068 newnp->opt = NULL;
1069 newnp->mcast_oif = tcp_v6_iif(skb);
1070 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1071 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1072 if (np->repflow)
1073 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1074
1075 /*
1076 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1077 * here, tcp_create_openreq_child now does this for us, see the comment in
1078 * that function for the gory details. -acme
1079 */
1080
1081 /* This is a tricky place. Until this moment the IPv4 TCP code
1082 worked with the IPv6 icsk.icsk_af_ops.
1083 Sync it now.
1084 */
1085 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1086
1087 return newsk;
1088 }
1089
1090 ireq = inet_rsk(req);
1091
1092 if (sk_acceptq_is_full(sk))
1093 goto out_overflow;
1094
1095 if (!dst) {
1096 dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
1097 if (!dst)
1098 goto out;
1099 }
1100
1101 newsk = tcp_create_openreq_child(sk, req, skb);
1102 if (!newsk)
1103 goto out_nonewsk;
1104
1105 /*
1106 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1107 * count here, tcp_create_openreq_child now does this for us, see the
1108 * comment in that function for the gory details. -acme
1109 */
1110
1111 newsk->sk_gso_type = SKB_GSO_TCPV6;
1112 ip6_dst_store(newsk, dst, NULL, NULL);
1113 inet6_sk_rx_dst_set(newsk, skb);
1114
1115 newtcp6sk = (struct tcp6_sock *)newsk;
1116 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1117
1118 newtp = tcp_sk(newsk);
1119 newinet = inet_sk(newsk);
1120 newnp = inet6_sk(newsk);
1121
1122 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1123
1124 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1125 newnp->saddr = ireq->ir_v6_loc_addr;
1126 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1127 newsk->sk_bound_dev_if = ireq->ir_iif;
1128
1129 /* Now IPv6 options...
1130
1131 First: no IPv4 options.
1132 */
1133 newinet->inet_opt = NULL;
1134 newnp->ipv6_ac_list = NULL;
1135 newnp->ipv6_fl_list = NULL;
1136
1137 /* Clone RX bits */
1138 newnp->rxopt.all = np->rxopt.all;
1139
1140 newnp->pktoptions = NULL;
1141 newnp->opt = NULL;
1142 newnp->mcast_oif = tcp_v6_iif(skb);
1143 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1144 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1145 if (np->repflow)
1146 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1147
1148 /* Clone native IPv6 options from listening socket (if any)
1149
1150 Yes, keeping a reference count would be much more clever,
1151 but we do one more thing here: we reattach optmem
1152 to newsk.
1153 */
1154 opt = ireq->ipv6_opt;
1155 if (!opt)
1156 opt = rcu_dereference(np->opt);
1157 if (opt) {
1158 opt = ipv6_dup_options(newsk, opt);
1159 RCU_INIT_POINTER(newnp->opt, opt);
1160 }
1161 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1162 if (opt)
1163 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1164 opt->opt_flen;
1165
1166 tcp_ca_openreq_child(newsk, dst);
1167
1168 tcp_sync_mss(newsk, dst_mtu(dst));
1169 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1170
1171 tcp_initialize_rcv_mss(newsk);
1172
1173 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1174 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1175
1176 #ifdef CONFIG_TCP_MD5SIG
1177 /* Copy over the MD5 key from the original socket */
1178 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
1179 if (key) {
1180 /* We're using one, so create a matching key
1181 * on the newsk structure. If we fail to get
1182 * memory, then we end up not copying the key
1183 * across. Shucks.
1184 */
1185 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1186 AF_INET6, key->key, key->keylen,
1187 sk_gfp_mask(sk, GFP_ATOMIC));
1188 }
1189 #endif
1190
1191 if (__inet_inherit_port(sk, newsk) < 0) {
1192 inet_csk_prepare_forced_close(newsk);
1193 tcp_done(newsk);
1194 goto out;
1195 }
1196 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1197 if (*own_req) {
1198 tcp_move_syn(newtp, req);
1199
1200 /* Clone pktoptions received with SYN, if we own the req */
1201 if (ireq->pktopts) {
1202 newnp->pktoptions = skb_clone(ireq->pktopts,
1203 sk_gfp_mask(sk, GFP_ATOMIC));
1204 consume_skb(ireq->pktopts);
1205 ireq->pktopts = NULL;
1206 if (newnp->pktoptions) {
1207 tcp_v6_restore_cb(newnp->pktoptions);
1208 skb_set_owner_r(newnp->pktoptions, newsk);
1209 }
1210 }
1211 }
1212
1213 return newsk;
1214
1215 out_overflow:
1216 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1217 out_nonewsk:
1218 dst_release(dst);
1219 out:
1220 tcp_listendrop(sk);
1221 return NULL;
1222 }
1223
1224 /* The socket must have its spinlock held when we get
1225 * here, unless it is a TCP_LISTEN socket.
1226 *
1227 * We have a potential double-lock case here, so even when
1228 * doing backlog processing we use the BH locking scheme.
1229 * This is because we cannot sleep with the original spinlock
1230 * held.
1231 */
1232 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1233 {
1234 struct ipv6_pinfo *np = inet6_sk(sk);
1235 struct tcp_sock *tp;
1236 struct sk_buff *opt_skb = NULL;
1237
1238 /* Imagine: socket is IPv6. IPv4 packet arrives,
1239 goes to the IPv4 receive handler and is backlogged.
1240 From backlog it always goes here. Kerboom...
1241 Fortunately, tcp_rcv_established and rcv_established
1242 handle them correctly, but that is not the case with
1243 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1244 */
1245
1246 if (skb->protocol == htons(ETH_P_IP))
1247 return tcp_v4_do_rcv(sk, skb);
1248
1249 if (tcp_filter(sk, skb))
1250 goto discard;
1251
1252 /*
1253 * socket locking is here for SMP purposes as backlog rcv
1254 * is currently called with bh processing disabled.
1255 */
1256
1257 /* Do Stevens' IPV6_PKTOPTIONS.
1258
1259 Yes, guys, it is the only place in our code where we
1260 can do this without affecting IPv4.
1261 The rest of the code is protocol independent,
1262 and I do not like the idea of uglifying IPv4.
1263 
1264 Actually, the whole idea behind IPV6_PKTOPTIONS
1265 does not look very well thought out. For now we latch
1266 the options received in the last packet enqueued
1267 by tcp. Feel free to propose a better solution.
1268 --ANK (980728)
1269 */
1270 if (np->rxopt.all)
1271 opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
1272
1273 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1274 struct dst_entry *dst = sk->sk_rx_dst;
1275
1276 sock_rps_save_rxhash(sk, skb);
1277 sk_mark_napi_id(sk, skb);
1278 if (dst) {
1279 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1280 dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1281 dst_release(dst);
1282 sk->sk_rx_dst = NULL;
1283 }
1284 }
1285
1286 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1287 if (opt_skb)
1288 goto ipv6_pktoptions;
1289 return 0;
1290 }
1291
1292 if (tcp_checksum_complete(skb))
1293 goto csum_err;
1294
1295 if (sk->sk_state == TCP_LISTEN) {
1296 struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1297
1298 if (!nsk)
1299 goto discard;
1300
1301 if (nsk != sk) {
1302 if (tcp_child_process(sk, nsk, skb))
1303 goto reset;
1304 if (opt_skb)
1305 __kfree_skb(opt_skb);
1306 return 0;
1307 }
1308 } else
1309 sock_rps_save_rxhash(sk, skb);
1310
1311 if (tcp_rcv_state_process(sk, skb))
1312 goto reset;
1313 if (opt_skb)
1314 goto ipv6_pktoptions;
1315 return 0;
1316
1317 reset:
1318 tcp_v6_send_reset(sk, skb);
1319 discard:
1320 if (opt_skb)
1321 __kfree_skb(opt_skb);
1322 kfree_skb(skb);
1323 return 0;
1324 csum_err:
1325 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1326 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1327 goto discard;
1328
1329
1330 ipv6_pktoptions:
1331 /* You may ask, what is this?
1332
1333 1. skb was enqueued by tcp.
1334 2. The skb was added to the tail of the read queue, not out of order.
1335 3. The socket is not in a passive state.
1336 4. Finally, it really contains options the user wants to receive.
1337 */
1338 tp = tcp_sk(sk);
1339 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1340 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1341 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1342 np->mcast_oif = tcp_v6_iif(opt_skb);
1343 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1344 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1345 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1346 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1347 if (np->repflow)
1348 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1349 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1350 skb_set_owner_r(opt_skb, sk);
1351 tcp_v6_restore_cb(opt_skb);
1352 opt_skb = xchg(&np->pktoptions, opt_skb);
1353 } else {
1354 __kfree_skb(opt_skb);
1355 opt_skb = xchg(&np->pktoptions, NULL);
1356 }
1357 }
1358
1359 kfree_skb(opt_skb);
1360 return 0;
1361 }
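/*
 * Hedged userspace sketch of what arms the np->rxopt path above: enabling
 * IPV6_RECVPKTINFO (Stevens' API) is one of the bits that makes this
 * function latch the options of the most recently queued segment into
 * np->pktoptions.
 */
#if 0
#include <netinet/in.h>
#include <sys/socket.h>

static int arm_rxpktinfo(int fd)
{
	int on = 1;

	return setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &on, sizeof(on));
}
#endif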
1362
1363 static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1364 const struct tcphdr *th)
1365 {
1366 /* This is tricky: we move IP6CB at its correct location into
1367 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1368 * _decode_session6() uses IP6CB().
1369 * barrier() makes sure compiler won't play aliasing games.
1370 */
1371 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1372 sizeof(struct inet6_skb_parm));
1373 barrier();
1374
1375 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1376 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1377 skb->len - th->doff*4);
1378 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1379 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1380 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1381 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1382 TCP_SKB_CB(skb)->sacked = 0;
1383 }
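/*
 * Worked example of the end_seq arithmetic above: a bare SYN consumes one
 * sequence number (end_seq == seq + 1), while a 100-byte segment carrying
 * FIN consumes 101 (seq + 0 + 1 + 100).
 */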
1384
1385 static int tcp_v6_rcv(struct sk_buff *skb)
1386 {
1387 const struct tcphdr *th;
1388 const struct ipv6hdr *hdr;
1389 bool refcounted;
1390 struct sock *sk;
1391 int ret;
1392 struct net *net = dev_net(skb->dev);
1393
1394 if (skb->pkt_type != PACKET_HOST)
1395 goto discard_it;
1396
1397 /*
1398 * Count it even if it's bad.
1399 */
1400 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1401
1402 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1403 goto discard_it;
1404
1405 th = (const struct tcphdr *)skb->data;
1406
1407 if (unlikely(th->doff < sizeof(struct tcphdr)/4))
1408 goto bad_packet;
1409 if (!pskb_may_pull(skb, th->doff*4))
1410 goto discard_it;
1411
1412 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1413 goto csum_error;
1414
1415 th = (const struct tcphdr *)skb->data;
1416 hdr = ipv6_hdr(skb);
1417
1418 lookup:
1419 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
1420 th->source, th->dest, inet6_iif(skb),
1421 &refcounted);
1422 if (!sk)
1423 goto no_tcp_socket;
1424
1425 process:
1426 if (sk->sk_state == TCP_TIME_WAIT)
1427 goto do_time_wait;
1428
1429 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1430 struct request_sock *req = inet_reqsk(sk);
1431 struct sock *nsk;
1432
1433 sk = req->rsk_listener;
1434 tcp_v6_fill_cb(skb, hdr, th);
1435 if (tcp_v6_inbound_md5_hash(sk, skb)) {
1436 sk_drops_add(sk, skb);
1437 reqsk_put(req);
1438 goto discard_it;
1439 }
1440 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1441 inet_csk_reqsk_queue_drop_and_put(sk, req);
1442 goto lookup;
1443 }
1444 sock_hold(sk);
1445 refcounted = true;
1446 nsk = tcp_check_req(sk, skb, req, false);
1447 if (!nsk) {
1448 reqsk_put(req);
1449 goto discard_and_relse;
1450 }
1451 if (nsk == sk) {
1452 reqsk_put(req);
1453 tcp_v6_restore_cb(skb);
1454 } else if (tcp_child_process(sk, nsk, skb)) {
1455 tcp_v6_send_reset(nsk, skb);
1456 goto discard_and_relse;
1457 } else {
1458 sock_put(sk);
1459 return 0;
1460 }
1461 }
1462 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1463 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1464 goto discard_and_relse;
1465 }
1466
1467 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1468 goto discard_and_relse;
1469
1470 tcp_v6_fill_cb(skb, hdr, th);
1471
1472 if (tcp_v6_inbound_md5_hash(sk, skb))
1473 goto discard_and_relse;
1474
1475 if (tcp_filter(sk, skb))
1476 goto discard_and_relse;
1477 th = (const struct tcphdr *)skb->data;
1478 hdr = ipv6_hdr(skb);
1479
1480 skb->dev = NULL;
1481
1482 if (sk->sk_state == TCP_LISTEN) {
1483 ret = tcp_v6_do_rcv(sk, skb);
1484 goto put_and_return;
1485 }
1486
1487 sk_incoming_cpu_update(sk);
1488
1489 bh_lock_sock_nested(sk);
1490 tcp_segs_in(tcp_sk(sk), skb);
1491 ret = 0;
1492 if (!sock_owned_by_user(sk)) {
1493 if (!tcp_prequeue(sk, skb))
1494 ret = tcp_v6_do_rcv(sk, skb);
1495 } else if (tcp_add_backlog(sk, skb)) {
1496 goto discard_and_relse;
1497 }
1498 bh_unlock_sock(sk);
1499
1500 put_and_return:
1501 if (refcounted)
1502 sock_put(sk);
1503 return ret ? -1 : 0;
1504
1505 no_tcp_socket:
1506 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1507 goto discard_it;
1508
1509 tcp_v6_fill_cb(skb, hdr, th);
1510
1511 if (tcp_checksum_complete(skb)) {
1512 csum_error:
1513 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1514 bad_packet:
1515 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1516 } else {
1517 tcp_v6_send_reset(NULL, skb);
1518 }
1519
1520 discard_it:
1521 kfree_skb(skb);
1522 return 0;
1523
1524 discard_and_relse:
1525 sk_drops_add(sk, skb);
1526 if (refcounted)
1527 sock_put(sk);
1528 goto discard_it;
1529
1530 do_time_wait:
1531 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1532 inet_twsk_put(inet_twsk(sk));
1533 goto discard_it;
1534 }
1535
1536 tcp_v6_fill_cb(skb, hdr, th);
1537
1538 if (tcp_checksum_complete(skb)) {
1539 inet_twsk_put(inet_twsk(sk));
1540 goto csum_error;
1541 }
1542
1543 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1544 case TCP_TW_SYN:
1545 {
1546 struct sock *sk2;
1547
1548 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1549 skb, __tcp_hdrlen(th),
1550 &ipv6_hdr(skb)->saddr, th->source,
1551 &ipv6_hdr(skb)->daddr,
1552 ntohs(th->dest), tcp_v6_iif(skb));
1553 if (sk2) {
1554 struct inet_timewait_sock *tw = inet_twsk(sk);
1555 inet_twsk_deschedule_put(tw);
1556 sk = sk2;
1557 tcp_v6_restore_cb(skb);
1558 refcounted = false;
1559 goto process;
1560 }
1561 /* Fall through to ACK */
1562 }
1563 case TCP_TW_ACK:
1564 tcp_v6_timewait_ack(sk, skb);
1565 break;
1566 case TCP_TW_RST:
1567 tcp_v6_restore_cb(skb);
1568 tcp_v6_send_reset(sk, skb);
1569 inet_twsk_deschedule_put(inet_twsk(sk));
1570 goto discard_it;
1571 case TCP_TW_SUCCESS:
1572 ;
1573 }
1574 goto discard_it;
1575 }
1576
1577 static void tcp_v6_early_demux(struct sk_buff *skb)
1578 {
1579 const struct ipv6hdr *hdr;
1580 const struct tcphdr *th;
1581 struct sock *sk;
1582
1583 if (skb->pkt_type != PACKET_HOST)
1584 return;
1585
1586 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1587 return;
1588
1589 hdr = ipv6_hdr(skb);
1590 th = tcp_hdr(skb);
1591
1592 if (th->doff < sizeof(struct tcphdr) / 4)
1593 return;
1594
1595 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
1596 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1597 &hdr->saddr, th->source,
1598 &hdr->daddr, ntohs(th->dest),
1599 inet6_iif(skb));
1600 if (sk) {
1601 skb->sk = sk;
1602 skb->destructor = sock_edemux;
1603 if (sk_fullsock(sk)) {
1604 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1605
1606 if (dst)
1607 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1608 if (dst &&
1609 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1610 skb_dst_set_noref(skb, dst);
1611 }
1612 }
1613 }
1614
1615 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1616 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1617 .twsk_unique = tcp_twsk_unique,
1618 .twsk_destructor = tcp_twsk_destructor,
1619 };
1620
1621 static const struct inet_connection_sock_af_ops ipv6_specific = {
1622 .queue_xmit = inet6_csk_xmit,
1623 .send_check = tcp_v6_send_check,
1624 .rebuild_header = inet6_sk_rebuild_header,
1625 .sk_rx_dst_set = inet6_sk_rx_dst_set,
1626 .conn_request = tcp_v6_conn_request,
1627 .syn_recv_sock = tcp_v6_syn_recv_sock,
1628 .net_header_len = sizeof(struct ipv6hdr),
1629 .net_frag_header_len = sizeof(struct frag_hdr),
1630 .setsockopt = ipv6_setsockopt,
1631 .getsockopt = ipv6_getsockopt,
1632 .addr2sockaddr = inet6_csk_addr2sockaddr,
1633 .sockaddr_len = sizeof(struct sockaddr_in6),
1634 #ifdef CONFIG_COMPAT
1635 .compat_setsockopt = compat_ipv6_setsockopt,
1636 .compat_getsockopt = compat_ipv6_getsockopt,
1637 #endif
1638 .mtu_reduced = tcp_v6_mtu_reduced,
1639 };
1640
1641 #ifdef CONFIG_TCP_MD5SIG
1642 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1643 .md5_lookup = tcp_v6_md5_lookup,
1644 .calc_md5_hash = tcp_v6_md5_hash_skb,
1645 .md5_parse = tcp_v6_parse_md5_keys,
1646 };
1647 #endif
1648
1649 /*
1650 * TCP over IPv4 via INET6 API
1651 */
1652 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1653 .queue_xmit = ip_queue_xmit,
1654 .send_check = tcp_v4_send_check,
1655 .rebuild_header = inet_sk_rebuild_header,
1656 .sk_rx_dst_set = inet_sk_rx_dst_set,
1657 .conn_request = tcp_v6_conn_request,
1658 .syn_recv_sock = tcp_v6_syn_recv_sock,
1659 .net_header_len = sizeof(struct iphdr),
1660 .setsockopt = ipv6_setsockopt,
1661 .getsockopt = ipv6_getsockopt,
1662 .addr2sockaddr = inet6_csk_addr2sockaddr,
1663 .sockaddr_len = sizeof(struct sockaddr_in6),
1664 #ifdef CONFIG_COMPAT
1665 .compat_setsockopt = compat_ipv6_setsockopt,
1666 .compat_getsockopt = compat_ipv6_getsockopt,
1667 #endif
1668 .mtu_reduced = tcp_v4_mtu_reduced,
1669 };
1670
1671 #ifdef CONFIG_TCP_MD5SIG
1672 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1673 .md5_lookup = tcp_v4_md5_lookup,
1674 .calc_md5_hash = tcp_v4_md5_hash_skb,
1675 .md5_parse = tcp_v6_parse_md5_keys,
1676 };
1677 #endif
1678
1679 /* NOTE: A lot of things are set to zero explicitly by the call to
1680 * sk_alloc(), so they need not be done here.
1681 */
1682 static int tcp_v6_init_sock(struct sock *sk)
1683 {
1684 struct inet_connection_sock *icsk = inet_csk(sk);
1685
1686 tcp_init_sock(sk);
1687
1688 icsk->icsk_af_ops = &ipv6_specific;
1689
1690 #ifdef CONFIG_TCP_MD5SIG
1691 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1692 #endif
1693
1694 return 0;
1695 }
1696
1697 static void tcp_v6_destroy_sock(struct sock *sk)
1698 {
1699 tcp_v4_destroy_sock(sk);
1700 inet6_destroy_sock(sk);
1701 }
1702
1703 #ifdef CONFIG_PROC_FS
1704 /* Proc filesystem TCPv6 sock list dumping. */
1705 static void get_openreq6(struct seq_file *seq,
1706 const struct request_sock *req, int i)
1707 {
1708 long ttd = req->rsk_timer.expires - jiffies;
1709 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1710 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1711
1712 if (ttd < 0)
1713 ttd = 0;
1714
1715 seq_printf(seq,
1716 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1717 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1718 i,
1719 src->s6_addr32[0], src->s6_addr32[1],
1720 src->s6_addr32[2], src->s6_addr32[3],
1721 inet_rsk(req)->ir_num,
1722 dest->s6_addr32[0], dest->s6_addr32[1],
1723 dest->s6_addr32[2], dest->s6_addr32[3],
1724 ntohs(inet_rsk(req)->ir_rmt_port),
1725 TCP_SYN_RECV,
1726 0, 0, /* could print option size, but that is af dependent. */
1727 1, /* timers active (only the expire timer) */
1728 jiffies_to_clock_t(ttd),
1729 req->num_timeout,
1730 from_kuid_munged(seq_user_ns(seq),
1731 sock_i_uid(req->rsk_listener)),
1732 0, /* non standard timer */
1733 0, /* open_requests have no inode */
1734 0, req);
1735 }
1736
1737 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1738 {
1739 const struct in6_addr *dest, *src;
1740 __u16 destp, srcp;
1741 int timer_active;
1742 unsigned long timer_expires;
1743 const struct inet_sock *inet = inet_sk(sp);
1744 const struct tcp_sock *tp = tcp_sk(sp);
1745 const struct inet_connection_sock *icsk = inet_csk(sp);
1746 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
1747 int rx_queue;
1748 int state;
1749
1750 dest = &sp->sk_v6_daddr;
1751 src = &sp->sk_v6_rcv_saddr;
1752 destp = ntohs(inet->inet_dport);
1753 srcp = ntohs(inet->inet_sport);
1754
1755 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
1756 icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
1757 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
1758 timer_active = 1;
1759 timer_expires = icsk->icsk_timeout;
1760 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1761 timer_active = 4;
1762 timer_expires = icsk->icsk_timeout;
1763 } else if (timer_pending(&sp->sk_timer)) {
1764 timer_active = 2;
1765 timer_expires = sp->sk_timer.expires;
1766 } else {
1767 timer_active = 0;
1768 timer_expires = jiffies;
1769 }
1770
1771 state = sk_state_load(sp);
1772 if (state == TCP_LISTEN)
1773 rx_queue = sp->sk_ack_backlog;
1774 else
1775 /* Because we don't lock the socket,
1776 * we might find a transient negative value.
1777 */
1778 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1779
1780 seq_printf(seq,
1781 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1782 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1783 i,
1784 src->s6_addr32[0], src->s6_addr32[1],
1785 src->s6_addr32[2], src->s6_addr32[3], srcp,
1786 dest->s6_addr32[0], dest->s6_addr32[1],
1787 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1788 state,
1789 tp->write_seq - tp->snd_una,
1790 rx_queue,
1791 timer_active,
1792 jiffies_delta_to_clock_t(timer_expires - jiffies),
1793 icsk->icsk_retransmits,
1794 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1795 icsk->icsk_probes_out,
1796 sock_i_ino(sp),
1797 atomic_read(&sp->sk_refcnt), sp,
1798 jiffies_to_clock_t(icsk->icsk_rto),
1799 jiffies_to_clock_t(icsk->icsk_ack.ato),
1800 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1801 tp->snd_cwnd,
1802 state == TCP_LISTEN ?
1803 fastopenq->max_qlen :
1804 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1805 );
1806 }
1807
1808 static void get_timewait6_sock(struct seq_file *seq,
1809 struct inet_timewait_sock *tw, int i)
1810 {
1811 long delta = tw->tw_timer.expires - jiffies;
1812 const struct in6_addr *dest, *src;
1813 __u16 destp, srcp;
1814
1815 dest = &tw->tw_v6_daddr;
1816 src = &tw->tw_v6_rcv_saddr;
1817 destp = ntohs(tw->tw_dport);
1818 srcp = ntohs(tw->tw_sport);
1819
1820 seq_printf(seq,
1821 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1822 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1823 i,
1824 src->s6_addr32[0], src->s6_addr32[1],
1825 src->s6_addr32[2], src->s6_addr32[3], srcp,
1826 dest->s6_addr32[0], dest->s6_addr32[1],
1827 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1828 tw->tw_substate, 0, 0,
1829 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1830 atomic_read(&tw->tw_refcnt), tw);
1831 }
1832
1833 static int tcp6_seq_show(struct seq_file *seq, void *v)
1834 {
1835 struct tcp_iter_state *st;
1836 struct sock *sk = v;
1837
1838 if (v == SEQ_START_TOKEN) {
1839 seq_puts(seq,
1840 " sl "
1841 "local_address "
1842 "remote_address "
1843 "st tx_queue rx_queue tr tm->when retrnsmt"
1844 " uid timeout inode\n");
1845 goto out;
1846 }
1847 st = seq->private;
1848
1849 if (sk->sk_state == TCP_TIME_WAIT)
1850 get_timewait6_sock(seq, v, st->num);
1851 else if (sk->sk_state == TCP_NEW_SYN_RECV)
1852 get_openreq6(seq, v, st->num);
1853 else
1854 get_tcp6_sock(seq, v, st->num);
1855 out:
1856 return 0;
1857 }
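/*
 * Hedged sketch of consuming the /proc/net/tcp6 format emitted above:
 * each address is four 32-bit words printed as 32 hex digits and ports
 * are hex. Field handling here is illustrative, not a complete parser.
 */
#if 0
#include <stdio.h>

static int parse_tcp6_line(const char *line)
{
	unsigned int sl, st, local[4], remote[4], lport, rport;

	return sscanf(line, " %u: %8x%8x%8x%8x:%x %8x%8x%8x%8x:%x %x",
		      &sl, &local[0], &local[1], &local[2], &local[3], &lport,
		      &remote[0], &remote[1], &remote[2], &remote[3], &rport,
		      &st) == 12 ? 0 : -1;
}
#endif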
1858
1859 static const struct file_operations tcp6_afinfo_seq_fops = {
1860 .owner = THIS_MODULE,
1861 .open = tcp_seq_open,
1862 .read = seq_read,
1863 .llseek = seq_lseek,
1864 .release = seq_release_net
1865 };
1866
1867 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1868 .name = "tcp6",
1869 .family = AF_INET6,
1870 .seq_fops = &tcp6_afinfo_seq_fops,
1871 .seq_ops = {
1872 .show = tcp6_seq_show,
1873 },
1874 };
1875
1876 int __net_init tcp6_proc_init(struct net *net)
1877 {
1878 return tcp_proc_register(net, &tcp6_seq_afinfo);
1879 }
1880
1881 void tcp6_proc_exit(struct net *net)
1882 {
1883 tcp_proc_unregister(net, &tcp6_seq_afinfo);
1884 }
1885 #endif
1886
1887 struct proto tcpv6_prot = {
1888 .name = "TCPv6",
1889 .owner = THIS_MODULE,
1890 .close = tcp_close,
1891 .connect = tcp_v6_connect,
1892 .disconnect = tcp_disconnect,
1893 .accept = inet_csk_accept,
1894 .ioctl = tcp_ioctl,
1895 .init = tcp_v6_init_sock,
1896 .destroy = tcp_v6_destroy_sock,
1897 .shutdown = tcp_shutdown,
1898 .setsockopt = tcp_setsockopt,
1899 .getsockopt = tcp_getsockopt,
1900 .keepalive = tcp_set_keepalive,
1901 .recvmsg = tcp_recvmsg,
1902 .sendmsg = tcp_sendmsg,
1903 .sendpage = tcp_sendpage,
1904 .backlog_rcv = tcp_v6_do_rcv,
1905 .release_cb = tcp_release_cb,
1906 .hash = inet6_hash,
1907 .unhash = inet_unhash,
1908 .get_port = inet_csk_get_port,
1909 .enter_memory_pressure = tcp_enter_memory_pressure,
1910 .stream_memory_free = tcp_stream_memory_free,
1911 .sockets_allocated = &tcp_sockets_allocated,
1912 .memory_allocated = &tcp_memory_allocated,
1913 .memory_pressure = &tcp_memory_pressure,
1914 .orphan_count = &tcp_orphan_count,
1915 .sysctl_mem = sysctl_tcp_mem,
1916 .sysctl_wmem = sysctl_tcp_wmem,
1917 .sysctl_rmem = sysctl_tcp_rmem,
1918 .max_header = MAX_TCP_HEADER,
1919 .obj_size = sizeof(struct tcp6_sock),
1920 .slab_flags = SLAB_TYPESAFE_BY_RCU,
1921 .twsk_prot = &tcp6_timewait_sock_ops,
1922 .rsk_prot = &tcp6_request_sock_ops,
1923 .h.hashinfo = &tcp_hashinfo,
1924 .no_autobind = true,
1925 #ifdef CONFIG_COMPAT
1926 .compat_setsockopt = compat_tcp_setsockopt,
1927 .compat_getsockopt = compat_tcp_getsockopt,
1928 #endif
1929 .diag_destroy = tcp_abort,
1930 };
1931
1932 static struct inet6_protocol tcpv6_protocol = {
1933 .early_demux = tcp_v6_early_demux,
1934 .early_demux_handler = tcp_v6_early_demux,
1935 .handler = tcp_v6_rcv,
1936 .err_handler = tcp_v6_err,
1937 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1938 };
1939
1940 static struct inet_protosw tcpv6_protosw = {
1941 .type = SOCK_STREAM,
1942 .protocol = IPPROTO_TCP,
1943 .prot = &tcpv6_prot,
1944 .ops = &inet6_stream_ops,
1945 .flags = INET_PROTOSW_PERMANENT |
1946 INET_PROTOSW_ICSK,
1947 };
1948
1949 static int __net_init tcpv6_net_init(struct net *net)
1950 {
1951 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1952 SOCK_RAW, IPPROTO_TCP, net);
1953 }
1954
1955 static void __net_exit tcpv6_net_exit(struct net *net)
1956 {
1957 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
1958 }
1959
1960 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
1961 {
1962 inet_twsk_purge(&tcp_hashinfo, AF_INET6);
1963 }
1964
1965 static struct pernet_operations tcpv6_net_ops = {
1966 .init = tcpv6_net_init,
1967 .exit = tcpv6_net_exit,
1968 .exit_batch = tcpv6_net_exit_batch,
1969 };
1970
1971 int __init tcpv6_init(void)
1972 {
1973 int ret;
1974
1975 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1976 if (ret)
1977 goto out;
1978
1979 /* register inet6 protocol */
1980 ret = inet6_register_protosw(&tcpv6_protosw);
1981 if (ret)
1982 goto out_tcpv6_protocol;
1983
1984 ret = register_pernet_subsys(&tcpv6_net_ops);
1985 if (ret)
1986 goto out_tcpv6_protosw;
1987 out:
1988 return ret;
1989
1990 out_tcpv6_protosw:
1991 inet6_unregister_protosw(&tcpv6_protosw);
1992 out_tcpv6_protocol:
1993 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1994 goto out;
1995 }
1996
1997 void tcpv6_exit(void)
1998 {
1999 unregister_pernet_subsys(&tcpv6_net_ops);
2000 inet6_unregister_protosw(&tcpv6_protosw);
2001 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2002 }