/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req);

static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static struct inet_connection_sock_af_ops ipv6_mapped;
static struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   struct in6_addr *addr)
{
	return NULL;
}
#endif

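/* Hash an IPv6 TCP socket into the established/listening tables; v4-mapped
 * sockets are handed to the IPv4 hash routine instead.
 */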
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk);
		local_bh_enable();
	}
}

static __inline__ __sum16 tcp_v6_check(int len,
				       struct in6_addr *saddr,
				       struct in6_addr *daddr,
				       __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

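/* Active open: resolve the destination, pick a source address and flow
 * label, fall back to the IPv4 connect path for v4-mapped destinations,
 * then hash the socket and send the SYN via tcp_connect().
 */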
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return(-EAFNOSUPPORT);

	memset(&fl, 0, sizeof(fl));

	if (np->sndflow) {
		fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if(ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if(addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
				      inet->saddr);
			ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
				      inet->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src,
		       (saddr ? saddr : &np->saddr));
	fl.oif = sk->sk_bound_dev_if;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->sport;

	if (np->opt && np->opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	security_sk_classify_flow(sk, &fl);

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
	if (err < 0) {
		if (err == -EREMOTE)
			err = ip6_dst_blackhole(sk, &dst, &fl);
		if (err < 0)
			goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->sport,
							     inet->dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

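/* ICMPv6 error handler: map the error onto the owning socket, handle PMTU
 * discovery for ICMPV6_PKT_TOOBIG, and report other errors to the socket
 * or the matching request_sock.
 */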
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		int type, int code, int offset, __be32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi fl;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle the rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl, 0, sizeof(fl));
			fl.proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
			fl.oif = sk->sk_bound_dev_if;
			fl.fl_ip_dport = inet->dport;
			fl.fl_ip_sport = inet->sport;
			security_skb_classify_flow(skb, &fl);

			if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
				sk->sk_err_soft = -err;
				goto out;
			}

			if ((err = xfrm_lookup(net, &dst, &fl, sk, 0)) < 0) {
				sk->sk_err_soft = -err;
				goto out;
			}

		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}


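/* Build and transmit a SYN+ACK for a pending connection request, routing
 * it through any routing header present in the listener's options.
 */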
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff * skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr * final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int err = -1;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
	ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
	fl.fl6_flowlabel = 0;
	fl.oif = treq->iif;
	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
	fl.fl_ip_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, &fl);

	opt = np->opt;
	if (opt && opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto done;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);
	if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
		goto done;

	skb = tcp_make_synack(sk, dst, req);
	if (skb) {
		struct tcphdr *th = tcp_hdr(skb);

		th->check = tcp_v6_check(skb->len,
					 &treq->loc_addr, &treq->rmt_addr,
					 csum_partial(th, skb->len, skb->csum));

		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl, opt, 0);
		err = net_xmit_eval(err);
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}

static inline void syn_flood_warning(struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies)
		printk(KERN_INFO
		       "TCPv6: Possible SYN flooding on port %d. "
		       "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
	else
#endif
		printk(KERN_INFO
		       "TCPv6: Possible SYN flooding on port %d. "
		       "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet6_rsk(req)->pktopts);
}

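/* TCP-MD5 (RFC 2385) support: per-destination key lookup, add/delete and
 * setsockopt parsing for IPv6 peers, plus the pseudo-header hashing
 * helpers used when signing and verifying segments.
 */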
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   struct in6_addr *addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	BUG_ON(tp == NULL);

	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
		return NULL;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
			return &tp->md5sig_info->keys6[i].base;
	}
	return NULL;
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}

static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
			     char *newkey, u8 newkeylen)
{
	/* Add key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp6_md5sig_key *keys;

	key = tcp_v6_md5_do_lookup(sk, peer);
	if (key) {
		/* modify existing entry - just update that one */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		/* reallocate new list if current one is full. */
		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		}
		if (tcp_alloc_md5sig_pool() == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);

			if (!keys) {
				tcp_free_md5sig_pool();
				kfree(newkey);
				return -ENOMEM;
			}

			if (tp->md5sig_info->entries6)
				memmove(keys, tp->md5sig_info->keys6,
					(sizeof (tp->md5sig_info->keys6[0]) *
					 tp->md5sig_info->entries6));

			kfree(tp->md5sig_info->keys6);
			tp->md5sig_info->keys6 = keys;
			tp->md5sig_info->alloced6++;
		}

		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
			       peer);
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;

		tp->md5sig_info->entries6++;
	}
	return 0;
}

static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, __u8 newkeylen)
{
	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}

static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
			/* Free the key */
			kfree(tp->md5sig_info->keys6[i].base.key);
			tp->md5sig_info->entries6--;

			if (tp->md5sig_info->entries6 == 0) {
				kfree(tp->md5sig_info->keys6);
				tp->md5sig_info->keys6 = NULL;
				tp->md5sig_info->alloced6 = 0;
			} else {
				/* shrink the database */
				if (tp->md5sig_info->entries6 != i)
					memmove(&tp->md5sig_info->keys6[i],
						&tp->md5sig_info->keys6[i+1],
						(tp->md5sig_info->entries6 - i)
						* sizeof (tp->md5sig_info->keys6[0]));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}

static void tcp_v6_clear_md5_list (struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (tp->md5sig_info->entries6) {
		for (i = 0; i < tp->md5sig_info->entries6; i++)
			kfree(tp->md5sig_info->keys6[i].base.key);
		tp->md5sig_info->entries6 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys6);
	tp->md5sig_info->keys6 = NULL;
	tp->md5sig_info->alloced6 = 0;

	if (tp->md5sig_info->entries4) {
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys4);
	tp->md5sig_info->keys4 = NULL;
	tp->md5sig_info->alloced4 = 0;
}

static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
				  int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		tp->md5sig_info = p;
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
					 newkey, cmd.tcpm_keylen);
	}
	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
}

static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					struct in6_addr *daddr,
					struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	ipv6_addr_copy(&bp->saddr, saddr);
	ipv6_addr_copy(&bp->daddr, daddr);
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       struct in6_addr *daddr, struct in6_addr *saddr,
			       struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       struct sock *sk, struct request_sock *req,
			       struct sk_buff *skb)
{
	struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
{
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash %s for (%pI6, %u)->(%pI6, %u)\n",
			       genhash ? "failed" : "mismatch",
			       &ip6h->saddr, ntohs(th->source),
			       &ip6h->daddr, ntohs(th->dest));
		}
		return 1;
	}
	return 0;
}
#endif

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_send_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset
};

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
};
#endif

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

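/* Transmit-side checksum helpers: fill in the TCP checksum (or set up
 * checksum offload) using the IPv6 pseudo-header.
 */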
static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
					    csum_partial(th, th->doff<<2,
							 skb->csum));
	}
}

static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
				     IPPROTO_TCP, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;
	return 0;
}

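/* GRO receive/complete hooks: verify (or flag for later verification) the
 * checksum against the IPv6 pseudo-header and hand the segment to the
 * generic TCP GRO engine.
 */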
struct sk_buff **tcp6_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}
EXPORT_SYMBOL(tcp6_gro_receive);

int tcp6_gro_complete(struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

	return tcp_gro_complete(skb);
}
EXPORT_SYMBOL(tcp6_gro_complete);

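/* Build a bare reply segment (used for RSTs and timewait/reqsk ACKs) with
 * optional timestamp and MD5 options, and transmit it through the
 * per-namespace control socket.
 */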
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 ts, struct tcp_md5sig_key *key, int rst)
{
	struct tcphdr *th = tcp_hdr(skb), *t1;
	struct sk_buff *buff;
	struct flowi fl;
	struct net *net = dev_net(skb->dst->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	__be32 *topt;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	buff->csum = csum_partial(t1, tot_len, 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    tot_len, IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;
	security_skb_classify_flow(skb, &fl);

	/* Pass a socket to ip6_dst_lookup even when it is for an RST;
	 * the underlying function uses it to retrieve the network
	 * namespace.
	 */
	if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) {
		if (xfrm_lookup(net, &buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
			if (rst)
				TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
			return;
		}
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	if (sk)
		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
}

static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
}


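/* For a segment hitting a listening socket, look for a matching connection
 * request or an already established child socket; otherwise fall back to
 * SYN-cookie validation when enabled.
 */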
static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

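/* Passive open: validate an incoming SYN, allocate and initialise a
 * request_sock (optionally answering with a SYN cookie under load) and
 * reply with a SYN+ACK.
 */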
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_options_received tmp_opt;
	struct tcp_sock *tp = tcp_sk(sk);
	struct request_sock *req = NULL;
	__u32 isn = TCP_SKB_CB(skb)->when;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0
#endif

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			syn_flood_warning(skb);
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies)
			want_cookie = 1;
		else
#endif
		goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
	if (!want_cookie)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (want_cookie) {
		isn = cookie_v6_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}
		treq->iif = sk->sk_bound_dev_if;

		/* So that link locals have meaning */
		if (!sk->sk_bound_dev_if &&
		    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
			treq->iif = inet6_iif(skb);

		isn = tcp_v6_init_sequence(skb);
	}

	tcp_rsk(req)->snt_isn = isn;

	security_inet_conn_request(sk, skb, req);

	if (tcp_v6_send_synack(sk, req))
		goto drop;

	if (!want_cookie) {
		inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
		return 0;
	}

drop:
	if (req)
		reqsk_free(req);

	return 0; /* don't send reset */
}

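/* Create the child socket once the three-way handshake completes, handling
 * the v4-mapped case and copying IPv6 options, routing state and (if
 * configured) the MD5 key over to the new socket.
 */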
static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
			      newinet->daddr);

		ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
			      newinet->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = inet6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);
	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (dst == NULL) {
		struct in6_addr *final_p = NULL, final;
		struct flowi fl;

		memset(&fl, 0, sizeof(fl));
		fl.proto = IPPROTO_TCP;
		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}
		ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
		fl.oif = sk->sk_bound_dev_if;
		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
		fl.fl_ip_sport = inet_rsk(req)->loc_port;
		security_req_classify_flow(req, &fl);

		if (ip6_dst_lookup(sk, &dst, &fl))
			goto out;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->opt = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt = NULL;
	newnp->mcast_oif = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we do one more thing here: reattach optmem
	   to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

	newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr,
					  newkey, key->keylen);
	}
#endif

	__inet6_hash(newsk);
	__inet_inherit_port(sk, newsk);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return NULL;
}

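/* Validate the TCP checksum of an incoming segment: trust hardware
 * CHECKSUM_COMPLETE where it matches, verify short packets immediately,
 * and leave longer packets for later completion on the copy path.
 */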
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash (sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought out. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if(nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

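/* Main IPv6 TCP receive routine: validate the header and checksum, look up
 * the owning socket and either process the segment directly, queue it to
 * the prequeue/backlog, or handle TIME_WAIT and unowned segments.
 */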
static int tcp_v6_rcv(struct sk_buff *skb)
{
	struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:

	/*
	 *	Discard frame
	 */

	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

static int tcp_v6_remember_stamp(struct sock *sk)
{
	/* Alas, not yet... */
	return 0;
}

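/*
 *	Connection-level callbacks for native TCP-over-IPv6 sockets.
 */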
8292a17a 1761static struct inet_connection_sock_af_ops ipv6_specific = {
543d9cfe
ACM
1762 .queue_xmit = inet6_csk_xmit,
1763 .send_check = tcp_v6_send_check,
1764 .rebuild_header = inet6_sk_rebuild_header,
1765 .conn_request = tcp_v6_conn_request,
1766 .syn_recv_sock = tcp_v6_syn_recv_sock,
1767 .remember_stamp = tcp_v6_remember_stamp,
1768 .net_header_len = sizeof(struct ipv6hdr),
1769 .setsockopt = ipv6_setsockopt,
1770 .getsockopt = ipv6_getsockopt,
1771 .addr2sockaddr = inet6_csk_addr2sockaddr,
1772 .sockaddr_len = sizeof(struct sockaddr_in6),
ab1e0a13 1773 .bind_conflict = inet6_csk_bind_conflict,
3fdadf7d 1774#ifdef CONFIG_COMPAT
543d9cfe
ACM
1775 .compat_setsockopt = compat_ipv6_setsockopt,
1776 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1777#endif
1da177e4
LT
1778};
1779
cfb6eeb4 1780#ifdef CONFIG_TCP_MD5SIG
a928630a 1781static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
cfb6eeb4 1782 .md5_lookup = tcp_v6_md5_lookup,
49a72dfb 1783 .calc_md5_hash = tcp_v6_md5_hash_skb,
cfb6eeb4
YH
1784 .md5_add = tcp_v6_md5_add_func,
1785 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1786};
a928630a 1787#endif
cfb6eeb4 1788
1da177e4
LT
1789/*
1790 * TCP over IPv4 via INET6 API
1791 */
1792
8292a17a 1793static struct inet_connection_sock_af_ops ipv6_mapped = {
543d9cfe
ACM
1794 .queue_xmit = ip_queue_xmit,
1795 .send_check = tcp_v4_send_check,
1796 .rebuild_header = inet_sk_rebuild_header,
1797 .conn_request = tcp_v6_conn_request,
1798 .syn_recv_sock = tcp_v6_syn_recv_sock,
1799 .remember_stamp = tcp_v4_remember_stamp,
1800 .net_header_len = sizeof(struct iphdr),
1801 .setsockopt = ipv6_setsockopt,
1802 .getsockopt = ipv6_getsockopt,
1803 .addr2sockaddr = inet6_csk_addr2sockaddr,
1804 .sockaddr_len = sizeof(struct sockaddr_in6),
ab1e0a13 1805 .bind_conflict = inet6_csk_bind_conflict,
3fdadf7d 1806#ifdef CONFIG_COMPAT
543d9cfe
ACM
1807 .compat_setsockopt = compat_ipv6_setsockopt,
1808 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1809#endif
1da177e4
LT
1810};
1811
cfb6eeb4 1812#ifdef CONFIG_TCP_MD5SIG
a928630a 1813static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
cfb6eeb4 1814 .md5_lookup = tcp_v4_md5_lookup,
49a72dfb 1815 .calc_md5_hash = tcp_v4_md5_hash_skb,
cfb6eeb4
YH
1816 .md5_add = tcp_v6_md5_add_func,
1817 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1818};
a928630a 1819#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);

        skb_queue_head_init(&tp->out_of_order_queue);
        tcp_init_xmit_timers(sk);
        tcp_prequeue_init(tp);

        icsk->icsk_rto = TCP_TIMEOUT_INIT;
        tp->mdev = TCP_TIMEOUT_INIT;

        /* So many TCP implementations out there (incorrectly) count the
         * initial SYN frame in their delayed-ACK and congestion control
         * algorithms that we must have the following bandaid to talk
         * efficiently to them.  -DaveM
         */
        tp->snd_cwnd = 2;

        /* See draft-stevens-tcpca-spec-01 for discussion of the
         * initialization of these values.
         */
        tp->snd_ssthresh = 0x7fffffff;
        tp->snd_cwnd_clamp = ~0;
        tp->mss_cache = 536;

        tp->reordering = sysctl_tcp_reordering;

        sk->sk_state = TCP_CLOSE;

        icsk->icsk_af_ops = &ipv6_specific;
        icsk->icsk_ca_ops = &tcp_init_congestion_ops;
        icsk->icsk_sync_mss = tcp_sync_mss;
        sk->sk_write_space = sk_stream_write_space;
        sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

#ifdef CONFIG_TCP_MD5SIG
        tp->af_specific = &tcp_sock_ipv6_specific;
#endif

        sk->sk_sndbuf = sysctl_tcp_wmem[1];
        sk->sk_rcvbuf = sysctl_tcp_rmem[1];

        local_bh_disable();
        percpu_counter_inc(&tcp_sockets_allocated);
        local_bh_enable();

        return 0;
}
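
/*
 * Editor's illustrative sketch (not part of the kernel source): the init
 * hook above copies the middle ("default") values of sysctl_tcp_wmem /
 * sysctl_tcp_rmem into sk_sndbuf / sk_rcvbuf.  On a stock system the
 * defaults reported by getsockopt() on a fresh TCP socket should therefore
 * match net.ipv4.tcp_wmem / tcp_rmem; exact numbers are system-dependent.
 */
#if 0 /* illustrative userspace example, not kernel code */
#include <stdio.h>
#include <unistd.h>
#include <netinet/in.h>
#include <sys/socket.h>

int main(void)
{
        int fd = socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP);
        int sndbuf = 0, rcvbuf = 0;
        socklen_t len = sizeof(int);

        if (fd < 0)
                return 1;

        /* Report the per-socket buffer limits inherited from the sysctls. */
        getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sndbuf, &len);
        len = sizeof(int);
        getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf, &len);

        printf("default sndbuf=%d rcvbuf=%d\n", sndbuf, rcvbuf);
        close(fd);
        return 0;
}
#endif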

static void tcp_v6_destroy_sock(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
        /* Clean up the MD5 key list */
        if (tcp_sk(sk)->md5sig_info)
                tcp_v6_clear_md5_list(sk);
#endif
        tcp_v4_destroy_sock(sk);
        inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
                         struct sock *sk, struct request_sock *req, int i, int uid)
{
        int ttd = req->expires - jiffies;
        struct in6_addr *src = &inet6_rsk(req)->loc_addr;
        struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

        if (ttd < 0)
                ttd = 0;

        seq_printf(seq,
                   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
                   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
                   i,
                   src->s6_addr32[0], src->s6_addr32[1],
                   src->s6_addr32[2], src->s6_addr32[3],
                   ntohs(inet_rsk(req)->loc_port),
                   dest->s6_addr32[0], dest->s6_addr32[1],
                   dest->s6_addr32[2], dest->s6_addr32[3],
                   ntohs(inet_rsk(req)->rmt_port),
                   TCP_SYN_RECV,
                   0, 0, /* could print option size, but that is af dependent. */
                   1,    /* timers active (only the expire timer) */
                   jiffies_to_clock_t(ttd),
                   req->retrans,
                   uid,
                   0,    /* non standard timer */
                   0,    /* open_requests have no inode */
                   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
        struct in6_addr *dest, *src;
        __u16 destp, srcp;
        int timer_active;
        unsigned long timer_expires;
        struct inet_sock *inet = inet_sk(sp);
        struct tcp_sock *tp = tcp_sk(sp);
        const struct inet_connection_sock *icsk = inet_csk(sp);
        struct ipv6_pinfo *np = inet6_sk(sp);

        dest  = &np->daddr;
        src   = &np->rcv_saddr;
        destp = ntohs(inet->dport);
        srcp  = ntohs(inet->sport);

        if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
                timer_active  = 1;
                timer_expires = icsk->icsk_timeout;
        } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
                timer_active  = 4;
                timer_expires = icsk->icsk_timeout;
        } else if (timer_pending(&sp->sk_timer)) {
                timer_active  = 2;
                timer_expires = sp->sk_timer.expires;
        } else {
                timer_active  = 0;
                timer_expires = jiffies;
        }

        seq_printf(seq,
                   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
                   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %lu %lu %u %u %d\n",
                   i,
                   src->s6_addr32[0], src->s6_addr32[1],
                   src->s6_addr32[2], src->s6_addr32[3], srcp,
                   dest->s6_addr32[0], dest->s6_addr32[1],
                   dest->s6_addr32[2], dest->s6_addr32[3], destp,
                   sp->sk_state,
                   tp->write_seq - tp->snd_una,
                   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
                   timer_active,
                   jiffies_to_clock_t(timer_expires - jiffies),
                   icsk->icsk_retransmits,
                   sock_i_uid(sp),
                   icsk->icsk_probes_out,
                   sock_i_ino(sp),
                   atomic_read(&sp->sk_refcnt), sp,
                   jiffies_to_clock_t(icsk->icsk_rto),
                   jiffies_to_clock_t(icsk->icsk_ack.ato),
                   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
                   tp->snd_cwnd, tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh
                   );
}

static void get_timewait6_sock(struct seq_file *seq,
                               struct inet_timewait_sock *tw, int i)
{
        struct in6_addr *dest, *src;
        __u16 destp, srcp;
        struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
        int ttd = tw->tw_ttd - jiffies;

        if (ttd < 0)
                ttd = 0;

        dest  = &tw6->tw_v6_daddr;
        src   = &tw6->tw_v6_rcv_saddr;
        destp = ntohs(tw->tw_dport);
        srcp  = ntohs(tw->tw_sport);

        seq_printf(seq,
                   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
                   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
                   i,
                   src->s6_addr32[0], src->s6_addr32[1],
                   src->s6_addr32[2], src->s6_addr32[3], srcp,
                   dest->s6_addr32[0], dest->s6_addr32[1],
                   dest->s6_addr32[2], dest->s6_addr32[3], destp,
                   tw->tw_substate, 0, 0,
                   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
                   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
        struct tcp_iter_state *st;

        if (v == SEQ_START_TOKEN) {
                seq_puts(seq,
                         "  sl  "
                         "local_address                         "
                         "remote_address                        "
                         "st tx_queue rx_queue tr tm->when retrnsmt"
                         "   uid  timeout inode\n");
                goto out;
        }
        st = seq->private;

        switch (st->state) {
        case TCP_SEQ_STATE_LISTENING:
        case TCP_SEQ_STATE_ESTABLISHED:
                get_tcp6_sock(seq, v, st->num);
                break;
        case TCP_SEQ_STATE_OPENREQ:
                get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
                break;
        case TCP_SEQ_STATE_TIME_WAIT:
                get_timewait6_sock(seq, v, st->num);
                break;
        }
out:
        return 0;
}
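
/*
 * Editor's illustrative sketch (not part of the kernel source): the seq_file
 * handlers above produce /proc/net/tcp6, where each address is printed as
 * four 32-bit hex words plus a hex port, e.g.
 * "00000000000000000000000000000000:0016" for [::]:22.  A minimal userspace
 * reader of that format might look like this.
 */
#if 0 /* illustrative userspace example, not kernel code */
#include <stdio.h>

int main(void)
{
        char line[512];
        char local[128], remote[128];
        unsigned int st;
        FILE *f = fopen("/proc/net/tcp6", "r");

        if (!f)
                return 1;

        fgets(line, sizeof(line), f);           /* skip the header row */
        while (fgets(line, sizeof(line), f)) {
                /* "<sl>: <local>:<port> <remote>:<port> <state> ..." */
                if (sscanf(line, "%*d: %127[0-9A-Fa-f:] %127[0-9A-Fa-f:] %x",
                           local, remote, &st) == 3)
                        printf("%s -> %s state %02X\n", local, remote, st);
        }
        fclose(f);
        return 0;
}
#endif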

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
        .name           = "tcp6",
        .family         = AF_INET6,
        .seq_fops       = {
                .owner          = THIS_MODULE,
        },
        .seq_ops        = {
                .show           = tcp6_seq_show,
        },
};

int tcp6_proc_init(struct net *net)
{
        return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
        tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

struct proto tcpv6_prot = {
        .name                   = "TCPv6",
        .owner                  = THIS_MODULE,
        .close                  = tcp_close,
        .connect                = tcp_v6_connect,
        .disconnect             = tcp_disconnect,
        .accept                 = inet_csk_accept,
        .ioctl                  = tcp_ioctl,
        .init                   = tcp_v6_init_sock,
        .destroy                = tcp_v6_destroy_sock,
        .shutdown               = tcp_shutdown,
        .setsockopt             = tcp_setsockopt,
        .getsockopt             = tcp_getsockopt,
        .recvmsg                = tcp_recvmsg,
        .backlog_rcv            = tcp_v6_do_rcv,
        .hash                   = tcp_v6_hash,
        .unhash                 = inet_unhash,
        .get_port               = inet_csk_get_port,
        .enter_memory_pressure  = tcp_enter_memory_pressure,
        .sockets_allocated      = &tcp_sockets_allocated,
        .memory_allocated       = &tcp_memory_allocated,
        .memory_pressure        = &tcp_memory_pressure,
        .orphan_count           = &tcp_orphan_count,
        .sysctl_mem             = sysctl_tcp_mem,
        .sysctl_wmem            = sysctl_tcp_wmem,
        .sysctl_rmem            = sysctl_tcp_rmem,
        .max_header             = MAX_TCP_HEADER,
        .obj_size               = sizeof(struct tcp6_sock),
        .slab_flags             = SLAB_DESTROY_BY_RCU,
        .twsk_prot              = &tcp6_timewait_sock_ops,
        .rsk_prot               = &tcp6_request_sock_ops,
        .h.hashinfo             = &tcp_hashinfo,
#ifdef CONFIG_COMPAT
        .compat_setsockopt      = compat_tcp_setsockopt,
        .compat_getsockopt      = compat_tcp_getsockopt,
#endif
};

static struct inet6_protocol tcpv6_protocol = {
        .handler        = tcp_v6_rcv,
        .err_handler    = tcp_v6_err,
        .gso_send_check = tcp_v6_gso_send_check,
        .gso_segment    = tcp_tso_segment,
        .gro_receive    = tcp6_gro_receive,
        .gro_complete   = tcp6_gro_complete,
        .flags          = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
        .type           = SOCK_STREAM,
        .protocol       = IPPROTO_TCP,
        .prot           = &tcpv6_prot,
        .ops            = &inet6_stream_ops,
        .capability     = -1,
        .no_check       = 0,
        .flags          = INET_PROTOSW_PERMANENT |
                          INET_PROTOSW_ICSK,
};

static int tcpv6_net_init(struct net *net)
{
        return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
                                    SOCK_RAW, IPPROTO_TCP, net);
}

static void tcpv6_net_exit(struct net *net)
{
        inet_ctl_sock_destroy(net->ipv6.tcp_sk);
        inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
        .init = tcpv6_net_init,
        .exit = tcpv6_net_exit,
};
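
/*
 * Editor's illustrative sketch (not part of the kernel source): the pernet
 * operations above give every network namespace its own control socket
 * (net->ipv6.tcp_sk) and clean up TIME_WAIT state on namespace teardown.
 * From userspace, entering a fresh namespace (requires CAP_SYS_ADMIN) and
 * opening a TCPv6 socket there exercises that per-namespace init path.
 */
#if 0 /* illustrative userspace example, not kernel code */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <netinet/in.h>
#include <sys/socket.h>

int main(void)
{
        int fd;

        if (unshare(CLONE_NEWNET) < 0) {        /* new network namespace */
                perror("unshare(CLONE_NEWNET)");
                return 1;
        }

        fd = socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP);
        if (fd < 0) {
                perror("socket");
                return 1;
        }
        printf("opened a TCPv6 socket inside a new network namespace\n");
        return 0;
}
#endif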

int __init tcpv6_init(void)
{
        int ret;

        ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
        if (ret)
                goto out;

        /* register inet6 protocol */
        ret = inet6_register_protosw(&tcpv6_protosw);
        if (ret)
                goto out_tcpv6_protocol;

        ret = register_pernet_subsys(&tcpv6_net_ops);
        if (ret)
                goto out_tcpv6_protosw;
out:
        return ret;

out_tcpv6_protocol:
        inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
out_tcpv6_protosw:
        inet6_unregister_protosw(&tcpv6_protosw);
        goto out;
}
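
/*
 * Editor's note (illustrative, not part of the kernel source): tcpv6_init()
 * follows the usual "register A, then B, then C; on failure unwind what was
 * already registered, in reverse order" idiom.  Note that the two error
 * labels above appear to unwind in a surprising order (a failure of
 * inet6_register_protosw() falls through into inet6_unregister_protosw()).
 * The sketch below shows the conventional shape of the idiom using
 * hypothetical register_a()/register_b()/register_c() helpers.
 */
#if 0 /* illustrative sketch with hypothetical helpers */
static int example_init(void)
{
        int ret;

        ret = register_a();
        if (ret)
                goto out;
        ret = register_b();
        if (ret)
                goto out_a;             /* undo A only */
        ret = register_c();
        if (ret)
                goto out_b;             /* undo B, then fall through to undo A */
        return 0;

out_b:
        unregister_b();
out_a:
        unregister_a();
out:
        return ret;
}
#endif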

void tcpv6_exit(void)
{
        unregister_pernet_subsys(&tcpv6_net_ops);
        inet6_unregister_protosw(&tcpv6_protosw);
        inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}