/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req);

static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static void __tcp_v6_send_check(struct sk_buff *skb,
				struct in6_addr *saddr,
				struct in6_addr *daddr);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   struct in6_addr *addr)
{
	return NULL;
}
#endif

static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}

static __inline__ __sum16 tcp_v6_check(int len,
				       struct in6_addr *saddr,
				       struct in6_addr *daddr,
				       __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

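/* Active-open (connect()) path: v4-mapped destinations are handed off to
 * tcp_v4_connect() after switching the socket's af_ops to ipv6_mapped.
 */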
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if(ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if(addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &np->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl6.daddr, &np->daddr);
	ipv6_addr_copy(&fl6.saddr,
		       (saddr ? saddr : &np->saddr));
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
		struct inet_peer *peer = rt6_get_peer(rt);
		/*
		 * VJ's idea. We save last timestamp seen from
		 * the destination in peer table, when entering state
		 * TIME-WAIT * and initialize rx_opt.ts_recent from it,
		 * when trying new connection.
		 */
		if (peer) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
				tp->rx_opt.ts_recent = peer->tcp_ts;
			}
		}
	}

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi6 fl6;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl6, 0, sizeof(fl6));
			fl6.flowi6_proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl6.daddr, &np->daddr);
			ipv6_addr_copy(&fl6.saddr, &np->saddr);
			fl6.flowi6_oif = sk->sk_bound_dev_if;
			fl6.flowi6_mark = sk->sk_mark;
			fl6.fl6_dport = inet->inet_dport;
			fl6.fl6_sport = inet->inet_sport;
			security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

			dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
			if (IS_ERR(dst)) {
				sk->sk_err_soft = -PTR_ERR(dst);
				goto out;
			}

		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for an request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, it SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}


static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct request_values *rvp)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff * skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr * final_p, final;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int err;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
	ipv6_addr_copy(&fl6.saddr, &treq->loc_addr);
	fl6.flowlabel = 0;
	fl6.flowi6_oif = treq->iif;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = inet_rsk(req)->rmt_port;
	fl6.fl6_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, flowi6_to_flowi(&fl6));

	opt = np->opt;
	final_p = fl6_update_dst(&fl6, opt, &final);

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto done;
	}
	skb = tcp_make_synack(sk, dst, req, rvp);
	err = -ENOMEM;
	if (skb) {
		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);

		ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl6, opt);
		err = net_xmit_eval(err);
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}

static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v6_send_synack(sk, req, rvp);
}

static inline void syn_flood_warning(struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies)
		printk(KERN_INFO
		       "TCPv6: Possible SYN flooding on port %d. "
		       "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
	else
#endif
		printk(KERN_INFO
		       "TCPv6: Possible SYN flooding on port %d. "
		       "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet6_rsk(req)->pktopts);
}

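/* TCP MD5 signature (RFC 2385) support: per-socket key lists indexed by the
 * peer's IPv6 address; v4-mapped peers are delegated to the IPv4 helpers.
 */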
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   struct in6_addr *addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	BUG_ON(tp == NULL);

	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
		return NULL;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
			return &tp->md5sig_info->keys6[i].base;
	}
	return NULL;
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}

static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
			     char *newkey, u8 newkeylen)
{
	/* Add key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp6_md5sig_key *keys;

	key = tcp_v6_md5_do_lookup(sk, peer);
	if (key) {
		/* modify existing entry - just update that one */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		/* reallocate new list if current one is full. */
		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		}
		if (tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);

			if (!keys) {
				tcp_free_md5sig_pool();
				kfree(newkey);
				return -ENOMEM;
			}

			if (tp->md5sig_info->entries6)
				memmove(keys, tp->md5sig_info->keys6,
					(sizeof (tp->md5sig_info->keys6[0]) *
					 tp->md5sig_info->entries6));

			kfree(tp->md5sig_info->keys6);
			tp->md5sig_info->keys6 = keys;
			tp->md5sig_info->alloced6++;
		}

		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
			       peer);
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;

		tp->md5sig_info->entries6++;
	}
	return 0;
}

static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, __u8 newkeylen)
{
	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}

static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
			/* Free the key */
			kfree(tp->md5sig_info->keys6[i].base.key);
			tp->md5sig_info->entries6--;

			if (tp->md5sig_info->entries6 == 0) {
				kfree(tp->md5sig_info->keys6);
				tp->md5sig_info->keys6 = NULL;
				tp->md5sig_info->alloced6 = 0;
			} else {
				/* shrink the database */
				if (tp->md5sig_info->entries6 != i)
					memmove(&tp->md5sig_info->keys6[i],
						&tp->md5sig_info->keys6[i+1],
						(tp->md5sig_info->entries6 - i)
						* sizeof (tp->md5sig_info->keys6[0]));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}

static void tcp_v6_clear_md5_list (struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (tp->md5sig_info->entries6) {
		for (i = 0; i < tp->md5sig_info->entries6; i++)
			kfree(tp->md5sig_info->keys6[i].base.key);
		tp->md5sig_info->entries6 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys6);
	tp->md5sig_info->keys6 = NULL;
	tp->md5sig_info->alloced6 = 0;

	if (tp->md5sig_info->entries4) {
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys4);
	tp->md5sig_info->keys4 = NULL;
	tp->md5sig_info->alloced4 = 0;
}

static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
				  int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		tp->md5sig_info = p;
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
					 newkey, cmd.tcpm_keylen);
	}
	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
}

static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					struct in6_addr *daddr,
					struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	ipv6_addr_copy(&bp->saddr, saddr);
	ipv6_addr_copy(&bp->daddr, daddr);
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       struct in6_addr *daddr, struct in6_addr *saddr,
			       struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       struct sock *sk, struct request_sock *req,
			       struct sk_buff *skb)
{
	struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
{
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
			       genhash ? "failed" : "mismatch",
			       &ip6h->saddr, ntohs(th->source),
			       &ip6h->daddr, ntohs(th->dest));
		}
		return 1;
	}
	return 0;
}
#endif

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
#endif

static void __tcp_v6_send_check(struct sk_buff *skb,
				struct in6_addr *saddr, struct in6_addr *daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v6_check(skb->len, saddr, daddr,
					 csum_partial(th, th->doff << 2,
						      skb->csum));
	}
}

static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
}

static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
	return 0;
}

static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	struct ipv6hdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

static int tcp6_gro_complete(struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

	return tcp_gro_complete(skb);
}

static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 ts, struct tcp_md5sig_key *key, int rst)
{
	struct tcphdr *th = tcp_hdr(skb), *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&fl6.saddr, &ipv6_hdr(skb)->daddr);

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.flowi6_oif = inet6_iif(skb);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup either it is for RST
	 * Underlying function will use this to retrieve the network
	 * namespace
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	if (sk)
		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
}

static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
}


static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	u8 *hash_location;
	struct request_sock *req;
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0
#endif

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			syn_flood_warning(skb);
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies)
			want_cookie = 1;
		else
#endif
		goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *d;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_free;

		/* Secret recipe starts with IP addresses */
		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

#ifdef CONFIG_SYN_COOKIES
		want_cookie = 0;	/* not our kind of cookie */
#endif
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_free;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (!isn) {
		struct inet_peer *peer = NULL;

		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}
		treq->iif = sk->sk_bound_dev_if;

		/* So that link locals have meaning */
		if (!sk->sk_bound_dev_if &&
		    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
			treq->iif = inet6_iif(skb);

		if (want_cookie) {
			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
			req->cookie_ts = tmp_opt.tstamp_ok;
			goto have_isn;
		}

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet6_csk_route_req(sk, req)) != NULL &&
		    (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
		    ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
				    &treq->rmt_addr)) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v6_init_sequence(skb);
	}
have_isn:
	tcp_rsk(req)->snt_isn = isn;

	security_inet_conn_request(sk, skb, req);

	if (tcp_v6_send_synack(sk, req,
			       (struct request_values *)&tmp_ext) ||
	    want_cookie)
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0; /* don't send reset */
}

static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = inet6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);
	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->opt = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt = NULL;
	newnp->mcast_oif = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more one thing there: reattach optmem
	   to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v6_md5_do_add(newsk, &newnp->daddr,
					  newkey, key->keylen);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		sock_put(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}

static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}

/* The socket must have it's spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash (sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if(nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

e5bbef20 1699static int tcp_v6_rcv(struct sk_buff *skb)
1da177e4 1700{
1ab1457c 1701 struct tcphdr *th;
e802af9c 1702 struct ipv6hdr *hdr;
1da177e4
LT
1703 struct sock *sk;
1704 int ret;
a86b1e30 1705 struct net *net = dev_net(skb->dev);
1da177e4
LT
1706
1707 if (skb->pkt_type != PACKET_HOST)
1708 goto discard_it;
1709
1710 /*
1711 * Count it even if it's bad.
1712 */
63231bdd 1713 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1da177e4
LT
1714
1715 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1716 goto discard_it;
1717
aa8223c7 1718 th = tcp_hdr(skb);
1da177e4
LT
1719
1720 if (th->doff < sizeof(struct tcphdr)/4)
1721 goto bad_packet;
1722 if (!pskb_may_pull(skb, th->doff*4))
1723 goto discard_it;
1724
60476372 1725 if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1da177e4
LT
1726 goto bad_packet;
1727
aa8223c7 1728 th = tcp_hdr(skb);
e802af9c 1729 hdr = ipv6_hdr(skb);
1da177e4
LT
1730 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1731 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1732 skb->len - th->doff*4);
1733 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1734 TCP_SKB_CB(skb)->when = 0;
e802af9c 1735 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(hdr);
1da177e4
LT
1736 TCP_SKB_CB(skb)->sacked = 0;
1737
9a1f27c4 1738 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1da177e4
LT
1739 if (!sk)
1740 goto no_tcp_socket;
1741
1742process:
1743 if (sk->sk_state == TCP_TIME_WAIT)
1744 goto do_time_wait;
1745
e802af9c
SH
1746 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1747 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1748 goto discard_and_relse;
1749 }
1750
1da177e4
LT
1751 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1752 goto discard_and_relse;
1753
fda9ef5d 1754 if (sk_filter(sk, skb))
1da177e4
LT
1755 goto discard_and_relse;
1756
1757 skb->dev = NULL;
1758
293b9c42 1759 bh_lock_sock_nested(sk);
1da177e4
LT
1760 ret = 0;
1761 if (!sock_owned_by_user(sk)) {
1a2449a8 1762#ifdef CONFIG_NET_DMA
1ab1457c 1763 struct tcp_sock *tp = tcp_sk(sk);
b4caea8a 1764 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
f67b4599 1765 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
1ab1457c
YH
1766 if (tp->ucopy.dma_chan)
1767 ret = tcp_v6_do_rcv(sk, skb);
1768 else
1a2449a8
CL
1769#endif
1770 {
1771 if (!tcp_prequeue(sk, skb))
1772 ret = tcp_v6_do_rcv(sk, skb);
1773 }
6cce09f8 1774 } else if (unlikely(sk_add_backlog(sk, skb))) {
6b03a53a 1775 bh_unlock_sock(sk);
6cce09f8 1776 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
6b03a53a
ZY
1777 goto discard_and_relse;
1778 }
1da177e4
LT
1779 bh_unlock_sock(sk);
1780
1781 sock_put(sk);
1782 return ret ? -1 : 0;
1783
1784no_tcp_socket:
1785 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1786 goto discard_it;
1787
1788 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1789bad_packet:
63231bdd 1790 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1da177e4 1791 } else {
cfb6eeb4 1792 tcp_v6_send_reset(NULL, skb);
1da177e4
LT
1793 }
1794
1795discard_it:
1796
1797 /*
1798 * Discard frame
1799 */
1800
1801 kfree_skb(skb);
1802 return 0;
1803
1804discard_and_relse:
1805 sock_put(sk);
1806 goto discard_it;
1807
1808do_time_wait:
1809 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
9469c7b4 1810 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1811 goto discard_it;
1812 }
1813
1814 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
63231bdd 1815 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
9469c7b4 1816 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1817 goto discard_it;
1818 }
1819
9469c7b4 1820 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1da177e4
LT
1821 case TCP_TW_SYN:
1822 {
1823 struct sock *sk2;
1824
c346dca1 1825 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
0660e03f 1826 &ipv6_hdr(skb)->daddr,
505cbfc5 1827 ntohs(th->dest), inet6_iif(skb));
1da177e4 1828 if (sk2 != NULL) {
295ff7ed
ACM
1829 struct inet_timewait_sock *tw = inet_twsk(sk);
1830 inet_twsk_deschedule(tw, &tcp_death_row);
1831 inet_twsk_put(tw);
1da177e4
LT
1832 sk = sk2;
1833 goto process;
1834 }
1835 /* Fall through to ACK */
1836 }
1837 case TCP_TW_ACK:
1838 tcp_v6_timewait_ack(sk, skb);
1839 break;
1840 case TCP_TW_RST:
1841 goto no_tcp_socket;
1842 case TCP_TW_SUCCESS:;
1843 }
1844 goto discard_it;
1845}
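/*
 * Note on do_time_wait above: a SYN that hits a TIME_WAIT socket may be
 * redirected to a currently listening socket (TCP_TW_SYN); the timewait
 * entry is descheduled and released, and the segment is re-run through the
 * normal 'process' path as a fresh connection attempt.
 */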
1846
ccb7c410
DM
1847static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
1848{
db3949c4
DM
1849 struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
1850 struct ipv6_pinfo *np = inet6_sk(sk);
1851 struct inet_peer *peer;
1852
1853 if (!rt ||
1854 !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
1855 peer = inet_getpeer_v6(&np->daddr, 1);
1856 *release_it = true;
1857 } else {
1858 if (!rt->rt6i_peer)
1859 rt6_bind_peer(rt, 1);
1860 peer = rt->rt6i_peer;
457de438 1861 *release_it = false;
db3949c4
DM
1862 }
1863
1864 return peer;
ccb7c410
DM
1865}
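/*
 * tcp_v6_get_peer() prefers the inet_peer already bound to the cached route
 * when that route still matches the peer address (no extra reference, so
 * *release_it is false); otherwise it performs a refcounted lookup that the
 * caller must release.
 */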
1866
1867static void *tcp_v6_tw_get_peer(struct sock *sk)
1da177e4 1868{
db3949c4 1869 struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
ccb7c410
DM
1870 struct inet_timewait_sock *tw = inet_twsk(sk);
1871
1872 if (tw->tw_family == AF_INET)
1873 return tcp_v4_tw_get_peer(sk);
1874
db3949c4 1875 return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
1da177e4
LT
1876}
1877
ccb7c410
DM
1878static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1879 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1880 .twsk_unique = tcp_twsk_unique,
1881 .twsk_destructor= tcp_twsk_destructor,
1882 .twsk_getpeer = tcp_v6_tw_get_peer,
1883};
1884
3b401a81 1885static const struct inet_connection_sock_af_ops ipv6_specific = {
543d9cfe
ACM
1886 .queue_xmit = inet6_csk_xmit,
1887 .send_check = tcp_v6_send_check,
1888 .rebuild_header = inet6_sk_rebuild_header,
1889 .conn_request = tcp_v6_conn_request,
1890 .syn_recv_sock = tcp_v6_syn_recv_sock,
3f419d2d 1891 .get_peer = tcp_v6_get_peer,
543d9cfe
ACM
1892 .net_header_len = sizeof(struct ipv6hdr),
1893 .setsockopt = ipv6_setsockopt,
1894 .getsockopt = ipv6_getsockopt,
1895 .addr2sockaddr = inet6_csk_addr2sockaddr,
1896 .sockaddr_len = sizeof(struct sockaddr_in6),
ab1e0a13 1897 .bind_conflict = inet6_csk_bind_conflict,
3fdadf7d 1898#ifdef CONFIG_COMPAT
543d9cfe
ACM
1899 .compat_setsockopt = compat_ipv6_setsockopt,
1900 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1901#endif
1da177e4
LT
1902};
1903
cfb6eeb4 1904#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1905static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
cfb6eeb4 1906 .md5_lookup = tcp_v6_md5_lookup,
49a72dfb 1907 .calc_md5_hash = tcp_v6_md5_hash_skb,
cfb6eeb4
YH
1908 .md5_add = tcp_v6_md5_add_func,
1909 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1910};
a928630a 1911#endif
cfb6eeb4 1912
1da177e4
LT
1913/*
1914 * TCP over IPv4 via INET6 API
1915 */
1916
3b401a81 1917static const struct inet_connection_sock_af_ops ipv6_mapped = {
543d9cfe
ACM
1918 .queue_xmit = ip_queue_xmit,
1919 .send_check = tcp_v4_send_check,
1920 .rebuild_header = inet_sk_rebuild_header,
1921 .conn_request = tcp_v6_conn_request,
1922 .syn_recv_sock = tcp_v6_syn_recv_sock,
3f419d2d 1923 .get_peer = tcp_v4_get_peer,
543d9cfe
ACM
1924 .net_header_len = sizeof(struct iphdr),
1925 .setsockopt = ipv6_setsockopt,
1926 .getsockopt = ipv6_getsockopt,
1927 .addr2sockaddr = inet6_csk_addr2sockaddr,
1928 .sockaddr_len = sizeof(struct sockaddr_in6),
ab1e0a13 1929 .bind_conflict = inet6_csk_bind_conflict,
3fdadf7d 1930#ifdef CONFIG_COMPAT
543d9cfe
ACM
1931 .compat_setsockopt = compat_ipv6_setsockopt,
1932 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1933#endif
1da177e4
LT
1934};
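/*
 * Compared with ipv6_specific above, only the transmit, checksum,
 * header-rebuild and peer helpers differ (their IPv4 variants are used),
 * along with the network header length; elsewhere in this file a socket
 * that ends up talking to an IPv4-mapped address has its icsk_af_ops
 * switched to this table.
 */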
1935
cfb6eeb4 1936#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1937static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
cfb6eeb4 1938 .md5_lookup = tcp_v4_md5_lookup,
49a72dfb 1939 .calc_md5_hash = tcp_v4_md5_hash_skb,
cfb6eeb4
YH
1940 .md5_add = tcp_v6_md5_add_func,
1941 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1942};
a928630a 1943#endif
cfb6eeb4 1944
1da177e4
LT
1945/* NOTE: A lot of things are set to zero explicitly by the call to
1946 * sk_alloc(), so they need not be done here.
1947 */
1948static int tcp_v6_init_sock(struct sock *sk)
1949{
6687e988 1950 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
1951 struct tcp_sock *tp = tcp_sk(sk);
1952
1953 skb_queue_head_init(&tp->out_of_order_queue);
1954 tcp_init_xmit_timers(sk);
1955 tcp_prequeue_init(tp);
1956
6687e988 1957 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1da177e4
LT
1958 tp->mdev = TCP_TIMEOUT_INIT;
1959
1960 /* So many TCP implementations out there (incorrectly) count the
1961 * initial SYN frame in their delayed-ACK and congestion control
1962 * algorithms that we must have the following bandaid to talk
1963 * efficiently to them. -DaveM
1964 */
1965 tp->snd_cwnd = 2;
1966
1967 /* See draft-stevens-tcpca-spec-01 for discussion of the
1968 * initialization of these values.
1969 */
0b6a05c1 1970 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1da177e4 1971 tp->snd_cwnd_clamp = ~0;
bee7ca9e 1972 tp->mss_cache = TCP_MSS_DEFAULT;
1da177e4
LT
1973
1974 tp->reordering = sysctl_tcp_reordering;
1975
1976 sk->sk_state = TCP_CLOSE;
1977
8292a17a 1978 icsk->icsk_af_ops = &ipv6_specific;
6687e988 1979 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
d83d8461 1980 icsk->icsk_sync_mss = tcp_sync_mss;
1da177e4
LT
1981 sk->sk_write_space = sk_stream_write_space;
1982 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1983
cfb6eeb4
YH
1984#ifdef CONFIG_TCP_MD5SIG
1985 tp->af_specific = &tcp_sock_ipv6_specific;
1986#endif
1987
435cf559
WAS
1988 /* TCP Cookie Transactions */
1989 if (sysctl_tcp_cookie_size > 0) {
1990 /* Default, cookies without s_data_payload. */
1991 tp->cookie_values =
1992 kzalloc(sizeof(*tp->cookie_values),
1993 sk->sk_allocation);
1994 if (tp->cookie_values != NULL)
1995 kref_init(&tp->cookie_values->kref);
1996 }
1997 /* Presumed zeroed, in order of appearance:
1998 * cookie_in_always, cookie_out_never,
1999 * s_data_constant, s_data_in, s_data_out
2000 */
1da177e4
LT
2001 sk->sk_sndbuf = sysctl_tcp_wmem[1];
2002 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
2003
eb4dea58 2004 local_bh_disable();
1748376b 2005 percpu_counter_inc(&tcp_sockets_allocated);
eb4dea58 2006 local_bh_enable();
1da177e4
LT
2007
2008 return 0;
2009}
2010
7d06b2e0 2011static void tcp_v6_destroy_sock(struct sock *sk)
1da177e4 2012{
cfb6eeb4
YH
2013#ifdef CONFIG_TCP_MD5SIG
2014 /* Clean up the MD5 key list */
2015 if (tcp_sk(sk)->md5sig_info)
2016 tcp_v6_clear_md5_list(sk);
2017#endif
1da177e4 2018 tcp_v4_destroy_sock(sk);
7d06b2e0 2019 inet6_destroy_sock(sk);
1da177e4
LT
2020}
2021
952a10be 2022#ifdef CONFIG_PROC_FS
1da177e4 2023/* Proc filesystem TCPv6 sock list dumping. */
1ab1457c 2024static void get_openreq6(struct seq_file *seq,
60236fdd 2025 struct sock *sk, struct request_sock *req, int i, int uid)
1da177e4 2026{
1da177e4 2027 int ttd = req->expires - jiffies;
ca304b61
ACM
2028 struct in6_addr *src = &inet6_rsk(req)->loc_addr;
2029 struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
1da177e4
LT
2030
2031 if (ttd < 0)
2032 ttd = 0;
2033
1da177e4
LT
2034 seq_printf(seq,
2035 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2036 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
2037 i,
2038 src->s6_addr32[0], src->s6_addr32[1],
2039 src->s6_addr32[2], src->s6_addr32[3],
fd507037 2040 ntohs(inet_rsk(req)->loc_port),
1da177e4
LT
2041 dest->s6_addr32[0], dest->s6_addr32[1],
2042 dest->s6_addr32[2], dest->s6_addr32[3],
2e6599cb 2043 ntohs(inet_rsk(req)->rmt_port),
1da177e4
LT
2044 TCP_SYN_RECV,
2045 0,0, /* could print option size, but that is af dependent. */
1ab1457c
YH
2046 1, /* timers active (only the expire timer) */
2047 jiffies_to_clock_t(ttd),
1da177e4
LT
2048 req->retrans,
2049 uid,
1ab1457c 2050 0, /* non standard timer */
1da177e4
LT
2051 0, /* open_requests have no inode */
2052 0, req);
2053}
2054
2055static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
2056{
2057 struct in6_addr *dest, *src;
2058 __u16 destp, srcp;
2059 int timer_active;
2060 unsigned long timer_expires;
2061 struct inet_sock *inet = inet_sk(sp);
2062 struct tcp_sock *tp = tcp_sk(sp);
463c84b9 2063 const struct inet_connection_sock *icsk = inet_csk(sp);
1da177e4
LT
2064 struct ipv6_pinfo *np = inet6_sk(sp);
2065
2066 dest = &np->daddr;
2067 src = &np->rcv_saddr;
c720c7e8
ED
2068 destp = ntohs(inet->inet_dport);
2069 srcp = ntohs(inet->inet_sport);
463c84b9
ACM
2070
2071 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1da177e4 2072 timer_active = 1;
463c84b9
ACM
2073 timer_expires = icsk->icsk_timeout;
2074 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1da177e4 2075 timer_active = 4;
463c84b9 2076 timer_expires = icsk->icsk_timeout;
1da177e4
LT
2077 } else if (timer_pending(&sp->sk_timer)) {
2078 timer_active = 2;
2079 timer_expires = sp->sk_timer.expires;
2080 } else {
2081 timer_active = 0;
2082 timer_expires = jiffies;
2083 }
2084
2085 seq_printf(seq,
2086 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
7be87351 2087 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %lu %lu %u %u %d\n",
1da177e4
LT
2088 i,
2089 src->s6_addr32[0], src->s6_addr32[1],
2090 src->s6_addr32[2], src->s6_addr32[3], srcp,
2091 dest->s6_addr32[0], dest->s6_addr32[1],
2092 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1ab1457c 2093 sp->sk_state,
47da8ee6
SS
2094 tp->write_seq-tp->snd_una,
2095 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1da177e4
LT
2096 timer_active,
2097 jiffies_to_clock_t(timer_expires - jiffies),
463c84b9 2098 icsk->icsk_retransmits,
1da177e4 2099 sock_i_uid(sp),
6687e988 2100 icsk->icsk_probes_out,
1da177e4
LT
2101 sock_i_ino(sp),
2102 atomic_read(&sp->sk_refcnt), sp,
7be87351
SH
2103 jiffies_to_clock_t(icsk->icsk_rto),
2104 jiffies_to_clock_t(icsk->icsk_ack.ato),
463c84b9 2105 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
0b6a05c1
IJ
2106 tp->snd_cwnd,
2107 tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
1da177e4
LT
2108 );
2109}
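/*
 * Field order of each /proc/net/tcp6 entry emitted above: slot, hex local
 * address:port, hex remote address:port, state, tx_queue:rx_queue,
 * timer_active:expiry, retransmits, uid, probes_out, inode, refcount,
 * socket pointer, rto, ato, (quick << 1) | pingpong, snd_cwnd and ssthresh
 * (-1 while still in the initial slow start).
 */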
2110
1ab1457c 2111static void get_timewait6_sock(struct seq_file *seq,
8feaf0c0 2112 struct inet_timewait_sock *tw, int i)
1da177e4
LT
2113{
2114 struct in6_addr *dest, *src;
2115 __u16 destp, srcp;
0fa1a53e 2116 struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
1da177e4
LT
2117 int ttd = tw->tw_ttd - jiffies;
2118
2119 if (ttd < 0)
2120 ttd = 0;
2121
0fa1a53e
ACM
2122 dest = &tw6->tw_v6_daddr;
2123 src = &tw6->tw_v6_rcv_saddr;
1da177e4
LT
2124 destp = ntohs(tw->tw_dport);
2125 srcp = ntohs(tw->tw_sport);
2126
2127 seq_printf(seq,
2128 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2129 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
2130 i,
2131 src->s6_addr32[0], src->s6_addr32[1],
2132 src->s6_addr32[2], src->s6_addr32[3], srcp,
2133 dest->s6_addr32[0], dest->s6_addr32[1],
2134 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2135 tw->tw_substate, 0, 0,
2136 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2137 atomic_read(&tw->tw_refcnt), tw);
2138}
2139
1da177e4
LT
2140static int tcp6_seq_show(struct seq_file *seq, void *v)
2141{
2142 struct tcp_iter_state *st;
2143
2144 if (v == SEQ_START_TOKEN) {
2145 seq_puts(seq,
2146 " sl "
2147 "local_address "
2148 "remote_address "
2149 "st tx_queue rx_queue tr tm->when retrnsmt"
2150 " uid timeout inode\n");
2151 goto out;
2152 }
2153 st = seq->private;
2154
2155 switch (st->state) {
2156 case TCP_SEQ_STATE_LISTENING:
2157 case TCP_SEQ_STATE_ESTABLISHED:
2158 get_tcp6_sock(seq, v, st->num);
2159 break;
2160 case TCP_SEQ_STATE_OPENREQ:
2161 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2162 break;
2163 case TCP_SEQ_STATE_TIME_WAIT:
2164 get_timewait6_sock(seq, v, st->num);
2165 break;
2166 }
2167out:
2168 return 0;
2169}
2170
1da177e4 2171static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1da177e4
LT
2172 .name = "tcp6",
2173 .family = AF_INET6,
5f4472c5
DL
2174 .seq_fops = {
2175 .owner = THIS_MODULE,
2176 },
9427c4b3
DL
2177 .seq_ops = {
2178 .show = tcp6_seq_show,
2179 },
1da177e4
LT
2180};
2181
2c8c1e72 2182int __net_init tcp6_proc_init(struct net *net)
1da177e4 2183{
6f8b13bc 2184 return tcp_proc_register(net, &tcp6_seq_afinfo);
1da177e4
LT
2185}
2186
6f8b13bc 2187void tcp6_proc_exit(struct net *net)
1da177e4 2188{
6f8b13bc 2189 tcp_proc_unregister(net, &tcp6_seq_afinfo);
1da177e4
LT
2190}
2191#endif
2192
2193struct proto tcpv6_prot = {
2194 .name = "TCPv6",
2195 .owner = THIS_MODULE,
2196 .close = tcp_close,
2197 .connect = tcp_v6_connect,
2198 .disconnect = tcp_disconnect,
463c84b9 2199 .accept = inet_csk_accept,
1da177e4
LT
2200 .ioctl = tcp_ioctl,
2201 .init = tcp_v6_init_sock,
2202 .destroy = tcp_v6_destroy_sock,
2203 .shutdown = tcp_shutdown,
2204 .setsockopt = tcp_setsockopt,
2205 .getsockopt = tcp_getsockopt,
1da177e4 2206 .recvmsg = tcp_recvmsg,
7ba42910
CG
2207 .sendmsg = tcp_sendmsg,
2208 .sendpage = tcp_sendpage,
1da177e4
LT
2209 .backlog_rcv = tcp_v6_do_rcv,
2210 .hash = tcp_v6_hash,
ab1e0a13
ACM
2211 .unhash = inet_unhash,
2212 .get_port = inet_csk_get_port,
1da177e4
LT
2213 .enter_memory_pressure = tcp_enter_memory_pressure,
2214 .sockets_allocated = &tcp_sockets_allocated,
2215 .memory_allocated = &tcp_memory_allocated,
2216 .memory_pressure = &tcp_memory_pressure,
0a5578cf 2217 .orphan_count = &tcp_orphan_count,
1da177e4
LT
2218 .sysctl_mem = sysctl_tcp_mem,
2219 .sysctl_wmem = sysctl_tcp_wmem,
2220 .sysctl_rmem = sysctl_tcp_rmem,
2221 .max_header = MAX_TCP_HEADER,
2222 .obj_size = sizeof(struct tcp6_sock),
3ab5aee7 2223 .slab_flags = SLAB_DESTROY_BY_RCU,
6d6ee43e 2224 .twsk_prot = &tcp6_timewait_sock_ops,
60236fdd 2225 .rsk_prot = &tcp6_request_sock_ops,
39d8cda7 2226 .h.hashinfo = &tcp_hashinfo,
7ba42910 2227 .no_autobind = true,
543d9cfe
ACM
2228#ifdef CONFIG_COMPAT
2229 .compat_setsockopt = compat_tcp_setsockopt,
2230 .compat_getsockopt = compat_tcp_getsockopt,
2231#endif
1da177e4
LT
2232};
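/*
 * tcpv6_prot shares the established/bind hash tables with IPv4 through
 * tcp_hashinfo, allocates tcp6_sock objects from an RCU-freed slab, and
 * reuses the protocol-independent tcp_* entry points for everything that
 * is not address-family specific.
 */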
2233
41135cc8 2234static const struct inet6_protocol tcpv6_protocol = {
1da177e4
LT
2235 .handler = tcp_v6_rcv,
2236 .err_handler = tcp_v6_err,
a430a43d 2237 .gso_send_check = tcp_v6_gso_send_check,
adcfc7d0 2238 .gso_segment = tcp_tso_segment,
684f2176
HX
2239 .gro_receive = tcp6_gro_receive,
2240 .gro_complete = tcp6_gro_complete,
1da177e4
LT
2241 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2242};
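/*
 * tcpv6_protocol is attached to IPPROTO_TCP in tcpv6_init() below, making
 * tcp_v6_rcv()/tcp_v6_err() the receive and ICMPv6 error entry points and
 * wiring up the GSO/GRO helpers for TCP over IPv6.
 */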
2243
1da177e4
LT
2244static struct inet_protosw tcpv6_protosw = {
2245 .type = SOCK_STREAM,
2246 .protocol = IPPROTO_TCP,
2247 .prot = &tcpv6_prot,
2248 .ops = &inet6_stream_ops,
1da177e4 2249 .no_check = 0,
d83d8461
ACM
2250 .flags = INET_PROTOSW_PERMANENT |
2251 INET_PROTOSW_ICSK,
1da177e4
LT
2252};
2253
2c8c1e72 2254static int __net_init tcpv6_net_init(struct net *net)
93ec926b 2255{
5677242f
DL
2256 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2257 SOCK_RAW, IPPROTO_TCP, net);
93ec926b
DL
2258}
2259
2c8c1e72 2260static void __net_exit tcpv6_net_exit(struct net *net)
93ec926b 2261{
5677242f 2262 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
b099ce26
EB
2263}
2264
2c8c1e72 2265static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
b099ce26
EB
2266{
2267 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
93ec926b
DL
2268}
2269
2270static struct pernet_operations tcpv6_net_ops = {
b099ce26
EB
2271 .init = tcpv6_net_init,
2272 .exit = tcpv6_net_exit,
2273 .exit_batch = tcpv6_net_exit_batch,
93ec926b
DL
2274};
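/*
 * The pernet operations create and destroy a per-namespace control socket
 * (used when sending resets and timewait ACKs) and, on namespace teardown,
 * purge any remaining IPv6 timewait sockets in one batch.
 */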
2275
7f4e4868 2276int __init tcpv6_init(void)
1da177e4 2277{
7f4e4868
DL
2278 int ret;
2279
2280 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2281 if (ret)
2282 goto out;
2283
1da177e4 2284 /* register inet6 protocol */
7f4e4868
DL
2285 ret = inet6_register_protosw(&tcpv6_protosw);
2286 if (ret)
2287 goto out_tcpv6_protocol;
2288
93ec926b 2289 ret = register_pernet_subsys(&tcpv6_net_ops);
7f4e4868
DL
2290 if (ret)
2291 goto out_tcpv6_protosw;
2292out:
2293 return ret;
ae0f7d5f 2294
7f4e4868
DL
2295out_tcpv6_protosw:
2296 inet6_unregister_protosw(&tcpv6_protosw);
2297out_tcpv6_protocol:
2298 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2299 goto out;
2300}
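/*
 * Registration order matters here: the protocol handler first, then the
 * protosw, then the pernet subsystem; the error labels above unwind only
 * what was actually registered, in reverse order, and tcpv6_exit() below
 * tears everything down the same way.
 */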
2301
09f7709f 2302void tcpv6_exit(void)
7f4e4868 2303{
93ec926b 2304 unregister_pernet_subsys(&tcpv6_net_ops);
7f4e4868
DL
2305 inet6_unregister_protosw(&tcpv6_protosw);
2306 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1da177e4 2307}