/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
25 | ||
26 | #include <linux/bottom_half.h> | |
27 | #include <linux/module.h> | |
28 | #include <linux/errno.h> | |
29 | #include <linux/types.h> | |
30 | #include <linux/socket.h> | |
31 | #include <linux/sockios.h> | |
32 | #include <linux/net.h> | |
33 | #include <linux/jiffies.h> | |
34 | #include <linux/in.h> | |
35 | #include <linux/in6.h> | |
36 | #include <linux/netdevice.h> | |
37 | #include <linux/init.h> | |
38 | #include <linux/jhash.h> | |
39 | #include <linux/ipsec.h> | |
40 | #include <linux/times.h> | |
41 | #include <linux/slab.h> | |
42 | #include <linux/uaccess.h> | |
43 | #include <linux/ipv6.h> | |
44 | #include <linux/icmpv6.h> | |
45 | #include <linux/random.h> | |
46 | ||
47 | #include <net/tcp.h> | |
48 | #include <net/ndisc.h> | |
49 | #include <net/inet6_hashtables.h> | |
50 | #include <net/inet6_connection_sock.h> | |
51 | #include <net/ipv6.h> | |
52 | #include <net/transp_v6.h> | |
53 | #include <net/addrconf.h> | |
54 | #include <net/ip6_route.h> | |
55 | #include <net/ip6_checksum.h> | |
56 | #include <net/inet_ecn.h> | |
57 | #include <net/protocol.h> | |
58 | #include <net/xfrm.h> | |
59 | #include <net/snmp.h> | |
60 | #include <net/dsfield.h> | |
61 | #include <net/timewait_sock.h> | |
62 | #include <net/inet_common.h> | |
63 | #include <net/secure_seq.h> | |
64 | #include <net/busy_poll.h> | |
65 | ||
66 | #include <linux/proc_fs.h> | |
67 | #include <linux/seq_file.h> | |
68 | ||
69 | #include <crypto/hash.h> | |
70 | #include <linux/scatterlist.h> | |
71 | ||
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req);

static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}

static u32 tcp_v6_init_sequence(const struct sk_buff *skb, u32 *tsoff)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source, tsoff);
}

static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;

			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 * connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr)) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
					       &usin->sin6_addr);
		else
			usin->sin6_addr = in6addr_loopback;
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If an interface was set while binding, the
			 * indices must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connecting to a link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 * TCP over IPv4
	 */

	if (addr_type & IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}
230 | ||
231 | if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) | |
232 | saddr = &sk->sk_v6_rcv_saddr; | |
233 | ||
234 | fl6.flowi6_proto = IPPROTO_TCP; | |
235 | fl6.daddr = sk->sk_v6_daddr; | |
236 | fl6.saddr = saddr ? *saddr : np->saddr; | |
237 | fl6.flowi6_oif = sk->sk_bound_dev_if; | |
238 | fl6.flowi6_mark = sk->sk_mark; | |
239 | fl6.fl6_dport = usin->sin6_port; | |
240 | fl6.fl6_sport = inet->inet_sport; | |
241 | fl6.flowi6_uid = sk->sk_uid; | |
242 | ||
243 | opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk)); | |
244 | final_p = fl6_update_dst(&fl6, opt, &final); | |
245 | ||
246 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); | |
247 | ||
248 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p); | |
249 | if (IS_ERR(dst)) { | |
250 | err = PTR_ERR(dst); | |
251 | goto failure; | |
252 | } | |
253 | ||
254 | if (!saddr) { | |
255 | saddr = &fl6.saddr; | |
256 | sk->sk_v6_rcv_saddr = *saddr; | |
257 | } | |
258 | ||
259 | /* set the source address */ | |
260 | np->saddr = *saddr; | |
261 | inet->inet_rcv_saddr = LOOPBACK4_IPV6; | |
262 | ||
263 | sk->sk_gso_type = SKB_GSO_TCPV6; | |
264 | ip6_dst_store(sk, dst, NULL, NULL); | |
265 | ||
266 | if (tcp_death_row.sysctl_tw_recycle && | |
267 | !tp->rx_opt.ts_recent_stamp && | |
268 | ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr)) | |
269 | tcp_fetch_timewait_stamp(sk, dst); | |
270 | ||
271 | icsk->icsk_ext_hdr_len = 0; | |
272 | if (opt) | |
273 | icsk->icsk_ext_hdr_len = opt->opt_flen + | |
274 | opt->opt_nflen; | |
275 | ||
276 | tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); | |
277 | ||
278 | inet->inet_dport = usin->sin6_port; | |
279 | ||
280 | tcp_set_state(sk, TCP_SYN_SENT); | |
281 | err = inet6_hash_connect(&tcp_death_row, sk); | |
282 | if (err) | |
283 | goto late_failure; | |
284 | ||
285 | sk_set_txhash(sk); | |
286 | ||
287 | if (!tp->write_seq && likely(!tp->repair)) | |
288 | tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32, | |
289 | sk->sk_v6_daddr.s6_addr32, | |
290 | inet->inet_sport, | |
291 | inet->inet_dport, | |
292 | &tp->tsoffset); | |
293 | ||
294 | err = tcp_connect(sk); | |
295 | if (err) | |
296 | goto late_failure; | |
297 | ||
298 | return 0; | |
299 | ||
300 | late_failure: | |
301 | tcp_set_state(sk, TCP_CLOSE); | |
302 | __sk_dst_reset(sk); | |
303 | failure: | |
304 | inet->inet_dport = 0; | |
305 | sk->sk_route_caps = 0; | |
306 | return err; | |
307 | } | |
308 | ||
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex);

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		if (!sock_owned_by_user(sk)) {
			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			if (dst)
				dst->ops->redirect(dst, sk, skb);
		}
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &sk->sk_tsq_flags))
			sock_hold(sk);
		goto out;
	}

414 | ||
415 | /* Might be for an request_sock */ | |
416 | switch (sk->sk_state) { | |
417 | case TCP_SYN_SENT: | |
418 | case TCP_SYN_RECV: | |
419 | /* Only in fast or simultaneous open. If a fast open socket is | |
420 | * is already accepted it is treated as a connected one below. | |
421 | */ | |
422 | if (fastopen && !fastopen->sk) | |
423 | break; | |
424 | ||
425 | if (!sock_owned_by_user(sk)) { | |
426 | sk->sk_err = err; | |
427 | sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */ | |
428 | ||
429 | tcp_done(sk); | |
430 | } else | |
431 | sk->sk_err_soft = err; | |
432 | goto out; | |
433 | } | |
434 | ||
	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->ipv6_opt);
	kfree_skb(inet_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}

static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);

		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

#endif

static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}

static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
};

static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, __be32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *)skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup even if it is for an RST;
	 * the underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	/* If sk is not NULL, it means we did a successful lookup and the
	 * incoming route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We are not losing security here:
		 * the incoming packet is checked with the md5 hash of the
		 * found key, and no RST is generated if the hash doesn't
		 * match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, NULL, 0,
					    &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			goto out;

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto out;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}

static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    __be32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp + tcp_rsk(req)->ts_off,
			req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}

static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}

static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move the header back to the beginning if
	 * xfrm6_policy_check() and tcp_v6_fill_cb() are going to be
	 * called again. ip6_datagram_recv_specific_ctl() also expects
	 * IP6CB to be there.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}

static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 * v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_mc_list = NULL;
		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = tcp_v6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment IPv4 tcp
		 * worked with the IPv6 icsk.icsk_af_ops.
		 * Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_mc_list = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt = NULL;
	newnp->mcast_oif = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much more clever, but we
	   do one more thing here: reattach optmem to newsk.
	 */
	opt = ireq->ipv6_opt;
	if (!opt)
		opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_mask(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_mask(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions) {
				tcp_v6_restore_cb(newnp->pktoptions);
				skb_set_owner_r(newnp->pktoptions, newsk);
			}
		}
	}

	return newsk;

out_overflow:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	tcp_listendrop(sk);
	return NULL;
}

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: the socket is IPv6. An IPv4 packet arrives,
	   goes to the IPv4 receive handler and is backlogged.
	   From the backlog it always comes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (tcp_filter(sk, skb))
		goto discard;

	/*
	 * socket locking is here for SMP purposes as backlog rcv
	 * is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code where we
	   may make it not affect IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, the whole idea behind IPV6_PKTOPTIONS
	   looks not very well thought out. For now we latch
	   options received in the last packet enqueued
	   by tcp. Feel free to propose a better solution.
					--ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;

ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			tcp_v6_restore_cb(opt_skb);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
}

static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	bool refcounted;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 * Count it even if it's bad.
	 */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
				th->source, th->dest, inet6_iif(skb),
				&refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		tcp_v6_fill_cb(skb, hdr, th);
		if (tcp_v6_inbound_md5_hash(sk, skb)) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		refcounted = true;
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (tcp_add_backlog(sk, skb)) {
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	if (refcounted)
		sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    skb, __tcp_hdrlen(th),
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb));
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);

			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			refcounted = false;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_restore_cb(skb);
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}

static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};

1642 | #ifdef CONFIG_TCP_MD5SIG | |
1643 | static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = { | |
1644 | .md5_lookup = tcp_v6_md5_lookup, | |
1645 | .calc_md5_hash = tcp_v6_md5_hash_skb, | |
1646 | .md5_parse = tcp_v6_parse_md5_keys, | |
1647 | }; | |
1648 | #endif | |
1649 | ||
1650 | /* | |
1651 | * TCP over IPv4 via INET6 API | |
1652 | */ | |
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

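/* Teardown mirrors the split setup above: state shared with IPv4 is
 * released first, then the IPv6-only socket state.
 */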
static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
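/* The helpers below each emit one row of /proc/net/tcp6; the column
 * layout matches the header printed by tcp6_seq_show():
 *   sl local_address remote_address st tx_queue rx_queue tr tm->when
 *   retrnsmt uid timeout inode ...
 */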
static void get_openreq6(struct seq_file *seq,
			 const struct request_sock *req, int i)
{
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,    /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq),
				    sock_i_uid(req->rsk_listener)),
		   0, /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

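/* Timer classes reported in the "tr" column, as encoded below: 0 - no
 * timer pending, 1 - retransmit/early retransmit/tail loss probe,
 * 2 - sk_timer (keepalive), 4 - zero window probe.  TIME_WAIT rows use
 * 3 (see get_timewait6_sock()).
 */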
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	int rx_queue;
	int state;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	state = sk_state_load(sp);
	if (state == TCP_LISTEN)
		rx_queue = sp->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   state,
		   tp->write_seq - tp->snd_una,
		   rx_queue,
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   state == TCP_LISTEN ?
			fastopenq->max_qlen :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}

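/* A TIME_WAIT socket keeps only addresses, ports and the expiry timer,
 * so most columns are fixed at zero and tw_substate stands in for the
 * socket state.
 */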
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait6_sock(seq, v, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq6(seq, v, st->num);
	else
		get_tcp6_sock(seq, v, st->num);
out:
	return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = tcp_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

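/* The proto table binds the generic TCP entry points to the IPv6
 * flavours of init/destroy, backlog receive and the shared hash tables.
 */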
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};

static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	= tcp_v6_early_demux,
	.handler	= tcp_v6_rcv,
	.err_handler	= tcp_v6_err,
	.flags		= INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_TCP,
	.prot		= &tcpv6_prot,
	.ops		= &inet6_stream_ops,
	.flags		= INET_PROTOSW_PERMANENT |
			  INET_PROTOSW_ICSK,
};

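/* Each network namespace gets its own control socket (net->ipv6.tcp_sk),
 * a kernel-internal raw socket used for stateless replies such as RSTs.
 */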
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

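/* Module init: register the protocol handler, then the protosw entry,
 * then the pernet ops; on failure, unwind in reverse order through the
 * labels at the bottom.
 */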
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

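/* Module exit: tear down strictly in reverse order of registration. */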
void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}