net/ipv4/tcp_ipv4.c
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Implementation of the Transmission Control Protocol(TCP).
7 *
8 * IPv4 specific functions
9 *
10 *
11 * code split from:
12 * linux/ipv4/tcp.c
13 * linux/ipv4/tcp_input.c
14 * linux/ipv4/tcp_output.c
15 *
16 * See tcp.c for author information
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 */
23
24 /*
25 * Changes:
26 * David S. Miller : New socket lookup architecture.
27 * This code is dedicated to John Dyson.
28 * David S. Miller : Change semantics of established hash,
29 * half is devoted to TIME_WAIT sockets
30 * and the rest go in the other half.
31 * Andi Kleen : Add support for syncookies and fixed
32 * some bugs: ip options weren't passed to
33 * the TCP layer, missed a check for an
34 * ACK bit.
35 * Andi Kleen : Implemented fast path mtu discovery.
36 * Fixed many serious bugs in the
37 * request_sock handling and moved
38 * most of it into the af independent code.
39 * Added tail drop and some other bugfixes.
40 * Added new listen semantics.
41 * Mike McLagan : Routing by source
42 * Juan Jose Ciarlante: ip_dynaddr bits
43 * Andi Kleen: various fixes.
44 * Vitaly E. Lavrov : Transparent proxy revived after year
45 * coma.
46 * Andi Kleen : Fix new listen.
47 * Andi Kleen : Fix accept error reporting.
48 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
49 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
50 * a single port at the same time.
51 */
52
53 #define pr_fmt(fmt) "TCP: " fmt
54
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
65
66 #include <net/net_namespace.h>
67 #include <net/icmp.h>
68 #include <net/inet_hashtables.h>
69 #include <net/tcp.h>
70 #include <net/transp_v6.h>
71 #include <net/ipv6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
74 #include <net/xfrm.h>
75 #include <net/secure_seq.h>
76 #include <net/busy_poll.h>
77
78 #include <linux/inet.h>
79 #include <linux/ipv6.h>
80 #include <linux/stddef.h>
81 #include <linux/proc_fs.h>
82 #include <linux/seq_file.h>
83
84 #include <crypto/hash.h>
85 #include <linux/scatterlist.h>
86
87 int sysctl_tcp_low_latency __read_mostly;
88
89 #ifdef CONFIG_TCP_MD5SIG
90 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
91 __be32 daddr, __be32 saddr, const struct tcphdr *th);
92 #endif
93
94 struct inet_hashinfo tcp_hashinfo;
95 EXPORT_SYMBOL(tcp_hashinfo);
96
97 static u32 tcp_v4_init_seq(const struct sk_buff *skb)
98 {
99 return secure_tcp_seq(ip_hdr(skb)->daddr,
100 ip_hdr(skb)->saddr,
101 tcp_hdr(skb)->dest,
102 tcp_hdr(skb)->source);
103 }
104
105 static u32 tcp_v4_init_ts_off(const struct sk_buff *skb)
106 {
107 return secure_tcp_ts_off(ip_hdr(skb)->daddr,
108 ip_hdr(skb)->saddr);
109 }
110
111 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
112 {
113 const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
114 struct tcp_sock *tp = tcp_sk(sk);
115
116 /* With PAWS, it is safe from the viewpoint
117 of data integrity. Even without PAWS it is safe provided sequence
118 spaces do not overlap, i.e. at data rates <= 80Mbit/sec.
119 
120 Actually, the idea is close to VJ's, only the timestamp cache is
121 held not per host but per port pair, and the TW bucket is used as the
122 state holder.
123 
124 If the TW bucket has already been destroyed we fall back to VJ's scheme
125 and use the initial timestamp retrieved from the peer table.
126 */
127 if (tcptw->tw_ts_recent_stamp &&
128 (!twp || (sock_net(sk)->ipv4.sysctl_tcp_tw_reuse &&
129 get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
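/* Start the new connection's write sequence just past the old incarnation's
 * snd_nxt plus a maximal window, so duplicate segments from the old
 * connection cannot fall inside the new sequence space.
 */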
130 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
131 if (tp->write_seq == 0)
132 tp->write_seq = 1;
133 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
134 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
135 sock_hold(sktw);
136 return 1;
137 }
138
139 return 0;
140 }
141 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
142
143 /* This will initiate an outgoing connection. */
144 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
145 {
146 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
147 struct inet_sock *inet = inet_sk(sk);
148 struct tcp_sock *tp = tcp_sk(sk);
149 __be16 orig_sport, orig_dport;
150 __be32 daddr, nexthop;
151 struct flowi4 *fl4;
152 struct rtable *rt;
153 int err;
154 struct ip_options_rcu *inet_opt;
155 struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
156
157 if (addr_len < sizeof(struct sockaddr_in))
158 return -EINVAL;
159
160 if (usin->sin_family != AF_INET)
161 return -EAFNOSUPPORT;
162
163 nexthop = daddr = usin->sin_addr.s_addr;
164 inet_opt = rcu_dereference_protected(inet->inet_opt,
165 lockdep_sock_is_held(sk));
166 if (inet_opt && inet_opt->opt.srr) {
167 if (!daddr)
168 return -EINVAL;
169 nexthop = inet_opt->opt.faddr;
170 }
171
172 orig_sport = inet->inet_sport;
173 orig_dport = usin->sin_port;
174 fl4 = &inet->cork.fl.u.ip4;
175 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
176 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
177 IPPROTO_TCP,
178 orig_sport, orig_dport, sk);
179 if (IS_ERR(rt)) {
180 err = PTR_ERR(rt);
181 if (err == -ENETUNREACH)
182 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
183 return err;
184 }
185
186 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
187 ip_rt_put(rt);
188 return -ENETUNREACH;
189 }
190
191 if (!inet_opt || !inet_opt->opt.srr)
192 daddr = fl4->daddr;
193
194 if (!inet->inet_saddr)
195 inet->inet_saddr = fl4->saddr;
196 sk_rcv_saddr_set(sk, inet->inet_saddr);
197
198 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
199 /* Reset inherited state */
200 tp->rx_opt.ts_recent = 0;
201 tp->rx_opt.ts_recent_stamp = 0;
202 if (likely(!tp->repair))
203 tp->write_seq = 0;
204 }
205
206 inet->inet_dport = usin->sin_port;
207 sk_daddr_set(sk, daddr);
208
209 inet_csk(sk)->icsk_ext_hdr_len = 0;
210 if (inet_opt)
211 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
212
213 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
214
215 /* Socket identity is still unknown (sport may be zero).
216 * However we set the state to SYN-SENT and, without releasing the socket
217 * lock, select a source port, enter ourselves into the hash tables and
218 * complete initialization after this.
219 */
220 tcp_set_state(sk, TCP_SYN_SENT);
221 err = inet_hash_connect(tcp_death_row, sk);
222 if (err)
223 goto failure;
224
225 sk_set_txhash(sk);
226
227 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
228 inet->inet_sport, inet->inet_dport, sk);
229 if (IS_ERR(rt)) {
230 err = PTR_ERR(rt);
231 rt = NULL;
232 goto failure;
233 }
234 /* OK, now commit destination to socket. */
235 sk->sk_gso_type = SKB_GSO_TCPV4;
236 sk_setup_caps(sk, &rt->dst);
237 rt = NULL;
238
239 if (likely(!tp->repair)) {
240 if (!tp->write_seq)
241 tp->write_seq = secure_tcp_seq(inet->inet_saddr,
242 inet->inet_daddr,
243 inet->inet_sport,
244 usin->sin_port);
245 tp->tsoffset = secure_tcp_ts_off(inet->inet_saddr,
246 inet->inet_daddr);
247 }
248
249 inet->inet_id = tp->write_seq ^ jiffies;
250
251 if (tcp_fastopen_defer_connect(sk, &err))
252 return err;
253 if (err)
254 goto failure;
255
256 err = tcp_connect(sk);
257
258 if (err)
259 goto failure;
260
261 return 0;
262
263 failure:
264 /*
265 * This unhashes the socket and releases the local port,
266 * if necessary.
267 */
268 tcp_set_state(sk, TCP_CLOSE);
269 ip_rt_put(rt);
270 sk->sk_route_caps = 0;
271 inet->inet_dport = 0;
272 return err;
273 }
274 EXPORT_SYMBOL(tcp_v4_connect);
275
276 /*
277 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
278 * It can be called through tcp_release_cb() if socket was owned by user
279 * at the time tcp_v4_err() was called to handle ICMP message.
280 */
281 void tcp_v4_mtu_reduced(struct sock *sk)
282 {
283 struct inet_sock *inet = inet_sk(sk);
284 struct dst_entry *dst;
285 u32 mtu;
286
287 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
288 return;
289 mtu = tcp_sk(sk)->mtu_info;
290 dst = inet_csk_update_pmtu(sk, mtu);
291 if (!dst)
292 return;
293
294 /* Something is about to go wrong... Remember the soft error
295 * in case this connection is not able to recover.
296 */
297 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
298 sk->sk_err_soft = EMSGSIZE;
299
300 mtu = dst_mtu(dst);
301
302 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
303 ip_sk_accept_pmtu(sk) &&
304 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
305 tcp_sync_mss(sk, mtu);
306
307 /* Resend the TCP packet because it's
308 * clear that the old packet has been
309 * dropped. This is the new "fast" path mtu
310 * discovery.
311 */
312 tcp_simple_retransmit(sk);
313 } /* else let the usual retransmit timer handle it */
314 }
315 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
316
317 static void do_redirect(struct sk_buff *skb, struct sock *sk)
318 {
319 struct dst_entry *dst = __sk_dst_check(sk, 0);
320
321 if (dst)
322 dst->ops->redirect(dst, sk, skb);
323 }
324
325
326 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
327 void tcp_req_err(struct sock *sk, u32 seq, bool abort)
328 {
329 struct request_sock *req = inet_reqsk(sk);
330 struct net *net = sock_net(sk);
331
332 /* ICMPs are not backlogged, hence we cannot get
333 * an established socket here.
334 */
335 if (seq != tcp_rsk(req)->snt_isn) {
336 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
337 } else if (abort) {
338 /*
339 * Still in SYN_RECV, just remove it silently.
340 * There is no good way to pass the error to the newly
341 * created socket, and POSIX does not want network
342 * errors returned from accept().
343 */
344 inet_csk_reqsk_queue_drop(req->rsk_listener, req);
345 tcp_listendrop(req->rsk_listener);
346 }
347 reqsk_put(req);
348 }
349 EXPORT_SYMBOL(tcp_req_err);
350
351 /*
352 * This routine is called by the ICMP module when it gets some
353 * sort of error condition. If err < 0 then the socket should
354 * be closed and the error returned to the user. If err > 0
355 * it's just the icmp type << 8 | icmp code. After adjustment,
356 * the header points to the first 8 bytes of the tcp header. We need
357 * to find the appropriate port.
358 *
359 * The locking strategy used here is very "optimistic". When
360 * someone else accesses the socket the ICMP is just dropped
361 * and for some paths there is no check at all.
362 * A more general error queue to queue errors for later handling
363 * is probably better.
364 *
365 */
366
367 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
368 {
369 const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
370 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
371 struct inet_connection_sock *icsk;
372 struct tcp_sock *tp;
373 struct inet_sock *inet;
374 const int type = icmp_hdr(icmp_skb)->type;
375 const int code = icmp_hdr(icmp_skb)->code;
376 struct sock *sk;
377 struct sk_buff *skb;
378 struct request_sock *fastopen;
379 __u32 seq, snd_una;
380 __u32 remaining;
381 int err;
382 struct net *net = dev_net(icmp_skb->dev);
383
384 sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
385 th->dest, iph->saddr, ntohs(th->source),
386 inet_iif(icmp_skb));
387 if (!sk) {
388 __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
389 return;
390 }
391 if (sk->sk_state == TCP_TIME_WAIT) {
392 inet_twsk_put(inet_twsk(sk));
393 return;
394 }
395 seq = ntohl(th->seq);
396 if (sk->sk_state == TCP_NEW_SYN_RECV)
397 return tcp_req_err(sk, seq,
398 type == ICMP_PARAMETERPROB ||
399 type == ICMP_TIME_EXCEEDED ||
400 (type == ICMP_DEST_UNREACH &&
401 (code == ICMP_NET_UNREACH ||
402 code == ICMP_HOST_UNREACH)));
403
404 bh_lock_sock(sk);
405 /* If too many ICMPs get dropped on busy
406 * servers this needs to be solved differently.
407 * We do take care of the PMTU discovery (RFC1191) special case:
408 * we can receive locally generated ICMP messages while the socket is held.
409 */
410 if (sock_owned_by_user(sk)) {
411 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
412 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
413 }
414 if (sk->sk_state == TCP_CLOSE)
415 goto out;
416
417 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
418 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
419 goto out;
420 }
421
422 icsk = inet_csk(sk);
423 tp = tcp_sk(sk);
424 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
425 fastopen = tp->fastopen_rsk;
426 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
427 if (sk->sk_state != TCP_LISTEN &&
428 !between(seq, snd_una, tp->snd_nxt)) {
429 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
430 goto out;
431 }
432
433 switch (type) {
434 case ICMP_REDIRECT:
435 if (!sock_owned_by_user(sk))
436 do_redirect(icmp_skb, sk);
437 goto out;
438 case ICMP_SOURCE_QUENCH:
439 /* Just silently ignore these. */
440 goto out;
441 case ICMP_PARAMETERPROB:
442 err = EPROTO;
443 break;
444 case ICMP_DEST_UNREACH:
445 if (code > NR_ICMP_UNREACH)
446 goto out;
447
448 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
449 /* We are not interested in TCP_LISTEN and open_requests
450 * (SYN-ACKs sent out by Linux are always <576 bytes so
451 * they should go through unfragmented).
452 */
453 if (sk->sk_state == TCP_LISTEN)
454 goto out;
455
456 tp->mtu_info = info;
457 if (!sock_owned_by_user(sk)) {
458 tcp_v4_mtu_reduced(sk);
459 } else {
460 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
461 sock_hold(sk);
462 }
463 goto out;
464 }
465
466 err = icmp_err_convert[code].errno;
467 /* check if icmp_skb allows reverting the backoff
468 * (see draft-zimmermann-tcp-lcd) */
469 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
470 break;
471 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
472 !icsk->icsk_backoff || fastopen)
473 break;
474
475 if (sock_owned_by_user(sk))
476 break;
477
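/* The ICMP error matches the unacknowledged head of the write queue:
 * undo one step of RTO backoff and re-arm the retransmit timer with the
 * time remaining, since the loss was likely a routing transient rather
 * than congestion.
 */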
478 icsk->icsk_backoff--;
479 icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
480 TCP_TIMEOUT_INIT;
481 icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
482
483 skb = tcp_write_queue_head(sk);
484 BUG_ON(!skb);
485
486 remaining = icsk->icsk_rto -
487 min(icsk->icsk_rto,
488 tcp_time_stamp - tcp_skb_timestamp(skb));
489
490 if (remaining) {
491 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
492 remaining, TCP_RTO_MAX);
493 } else {
494 /* RTO revert clocked out retransmission.
495 * Will retransmit now */
496 tcp_retransmit_timer(sk);
497 }
498
499 break;
500 case ICMP_TIME_EXCEEDED:
501 err = EHOSTUNREACH;
502 break;
503 default:
504 goto out;
505 }
506
507 switch (sk->sk_state) {
508 case TCP_SYN_SENT:
509 case TCP_SYN_RECV:
510 /* Only in fast or simultaneous open. If a fast open socket is
511 * already accepted it is treated as a connected one below.
512 */
513 if (fastopen && !fastopen->sk)
514 break;
515
516 if (!sock_owned_by_user(sk)) {
517 sk->sk_err = err;
518
519 sk->sk_error_report(sk);
520
521 tcp_done(sk);
522 } else {
523 sk->sk_err_soft = err;
524 }
525 goto out;
526 }
527
528 /* If we've already connected we will keep trying
529 * until we time out, or the user gives up.
530 *
531 * rfc1122 4.2.3.9 allows only PROTO_UNREACH and PORT_UNREACH
532 * to be considered hard errors (well, FRAG_FAILED too,
533 * but it is obsoleted by pmtu discovery).
534 *
535 * Note that in the modern internet, where routing is unreliable
536 * and broken firewalls sit in every dark corner sending random
537 * errors ordered by their masters, even these two messages have finally
538 * lost their original sense (even Linux sends invalid PORT_UNREACHs).
539 *
540 * Now we are in compliance with RFCs.
541 * --ANK (980905)
542 */
543
544 inet = inet_sk(sk);
545 if (!sock_owned_by_user(sk) && inet->recverr) {
546 sk->sk_err = err;
547 sk->sk_error_report(sk);
548 } else { /* Only an error on timeout */
549 sk->sk_err_soft = err;
550 }
551
552 out:
553 bh_unlock_sock(sk);
554 sock_put(sk);
555 }
556
557 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
558 {
559 struct tcphdr *th = tcp_hdr(skb);
560
561 if (skb->ip_summed == CHECKSUM_PARTIAL) {
562 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
563 skb->csum_start = skb_transport_header(skb) - skb->head;
564 skb->csum_offset = offsetof(struct tcphdr, check);
565 } else {
566 th->check = tcp_v4_check(skb->len, saddr, daddr,
567 csum_partial(th,
568 th->doff << 2,
569 skb->csum));
570 }
571 }
572
573 /* This routine computes an IPv4 TCP checksum. */
574 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
575 {
576 const struct inet_sock *inet = inet_sk(sk);
577
578 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
579 }
580 EXPORT_SYMBOL(tcp_v4_send_check);
581
582 /*
583 * This routine will send an RST to the other tcp.
584 *
585 * Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
586 * for a reset?
587 * Answer: if a packet caused the RST, it is not for a socket
588 * existing in our system; if it is matched to a socket,
589 * it is just a duplicate segment or a bug in the other side's TCP.
590 * So we build the reply based only on parameters
591 * that arrived with the segment.
592 * Exception: precedence violation. We do not implement it in any case.
593 */
594
595 static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
596 {
597 const struct tcphdr *th = tcp_hdr(skb);
598 struct {
599 struct tcphdr th;
600 #ifdef CONFIG_TCP_MD5SIG
601 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
602 #endif
603 } rep;
604 struct ip_reply_arg arg;
605 #ifdef CONFIG_TCP_MD5SIG
606 struct tcp_md5sig_key *key = NULL;
607 const __u8 *hash_location = NULL;
608 unsigned char newhash[16];
609 int genhash;
610 struct sock *sk1 = NULL;
611 #endif
612 struct net *net;
613
614 /* Never send a reset in response to a reset. */
615 if (th->rst)
616 return;
617
618 /* If sk is not NULL, it means we did a successful lookup and the incoming
619 * route had to be correct. prequeue might have dropped our dst.
620 */
621 if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
622 return;
623
624 /* Swap the send and the receive. */
625 memset(&rep, 0, sizeof(rep));
626 rep.th.dest = th->source;
627 rep.th.source = th->dest;
628 rep.th.doff = sizeof(struct tcphdr) / 4;
629 rep.th.rst = 1;
630
631 if (th->ack) {
632 rep.th.seq = th->ack_seq;
633 } else {
634 rep.th.ack = 1;
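/* Acknowledge everything the offending segment occupied: SYN and FIN each
 * consume one sequence number, plus the payload beyond the TCP header.
 */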
635 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
636 skb->len - (th->doff << 2));
637 }
638
639 memset(&arg, 0, sizeof(arg));
640 arg.iov[0].iov_base = (unsigned char *)&rep;
641 arg.iov[0].iov_len = sizeof(rep.th);
642
643 net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
644 #ifdef CONFIG_TCP_MD5SIG
645 rcu_read_lock();
646 hash_location = tcp_parse_md5sig_option(th);
647 if (sk && sk_fullsock(sk)) {
648 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
649 &ip_hdr(skb)->saddr, AF_INET);
650 } else if (hash_location) {
651 /*
652 * The active side is gone. Try to find the listening socket through
653 * the source port, and then find the md5 key through the listening socket.
654 * We do not lose security here:
655 * the incoming packet is checked against the md5 hash of the key we find,
656 * and no RST is generated if the md5 hash doesn't match.
657 */
658 sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
659 ip_hdr(skb)->saddr,
660 th->source, ip_hdr(skb)->daddr,
661 ntohs(th->source), inet_iif(skb));
662 /* don't send an RST if we can't find a key */
663 if (!sk1)
664 goto out;
665
666 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
667 &ip_hdr(skb)->saddr, AF_INET);
668 if (!key)
669 goto out;
670
671
672 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
673 if (genhash || memcmp(hash_location, newhash, 16) != 0)
674 goto out;
675
676 }
677
678 if (key) {
679 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
680 (TCPOPT_NOP << 16) |
681 (TCPOPT_MD5SIG << 8) |
682 TCPOLEN_MD5SIG);
683 /* Update length and the length the header thinks exists */
684 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
685 rep.th.doff = arg.iov[0].iov_len / 4;
686
687 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
688 key, ip_hdr(skb)->saddr,
689 ip_hdr(skb)->daddr, &rep.th);
690 }
691 #endif
692 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
693 ip_hdr(skb)->saddr, /* XXX */
694 arg.iov[0].iov_len, IPPROTO_TCP, 0);
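/* csumoffset is counted in 16-bit words from the start of the TCP header. */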
695 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
696 arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;
697
698 /* When the socket is gone, all binding information is lost and
699 * routing might fail in this case. No choice here: if we choose to force the
700 * input interface, we will misroute in the case of an asymmetric route.
701 */
702 if (sk)
703 arg.bound_dev_if = sk->sk_bound_dev_if;
704
705 BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
706 offsetof(struct inet_timewait_sock, tw_bound_dev_if));
707
708 arg.tos = ip_hdr(skb)->tos;
709 arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
710 local_bh_disable();
711 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
712 skb, &TCP_SKB_CB(skb)->header.h4.opt,
713 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
714 &arg, arg.iov[0].iov_len);
715
716 __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
717 __TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
718 local_bh_enable();
719
720 #ifdef CONFIG_TCP_MD5SIG
721 out:
722 rcu_read_unlock();
723 #endif
724 }
725
726 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
727 outside of socket context, is certainly ugly. What can I do?
728 */
729
730 static void tcp_v4_send_ack(const struct sock *sk,
731 struct sk_buff *skb, u32 seq, u32 ack,
732 u32 win, u32 tsval, u32 tsecr, int oif,
733 struct tcp_md5sig_key *key,
734 int reply_flags, u8 tos)
735 {
736 const struct tcphdr *th = tcp_hdr(skb);
737 struct {
738 struct tcphdr th;
739 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
740 #ifdef CONFIG_TCP_MD5SIG
741 + (TCPOLEN_MD5SIG_ALIGNED >> 2)
742 #endif
743 ];
744 } rep;
745 struct net *net = sock_net(sk);
746 struct ip_reply_arg arg;
747
748 memset(&rep.th, 0, sizeof(struct tcphdr));
749 memset(&arg, 0, sizeof(arg));
750
751 arg.iov[0].iov_base = (unsigned char *)&rep;
752 arg.iov[0].iov_len = sizeof(rep.th);
753 if (tsecr) {
754 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
755 (TCPOPT_TIMESTAMP << 8) |
756 TCPOLEN_TIMESTAMP);
757 rep.opt[1] = htonl(tsval);
758 rep.opt[2] = htonl(tsecr);
759 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
760 }
761
762 /* Swap the send and the receive. */
763 rep.th.dest = th->source;
764 rep.th.source = th->dest;
765 rep.th.doff = arg.iov[0].iov_len / 4;
766 rep.th.seq = htonl(seq);
767 rep.th.ack_seq = htonl(ack);
768 rep.th.ack = 1;
769 rep.th.window = htons(win);
770
771 #ifdef CONFIG_TCP_MD5SIG
772 if (key) {
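/* The MD5 option is placed after the three 32-bit words of the
 * timestamp option when one was written above.
 */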
773 int offset = (tsecr) ? 3 : 0;
774
775 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
776 (TCPOPT_NOP << 16) |
777 (TCPOPT_MD5SIG << 8) |
778 TCPOLEN_MD5SIG);
779 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
780 rep.th.doff = arg.iov[0].iov_len/4;
781
782 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
783 key, ip_hdr(skb)->saddr,
784 ip_hdr(skb)->daddr, &rep.th);
785 }
786 #endif
787 arg.flags = reply_flags;
788 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
789 ip_hdr(skb)->saddr, /* XXX */
790 arg.iov[0].iov_len, IPPROTO_TCP, 0);
791 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
792 if (oif)
793 arg.bound_dev_if = oif;
794 arg.tos = tos;
795 arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
796 local_bh_disable();
797 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
798 skb, &TCP_SKB_CB(skb)->header.h4.opt,
799 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
800 &arg, arg.iov[0].iov_len);
801
802 __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
803 local_bh_enable();
804 }
805
806 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
807 {
808 struct inet_timewait_sock *tw = inet_twsk(sk);
809 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
810
811 tcp_v4_send_ack(sk, skb,
812 tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
813 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
814 tcp_time_stamp + tcptw->tw_ts_offset,
815 tcptw->tw_ts_recent,
816 tw->tw_bound_dev_if,
817 tcp_twsk_md5_key(tcptw),
818 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
819 tw->tw_tos
820 );
821
822 inet_twsk_put(tw);
823 }
824
825 static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
826 struct request_sock *req)
827 {
828 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
829 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
830 */
831 u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
832 tcp_sk(sk)->snd_nxt;
833
834 /* RFC 7323 2.3
835 * The window field (SEG.WND) of every outgoing segment, with the
836 * exception of <SYN> segments, MUST be right-shifted by
837 * Rcv.Wind.Shift bits:
838 */
839 tcp_v4_send_ack(sk, skb, seq,
840 tcp_rsk(req)->rcv_nxt,
841 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
842 tcp_time_stamp + tcp_rsk(req)->ts_off,
843 req->ts_recent,
844 0,
845 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
846 AF_INET),
847 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
848 ip_hdr(skb)->tos);
849 }
850
851 /*
852 * Send a SYN-ACK after having received a SYN.
853 * This still operates on a request_sock only, not on a big
854 * socket.
855 */
856 static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
857 struct flowi *fl,
858 struct request_sock *req,
859 struct tcp_fastopen_cookie *foc,
860 enum tcp_synack_type synack_type)
861 {
862 const struct inet_request_sock *ireq = inet_rsk(req);
863 struct flowi4 fl4;
864 int err = -1;
865 struct sk_buff *skb;
866
867 /* First, grab a route. */
868 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
869 return -1;
870
871 skb = tcp_make_synack(sk, dst, req, foc, synack_type);
872
873 if (skb) {
874 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
875
876 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
877 ireq->ir_rmt_addr,
878 ireq->opt);
879 err = net_xmit_eval(err);
880 }
881
882 return err;
883 }
884
885 /*
886 * IPv4 request_sock destructor.
887 */
888 static void tcp_v4_reqsk_destructor(struct request_sock *req)
889 {
890 kfree(inet_rsk(req)->opt);
891 }
892
893 #ifdef CONFIG_TCP_MD5SIG
894 /*
895 * RFC2385 MD5 checksumming requires a mapping of
896 * IP address->MD5 Key.
897 * We need to maintain these in the sk structure.
898 */
899
900 /* Find the Key structure for an address. */
901 struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
902 const union tcp_md5_addr *addr,
903 int family)
904 {
905 const struct tcp_sock *tp = tcp_sk(sk);
906 struct tcp_md5sig_key *key;
907 unsigned int size = sizeof(struct in_addr);
908 const struct tcp_md5sig_info *md5sig;
909
910 /* caller either holds rcu_read_lock() or socket lock */
911 md5sig = rcu_dereference_check(tp->md5sig_info,
912 lockdep_sock_is_held(sk));
913 if (!md5sig)
914 return NULL;
915 #if IS_ENABLED(CONFIG_IPV6)
916 if (family == AF_INET6)
917 size = sizeof(struct in6_addr);
918 #endif
919 hlist_for_each_entry_rcu(key, &md5sig->head, node) {
920 if (key->family != family)
921 continue;
922 if (!memcmp(&key->addr, addr, size))
923 return key;
924 }
925 return NULL;
926 }
927 EXPORT_SYMBOL(tcp_md5_do_lookup);
928
929 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
930 const struct sock *addr_sk)
931 {
932 const union tcp_md5_addr *addr;
933
934 addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
935 return tcp_md5_do_lookup(sk, addr, AF_INET);
936 }
937 EXPORT_SYMBOL(tcp_v4_md5_lookup);
938
939 /* This can be called on a newly created socket, from other files */
940 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
941 int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
942 {
943 /* Add Key to the list */
944 struct tcp_md5sig_key *key;
945 struct tcp_sock *tp = tcp_sk(sk);
946 struct tcp_md5sig_info *md5sig;
947
948 key = tcp_md5_do_lookup(sk, addr, family);
949 if (key) {
950 /* Pre-existing entry - just update that one. */
951 memcpy(key->key, newkey, newkeylen);
952 key->keylen = newkeylen;
953 return 0;
954 }
955
956 md5sig = rcu_dereference_protected(tp->md5sig_info,
957 lockdep_sock_is_held(sk));
958 if (!md5sig) {
959 md5sig = kmalloc(sizeof(*md5sig), gfp);
960 if (!md5sig)
961 return -ENOMEM;
962
963 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
964 INIT_HLIST_HEAD(&md5sig->head);
965 rcu_assign_pointer(tp->md5sig_info, md5sig);
966 }
967
968 key = sock_kmalloc(sk, sizeof(*key), gfp);
969 if (!key)
970 return -ENOMEM;
971 if (!tcp_alloc_md5sig_pool()) {
972 sock_kfree_s(sk, key, sizeof(*key));
973 return -ENOMEM;
974 }
975
976 memcpy(key->key, newkey, newkeylen);
977 key->keylen = newkeylen;
978 key->family = family;
979 memcpy(&key->addr, addr,
980 (family == AF_INET6) ? sizeof(struct in6_addr) :
981 sizeof(struct in_addr));
982 hlist_add_head_rcu(&key->node, &md5sig->head);
983 return 0;
984 }
985 EXPORT_SYMBOL(tcp_md5_do_add);
986
987 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
988 {
989 struct tcp_md5sig_key *key;
990
991 key = tcp_md5_do_lookup(sk, addr, family);
992 if (!key)
993 return -ENOENT;
994 hlist_del_rcu(&key->node);
995 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
996 kfree_rcu(key, rcu);
997 return 0;
998 }
999 EXPORT_SYMBOL(tcp_md5_do_del);
1000
1001 static void tcp_clear_md5_list(struct sock *sk)
1002 {
1003 struct tcp_sock *tp = tcp_sk(sk);
1004 struct tcp_md5sig_key *key;
1005 struct hlist_node *n;
1006 struct tcp_md5sig_info *md5sig;
1007
1008 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1009
1010 hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1011 hlist_del_rcu(&key->node);
1012 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1013 kfree_rcu(key, rcu);
1014 }
1015 }
1016
1017 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1018 int optlen)
1019 {
1020 struct tcp_md5sig cmd;
1021 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1022
1023 if (optlen < sizeof(cmd))
1024 return -EINVAL;
1025
1026 if (copy_from_user(&cmd, optval, sizeof(cmd)))
1027 return -EFAULT;
1028
1029 if (sin->sin_family != AF_INET)
1030 return -EINVAL;
1031
1032 if (!cmd.tcpm_keylen)
1033 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1034 AF_INET);
1035
1036 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1037 return -EINVAL;
1038
1039 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1040 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1041 GFP_KERNEL);
1042 }
1043
1044 static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
1045 __be32 daddr, __be32 saddr,
1046 const struct tcphdr *th, int nbytes)
1047 {
1048 struct tcp4_pseudohdr *bp;
1049 struct scatterlist sg;
1050 struct tcphdr *_th;
1051
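/* Per RFC 2385, hash the IPv4 pseudo-header (addresses, zero pad, protocol,
 * segment length) followed by the TCP header with its checksum field zeroed.
 */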
1052 bp = hp->scratch;
1053 bp->saddr = saddr;
1054 bp->daddr = daddr;
1055 bp->pad = 0;
1056 bp->protocol = IPPROTO_TCP;
1057 bp->len = cpu_to_be16(nbytes);
1058
1059 _th = (struct tcphdr *)(bp + 1);
1060 memcpy(_th, th, sizeof(*th));
1061 _th->check = 0;
1062
1063 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
1064 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
1065 sizeof(*bp) + sizeof(*th));
1066 return crypto_ahash_update(hp->md5_req);
1067 }
1068
1069 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1070 __be32 daddr, __be32 saddr, const struct tcphdr *th)
1071 {
1072 struct tcp_md5sig_pool *hp;
1073 struct ahash_request *req;
1074
1075 hp = tcp_get_md5sig_pool();
1076 if (!hp)
1077 goto clear_hash_noput;
1078 req = hp->md5_req;
1079
1080 if (crypto_ahash_init(req))
1081 goto clear_hash;
1082 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
1083 goto clear_hash;
1084 if (tcp_md5_hash_key(hp, key))
1085 goto clear_hash;
1086 ahash_request_set_crypt(req, NULL, md5_hash, 0);
1087 if (crypto_ahash_final(req))
1088 goto clear_hash;
1089
1090 tcp_put_md5sig_pool();
1091 return 0;
1092
1093 clear_hash:
1094 tcp_put_md5sig_pool();
1095 clear_hash_noput:
1096 memset(md5_hash, 0, 16);
1097 return 1;
1098 }
1099
1100 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1101 const struct sock *sk,
1102 const struct sk_buff *skb)
1103 {
1104 struct tcp_md5sig_pool *hp;
1105 struct ahash_request *req;
1106 const struct tcphdr *th = tcp_hdr(skb);
1107 __be32 saddr, daddr;
1108
1109 if (sk) { /* valid for establish/request sockets */
1110 saddr = sk->sk_rcv_saddr;
1111 daddr = sk->sk_daddr;
1112 } else {
1113 const struct iphdr *iph = ip_hdr(skb);
1114 saddr = iph->saddr;
1115 daddr = iph->daddr;
1116 }
1117
1118 hp = tcp_get_md5sig_pool();
1119 if (!hp)
1120 goto clear_hash_noput;
1121 req = hp->md5_req;
1122
1123 if (crypto_ahash_init(req))
1124 goto clear_hash;
1125
1126 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
1127 goto clear_hash;
1128 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1129 goto clear_hash;
1130 if (tcp_md5_hash_key(hp, key))
1131 goto clear_hash;
1132 ahash_request_set_crypt(req, NULL, md5_hash, 0);
1133 if (crypto_ahash_final(req))
1134 goto clear_hash;
1135
1136 tcp_put_md5sig_pool();
1137 return 0;
1138
1139 clear_hash:
1140 tcp_put_md5sig_pool();
1141 clear_hash_noput:
1142 memset(md5_hash, 0, 16);
1143 return 1;
1144 }
1145 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1146
1147 #endif
1148
1149 /* Called with rcu_read_lock() */
1150 static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
1151 const struct sk_buff *skb)
1152 {
1153 #ifdef CONFIG_TCP_MD5SIG
1154 /*
1155 * This gets called for each TCP segment that arrives
1156 * so we want to be efficient.
1157 * We have 3 drop cases:
1158 * o No MD5 hash and one expected.
1159 * o MD5 hash and we're not expecting one.
1160 * o MD5 hash and it's wrong.
1161 */
1162 const __u8 *hash_location = NULL;
1163 struct tcp_md5sig_key *hash_expected;
1164 const struct iphdr *iph = ip_hdr(skb);
1165 const struct tcphdr *th = tcp_hdr(skb);
1166 int genhash;
1167 unsigned char newhash[16];
1168
1169 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1170 AF_INET);
1171 hash_location = tcp_parse_md5sig_option(th);
1172
1173 /* We've parsed the options - do we have a hash? */
1174 if (!hash_expected && !hash_location)
1175 return false;
1176
1177 if (hash_expected && !hash_location) {
1178 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1179 return true;
1180 }
1181
1182 if (!hash_expected && hash_location) {
1183 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1184 return true;
1185 }
1186
1187 /* Okay, so this is hash_expected and hash_location -
1188 * so we need to calculate the checksum.
1189 */
1190 genhash = tcp_v4_md5_hash_skb(newhash,
1191 hash_expected,
1192 NULL, skb);
1193
1194 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1195 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
1196 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1197 &iph->saddr, ntohs(th->source),
1198 &iph->daddr, ntohs(th->dest),
1199 genhash ? " tcp_v4_calc_md5_hash failed"
1200 : "");
1201 return true;
1202 }
1203 return false;
1204 #endif
1205 return false;
1206 }
1207
1208 static void tcp_v4_init_req(struct request_sock *req,
1209 const struct sock *sk_listener,
1210 struct sk_buff *skb)
1211 {
1212 struct inet_request_sock *ireq = inet_rsk(req);
1213
1214 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1215 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1216 ireq->opt = tcp_v4_save_options(skb);
1217 }
1218
1219 static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1220 struct flowi *fl,
1221 const struct request_sock *req)
1222 {
1223 return inet_csk_route_req(sk, &fl->u.ip4, req);
1224 }
1225
1226 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1227 .family = PF_INET,
1228 .obj_size = sizeof(struct tcp_request_sock),
1229 .rtx_syn_ack = tcp_rtx_synack,
1230 .send_ack = tcp_v4_reqsk_send_ack,
1231 .destructor = tcp_v4_reqsk_destructor,
1232 .send_reset = tcp_v4_send_reset,
1233 .syn_ack_timeout = tcp_syn_ack_timeout,
1234 };
1235
1236 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1237 .mss_clamp = TCP_MSS_DEFAULT,
1238 #ifdef CONFIG_TCP_MD5SIG
1239 .req_md5_lookup = tcp_v4_md5_lookup,
1240 .calc_md5_hash = tcp_v4_md5_hash_skb,
1241 #endif
1242 .init_req = tcp_v4_init_req,
1243 #ifdef CONFIG_SYN_COOKIES
1244 .cookie_init_seq = cookie_v4_init_sequence,
1245 #endif
1246 .route_req = tcp_v4_route_req,
1247 .init_seq = tcp_v4_init_seq,
1248 .init_ts_off = tcp_v4_init_ts_off,
1249 .send_synack = tcp_v4_send_synack,
1250 };
1251
1252 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1253 {
1254 /* Never answer SYNs sent to broadcast or multicast */
1255 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1256 goto drop;
1257
1258 return tcp_conn_request(&tcp_request_sock_ops,
1259 &tcp_request_sock_ipv4_ops, sk, skb);
1260
1261 drop:
1262 tcp_listendrop(sk);
1263 return 0;
1264 }
1265 EXPORT_SYMBOL(tcp_v4_conn_request);
1266
1267
1268 /*
1269 * The three way handshake has completed - we got a valid synack -
1270 * now create the new socket.
1271 */
1272 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1273 struct request_sock *req,
1274 struct dst_entry *dst,
1275 struct request_sock *req_unhash,
1276 bool *own_req)
1277 {
1278 struct inet_request_sock *ireq;
1279 struct inet_sock *newinet;
1280 struct tcp_sock *newtp;
1281 struct sock *newsk;
1282 #ifdef CONFIG_TCP_MD5SIG
1283 struct tcp_md5sig_key *key;
1284 #endif
1285 struct ip_options_rcu *inet_opt;
1286
1287 if (sk_acceptq_is_full(sk))
1288 goto exit_overflow;
1289
1290 newsk = tcp_create_openreq_child(sk, req, skb);
1291 if (!newsk)
1292 goto exit_nonewsk;
1293
1294 newsk->sk_gso_type = SKB_GSO_TCPV4;
1295 inet_sk_rx_dst_set(newsk, skb);
1296
1297 newtp = tcp_sk(newsk);
1298 newinet = inet_sk(newsk);
1299 ireq = inet_rsk(req);
1300 sk_daddr_set(newsk, ireq->ir_rmt_addr);
1301 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1302 newsk->sk_bound_dev_if = ireq->ir_iif;
1303 newinet->inet_saddr = ireq->ir_loc_addr;
1304 inet_opt = ireq->opt;
1305 rcu_assign_pointer(newinet->inet_opt, inet_opt);
1306 ireq->opt = NULL;
1307 newinet->mc_index = inet_iif(skb);
1308 newinet->mc_ttl = ip_hdr(skb)->ttl;
1309 newinet->rcv_tos = ip_hdr(skb)->tos;
1310 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1311 if (inet_opt)
1312 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1313 newinet->inet_id = newtp->write_seq ^ jiffies;
1314
1315 if (!dst) {
1316 dst = inet_csk_route_child_sock(sk, newsk, req);
1317 if (!dst)
1318 goto put_and_exit;
1319 } else {
1320 /* syncookie case : see end of cookie_v4_check() */
1321 }
1322 sk_setup_caps(newsk, dst);
1323
1324 tcp_ca_openreq_child(newsk, dst);
1325
1326 tcp_sync_mss(newsk, dst_mtu(dst));
1327 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1328
1329 tcp_initialize_rcv_mss(newsk);
1330
1331 #ifdef CONFIG_TCP_MD5SIG
1332 /* Copy over the MD5 key from the original socket */
1333 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1334 AF_INET);
1335 if (key) {
1336 /*
1337 * We're using one, so create a matching key
1338 * on the newsk structure. If we fail to get
1339 * memory, then we end up not copying the key
1340 * across. Shucks.
1341 */
1342 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1343 AF_INET, key->key, key->keylen, GFP_ATOMIC);
1344 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1345 }
1346 #endif
1347
1348 if (__inet_inherit_port(sk, newsk) < 0)
1349 goto put_and_exit;
1350 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1351 if (*own_req)
1352 tcp_move_syn(newtp, req);
1353
1354 return newsk;
1355
1356 exit_overflow:
1357 NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1358 exit_nonewsk:
1359 dst_release(dst);
1360 exit:
1361 tcp_listendrop(sk);
1362 return NULL;
1363 put_and_exit:
1364 inet_csk_prepare_forced_close(newsk);
1365 tcp_done(newsk);
1366 goto exit;
1367 }
1368 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1369
1370 static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
1371 {
1372 #ifdef CONFIG_SYN_COOKIES
1373 const struct tcphdr *th = tcp_hdr(skb);
1374
1375 if (!th->syn)
1376 sk = cookie_v4_check(sk, skb);
1377 #endif
1378 return sk;
1379 }
1380
1381 /* The socket must have its spinlock held when we get
1382 * here, unless it is a TCP_LISTEN socket.
1383 *
1384 * We have a potential double-lock case here, so even when
1385 * doing backlog processing we use the BH locking scheme.
1386 * This is because we cannot sleep with the original spinlock
1387 * held.
1388 */
1389 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1390 {
1391 struct sock *rsk;
1392
1393 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1394 struct dst_entry *dst = sk->sk_rx_dst;
1395
1396 sock_rps_save_rxhash(sk, skb);
1397 sk_mark_napi_id(sk, skb);
1398 if (dst) {
1399 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1400 !dst->ops->check(dst, 0)) {
1401 dst_release(dst);
1402 sk->sk_rx_dst = NULL;
1403 }
1404 }
1405 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1406 return 0;
1407 }
1408
1409 if (tcp_checksum_complete(skb))
1410 goto csum_err;
1411
1412 if (sk->sk_state == TCP_LISTEN) {
1413 struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1414
1415 if (!nsk)
1416 goto discard;
1417 if (nsk != sk) {
1418 if (tcp_child_process(sk, nsk, skb)) {
1419 rsk = nsk;
1420 goto reset;
1421 }
1422 return 0;
1423 }
1424 } else
1425 sock_rps_save_rxhash(sk, skb);
1426
1427 if (tcp_rcv_state_process(sk, skb)) {
1428 rsk = sk;
1429 goto reset;
1430 }
1431 return 0;
1432
1433 reset:
1434 tcp_v4_send_reset(rsk, skb);
1435 discard:
1436 kfree_skb(skb);
1437 /* Be careful here. If this function gets more complicated and
1438 * gcc suffers from register pressure on the x86, sk (in %ebx)
1439 * might be destroyed here. This current version compiles correctly,
1440 * but you have been warned.
1441 */
1442 return 0;
1443
1444 csum_err:
1445 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1446 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1447 goto discard;
1448 }
1449 EXPORT_SYMBOL(tcp_v4_do_rcv);
1450
1451 void tcp_v4_early_demux(struct sk_buff *skb)
1452 {
1453 const struct iphdr *iph;
1454 const struct tcphdr *th;
1455 struct sock *sk;
1456
1457 if (skb->pkt_type != PACKET_HOST)
1458 return;
1459
1460 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1461 return;
1462
1463 iph = ip_hdr(skb);
1464 th = tcp_hdr(skb);
1465
1466 if (th->doff < sizeof(struct tcphdr) / 4)
1467 return;
1468
1469 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1470 iph->saddr, th->source,
1471 iph->daddr, ntohs(th->dest),
1472 skb->skb_iif);
1473 if (sk) {
1474 skb->sk = sk;
1475 skb->destructor = sock_edemux;
1476 if (sk_fullsock(sk)) {
1477 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1478
1479 if (dst)
1480 dst = dst_check(dst, 0);
1481 if (dst &&
1482 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1483 skb_dst_set_noref(skb, dst);
1484 }
1485 }
1486 }
1487
1488 /* Packet is added to VJ-style prequeue for processing in process
1489 * context, if a reader task is waiting. Apparently, this exciting
1490 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1491 * failed somewhere. Latency? Burstiness? Well, at least now we will
1492 * see why it failed. 8)8) --ANK
1493 *
1494 */
1495 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1496 {
1497 struct tcp_sock *tp = tcp_sk(sk);
1498
1499 if (sysctl_tcp_low_latency || !tp->ucopy.task)
1500 return false;
1501
1502 if (skb->len <= tcp_hdrlen(skb) &&
1503 skb_queue_len(&tp->ucopy.prequeue) == 0)
1504 return false;
1505
1506 /* Before escaping RCU protected region, we need to take care of skb
1507 * dst. Prequeue is only enabled for established sockets.
1508 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst.
1509 * Instead of doing a full sk_rx_dst validity check here, let's perform
1510 * an optimistic check.
1511 */
1512 if (likely(sk->sk_rx_dst))
1513 skb_dst_drop(skb);
1514 else
1515 skb_dst_force_safe(skb);
1516
1517 __skb_queue_tail(&tp->ucopy.prequeue, skb);
1518 tp->ucopy.memory += skb->truesize;
1519 if (skb_queue_len(&tp->ucopy.prequeue) >= 32 ||
1520 tp->ucopy.memory + atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) {
1521 struct sk_buff *skb1;
1522
1523 BUG_ON(sock_owned_by_user(sk));
1524 __NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED,
1525 skb_queue_len(&tp->ucopy.prequeue));
1526
1527 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
1528 sk_backlog_rcv(sk, skb1);
1529
1530 tp->ucopy.memory = 0;
1531 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1532 wake_up_interruptible_sync_poll(sk_sleep(sk),
1533 POLLIN | POLLRDNORM | POLLRDBAND);
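/* If no ACK is already scheduled, arm a delayed-ACK timer at 3/4 of the
 * minimum RTO so an ACK still goes out even if the reader does not drain
 * the prequeue promptly.
 */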
1534 if (!inet_csk_ack_scheduled(sk))
1535 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1536 (3 * tcp_rto_min(sk)) / 4,
1537 TCP_RTO_MAX);
1538 }
1539 return true;
1540 }
1541 EXPORT_SYMBOL(tcp_prequeue);
1542
1543 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
1544 {
1545 u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
1546
1547 /* Only the socket owner can try to collapse/prune rx queues
1548 * to reduce memory overhead, so add a little headroom here.
1549 * Only a few socket backlogs are likely to be non-empty at any given time.
1550 */
1551 limit += 64*1024;
1552
1553 /* In case all data was pulled from skb frags (in __pskb_pull_tail()),
1554 * we can fix skb->truesize to its real value to avoid future drops.
1555 * This is valid because skb is not yet charged to the socket.
1556 * It has been noticed that pure SACK packets were sometimes dropped
1557 * (if cooked by drivers without the copybreak feature).
1558 */
1559 skb_condense(skb);
1560
1561 if (unlikely(sk_add_backlog(sk, skb, limit))) {
1562 bh_unlock_sock(sk);
1563 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
1564 return true;
1565 }
1566 return false;
1567 }
1568 EXPORT_SYMBOL(tcp_add_backlog);
1569
1570 int tcp_filter(struct sock *sk, struct sk_buff *skb)
1571 {
1572 struct tcphdr *th = (struct tcphdr *)skb->data;
1573 unsigned int eaten = skb->len;
1574 int err;
1575
1576 err = sk_filter_trim_cap(sk, skb, th->doff * 4);
1577 if (!err) {
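/* The filter may have trimmed the payload; shrink end_seq by the number
 * of bytes removed so sequence accounting stays consistent.
 */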
1578 eaten -= skb->len;
1579 TCP_SKB_CB(skb)->end_seq -= eaten;
1580 }
1581 return err;
1582 }
1583 EXPORT_SYMBOL(tcp_filter);
1584
1585 /*
1586 * From tcp_input.c
1587 */
1588
1589 int tcp_v4_rcv(struct sk_buff *skb)
1590 {
1591 struct net *net = dev_net(skb->dev);
1592 const struct iphdr *iph;
1593 const struct tcphdr *th;
1594 bool refcounted;
1595 struct sock *sk;
1596 int ret;
1597
1598 if (skb->pkt_type != PACKET_HOST)
1599 goto discard_it;
1600
1601 /* Count it even if it's bad */
1602 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1603
1604 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1605 goto discard_it;
1606
1607 th = (const struct tcphdr *)skb->data;
1608
1609 if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
1610 goto bad_packet;
1611 if (!pskb_may_pull(skb, th->doff * 4))
1612 goto discard_it;
1613
1614 /* An explanation is required here, I think.
1615 * Packet length and doff are validated by header prediction,
1616 * provided the case of th->doff==0 is eliminated.
1617 * So, we defer the checks. */
1618
1619 if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1620 goto csum_error;
1621
1622 th = (const struct tcphdr *)skb->data;
1623 iph = ip_hdr(skb);
1624 /* This is tricky: we move IPCB to its correct location inside TCP_SKB_CB().
1625 * barrier() makes sure the compiler won't play fool^W aliasing games.
1626 */
1627 memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1628 sizeof(struct inet_skb_parm));
1629 barrier();
1630
1631 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1632 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1633 skb->len - th->doff * 4);
1634 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1635 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1636 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1637 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1638 TCP_SKB_CB(skb)->sacked = 0;
1639
1640 lookup:
1641 sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
1642 th->dest, &refcounted);
1643 if (!sk)
1644 goto no_tcp_socket;
1645
1646 process:
1647 if (sk->sk_state == TCP_TIME_WAIT)
1648 goto do_time_wait;
1649
1650 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1651 struct request_sock *req = inet_reqsk(sk);
1652 struct sock *nsk;
1653
1654 sk = req->rsk_listener;
1655 if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
1656 sk_drops_add(sk, skb);
1657 reqsk_put(req);
1658 goto discard_it;
1659 }
1660 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1661 inet_csk_reqsk_queue_drop_and_put(sk, req);
1662 goto lookup;
1663 }
1664 /* We own a reference on the listener, increase it again
1665 * as we might lose it too soon.
1666 */
1667 sock_hold(sk);
1668 refcounted = true;
1669 nsk = tcp_check_req(sk, skb, req, false);
1670 if (!nsk) {
1671 reqsk_put(req);
1672 goto discard_and_relse;
1673 }
1674 if (nsk == sk) {
1675 reqsk_put(req);
1676 } else if (tcp_child_process(sk, nsk, skb)) {
1677 tcp_v4_send_reset(nsk, skb);
1678 goto discard_and_relse;
1679 } else {
1680 sock_put(sk);
1681 return 0;
1682 }
1683 }
1684 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1685 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1686 goto discard_and_relse;
1687 }
1688
1689 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1690 goto discard_and_relse;
1691
1692 if (tcp_v4_inbound_md5_hash(sk, skb))
1693 goto discard_and_relse;
1694
1695 nf_reset(skb);
1696
1697 if (tcp_filter(sk, skb))
1698 goto discard_and_relse;
1699 th = (const struct tcphdr *)skb->data;
1700 iph = ip_hdr(skb);
1701
1702 skb->dev = NULL;
1703
1704 if (sk->sk_state == TCP_LISTEN) {
1705 ret = tcp_v4_do_rcv(sk, skb);
1706 goto put_and_return;
1707 }
1708
1709 sk_incoming_cpu_update(sk);
1710
1711 bh_lock_sock_nested(sk);
1712 tcp_segs_in(tcp_sk(sk), skb);
1713 ret = 0;
1714 if (!sock_owned_by_user(sk)) {
1715 if (!tcp_prequeue(sk, skb))
1716 ret = tcp_v4_do_rcv(sk, skb);
1717 } else if (tcp_add_backlog(sk, skb)) {
1718 goto discard_and_relse;
1719 }
1720 bh_unlock_sock(sk);
1721
1722 put_and_return:
1723 if (refcounted)
1724 sock_put(sk);
1725
1726 return ret;
1727
1728 no_tcp_socket:
1729 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1730 goto discard_it;
1731
1732 if (tcp_checksum_complete(skb)) {
1733 csum_error:
1734 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1735 bad_packet:
1736 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1737 } else {
1738 tcp_v4_send_reset(NULL, skb);
1739 }
1740
1741 discard_it:
1742 /* Discard frame. */
1743 kfree_skb(skb);
1744 return 0;
1745
1746 discard_and_relse:
1747 sk_drops_add(sk, skb);
1748 if (refcounted)
1749 sock_put(sk);
1750 goto discard_it;
1751
1752 do_time_wait:
1753 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1754 inet_twsk_put(inet_twsk(sk));
1755 goto discard_it;
1756 }
1757
1758 if (tcp_checksum_complete(skb)) {
1759 inet_twsk_put(inet_twsk(sk));
1760 goto csum_error;
1761 }
1762 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1763 case TCP_TW_SYN: {
1764 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1765 &tcp_hashinfo, skb,
1766 __tcp_hdrlen(th),
1767 iph->saddr, th->source,
1768 iph->daddr, th->dest,
1769 inet_iif(skb));
1770 if (sk2) {
1771 inet_twsk_deschedule_put(inet_twsk(sk));
1772 sk = sk2;
1773 refcounted = false;
1774 goto process;
1775 }
1776 /* Fall through to ACK */
1777 }
1778 case TCP_TW_ACK:
1779 tcp_v4_timewait_ack(sk, skb);
1780 break;
1781 case TCP_TW_RST:
1782 tcp_v4_send_reset(sk, skb);
1783 inet_twsk_deschedule_put(inet_twsk(sk));
1784 goto discard_it;
1785 case TCP_TW_SUCCESS:;
1786 }
1787 goto discard_it;
1788 }
1789
1790 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1791 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1792 .twsk_unique = tcp_twsk_unique,
1793 .twsk_destructor= tcp_twsk_destructor,
1794 };
1795
1796 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1797 {
1798 struct dst_entry *dst = skb_dst(skb);
1799
1800 if (dst && dst_hold_safe(dst)) {
1801 sk->sk_rx_dst = dst;
1802 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1803 }
1804 }
1805 EXPORT_SYMBOL(inet_sk_rx_dst_set);
1806
1807 const struct inet_connection_sock_af_ops ipv4_specific = {
1808 .queue_xmit = ip_queue_xmit,
1809 .send_check = tcp_v4_send_check,
1810 .rebuild_header = inet_sk_rebuild_header,
1811 .sk_rx_dst_set = inet_sk_rx_dst_set,
1812 .conn_request = tcp_v4_conn_request,
1813 .syn_recv_sock = tcp_v4_syn_recv_sock,
1814 .net_header_len = sizeof(struct iphdr),
1815 .setsockopt = ip_setsockopt,
1816 .getsockopt = ip_getsockopt,
1817 .addr2sockaddr = inet_csk_addr2sockaddr,
1818 .sockaddr_len = sizeof(struct sockaddr_in),
1819 #ifdef CONFIG_COMPAT
1820 .compat_setsockopt = compat_ip_setsockopt,
1821 .compat_getsockopt = compat_ip_getsockopt,
1822 #endif
1823 .mtu_reduced = tcp_v4_mtu_reduced,
1824 };
1825 EXPORT_SYMBOL(ipv4_specific);
1826
1827 #ifdef CONFIG_TCP_MD5SIG
1828 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1829 .md5_lookup = tcp_v4_md5_lookup,
1830 .calc_md5_hash = tcp_v4_md5_hash_skb,
1831 .md5_parse = tcp_v4_parse_md5_keys,
1832 };
1833 #endif
1834
1835 /* NOTE: A lot of things are set to zero explicitly by the call to
1836 * sk_alloc(), so they need not be done here.
1837 */
1838 static int tcp_v4_init_sock(struct sock *sk)
1839 {
1840 struct inet_connection_sock *icsk = inet_csk(sk);
1841
1842 tcp_init_sock(sk);
1843
1844 icsk->icsk_af_ops = &ipv4_specific;
1845
1846 #ifdef CONFIG_TCP_MD5SIG
1847 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1848 #endif
1849
1850 return 0;
1851 }
1852
1853 void tcp_v4_destroy_sock(struct sock *sk)
1854 {
1855 struct tcp_sock *tp = tcp_sk(sk);
1856
1857 tcp_clear_xmit_timers(sk);
1858
1859 tcp_cleanup_congestion_control(sk);
1860
1861 /* Clean up the write buffer. */
1862 tcp_write_queue_purge(sk);
1863
1864 /* Check if we want to disable active TFO */
1865 tcp_fastopen_active_disable_ofo_check(sk);
1866
1867 /* Cleans up our, hopefully empty, out_of_order_queue. */
1868 skb_rbtree_purge(&tp->out_of_order_queue);
1869
1870 #ifdef CONFIG_TCP_MD5SIG
1871 /* Clean up the MD5 key list, if any */
1872 if (tp->md5sig_info) {
1873 tcp_clear_md5_list(sk);
1874 kfree_rcu(tp->md5sig_info, rcu);
1875 tp->md5sig_info = NULL;
1876 }
1877 #endif
1878
1879 /* Clean the prequeue; it really must be empty */
1880 __skb_queue_purge(&tp->ucopy.prequeue);
1881
1882 /* Clean up a referenced TCP bind bucket. */
1883 if (inet_csk(sk)->icsk_bind_hash)
1884 inet_put_port(sk);
1885
1886 BUG_ON(tp->fastopen_rsk);
1887
1888 /* If socket is aborted during connect operation */
1889 tcp_free_fastopen_req(tp);
1890 tcp_saved_syn_free(tp);
1891
1892 sk_sockets_allocated_dec(sk);
1893 }
1894 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1895
1896 #ifdef CONFIG_PROC_FS
1897 /* Proc filesystem TCP sock list dumping. */
1898
1899 /*
1900 * Get the next listener socket following cur. If cur is NULL, get the first socket
1901 * starting from bucket given in st->bucket; when st->bucket is zero the
1902 * very first socket in the hash table is returned.
1903 */
1904 static void *listening_get_next(struct seq_file *seq, void *cur)
1905 {
1906 struct tcp_iter_state *st = seq->private;
1907 struct net *net = seq_file_net(seq);
1908 struct inet_listen_hashbucket *ilb;
1909 struct sock *sk = cur;
1910
1911 if (!sk) {
1912 get_head:
1913 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1914 spin_lock(&ilb->lock);
1915 sk = sk_head(&ilb->head);
1916 st->offset = 0;
1917 goto get_sk;
1918 }
1919 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1920 ++st->num;
1921 ++st->offset;
1922
1923 sk = sk_next(sk);
1924 get_sk:
1925 sk_for_each_from(sk) {
1926 if (!net_eq(sock_net(sk), net))
1927 continue;
1928 if (sk->sk_family == st->family)
1929 return sk;
1930 }
1931 spin_unlock(&ilb->lock);
1932 st->offset = 0;
1933 if (++st->bucket < INET_LHTABLE_SIZE)
1934 goto get_head;
1935 return NULL;
1936 }
1937
1938 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1939 {
1940 struct tcp_iter_state *st = seq->private;
1941 void *rc;
1942
1943 st->bucket = 0;
1944 st->offset = 0;
1945 rc = listening_get_next(seq, NULL);
1946
1947 while (rc && *pos) {
1948 rc = listening_get_next(seq, rc);
1949 --*pos;
1950 }
1951 return rc;
1952 }
1953
1954 static inline bool empty_bucket(const struct tcp_iter_state *st)
1955 {
1956 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
1957 }
1958
1959 /*
1960 * Get the first established socket, starting from the bucket given in st->bucket.
1961 * If st->bucket is zero, the very first socket in the hash is returned.
1962 */
1963 static void *established_get_first(struct seq_file *seq)
1964 {
1965 struct tcp_iter_state *st = seq->private;
1966 struct net *net = seq_file_net(seq);
1967 void *rc = NULL;
1968
1969 st->offset = 0;
1970 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1971 struct sock *sk;
1972 struct hlist_nulls_node *node;
1973 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1974
1975 /* Lockless fast path for the common case of empty buckets */
1976 if (empty_bucket(st))
1977 continue;
1978
1979 spin_lock_bh(lock);
1980 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1981 if (sk->sk_family != st->family ||
1982 !net_eq(sock_net(sk), net)) {
1983 continue;
1984 }
1985 rc = sk;
1986 goto out;
1987 }
1988 spin_unlock_bh(lock);
1989 }
1990 out:
1991 return rc;
1992 }
1993
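/* Advance to the next socket in the established hash; when the current
 * chain is exhausted, drop its lock and continue with the next bucket.
 */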
1994 static void *established_get_next(struct seq_file *seq, void *cur)
1995 {
1996 struct sock *sk = cur;
1997 struct hlist_nulls_node *node;
1998 struct tcp_iter_state *st = seq->private;
1999 struct net *net = seq_file_net(seq);
2000
2001 ++st->num;
2002 ++st->offset;
2003
2004 sk = sk_nulls_next(sk);
2005
2006 sk_nulls_for_each_from(sk, node) {
2007 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2008 return sk;
2009 }
2010
2011 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2012 ++st->bucket;
2013 return established_get_first(seq);
2014 }
2015
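/* Return the established socket pos entries from the start of the hash. */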
2016 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2017 {
2018 struct tcp_iter_state *st = seq->private;
2019 void *rc;
2020
2021 st->bucket = 0;
2022 rc = established_get_first(seq);
2023
2024 while (rc && pos) {
2025 rc = established_get_next(seq, rc);
2026 --pos;
2027 }
2028 return rc;
2029 }
2030
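/* Position the iterator at entry pos: listening sockets are walked first,
 * then the established hash.
 */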
2031 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2032 {
2033 void *rc;
2034 struct tcp_iter_state *st = seq->private;
2035
2036 st->state = TCP_SEQ_STATE_LISTENING;
2037 rc = listening_get_idx(seq, &pos);
2038
2039 if (!rc) {
2040 st->state = TCP_SEQ_STATE_ESTABLISHED;
2041 rc = established_get_idx(seq, pos);
2042 }
2043
2044 return rc;
2045 }
2046
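/* Try to resume the walk at the bucket/offset remembered from the previous
 * read instead of rescanning from the beginning.
 */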
2047 static void *tcp_seek_last_pos(struct seq_file *seq)
2048 {
2049 struct tcp_iter_state *st = seq->private;
2050 int offset = st->offset;
2051 int orig_num = st->num;
2052 void *rc = NULL;
2053
2054 switch (st->state) {
2055 case TCP_SEQ_STATE_LISTENING:
2056 if (st->bucket >= INET_LHTABLE_SIZE)
2057 break;
2058 st->state = TCP_SEQ_STATE_LISTENING;
2059 rc = listening_get_next(seq, NULL);
2060 while (offset-- && rc)
2061 rc = listening_get_next(seq, rc);
2062 if (rc)
2063 break;
2064 st->bucket = 0;
2065 st->state = TCP_SEQ_STATE_ESTABLISHED;
2066 /* Fallthrough */
2067 case TCP_SEQ_STATE_ESTABLISHED:
2068 if (st->bucket > tcp_hashinfo.ehash_mask)
2069 break;
2070 rc = established_get_first(seq);
2071 while (offset-- && rc)
2072 rc = established_get_next(seq, rc);
2073 }
2074
2075 st->num = orig_num;
2076
2077 return rc;
2078 }
2079
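/* seq_file ->start(): resume at the saved position when possible, otherwise
 * rewind and step forward to *pos.
 */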
2080 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2081 {
2082 struct tcp_iter_state *st = seq->private;
2083 void *rc;
2084
2085 if (*pos && *pos == st->last_pos) {
2086 rc = tcp_seek_last_pos(seq);
2087 if (rc)
2088 goto out;
2089 }
2090
2091 st->state = TCP_SEQ_STATE_LISTENING;
2092 st->num = 0;
2093 st->bucket = 0;
2094 st->offset = 0;
2095 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2096
2097 out:
2098 st->last_pos = *pos;
2099 return rc;
2100 }
2101
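/* seq_file ->next(): step to the following socket, switching from the
 * listening walk to the established walk once the former is exhausted.
 */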
2102 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2103 {
2104 struct tcp_iter_state *st = seq->private;
2105 void *rc = NULL;
2106
2107 if (v == SEQ_START_TOKEN) {
2108 rc = tcp_get_idx(seq, 0);
2109 goto out;
2110 }
2111
2112 switch (st->state) {
2113 case TCP_SEQ_STATE_LISTENING:
2114 rc = listening_get_next(seq, v);
2115 if (!rc) {
2116 st->state = TCP_SEQ_STATE_ESTABLISHED;
2117 st->bucket = 0;
2118 st->offset = 0;
2119 rc = established_get_first(seq);
2120 }
2121 break;
2122 case TCP_SEQ_STATE_ESTABLISHED:
2123 rc = established_get_next(seq, v);
2124 break;
2125 }
2126 out:
2127 ++*pos;
2128 st->last_pos = *pos;
2129 return rc;
2130 }
2131
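/* seq_file ->stop(): release whichever hash bucket lock the walk still holds. */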
2132 static void tcp_seq_stop(struct seq_file *seq, void *v)
2133 {
2134 struct tcp_iter_state *st = seq->private;
2135
2136 switch (st->state) {
2137 case TCP_SEQ_STATE_LISTENING:
2138 if (v != SEQ_START_TOKEN)
2139 spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
2140 break;
2141 case TCP_SEQ_STATE_ESTABLISHED:
2142 if (v)
2143 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2144 break;
2145 }
2146 }
2147
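/* open() handler for the /proc/net TCP listings; records the address family
 * taken from the registered afinfo in the iterator state.
 */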
2148 int tcp_seq_open(struct inode *inode, struct file *file)
2149 {
2150 struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2151 struct tcp_iter_state *s;
2152 int err;
2153
2154 err = seq_open_net(inode, file, &afinfo->seq_ops,
2155 sizeof(struct tcp_iter_state));
2156 if (err < 0)
2157 return err;
2158
2159 s = ((struct seq_file *)file->private_data)->private;
2160 s->family = afinfo->family;
2161 s->last_pos = 0;
2162 return 0;
2163 }
2164 EXPORT_SYMBOL(tcp_seq_open);
2165
2166 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2167 {
2168 int rc = 0;
2169 struct proc_dir_entry *p;
2170
2171 afinfo->seq_ops.start = tcp_seq_start;
2172 afinfo->seq_ops.next = tcp_seq_next;
2173 afinfo->seq_ops.stop = tcp_seq_stop;
2174
2175 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2176 afinfo->seq_fops, afinfo);
2177 if (!p)
2178 rc = -ENOMEM;
2179 return rc;
2180 }
2181 EXPORT_SYMBOL(tcp_proc_register);
2182
2183 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2184 {
2185 remove_proc_entry(afinfo->name, net->proc_net);
2186 }
2187 EXPORT_SYMBOL(tcp_proc_unregister);
2188
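/* Format one SYN_RECV request socket as a line of the /proc/net/tcp listing. */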
2189 static void get_openreq4(const struct request_sock *req,
2190 struct seq_file *f, int i)
2191 {
2192 const struct inet_request_sock *ireq = inet_rsk(req);
2193 long delta = req->rsk_timer.expires - jiffies;
2194
2195 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2196 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2197 i,
2198 ireq->ir_loc_addr,
2199 ireq->ir_num,
2200 ireq->ir_rmt_addr,
2201 ntohs(ireq->ir_rmt_port),
2202 TCP_SYN_RECV,
2203 0, 0, /* could print option size, but that is af dependent. */
2204 1, /* timers active (only the expire timer) */
2205 jiffies_delta_to_clock_t(delta),
2206 req->num_timeout,
2207 from_kuid_munged(seq_user_ns(f),
2208 sock_i_uid(req->rsk_listener)),
2209 0, /* non-standard timer */
2210 0, /* open_requests have no inode */
2211 0,
2212 req);
2213 }
2214
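/* Format one full TCP socket, including timer and queue state, as a line of
 * the /proc/net/tcp listing.
 */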
2215 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2216 {
2217 int timer_active;
2218 unsigned long timer_expires;
2219 const struct tcp_sock *tp = tcp_sk(sk);
2220 const struct inet_connection_sock *icsk = inet_csk(sk);
2221 const struct inet_sock *inet = inet_sk(sk);
2222 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2223 __be32 dest = inet->inet_daddr;
2224 __be32 src = inet->inet_rcv_saddr;
2225 __u16 destp = ntohs(inet->inet_dport);
2226 __u16 srcp = ntohs(inet->inet_sport);
2227 int rx_queue;
2228 int state;
2229
2230 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2231 icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
2232 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2233 timer_active = 1;
2234 timer_expires = icsk->icsk_timeout;
2235 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2236 timer_active = 4;
2237 timer_expires = icsk->icsk_timeout;
2238 } else if (timer_pending(&sk->sk_timer)) {
2239 timer_active = 2;
2240 timer_expires = sk->sk_timer.expires;
2241 } else {
2242 timer_active = 0;
2243 timer_expires = jiffies;
2244 }
2245
2246 state = sk_state_load(sk);
2247 if (state == TCP_LISTEN)
2248 rx_queue = sk->sk_ack_backlog;
2249 else
2250 /* Because we don't lock the socket,
2251 * we might find a transient negative value.
2252 */
2253 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2254
2255 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2256 "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2257 i, src, srcp, dest, destp, state,
2258 tp->write_seq - tp->snd_una,
2259 rx_queue,
2260 timer_active,
2261 jiffies_delta_to_clock_t(timer_expires - jiffies),
2262 icsk->icsk_retransmits,
2263 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2264 icsk->icsk_probes_out,
2265 sock_i_ino(sk),
2266 atomic_read(&sk->sk_refcnt), sk,
2267 jiffies_to_clock_t(icsk->icsk_rto),
2268 jiffies_to_clock_t(icsk->icsk_ack.ato),
2269 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2270 tp->snd_cwnd,
2271 state == TCP_LISTEN ?
2272 fastopenq->max_qlen :
2273 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2274 }
2275
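/* Format one TIME_WAIT socket as a line of the /proc/net/tcp listing. */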
2276 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2277 struct seq_file *f, int i)
2278 {
2279 long delta = tw->tw_timer.expires - jiffies;
2280 __be32 dest, src;
2281 __u16 destp, srcp;
2282
2283 dest = tw->tw_daddr;
2284 src = tw->tw_rcv_saddr;
2285 destp = ntohs(tw->tw_dport);
2286 srcp = ntohs(tw->tw_sport);
2287
2288 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2289 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2290 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2291 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2292 atomic_read(&tw->tw_refcnt), tw);
2293 }
2294
2295 #define TMPSZ 150
2296
2297 static int tcp4_seq_show(struct seq_file *seq, void *v)
2298 {
2299 struct tcp_iter_state *st;
2300 struct sock *sk = v;
2301
2302 seq_setwidth(seq, TMPSZ - 1);
2303 if (v == SEQ_START_TOKEN) {
2304 seq_puts(seq, " sl local_address rem_address st tx_queue "
2305 "rx_queue tr tm->when retrnsmt uid timeout "
2306 "inode");
2307 goto out;
2308 }
2309 st = seq->private;
2310
2311 if (sk->sk_state == TCP_TIME_WAIT)
2312 get_timewait4_sock(v, seq, st->num);
2313 else if (sk->sk_state == TCP_NEW_SYN_RECV)
2314 get_openreq4(v, seq, st->num);
2315 else
2316 get_tcp4_sock(v, seq, st->num);
2317 out:
2318 seq_pad(seq, '\n');
2319 return 0;
2320 }
2321
2322 static const struct file_operations tcp_afinfo_seq_fops = {
2323 .owner = THIS_MODULE,
2324 .open = tcp_seq_open,
2325 .read = seq_read,
2326 .llseek = seq_lseek,
2327 .release = seq_release_net
2328 };
2329
2330 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2331 .name = "tcp",
2332 .family = AF_INET,
2333 .seq_fops = &tcp_afinfo_seq_fops,
2334 .seq_ops = {
2335 .show = tcp4_seq_show,
2336 },
2337 };
2338
2339 static int __net_init tcp4_proc_init_net(struct net *net)
2340 {
2341 return tcp_proc_register(net, &tcp4_seq_afinfo);
2342 }
2343
2344 static void __net_exit tcp4_proc_exit_net(struct net *net)
2345 {
2346 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2347 }
2348
2349 static struct pernet_operations tcp4_net_ops = {
2350 .init = tcp4_proc_init_net,
2351 .exit = tcp4_proc_exit_net,
2352 };
2353
2354 int __init tcp4_proc_init(void)
2355 {
2356 return register_pernet_subsys(&tcp4_net_ops);
2357 }
2358
2359 void tcp4_proc_exit(void)
2360 {
2361 unregister_pernet_subsys(&tcp4_net_ops);
2362 }
2363 #endif /* CONFIG_PROC_FS */
2364
2365 struct proto tcp_prot = {
2366 .name = "TCP",
2367 .owner = THIS_MODULE,
2368 .close = tcp_close,
2369 .connect = tcp_v4_connect,
2370 .disconnect = tcp_disconnect,
2371 .accept = inet_csk_accept,
2372 .ioctl = tcp_ioctl,
2373 .init = tcp_v4_init_sock,
2374 .destroy = tcp_v4_destroy_sock,
2375 .shutdown = tcp_shutdown,
2376 .setsockopt = tcp_setsockopt,
2377 .getsockopt = tcp_getsockopt,
2378 .keepalive = tcp_set_keepalive,
2379 .recvmsg = tcp_recvmsg,
2380 .sendmsg = tcp_sendmsg,
2381 .sendpage = tcp_sendpage,
2382 .backlog_rcv = tcp_v4_do_rcv,
2383 .release_cb = tcp_release_cb,
2384 .hash = inet_hash,
2385 .unhash = inet_unhash,
2386 .get_port = inet_csk_get_port,
2387 .enter_memory_pressure = tcp_enter_memory_pressure,
2388 .stream_memory_free = tcp_stream_memory_free,
2389 .sockets_allocated = &tcp_sockets_allocated,
2390 .orphan_count = &tcp_orphan_count,
2391 .memory_allocated = &tcp_memory_allocated,
2392 .memory_pressure = &tcp_memory_pressure,
2393 .sysctl_mem = sysctl_tcp_mem,
2394 .sysctl_wmem = sysctl_tcp_wmem,
2395 .sysctl_rmem = sysctl_tcp_rmem,
2396 .max_header = MAX_TCP_HEADER,
2397 .obj_size = sizeof(struct tcp_sock),
2398 .slab_flags = SLAB_TYPESAFE_BY_RCU,
2399 .twsk_prot = &tcp_timewait_sock_ops,
2400 .rsk_prot = &tcp_request_sock_ops,
2401 .h.hashinfo = &tcp_hashinfo,
2402 .no_autobind = true,
2403 #ifdef CONFIG_COMPAT
2404 .compat_setsockopt = compat_tcp_setsockopt,
2405 .compat_getsockopt = compat_tcp_getsockopt,
2406 #endif
2407 .diag_destroy = tcp_abort,
2408 };
2409 EXPORT_SYMBOL(tcp_prot);
2410
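/* Per-netns exit: destroy the per-CPU TCP control sockets created in
 * tcp_sk_init() and free the percpu array.
 */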
2411 static void __net_exit tcp_sk_exit(struct net *net)
2412 {
2413 int cpu;
2414
2415 for_each_possible_cpu(cpu)
2416 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2417 free_percpu(net->ipv4.tcp_sk);
2418 }
2419
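/* Per-netns init: create one TCP control socket per possible CPU and set the
 * namespace's TCP sysctl defaults.
 */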
2420 static int __net_init tcp_sk_init(struct net *net)
2421 {
2422 int res, cpu, cnt;
2423
2424 net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2425 if (!net->ipv4.tcp_sk)
2426 return -ENOMEM;
2427
2428 for_each_possible_cpu(cpu) {
2429 struct sock *sk;
2430
2431 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2432 IPPROTO_TCP, net);
2433 if (res)
2434 goto fail;
2435 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
2436 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2437 }
2438
2439 net->ipv4.sysctl_tcp_ecn = 2;
2440 net->ipv4.sysctl_tcp_ecn_fallback = 1;
2441
2442 net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2443 net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2444 net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2445
2446 net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
2447 net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
2448 net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
2449
2450 net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
2451 net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
2452 net->ipv4.sysctl_tcp_syncookies = 1;
2453 net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
2454 net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
2455 net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
2456 net->ipv4.sysctl_tcp_orphan_retries = 0;
2457 net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
2458 net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
2459 net->ipv4.sysctl_tcp_tw_reuse = 0;
2460
2461 cnt = tcp_hashinfo.ehash_mask + 1;
2462 net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2;
2463 net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
2464
2465 net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
2466
2467 return 0;
2468 fail:
2469 tcp_sk_exit(net);
2470
2471 return res;
2472 }
2473
2474 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2475 {
2476 inet_twsk_purge(&tcp_hashinfo, AF_INET);
2477 }
2478
2479 static struct pernet_operations __net_initdata tcp_sk_ops = {
2480 .init = tcp_sk_init,
2481 .exit = tcp_sk_exit,
2482 .exit_batch = tcp_sk_exit_batch,
2483 };
2484
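/* Boot-time registration of the per-netns TCP state; TCP cannot function
 * without it, so failure is fatal.
 */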
2485 void __init tcp_v4_init(void)
2486 {
2487 if (register_pernet_subsys(&tcp_sk_ops))
2488 panic("Failed to create the TCP control socket.\n");
2489 }