/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

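/* Pick the initial sequence number for a passive connection from the
 * addresses and ports carried in the incoming SYN.
 */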
static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (sysctl_tcp_tw_reuse &&
		      get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	sk_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);

/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to go wrong... Remember the soft error
	 * in case this connection is not able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);

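/* Propagate an ICMP redirect to the dst cached on the socket, if any. */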
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}

/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	WARN_ON(req->sk);

	if (seq != tcp_rsk(req)->snt_isn) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	__u32 seq, snd_una;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq);

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always < 576 bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto -
			    min(icsk->icsk_rto,
				tcp_time_stamp - tcp_skb_timestamp(skb));

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note that in the modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters, even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 * --ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just a duplicate segment or a bug in the other side's TCP.
 *		So we build the reply based only on parameters
 *		that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not loosen security here:
		 * the incoming packet is checked against the md5 hash of the
		 * found key, and no RST is generated if the md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(net,
					     &tcp_hashinfo, ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			return;
		rcu_read_lock();
		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto release_sk1;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					     &ip_hdr(skb)->saddr,
					     AF_INET) : NULL;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	arg.tos = ip_hdr(skb)->tos;
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}

/* The code following below, which sends ACKs in SYN-RECV and TIME-WAIT states
   outside of socket context, is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}

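/* Answer a segment arriving for a TIME_WAIT socket with an ACK built from
 * the sequence and timestamp state recorded in the timewait sock.
 */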
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}

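/* ACK a segment received for a still-pending connection request
 * (regular SYN_RECV or Fast Open).
 */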
static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
			tcp_time_stamp,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      bool attach_req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc, attach_req);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	return err;
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
					  const union tcp_md5_addr *addr,
					  int family)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       sock_owned_by_user(sk) ||
				       lockdep_is_held((spinlock_t *)&sk->sk_lock.slock));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);

struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{
	const union tcp_md5_addr *addr;

	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}

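/* setsockopt(TCP_MD5SIG) handler: add or delete the MD5 key associated
 * with a peer address.
 */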
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}

static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) { /* valid for establish/request sockets */
		saddr = sk->sk_rcv_saddr;
		daddr = sk->sk_daddr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

#endif

/* Called with rcu_read_lock() */
static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
	return false;
#endif
	return false;
}

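/* Fill in the IPv4-specific fields of a request sock from the incoming SYN. */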
static void tcp_v4_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
	ireq->no_srccheck = inet_sk(sk_listener)->transparent;
	ireq->opt = tcp_v4_save_options(skb);
}

static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);

	if (strict) {
		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
			*strict = true;
		else
			*strict = false;
	}

	return dst;
}

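/* Operation tables used for IPv4 connection requests (request socks). */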
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_sequence,
	.send_synack	=	tcp_v4_send_synack,
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);


/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	sk_daddr_set(newsk, ireq->ir_rmt_addr);
	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

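/* For a non-SYN segment hitting a listener, let the syncookie code validate
 * the ACK and possibly create a child socket (a no-op without CONFIG_SYN_COOKIES).
 */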
static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v4_check(sk, skb);
#endif
	return sk;
}

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    !dst->ops->check(dst, 0)) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_cookie_check(sk, skb);

		if (!nsk)
			goto discard;
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);

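/* Early demux: look up an established socket straight from the IP/TCP
 * headers so its cached input route can be reused before full routing.
 */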
void tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)	--ANK
 *
 */
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	if (skb->len <= tcp_hdrlen(skb) &&
	    skb_queue_len(&tp->ucopy.prequeue) == 0)
		return false;

	/* Before escaping RCU protected region, we need to take care of skb
	 * dst. Prequeue is only enabled for established sockets.
	 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst
	 * Instead of doing full sk_rx_dst validity here, let's perform
	 * an optimistic check.
	 */
	if (likely(sk->sk_rx_dst))
		skb_dst_drop(skb);
	else
		skb_dst_force(skb);

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
						POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return true;
}
EXPORT_SYMBOL(tcp_prequeue);

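/* tcp_v4_rcv() below is the per-segment receive entry point, called by the
 * IP layer for every TCP/IPv4 segment.
 */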
1da177e4
LT
1519/*
1520 * From tcp_input.c
1521 */
1522
1523int tcp_v4_rcv(struct sk_buff *skb)
1524{
eddc9ec5 1525 const struct iphdr *iph;
cf533ea5 1526 const struct tcphdr *th;
1da177e4
LT
1527 struct sock *sk;
1528 int ret;
a86b1e30 1529 struct net *net = dev_net(skb->dev);
1da177e4
LT
1530
1531 if (skb->pkt_type != PACKET_HOST)
1532 goto discard_it;
1533
1534 /* Count it even if it's bad */
63231bdd 1535 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1da177e4
LT
1536
1537 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1538 goto discard_it;
1539
aa8223c7 1540 th = tcp_hdr(skb);
1da177e4
LT
1541
1542 if (th->doff < sizeof(struct tcphdr) / 4)
1543 goto bad_packet;
1544 if (!pskb_may_pull(skb, th->doff * 4))
1545 goto discard_it;
1546
1547 /* An explanation is required here, I think.
1548 * Packet length and doff are validated by header prediction,
caa20d9a 1549 * provided case of th->doff==0 is eliminated.
1da177e4 1550 * So, we defer the checks. */
ed70fcfc
TH
1551
1552 if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
6a5dc9e5 1553 goto csum_error;
1da177e4 1554
aa8223c7 1555 th = tcp_hdr(skb);
eddc9ec5 1556 iph = ip_hdr(skb);
971f10ec
ED
1557 /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
1558 * barrier() makes sure compiler wont play fool^Waliasing games.
1559 */
1560 memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1561 sizeof(struct inet_skb_parm));
1562 barrier();
1563
1da177e4
LT
1564 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1565 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1566 skb->len - th->doff * 4);
1567 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
e11ecddf 1568 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
04317daf 1569 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
b82d1bb4 1570 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1da177e4
LT
1571 TCP_SKB_CB(skb)->sacked = 0;
1572
4bdc3d66 1573lookup:
9a1f27c4 1574 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1da177e4
LT
1575 if (!sk)
1576 goto no_tcp_socket;
1577
bb134d5d
ED
1578process:
1579 if (sk->sk_state == TCP_TIME_WAIT)
1580 goto do_time_wait;
1581
079096f1
ED
1582 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1583 struct request_sock *req = inet_reqsk(sk);
1584 struct sock *nsk = NULL;
1585
1586 sk = req->rsk_listener;
1587 if (tcp_v4_inbound_md5_hash(sk, skb))
1588 goto discard_and_relse;
4bdc3d66 1589 if (likely(sk->sk_state == TCP_LISTEN)) {
079096f1 1590 nsk = tcp_check_req(sk, skb, req, false);
4bdc3d66 1591 } else {
f03f2e15 1592 inet_csk_reqsk_queue_drop_and_put(sk, req);
4bdc3d66
ED
1593 goto lookup;
1594 }
079096f1
ED
1595 if (!nsk) {
1596 reqsk_put(req);
1597 goto discard_it;
1598 }
1599 if (nsk == sk) {
1600 sock_hold(sk);
1601 reqsk_put(req);
1602 } else if (tcp_child_process(sk, nsk, skb)) {
1603 tcp_v4_send_reset(nsk, skb);
1604 goto discard_it;
1605 } else {
1606 return 0;
1607 }
1608 }
6cce09f8
ED
1609 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1610 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
d218d111 1611 goto discard_and_relse;
6cce09f8 1612 }
d218d111 1613
1da177e4
LT
1614 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1615 goto discard_and_relse;
9ea88a15 1616
9ea88a15
DP
1617 if (tcp_v4_inbound_md5_hash(sk, skb))
1618 goto discard_and_relse;
9ea88a15 1619
b59c2701 1620 nf_reset(skb);
1da177e4 1621
fda9ef5d 1622 if (sk_filter(sk, skb))
1da177e4
LT
1623 goto discard_and_relse;
1624
1625 skb->dev = NULL;
1626
e994b2f0
ED
1627 if (sk->sk_state == TCP_LISTEN) {
1628 ret = tcp_v4_do_rcv(sk, skb);
1629 goto put_and_return;
1630 }
1631
1632 sk_incoming_cpu_update(sk);
1633
c6366184 1634 bh_lock_sock_nested(sk);
2efd055c 1635 tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
1da177e4
LT
1636 ret = 0;
1637 if (!sock_owned_by_user(sk)) {
7bced397 1638 if (!tcp_prequeue(sk, skb))
1da177e4 1639 ret = tcp_v4_do_rcv(sk, skb);
da882c1f
ED
1640 } else if (unlikely(sk_add_backlog(sk, skb,
1641 sk->sk_rcvbuf + sk->sk_sndbuf))) {
6b03a53a 1642 bh_unlock_sock(sk);
6cce09f8 1643 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
6b03a53a
ZY
1644 goto discard_and_relse;
1645 }
1da177e4
LT
1646 bh_unlock_sock(sk);
1647
e994b2f0 1648put_and_return:
1da177e4
LT
1649 sock_put(sk);
1650
1651 return ret;
1652
1653no_tcp_socket:
1654 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1655 goto discard_it;
1656
12e25e10 1657 if (tcp_checksum_complete(skb)) {
6a5dc9e5
ED
1658csum_error:
1659 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1da177e4 1660bad_packet:
63231bdd 1661 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1da177e4 1662 } else {
cfb6eeb4 1663 tcp_v4_send_reset(NULL, skb);
1da177e4
LT
1664 }
1665
1666discard_it:
1667 /* Discard frame. */
1668 kfree_skb(skb);
e905a9ed 1669 return 0;
1da177e4
LT
1670
1671discard_and_relse:
1672 sock_put(sk);
1673 goto discard_it;
1674
1675do_time_wait:
1676 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
9469c7b4 1677 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1678 goto discard_it;
1679 }
1680
6a5dc9e5
ED
1681 if (tcp_checksum_complete(skb)) {
1682 inet_twsk_put(inet_twsk(sk));
1683 goto csum_error;
1da177e4 1684 }
9469c7b4 1685 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1da177e4 1686 case TCP_TW_SYN: {
c346dca1 1687 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
c67499c0 1688 &tcp_hashinfo,
da5e3630 1689 iph->saddr, th->source,
eddc9ec5 1690 iph->daddr, th->dest,
463c84b9 1691 inet_iif(skb));
1da177e4 1692 if (sk2) {
dbe7faa4 1693 inet_twsk_deschedule_put(inet_twsk(sk));
1da177e4
LT
1694 sk = sk2;
1695 goto process;
1696 }
1697 /* Fall through to ACK */
1698 }
1699 case TCP_TW_ACK:
1700 tcp_v4_timewait_ack(sk, skb);
1701 break;
1702 case TCP_TW_RST:
1703 goto no_tcp_socket;
1704 case TCP_TW_SUCCESS:;
1705 }
1706 goto discard_it;
1707}
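
The tail of tcp_v4_rcv() above shows the usual deferral pattern: when the socket is not owned by a user-context thread the segment is prequeued or handed to tcp_v4_do_rcv() directly, otherwise it is appended to a bounded backlog (limited to sk_rcvbuf + sk_sndbuf) for the lock owner to drain later. The sketch below is a userspace analogue only, with hypothetical names (fake_sock, rcv_dispatch, do_rcv); it is not kernel code and it collapses the socket locking into a single mutex.

/* Userspace analogue (illustrative only, not kernel code) of the dispatch
 * at the end of tcp_v4_rcv(): process now if the owner is absent, else
 * queue to a bounded backlog, else drop.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct pkt { struct pkt *next; size_t truesize; };

struct fake_sock {
        pthread_mutex_t lock;           /* stands in for the socket spinlock  */
        bool owned_by_user;             /* stands in for sock_owned_by_user() */
        struct pkt *backlog_head;
        size_t backlog_bytes, backlog_limit;
};

/* Returns false when the packet must be dropped (backlog over its limit). */
static bool rcv_dispatch(struct fake_sock *sk, struct pkt *p,
                         void (*do_rcv)(struct fake_sock *, struct pkt *))
{
        bool ok = true;

        pthread_mutex_lock(&sk->lock);
        if (!sk->owned_by_user) {
                do_rcv(sk, p);                          /* fast path          */
        } else if (sk->backlog_bytes + p->truesize > sk->backlog_limit) {
                ok = false;                             /* cf. TCPBACKLOGDROP */
        } else {
                p->next = sk->backlog_head;             /* defer to the owner */
                sk->backlog_head = p;
                sk->backlog_bytes += p->truesize;
        }
        pthread_mutex_unlock(&sk->lock);
        return ok;
}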
1708
ccb7c410
DM
1709static struct timewait_sock_ops tcp_timewait_sock_ops = {
1710 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1711 .twsk_unique = tcp_twsk_unique,
1712 .twsk_destructor= tcp_twsk_destructor,
ccb7c410 1713};
1da177e4 1714
63d02d15 1715void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
5d299f3d
ED
1716{
1717 struct dst_entry *dst = skb_dst(skb);
1718
ca777eff
ED
1719 if (dst) {
1720 dst_hold(dst);
1721 sk->sk_rx_dst = dst;
1722 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1723 }
5d299f3d 1724}
63d02d15 1725EXPORT_SYMBOL(inet_sk_rx_dst_set);
5d299f3d 1726
3b401a81 1727const struct inet_connection_sock_af_ops ipv4_specific = {
543d9cfe
ACM
1728 .queue_xmit = ip_queue_xmit,
1729 .send_check = tcp_v4_send_check,
1730 .rebuild_header = inet_sk_rebuild_header,
5d299f3d 1731 .sk_rx_dst_set = inet_sk_rx_dst_set,
543d9cfe
ACM
1732 .conn_request = tcp_v4_conn_request,
1733 .syn_recv_sock = tcp_v4_syn_recv_sock,
543d9cfe
ACM
1734 .net_header_len = sizeof(struct iphdr),
1735 .setsockopt = ip_setsockopt,
1736 .getsockopt = ip_getsockopt,
1737 .addr2sockaddr = inet_csk_addr2sockaddr,
1738 .sockaddr_len = sizeof(struct sockaddr_in),
ab1e0a13 1739 .bind_conflict = inet_csk_bind_conflict,
3fdadf7d 1740#ifdef CONFIG_COMPAT
543d9cfe
ACM
1741 .compat_setsockopt = compat_ip_setsockopt,
1742 .compat_getsockopt = compat_ip_getsockopt,
3fdadf7d 1743#endif
4fab9071 1744 .mtu_reduced = tcp_v4_mtu_reduced,
1da177e4 1745};
4bc2f18b 1746EXPORT_SYMBOL(ipv4_specific);
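
This ops table is what keeps the rest of TCP address-family independent: the connection code calls through inet_csk(sk)->icsk_af_ops instead of naming ip_queue_xmit() or tcp_v4_send_check() directly, and the table is installed in tcp_v4_init_sock() below (the IPv6 code installs its own table the same way). A minimal illustrative sketch follows; tcp_transmit_one() is a hypothetical helper that is not in this file and will not build outside the kernel tree, but the queue_xmit/send_check signatures match this kernel version.

/* Hypothetical helper, shown only to illustrate the icsk_af_ops indirection. */
static int tcp_transmit_one(struct sock *sk, struct sk_buff *skb,
                            struct flowi *fl)
{
        const struct inet_connection_sock_af_ops *ops = inet_csk(sk)->icsk_af_ops;

        ops->send_check(sk, skb);               /* tcp_v4_send_check for AF_INET */
        return ops->queue_xmit(sk, skb, fl);    /* ip_queue_xmit for AF_INET     */
}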
1da177e4 1747
cfb6eeb4 1748#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1749static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
cfb6eeb4 1750 .md5_lookup = tcp_v4_md5_lookup,
49a72dfb 1751 .calc_md5_hash = tcp_v4_md5_hash_skb,
cfb6eeb4 1752 .md5_parse = tcp_v4_parse_md5_keys,
cfb6eeb4 1753};
b6332e6c 1754#endif
cfb6eeb4 1755
1da177e4
LT
1756/* NOTE: A lot of things are set to zero explicitly by the call to
1757 * sk_alloc(), so they need not be done here.
1758 */
1759static int tcp_v4_init_sock(struct sock *sk)
1760{
6687e988 1761 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4 1762
900f65d3 1763 tcp_init_sock(sk);
1da177e4 1764
8292a17a 1765 icsk->icsk_af_ops = &ipv4_specific;
900f65d3 1766
cfb6eeb4 1767#ifdef CONFIG_TCP_MD5SIG
ac807fa8 1768 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
cfb6eeb4 1769#endif
1da177e4 1770
1da177e4
LT
1771 return 0;
1772}
1773
7d06b2e0 1774void tcp_v4_destroy_sock(struct sock *sk)
1da177e4
LT
1775{
1776 struct tcp_sock *tp = tcp_sk(sk);
1777
1778 tcp_clear_xmit_timers(sk);
1779
6687e988 1780 tcp_cleanup_congestion_control(sk);
317a76f9 1781
1da177e4 1782 /* Clean up the write buffer. */
fe067e8a 1783 tcp_write_queue_purge(sk);
1da177e4
LT
1784
1785 /* Cleans up our, hopefully empty, out_of_order_queue. */
e905a9ed 1786 __skb_queue_purge(&tp->out_of_order_queue);
1da177e4 1787
cfb6eeb4
YH
1788#ifdef CONFIG_TCP_MD5SIG
1789 /* Clean up the MD5 key list, if any */
1790 if (tp->md5sig_info) {
a915da9b 1791 tcp_clear_md5_list(sk);
a8afca03 1792 kfree_rcu(tp->md5sig_info, rcu);
cfb6eeb4
YH
1793 tp->md5sig_info = NULL;
1794 }
1795#endif
1a2449a8 1796
1da177e4
LT
1797 /* Clean the prequeue; it must really be empty. */
1798 __skb_queue_purge(&tp->ucopy.prequeue);
1799
1800 /* Clean up a referenced TCP bind bucket. */
463c84b9 1801 if (inet_csk(sk)->icsk_bind_hash)
ab1e0a13 1802 inet_put_port(sk);
1da177e4 1803
00db4124 1804 BUG_ON(tp->fastopen_rsk);
435cf559 1805
cf60af03
YC
1806 /* If the socket was aborted during the connect operation */
1807 tcp_free_fastopen_req(tp);
cd8ae852 1808 tcp_saved_syn_free(tp);
cf60af03 1809
180d8cd9 1810 sk_sockets_allocated_dec(sk);
d1a4c0b3 1811 sock_release_memcg(sk);
1da177e4 1812}
1da177e4
LT
1813EXPORT_SYMBOL(tcp_v4_destroy_sock);
1814
1815#ifdef CONFIG_PROC_FS
1816/* Proc filesystem TCP sock list dumping. */
1817
a8b690f9
TH
1818/*
1819 * Get the next listener socket following cur. If cur is NULL, get the first socket
1820 * starting from bucket given in st->bucket; when st->bucket is zero the
1821 * very first socket in the hash table is returned.
1822 */
1da177e4
LT
1823static void *listening_get_next(struct seq_file *seq, void *cur)
1824{
463c84b9 1825 struct inet_connection_sock *icsk;
c25eb3bf 1826 struct hlist_nulls_node *node;
1da177e4 1827 struct sock *sk = cur;
5caea4ea 1828 struct inet_listen_hashbucket *ilb;
5799de0b 1829 struct tcp_iter_state *st = seq->private;
a4146b1b 1830 struct net *net = seq_file_net(seq);
1da177e4
LT
1831
1832 if (!sk) {
a8b690f9 1833 ilb = &tcp_hashinfo.listening_hash[st->bucket];
5caea4ea 1834 spin_lock_bh(&ilb->lock);
c25eb3bf 1835 sk = sk_nulls_head(&ilb->head);
a8b690f9 1836 st->offset = 0;
1da177e4
LT
1837 goto get_sk;
1838 }
5caea4ea 1839 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1da177e4 1840 ++st->num;
a8b690f9 1841 ++st->offset;
1da177e4 1842
079096f1 1843 sk = sk_nulls_next(sk);
1da177e4 1844get_sk:
c25eb3bf 1845 sk_nulls_for_each_from(sk, node) {
8475ef9f
PE
1846 if (!net_eq(sock_net(sk), net))
1847 continue;
1848 if (sk->sk_family == st->family) {
1da177e4
LT
1849 cur = sk;
1850 goto out;
1851 }
e905a9ed 1852 icsk = inet_csk(sk);
1da177e4 1853 }
5caea4ea 1854 spin_unlock_bh(&ilb->lock);
a8b690f9 1855 st->offset = 0;
0f7ff927 1856 if (++st->bucket < INET_LHTABLE_SIZE) {
5caea4ea
ED
1857 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1858 spin_lock_bh(&ilb->lock);
c25eb3bf 1859 sk = sk_nulls_head(&ilb->head);
1da177e4
LT
1860 goto get_sk;
1861 }
1862 cur = NULL;
1863out:
1864 return cur;
1865}
1866
1867static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1868{
a8b690f9
TH
1869 struct tcp_iter_state *st = seq->private;
1870 void *rc;
1871
1872 st->bucket = 0;
1873 st->offset = 0;
1874 rc = listening_get_next(seq, NULL);
1da177e4
LT
1875
1876 while (rc && *pos) {
1877 rc = listening_get_next(seq, rc);
1878 --*pos;
1879 }
1880 return rc;
1881}
1882
05dbc7b5 1883static inline bool empty_bucket(const struct tcp_iter_state *st)
6eac5604 1884{
05dbc7b5 1885 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
6eac5604
AK
1886}
1887
a8b690f9
TH
1888/*
1889 * Get the first established socket, starting from the bucket given in st->bucket.
1890 * If st->bucket is zero, the very first socket in the hash is returned.
1891 */
1da177e4
LT
1892static void *established_get_first(struct seq_file *seq)
1893{
5799de0b 1894 struct tcp_iter_state *st = seq->private;
a4146b1b 1895 struct net *net = seq_file_net(seq);
1da177e4
LT
1896 void *rc = NULL;
1897
a8b690f9
TH
1898 st->offset = 0;
1899 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1da177e4 1900 struct sock *sk;
3ab5aee7 1901 struct hlist_nulls_node *node;
9db66bdc 1902 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1da177e4 1903
6eac5604
AK
1904 /* Lockless fast path for the common case of empty buckets */
1905 if (empty_bucket(st))
1906 continue;
1907
9db66bdc 1908 spin_lock_bh(lock);
3ab5aee7 1909 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
f40c8174 1910 if (sk->sk_family != st->family ||
878628fb 1911 !net_eq(sock_net(sk), net)) {
1da177e4
LT
1912 continue;
1913 }
1914 rc = sk;
1915 goto out;
1916 }
9db66bdc 1917 spin_unlock_bh(lock);
1da177e4
LT
1918 }
1919out:
1920 return rc;
1921}
1922
1923static void *established_get_next(struct seq_file *seq, void *cur)
1924{
1925 struct sock *sk = cur;
3ab5aee7 1926 struct hlist_nulls_node *node;
5799de0b 1927 struct tcp_iter_state *st = seq->private;
a4146b1b 1928 struct net *net = seq_file_net(seq);
1da177e4
LT
1929
1930 ++st->num;
a8b690f9 1931 ++st->offset;
1da177e4 1932
05dbc7b5 1933 sk = sk_nulls_next(sk);
1da177e4 1934
3ab5aee7 1935 sk_nulls_for_each_from(sk, node) {
878628fb 1936 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
05dbc7b5 1937 return sk;
1da177e4
LT
1938 }
1939
05dbc7b5
ED
1940 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1941 ++st->bucket;
1942 return established_get_first(seq);
1da177e4
LT
1943}
1944
1945static void *established_get_idx(struct seq_file *seq, loff_t pos)
1946{
a8b690f9
TH
1947 struct tcp_iter_state *st = seq->private;
1948 void *rc;
1949
1950 st->bucket = 0;
1951 rc = established_get_first(seq);
1da177e4
LT
1952
1953 while (rc && pos) {
1954 rc = established_get_next(seq, rc);
1955 --pos;
7174259e 1956 }
1da177e4
LT
1957 return rc;
1958}
1959
1960static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
1961{
1962 void *rc;
5799de0b 1963 struct tcp_iter_state *st = seq->private;
1da177e4 1964
1da177e4
LT
1965 st->state = TCP_SEQ_STATE_LISTENING;
1966 rc = listening_get_idx(seq, &pos);
1967
1968 if (!rc) {
1da177e4
LT
1969 st->state = TCP_SEQ_STATE_ESTABLISHED;
1970 rc = established_get_idx(seq, pos);
1971 }
1972
1973 return rc;
1974}
1975
a8b690f9
TH
1976static void *tcp_seek_last_pos(struct seq_file *seq)
1977{
1978 struct tcp_iter_state *st = seq->private;
1979 int offset = st->offset;
1980 int orig_num = st->num;
1981 void *rc = NULL;
1982
1983 switch (st->state) {
a8b690f9
TH
1984 case TCP_SEQ_STATE_LISTENING:
1985 if (st->bucket >= INET_LHTABLE_SIZE)
1986 break;
1987 st->state = TCP_SEQ_STATE_LISTENING;
1988 rc = listening_get_next(seq, NULL);
1989 while (offset-- && rc)
1990 rc = listening_get_next(seq, rc);
1991 if (rc)
1992 break;
1993 st->bucket = 0;
05dbc7b5 1994 st->state = TCP_SEQ_STATE_ESTABLISHED;
a8b690f9
TH
1995 /* Fallthrough */
1996 case TCP_SEQ_STATE_ESTABLISHED:
a8b690f9
TH
1997 if (st->bucket > tcp_hashinfo.ehash_mask)
1998 break;
1999 rc = established_get_first(seq);
2000 while (offset-- && rc)
2001 rc = established_get_next(seq, rc);
2002 }
2003
2004 st->num = orig_num;
2005
2006 return rc;
2007}
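
tcp_seek_last_pos() restores an interrupted walk from the (st->bucket, st->offset) cursor maintained by the get_next helpers, so a later read on the seq_file can resume where it stopped instead of rescanning from the first socket. The self-contained userspace sketch below shows the same cursor idea on a toy hash table; all names (cursor, seek, NBUCKETS) are illustrative only.

#include <stdio.h>

#define NBUCKETS 4

struct node { int val; struct node *next; };
struct cursor { int bucket; int offset; };

/* Return the entry at (bucket, offset), advancing past empty buckets. */
static struct node *seek(struct node *table[], struct cursor *c)
{
        for (; c->bucket < NBUCKETS; c->bucket++, c->offset = 0) {
                struct node *n = table[c->bucket];
                int skip = c->offset;

                for (; n; n = n->next)
                        if (skip-- == 0)
                                return n;       /* resume point reached */
        }
        return NULL;                            /* walked past the last bucket */
}

int main(void)
{
        struct node b0b = { 2, NULL }, b0a = { 1, &b0b }, b2a = { 3, NULL };
        struct node *table[NBUCKETS] = { &b0a, NULL, &b2a, NULL };
        struct cursor c = { 0, 1 };             /* resume at bucket 0, offset 1 */
        struct node *n;

        for (n = seek(table, &c); n; c.offset++, n = seek(table, &c))
                printf("bucket %d val %d\n", c.bucket, n->val);
        return 0;
}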
2008
1da177e4
LT
2009static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2010{
5799de0b 2011 struct tcp_iter_state *st = seq->private;
a8b690f9
TH
2012 void *rc;
2013
2014 if (*pos && *pos == st->last_pos) {
2015 rc = tcp_seek_last_pos(seq);
2016 if (rc)
2017 goto out;
2018 }
2019
1da177e4
LT
2020 st->state = TCP_SEQ_STATE_LISTENING;
2021 st->num = 0;
a8b690f9
TH
2022 st->bucket = 0;
2023 st->offset = 0;
2024 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2025
2026out:
2027 st->last_pos = *pos;
2028 return rc;
1da177e4
LT
2029}
2030
2031static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2032{
a8b690f9 2033 struct tcp_iter_state *st = seq->private;
1da177e4 2034 void *rc = NULL;
1da177e4
LT
2035
2036 if (v == SEQ_START_TOKEN) {
2037 rc = tcp_get_idx(seq, 0);
2038 goto out;
2039 }
1da177e4
LT
2040
2041 switch (st->state) {
1da177e4
LT
2042 case TCP_SEQ_STATE_LISTENING:
2043 rc = listening_get_next(seq, v);
2044 if (!rc) {
1da177e4 2045 st->state = TCP_SEQ_STATE_ESTABLISHED;
a8b690f9
TH
2046 st->bucket = 0;
2047 st->offset = 0;
1da177e4
LT
2048 rc = established_get_first(seq);
2049 }
2050 break;
2051 case TCP_SEQ_STATE_ESTABLISHED:
1da177e4
LT
2052 rc = established_get_next(seq, v);
2053 break;
2054 }
2055out:
2056 ++*pos;
a8b690f9 2057 st->last_pos = *pos;
1da177e4
LT
2058 return rc;
2059}
2060
2061static void tcp_seq_stop(struct seq_file *seq, void *v)
2062{
5799de0b 2063 struct tcp_iter_state *st = seq->private;
1da177e4
LT
2064
2065 switch (st->state) {
1da177e4
LT
2066 case TCP_SEQ_STATE_LISTENING:
2067 if (v != SEQ_START_TOKEN)
5caea4ea 2068 spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
1da177e4 2069 break;
1da177e4
LT
2070 case TCP_SEQ_STATE_ESTABLISHED:
2071 if (v)
9db66bdc 2072 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1da177e4
LT
2073 break;
2074 }
2075}
2076
73cb88ec 2077int tcp_seq_open(struct inode *inode, struct file *file)
1da177e4 2078{
d9dda78b 2079 struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
1da177e4 2080 struct tcp_iter_state *s;
52d6f3f1 2081 int err;
1da177e4 2082
52d6f3f1
DL
2083 err = seq_open_net(inode, file, &afinfo->seq_ops,
2084 sizeof(struct tcp_iter_state));
2085 if (err < 0)
2086 return err;
f40c8174 2087
52d6f3f1 2088 s = ((struct seq_file *)file->private_data)->private;
1da177e4 2089 s->family = afinfo->family;
688d1945 2090 s->last_pos = 0;
f40c8174
DL
2091 return 0;
2092}
73cb88ec 2093EXPORT_SYMBOL(tcp_seq_open);
f40c8174 2094
6f8b13bc 2095int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
1da177e4
LT
2096{
2097 int rc = 0;
2098 struct proc_dir_entry *p;
2099
9427c4b3
DL
2100 afinfo->seq_ops.start = tcp_seq_start;
2101 afinfo->seq_ops.next = tcp_seq_next;
2102 afinfo->seq_ops.stop = tcp_seq_stop;
2103
84841c3c 2104 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
73cb88ec 2105 afinfo->seq_fops, afinfo);
84841c3c 2106 if (!p)
1da177e4
LT
2107 rc = -ENOMEM;
2108 return rc;
2109}
4bc2f18b 2110EXPORT_SYMBOL(tcp_proc_register);
1da177e4 2111
6f8b13bc 2112void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
1da177e4 2113{
ece31ffd 2114 remove_proc_entry(afinfo->name, net->proc_net);
1da177e4 2115}
4bc2f18b 2116EXPORT_SYMBOL(tcp_proc_unregister);
1da177e4 2117
d4f06873 2118static void get_openreq4(const struct request_sock *req,
aa3a0c8c 2119 struct seq_file *f, int i)
1da177e4 2120{
2e6599cb 2121 const struct inet_request_sock *ireq = inet_rsk(req);
fa76ce73 2122 long delta = req->rsk_timer.expires - jiffies;
1da177e4 2123
5e659e4c 2124 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
652586df 2125 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
1da177e4 2126 i,
634fb979 2127 ireq->ir_loc_addr,
d4f06873 2128 ireq->ir_num,
634fb979
ED
2129 ireq->ir_rmt_addr,
2130 ntohs(ireq->ir_rmt_port),
1da177e4
LT
2131 TCP_SYN_RECV,
2132 0, 0, /* could print option size, but that is af dependent. */
2133 1, /* timers active (only the expire timer) */
a399a805 2134 jiffies_delta_to_clock_t(delta),
e6c022a4 2135 req->num_timeout,
aa3a0c8c
ED
2136 from_kuid_munged(seq_user_ns(f),
2137 sock_i_uid(req->rsk_listener)),
1da177e4
LT
2138 0, /* non standard timer */
2139 0, /* open_requests have no inode */
d4f06873 2140 0,
652586df 2141 req);
1da177e4
LT
2142}
2143
652586df 2144static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
1da177e4
LT
2145{
2146 int timer_active;
2147 unsigned long timer_expires;
cf533ea5 2148 const struct tcp_sock *tp = tcp_sk(sk);
cf4c6bf8 2149 const struct inet_connection_sock *icsk = inet_csk(sk);
cf533ea5 2150 const struct inet_sock *inet = inet_sk(sk);
0536fcc0 2151 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
c720c7e8
ED
2152 __be32 dest = inet->inet_daddr;
2153 __be32 src = inet->inet_rcv_saddr;
2154 __u16 destp = ntohs(inet->inet_dport);
2155 __u16 srcp = ntohs(inet->inet_sport);
49d09007 2156 int rx_queue;
1da177e4 2157
6ba8a3b1
ND
2158 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2159 icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2160 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
1da177e4 2161 timer_active = 1;
463c84b9
ACM
2162 timer_expires = icsk->icsk_timeout;
2163 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1da177e4 2164 timer_active = 4;
463c84b9 2165 timer_expires = icsk->icsk_timeout;
cf4c6bf8 2166 } else if (timer_pending(&sk->sk_timer)) {
1da177e4 2167 timer_active = 2;
cf4c6bf8 2168 timer_expires = sk->sk_timer.expires;
1da177e4
LT
2169 } else {
2170 timer_active = 0;
2171 timer_expires = jiffies;
2172 }
2173
49d09007
ED
2174 if (sk->sk_state == TCP_LISTEN)
2175 rx_queue = sk->sk_ack_backlog;
2176 else
2177 /*
2178 * Because we don't lock the socket, we might find a transient negative value.
2179 */
2180 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2181
5e659e4c 2182 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
652586df 2183 "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
cf4c6bf8 2184 i, src, srcp, dest, destp, sk->sk_state,
47da8ee6 2185 tp->write_seq - tp->snd_una,
49d09007 2186 rx_queue,
1da177e4 2187 timer_active,
a399a805 2188 jiffies_delta_to_clock_t(timer_expires - jiffies),
463c84b9 2189 icsk->icsk_retransmits,
a7cb5a49 2190 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
6687e988 2191 icsk->icsk_probes_out,
cf4c6bf8
IJ
2192 sock_i_ino(sk),
2193 atomic_read(&sk->sk_refcnt), sk,
7be87351
SH
2194 jiffies_to_clock_t(icsk->icsk_rto),
2195 jiffies_to_clock_t(icsk->icsk_ack.ato),
463c84b9 2196 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1da177e4 2197 tp->snd_cwnd,
168a8f58
JC
2198 sk->sk_state == TCP_LISTEN ?
2199 (fastopenq ? fastopenq->max_qlen : 0) :
652586df 2200 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
1da177e4
LT
2201}
2202
cf533ea5 2203static void get_timewait4_sock(const struct inet_timewait_sock *tw,
652586df 2204 struct seq_file *f, int i)
1da177e4 2205{
789f558c 2206 long delta = tw->tw_timer.expires - jiffies;
23f33c2d 2207 __be32 dest, src;
1da177e4 2208 __u16 destp, srcp;
1da177e4
LT
2209
2210 dest = tw->tw_daddr;
2211 src = tw->tw_rcv_saddr;
2212 destp = ntohs(tw->tw_dport);
2213 srcp = ntohs(tw->tw_sport);
2214
5e659e4c 2215 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
652586df 2216 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
1da177e4 2217 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
a399a805 2218 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
652586df 2219 atomic_read(&tw->tw_refcnt), tw);
1da177e4
LT
2220}
2221
2222#define TMPSZ 150
2223
2224static int tcp4_seq_show(struct seq_file *seq, void *v)
2225{
5799de0b 2226 struct tcp_iter_state *st;
05dbc7b5 2227 struct sock *sk = v;
1da177e4 2228
652586df 2229 seq_setwidth(seq, TMPSZ - 1);
1da177e4 2230 if (v == SEQ_START_TOKEN) {
652586df 2231 seq_puts(seq, " sl local_address rem_address st tx_queue "
1da177e4
LT
2232 "rx_queue tr tm->when retrnsmt uid timeout "
2233 "inode");
2234 goto out;
2235 }
2236 st = seq->private;
2237
079096f1
ED
2238 if (sk->sk_state == TCP_TIME_WAIT)
2239 get_timewait4_sock(v, seq, st->num);
2240 else if (sk->sk_state == TCP_NEW_SYN_RECV)
aa3a0c8c 2241 get_openreq4(v, seq, st->num);
079096f1
ED
2242 else
2243 get_tcp4_sock(v, seq, st->num);
1da177e4 2244out:
652586df 2245 seq_pad(seq, '\n');
1da177e4
LT
2246 return 0;
2247}
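
tcp4_seq_show() pads every record to TMPSZ-1 columns and prints the header shown for SEQ_START_TOKEN, so each row of /proc/net/tcp carries fixed-width hex fields (addresses as %08X:%04X, state as %02X), exactly as emitted by get_openreq4/get_tcp4_sock/get_timewait4_sock above. A minimal userspace reader, offered only as a sketch (the file name and field order match the code above; everything else is illustrative):

#include <stdio.h>

int main(void)
{
        /* Example row produced by get_tcp4_sock():
         *   0: 0100007F:0277 00000000:0000 0A 00000000:00000000 00:00000000 ...
         */
        FILE *f = fopen("/proc/net/tcp", "r");
        char line[512];
        unsigned int lip, lport, rip, rport, state;

        if (!f)
                return 1;
        if (!fgets(line, sizeof(line), f)) {    /* skip the " sl local_address ..." header */
                fclose(f);
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                if (sscanf(line, "%*d: %8X:%4X %8X:%4X %2X",
                           &lip, &lport, &rip, &rport, &state) == 5)
                        printf("local port %u, state 0x%02X\n", lport, state);
        fclose(f);
        return 0;
}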
2248
73cb88ec
AV
2249static const struct file_operations tcp_afinfo_seq_fops = {
2250 .owner = THIS_MODULE,
2251 .open = tcp_seq_open,
2252 .read = seq_read,
2253 .llseek = seq_lseek,
2254 .release = seq_release_net
2255};
2256
1da177e4 2257static struct tcp_seq_afinfo tcp4_seq_afinfo = {
1da177e4
LT
2258 .name = "tcp",
2259 .family = AF_INET,
73cb88ec 2260 .seq_fops = &tcp_afinfo_seq_fops,
9427c4b3
DL
2261 .seq_ops = {
2262 .show = tcp4_seq_show,
2263 },
1da177e4
LT
2264};
2265
2c8c1e72 2266static int __net_init tcp4_proc_init_net(struct net *net)
757764f6
PE
2267{
2268 return tcp_proc_register(net, &tcp4_seq_afinfo);
2269}
2270
2c8c1e72 2271static void __net_exit tcp4_proc_exit_net(struct net *net)
757764f6
PE
2272{
2273 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2274}
2275
2276static struct pernet_operations tcp4_net_ops = {
2277 .init = tcp4_proc_init_net,
2278 .exit = tcp4_proc_exit_net,
2279};
2280
1da177e4
LT
2281int __init tcp4_proc_init(void)
2282{
757764f6 2283 return register_pernet_subsys(&tcp4_net_ops);
1da177e4
LT
2284}
2285
2286void tcp4_proc_exit(void)
2287{
757764f6 2288 unregister_pernet_subsys(&tcp4_net_ops);
1da177e4
LT
2289}
2290#endif /* CONFIG_PROC_FS */
2291
2292struct proto tcp_prot = {
2293 .name = "TCP",
2294 .owner = THIS_MODULE,
2295 .close = tcp_close,
2296 .connect = tcp_v4_connect,
2297 .disconnect = tcp_disconnect,
463c84b9 2298 .accept = inet_csk_accept,
1da177e4
LT
2299 .ioctl = tcp_ioctl,
2300 .init = tcp_v4_init_sock,
2301 .destroy = tcp_v4_destroy_sock,
2302 .shutdown = tcp_shutdown,
2303 .setsockopt = tcp_setsockopt,
2304 .getsockopt = tcp_getsockopt,
1da177e4 2305 .recvmsg = tcp_recvmsg,
7ba42910
CG
2306 .sendmsg = tcp_sendmsg,
2307 .sendpage = tcp_sendpage,
1da177e4 2308 .backlog_rcv = tcp_v4_do_rcv,
46d3ceab 2309 .release_cb = tcp_release_cb,
ab1e0a13
ACM
2310 .hash = inet_hash,
2311 .unhash = inet_unhash,
2312 .get_port = inet_csk_get_port,
1da177e4 2313 .enter_memory_pressure = tcp_enter_memory_pressure,
c9bee3b7 2314 .stream_memory_free = tcp_stream_memory_free,
1da177e4 2315 .sockets_allocated = &tcp_sockets_allocated,
0a5578cf 2316 .orphan_count = &tcp_orphan_count,
1da177e4
LT
2317 .memory_allocated = &tcp_memory_allocated,
2318 .memory_pressure = &tcp_memory_pressure,
a4fe34bf 2319 .sysctl_mem = sysctl_tcp_mem,
1da177e4
LT
2320 .sysctl_wmem = sysctl_tcp_wmem,
2321 .sysctl_rmem = sysctl_tcp_rmem,
2322 .max_header = MAX_TCP_HEADER,
2323 .obj_size = sizeof(struct tcp_sock),
3ab5aee7 2324 .slab_flags = SLAB_DESTROY_BY_RCU,
6d6ee43e 2325 .twsk_prot = &tcp_timewait_sock_ops,
60236fdd 2326 .rsk_prot = &tcp_request_sock_ops,
39d8cda7 2327 .h.hashinfo = &tcp_hashinfo,
7ba42910 2328 .no_autobind = true,
543d9cfe
ACM
2329#ifdef CONFIG_COMPAT
2330 .compat_setsockopt = compat_tcp_setsockopt,
2331 .compat_getsockopt = compat_tcp_getsockopt,
2332#endif
c255a458 2333#ifdef CONFIG_MEMCG_KMEM
d1a4c0b3
GC
2334 .init_cgroup = tcp_init_cgroup,
2335 .destroy_cgroup = tcp_destroy_cgroup,
2336 .proto_cgroup = tcp_proto_cgroup,
2337#endif
1da177e4 2338};
4bc2f18b 2339EXPORT_SYMBOL(tcp_prot);
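
Every method in tcp_prot is reached through the generic socket layer: a plain socket()/connect()/close() sequence from userspace ends up in tcp_v4_init_sock(), tcp_v4_connect() and tcp_close(). A trivial, runnable userspace sketch (the address and port below are placeholders only):

#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_STREAM, 0);       /* .init -> tcp_v4_init_sock */
        struct sockaddr_in dst;

        if (fd < 0)
                return 1;
        memset(&dst, 0, sizeof(dst));
        dst.sin_family = AF_INET;
        dst.sin_port = htons(80);                       /* placeholder port */
        inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr); /* placeholder address */
        if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0) /* .connect -> tcp_v4_connect */
                perror("connect");
        close(fd);                                      /* .close -> tcp_close */
        return 0;
}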
1da177e4 2340
bdbbb852
ED
2341static void __net_exit tcp_sk_exit(struct net *net)
2342{
2343 int cpu;
2344
2345 for_each_possible_cpu(cpu)
2346 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2347 free_percpu(net->ipv4.tcp_sk);
2348}
2349
046ee902
DL
2350static int __net_init tcp_sk_init(struct net *net)
2351{
bdbbb852
ED
2352 int res, cpu;
2353
2354 net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2355 if (!net->ipv4.tcp_sk)
2356 return -ENOMEM;
2357
2358 for_each_possible_cpu(cpu) {
2359 struct sock *sk;
2360
2361 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2362 IPPROTO_TCP, net);
2363 if (res)
2364 goto fail;
2365 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2366 }
49213555 2367
5d134f1c 2368 net->ipv4.sysctl_tcp_ecn = 2;
49213555
DB
2369 net->ipv4.sysctl_tcp_ecn_fallback = 1;
2370
b0f9ca53 2371 net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
6b58e0a5 2372 net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
05cbc0db 2373 net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
046ee902 2374
49213555 2375 return 0;
bdbbb852
ED
2376fail:
2377 tcp_sk_exit(net);
2378
2379 return res;
b099ce26
EB
2380}
2381
2382static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2383{
2384 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
046ee902
DL
2385}
2386
2387static struct pernet_operations __net_initdata tcp_sk_ops = {
b099ce26
EB
2388 .init = tcp_sk_init,
2389 .exit = tcp_sk_exit,
2390 .exit_batch = tcp_sk_exit_batch,
046ee902
DL
2391};
2392
9b0f976f 2393void __init tcp_v4_init(void)
1da177e4 2394{
5caea4ea 2395 inet_hashinfo_init(&tcp_hashinfo);
6a1b3054 2396 if (register_pernet_subsys(&tcp_sk_ops))
1da177e4 2397 panic("Failed to create the TCP control socket.\n");
1da177e4 2398}