/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}

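/* Note: secure_tcp_sequence_number() above derives the ISN from the
 * connection 4-tuple, a boot-time secret and a clock component
 * (roughly the RFC 6528 scheme), so initial sequence numbers are hard
 * to predict off-path; this is a reading of the helper, not a
 * normative statement about it.
 */
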
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (sysctl_tcp_tw_reuse &&
		      get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

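/* Note: the "> 1" check above requires the TIME-WAIT socket's last
 * timestamp to be at least about a second old before the 4-tuple may
 * be reused under tcp_tw_reuse, so PAWS can reject stray segments
 * from the old incarnation; this is a reading of the condition, not a
 * protocol guarantee.
 */
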
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the
	 * socket lock, select a source port, enter ourselves into the
	 * hash tables and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	sk_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
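
/* tcp_v4_connect() is reached from the connect(2) system call via
 * inet_stream_connect().  A minimal userspace sketch (illustrative
 * only, error handling omitted; the address is a placeholder from the
 * documentation range):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *	};
 *	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 */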

/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);

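/* An ICMP redirect arrived for this connection: hand it to the dst's
 * redirect handler so the cached route can be updated.
 */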
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}

/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	WARN_ON(req->sk);

	if (seq != tcp_rsk(req)->snt_isn) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		reqsk_put(req);
	} else {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
	}
}
EXPORT_SYMBOL(tcp_req_err);

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	__u32 seq, snd_una;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq);

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always <576bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto -
			    min(icsk->icsk_rto,
				tcp_time_stamp - tcp_skb_timestamp(skb));

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 * --ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

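/* Fill in the TCP checksum of an outgoing segment.  With
 * CHECKSUM_PARTIAL the device completes the sum over the payload;
 * otherwise it is folded in software right here.
 */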
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *	So we build the reply based only on the parameters that
 *	arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we do not lose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(net,
					     &tcp_hashinfo, ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			return;
		rcu_read_lock();
		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto release_sk1;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					     &ip_hdr(skb)->saddr,
					     AF_INET) : NULL;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	arg.tos = ip_hdr(skb)->tos;
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}

/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}

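/* Answer a segment that arrived for a TIME-WAIT socket: ACK it with
 * the window, timestamp and (if configured) MD5 key remembered in the
 * timewait state.
 */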
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
			tcp_time_stamp,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      u16 queue_mapping,
			      struct tcp_fastopen_cookie *foc)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	return err;
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       sock_owned_by_user(sk) ||
				       lockdep_is_held((spinlock_t *)&sk->sk_lock.slock));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);

struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{
	const union tcp_md5_addr *addr;

	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}

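/* Userspace reaches tcp_v4_parse_md5_keys() via the TCP_MD5SIG socket
 * option.  A minimal sketch (illustrative only, error handling
 * omitted; "secret" and the peer address are placeholders):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	memcpy(&md5.tcpm_addr, &peer, sizeof(struct sockaddr_in));
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * A zero tcpm_keylen deletes the key for that peer, as the code above
 * shows.
 */
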
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) { /* valid for establish/request sockets */
		saddr = sk->sk_rcv_saddr;
		daddr = sk->sk_daddr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

#endif

/* Called with rcu_read_lock() */
static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
				     : "");
		return true;
	}
	return false;
#endif
	return false;
}

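/* Seed a freshly allocated request sock with the addresses and IP
 * options taken from the incoming SYN.
 */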
static void tcp_v4_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
	ireq->no_srccheck = inet_sk(sk_listener)->transparent;
	ireq->opt = tcp_v4_save_options(skb);
}

static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);

	if (strict) {
		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
			*strict = true;
		else
			*strict = false;
	}

	return dst;
}

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_sequence,
	.send_synack	=	tcp_v4_send_synack,
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);

/*
 * The three way handshake has completed - we got a valid ACK -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	sk_daddr_set(newsk, ireq->ir_rmt_addr);
	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

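/* With SYN cookies the listener keeps no request sock; for a non-SYN
 * segment cookie_v4_check() below rebuilds one from the cookie echoed
 * in the ACK.
 */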
static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v4_check(sk, skb);
#endif
	return sk;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    !dst->ops->check(dst, 0)) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_cookie_check(sk, skb);

		if (!nsk)
			goto discard;
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);

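/* Early demux: look up the established socket once at ingress so that
 * its cached sk and dst can be attached to the skb and the full
 * socket lookup later in tcp_v4_rcv() is skipped.
 */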
void tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)	--ANK
 *
 */
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	if (skb->len <= tcp_hdrlen(skb) &&
	    skb_queue_len(&tp->ucopy.prequeue) == 0)
		return false;

	/* Before escaping RCU protected region, we need to take care of skb
	 * dst. Prequeue is only enabled for established sockets.
	 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst
	 * Instead of doing full sk_rx_dst validity here, let's perform
	 * an optimistic check.
	 */
	if (likely(sk->sk_rx_dst))
		skb_dst_drop(skb);
	else
		skb_dst_force(skb);

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
						POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return true;
}
EXPORT_SYMBOL(tcp_prequeue);

/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */

	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
	 * barrier() makes sure compiler won't play fool^Waliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
		sizeof(struct inet_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk = NULL;

		sk = req->rsk_listener;
		if (tcp_v4_inbound_md5_hash(sk, skb))
			goto discard_and_relse;
		if (sk->sk_state == TCP_LISTEN)
			nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk) {
			reqsk_put(req);
			goto discard_it;
		}
		if (nsk == sk) {
			sock_hold(sk);
			reqsk_put(req);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v4_send_reset(nsk, skb);
			goto discard_it;
		} else {
			return 0;
		}
	}
	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	sk_incoming_cpu_update(sk);
	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v4_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	 = sizeof(struct tcp_timewait_sock),
	.twsk_unique	 = tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

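/* Cache the input route of an incoming skb on the socket, together with
 * the ingress ifindex used to validate the cached entry, so subsequent
 * packets of this flow can skip a full route lookup.
 */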
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst) {
		dst_hold(dst);
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);

const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: Many fields are already zeroed explicitly by sk_alloc(),
 * so they need not be initialized here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}

void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Clean up the out-of-order queue, which should be empty by now. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

	/* Clean the prequeue; it must really be empty by now. */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(tp->fastopen_rsk);

	/* If the socket was aborted during connect, free Fast Open state. */
	tcp_free_fastopen_req(tp);
	tcp_saved_syn_free(tp);

	sk_sockets_allocated_dec(sk);
	sock_release_memcg(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get the next listener socket following cur. If cur is NULL, get the
 * first socket starting from the bucket given in st->bucket; when
 * st->bucket is zero the very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}

/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		spin_unlock_bh(lock);
	}
out:
	return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			return sk;
	}

	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

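/* The dump walks two tables in turn: the listening hash first, then the
 * established hash (which also holds timewait entries); st->state
 * records which phase the iterator is in.
 */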
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc = established_get_idx(seq, pos);
	}

	return rc;
}

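/* Resume a dump from the remembered bucket/offset rather than rescanning
 * from the start, so sequential reads of the whole table stay roughly
 * linear instead of quadratic.
 */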
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}

static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}

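/* The get_first/get_next helpers return with the current bucket lock
 * held; drop it here when a read stops partway through a table.
 */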
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}

int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family	= afinfo->family;
	s->last_pos	= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start	= tcp_seq_start;
	afinfo->seq_ops.next	= tcp_seq_next;
	afinfo->seq_ops.stop	= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);

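/* Formatting helpers for the /proc/net/tcp dump: request, full and
 * timewait sockets each print one line in a shared column layout, with
 * addresses and ports in hex (e.g. "0100007F:0016" would be
 * 127.0.0.1:22; hypothetical sample, not taken from a real dump).
 */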
static void get_openreq4(const struct request_sock *req,
			 struct seq_file *f, int i)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->rsk_timer.expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ireq->ir_num,
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f),
				 sock_i_uid(req->rsk_listener)),
		0,  /* non standard timer */
		0,  /* open_requests have no inode */
		0,
		req);
}

static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		sk->sk_state == TCP_LISTEN ?
		    (fastopenq ? fastopenq->max_qlen : 0) :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}

static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	__be32 dest, src;
	__u16 destp, srcp;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait4_sock(v, seq, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq4(v, seq, st->num);
	else
		get_tcp4_sock(v, seq, st->num);
out:
	seq_pad(seq, '\n');
	return 0;
}

static const struct file_operations tcp_afinfo_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = tcp_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

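/* The AF_INET stream protocol descriptor: every TCP operation reachable
 * from the generic socket layer is dispatched through this table.
 */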
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.init_cgroup		= tcp_init_cgroup,
	.destroy_cgroup		= tcp_destroy_cgroup,
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
EXPORT_SYMBOL(tcp_prot);

static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}
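
/* Each network namespace owns one kernel "control" socket per possible
 * CPU; code paths with no user socket at hand (e.g. sending a RST from
 * tcp_v4_send_reset()) transmit through these.
 */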
static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}

	net->ipv4.sysctl_tcp_ecn = 2;
	net->ipv4.sysctl_tcp_ecn_fallback = 1;

	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;

	return 0;
fail:
	tcp_sk_exit(net);

	return res;
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}