[mirror_ubuntu-bionic-kernel.git] / net / ipv4 / tcp_ipv4.c
1da177e4
LT
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Implementation of the Transmission Control Protocol(TCP).
7 *
1da177e4
LT
8 * IPv4 specific functions
9 *
10 *
11 * code split from:
12 * linux/ipv4/tcp.c
13 * linux/ipv4/tcp_input.c
14 * linux/ipv4/tcp_output.c
15 *
16 * See tcp.c for author information
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 */
23
24/*
25 * Changes:
26 * David S. Miller : New socket lookup architecture.
27 * This code is dedicated to John Dyson.
28 * David S. Miller : Change semantics of established hash,
29 * half is devoted to TIME_WAIT sockets
30 * and the rest go in the other half.
31 * Andi Kleen : Add support for syncookies and fixed
32 * some bugs: ip options weren't passed to
33 * the TCP layer, missed a check for an
34 * ACK bit.
35 * Andi Kleen : Implemented fast path mtu discovery.
36 * Fixed many serious bugs in the
60236fdd 37 * request_sock handling and moved
1da177e4
LT
38 * most of it into the af independent code.
39 * Added tail drop and some other bugfixes.
caa20d9a 40 * Added new listen semantics.
1da177e4
LT
41 * Mike McLagan : Routing by source
42 * Juan Jose Ciarlante: ip_dynaddr bits
43 * Andi Kleen: various fixes.
44 * Vitaly E. Lavrov : Transparent proxy revived after year
45 * coma.
46 * Andi Kleen : Fix new listen.
47 * Andi Kleen : Fix accept error reporting.
48 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
49 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
50 * a single port at the same time.
51 */
52
1da177e4
LT
53
54#include <linux/types.h>
55#include <linux/fcntl.h>
56#include <linux/module.h>
57#include <linux/random.h>
58#include <linux/cache.h>
59#include <linux/jhash.h>
60#include <linux/init.h>
61#include <linux/times.h>
62
457c4cbc 63#include <net/net_namespace.h>
1da177e4 64#include <net/icmp.h>
304a1618 65#include <net/inet_hashtables.h>
1da177e4 66#include <net/tcp.h>
20380731 67#include <net/transp_v6.h>
1da177e4
LT
68#include <net/ipv6.h>
69#include <net/inet_common.h>
6d6ee43e 70#include <net/timewait_sock.h>
1da177e4 71#include <net/xfrm.h>
1a2449a8 72#include <net/netdma.h>
1da177e4
LT
73
74#include <linux/inet.h>
75#include <linux/ipv6.h>
76#include <linux/stddef.h>
77#include <linux/proc_fs.h>
78#include <linux/seq_file.h>
79
cfb6eeb4
YH
80#include <linux/crypto.h>
81#include <linux/scatterlist.h>
82
ab32ea5d
BH
83int sysctl_tcp_tw_reuse __read_mostly;
84int sysctl_tcp_low_latency __read_mostly;
1da177e4 85
1da177e4 86
cfb6eeb4 87#ifdef CONFIG_TCP_MD5SIG
7174259e
ACM
88static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
89 __be32 addr);
49a72dfb
AL
90static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
91 __be32 daddr, __be32 saddr, struct tcphdr *th);
9501f972
YH
92#else
93static inline
94struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
95{
96 return NULL;
97}
cfb6eeb4
YH
98#endif
99
0f7ff927 100struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
7174259e
ACM
101 .lhash_lock = __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock),
102 .lhash_users = ATOMIC_INIT(0),
103 .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
1da177e4
LT
104};
105
a94f723d 106static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
1da177e4 107{
eddc9ec5
ACM
108 return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
109 ip_hdr(skb)->saddr,
aa8223c7
ACM
110 tcp_hdr(skb)->dest,
111 tcp_hdr(skb)->source);
1da177e4
LT
112}
113
6d6ee43e
ACM
114int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
115{
116 const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
117 struct tcp_sock *tp = tcp_sk(sk);
118
119 /* With PAWS, it is safe from the viewpoint
120 of data integrity. Even without PAWS it is safe provided sequence
121 spaces do not overlap i.e. at data rates <= 80Mbit/sec.
122
123 Actually, the idea is close to VJ's one, only timestamp cache is
124 held not per host, but per port pair and TW bucket is used as state
125 holder.
126
127 If TW bucket has been already destroyed we fall back to VJ's scheme
128 and use initial timestamp retrieved from peer table.
129 */
130 if (tcptw->tw_ts_recent_stamp &&
131 (twp == NULL || (sysctl_tcp_tw_reuse &&
9d729f72 132 get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
6d6ee43e
ACM
133 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
134 if (tp->write_seq == 0)
135 tp->write_seq = 1;
136 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
137 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
138 sock_hold(sktw);
139 return 1;
140 }
141
142 return 0;
143}
144
145EXPORT_SYMBOL_GPL(tcp_twsk_unique);
146
1da177e4
LT
147/* This will initiate an outgoing connection. */
148int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
149{
150 struct inet_sock *inet = inet_sk(sk);
151 struct tcp_sock *tp = tcp_sk(sk);
152 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
153 struct rtable *rt;
bada8adc 154 __be32 daddr, nexthop;
1da177e4
LT
155 int tmp;
156 int err;
157
158 if (addr_len < sizeof(struct sockaddr_in))
159 return -EINVAL;
160
161 if (usin->sin_family != AF_INET)
162 return -EAFNOSUPPORT;
163
164 nexthop = daddr = usin->sin_addr.s_addr;
165 if (inet->opt && inet->opt->srr) {
166 if (!daddr)
167 return -EINVAL;
168 nexthop = inet->opt->faddr;
169 }
170
171 tmp = ip_route_connect(&rt, nexthop, inet->saddr,
172 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
173 IPPROTO_TCP,
8eb9086f 174 inet->sport, usin->sin_port, sk, 1);
584bdf8c
WD
175 if (tmp < 0) {
176 if (tmp == -ENETUNREACH)
7c73a6fa 177 IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
1da177e4 178 return tmp;
584bdf8c 179 }
1da177e4
LT
180
181 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
182 ip_rt_put(rt);
183 return -ENETUNREACH;
184 }
185
186 if (!inet->opt || !inet->opt->srr)
187 daddr = rt->rt_dst;
188
189 if (!inet->saddr)
190 inet->saddr = rt->rt_src;
191 inet->rcv_saddr = inet->saddr;
192
193 if (tp->rx_opt.ts_recent_stamp && inet->daddr != daddr) {
194 /* Reset inherited state */
195 tp->rx_opt.ts_recent = 0;
196 tp->rx_opt.ts_recent_stamp = 0;
197 tp->write_seq = 0;
198 }
199
295ff7ed 200 if (tcp_death_row.sysctl_tw_recycle &&
1da177e4
LT
201 !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
202 struct inet_peer *peer = rt_get_peer(rt);
7174259e
ACM
203 /*
204 * VJ's idea. We save last timestamp seen from
205 * the destination in peer table, when entering state
206 * TIME-WAIT * and initialize rx_opt.ts_recent from it,
207 * when trying new connection.
1da177e4 208 */
7174259e 209 if (peer != NULL &&
9d729f72 210 peer->tcp_ts_stamp + TCP_PAWS_MSL >= get_seconds()) {
1da177e4
LT
211 tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
212 tp->rx_opt.ts_recent = peer->tcp_ts;
213 }
214 }
215
216 inet->dport = usin->sin_port;
217 inet->daddr = daddr;
218
d83d8461 219 inet_csk(sk)->icsk_ext_hdr_len = 0;
1da177e4 220 if (inet->opt)
d83d8461 221 inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;
1da177e4
LT
222
223 tp->rx_opt.mss_clamp = 536;
224
225 /* Socket identity is still unknown (sport may be zero).
 226 * However we set state to SYN-SENT and, without releasing the socket
 227 * lock, select a source port, enter ourselves into the hash tables and
228 * complete initialization after this.
229 */
230 tcp_set_state(sk, TCP_SYN_SENT);
a7f5e7f1 231 err = inet_hash_connect(&tcp_death_row, sk);
1da177e4
LT
232 if (err)
233 goto failure;
234
7174259e
ACM
235 err = ip_route_newports(&rt, IPPROTO_TCP,
236 inet->sport, inet->dport, sk);
1da177e4
LT
237 if (err)
238 goto failure;
239
240 /* OK, now commit destination to socket. */
bcd76111 241 sk->sk_gso_type = SKB_GSO_TCPV4;
6cbb0df7 242 sk_setup_caps(sk, &rt->u.dst);
1da177e4
LT
243
244 if (!tp->write_seq)
245 tp->write_seq = secure_tcp_sequence_number(inet->saddr,
246 inet->daddr,
247 inet->sport,
248 usin->sin_port);
249
250 inet->id = tp->write_seq ^ jiffies;
251
252 err = tcp_connect(sk);
253 rt = NULL;
254 if (err)
255 goto failure;
256
257 return 0;
258
259failure:
7174259e
ACM
260 /*
261 * This unhashes the socket and releases the local port,
262 * if necessary.
263 */
1da177e4
LT
264 tcp_set_state(sk, TCP_CLOSE);
265 ip_rt_put(rt);
266 sk->sk_route_caps = 0;
267 inet->dport = 0;
268 return err;
269}
270
1da177e4
LT
271/*
272 * This routine does path mtu discovery as defined in RFC1191.
273 */
40efc6fa 274static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
1da177e4
LT
275{
276 struct dst_entry *dst;
277 struct inet_sock *inet = inet_sk(sk);
1da177e4
LT
278
279 /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
 280 * sent out by Linux are always < 576 bytes so they should go through
281 * unfragmented).
282 */
283 if (sk->sk_state == TCP_LISTEN)
284 return;
285
 286 /* We don't check in the dst entry if pmtu discovery is forbidden
 287 * on this route. We just assume that no packet-too-big packets
 288 * are sent back when pmtu discovery is not active.
e905a9ed 289 * There is a small race when the user changes this flag in the
1da177e4
LT
290 * route, but I think that's acceptable.
291 */
292 if ((dst = __sk_dst_check(sk, 0)) == NULL)
293 return;
294
295 dst->ops->update_pmtu(dst, mtu);
296
297 /* Something is about to be wrong... Remember soft error
 298 * for the case, if this connection will not be able to recover.
299 */
300 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
301 sk->sk_err_soft = EMSGSIZE;
302
303 mtu = dst_mtu(dst);
304
305 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
d83d8461 306 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
1da177e4
LT
307 tcp_sync_mss(sk, mtu);
308
309 /* Resend the TCP packet because it's
310 * clear that the old packet has been
311 * dropped. This is the new "fast" path mtu
312 * discovery.
313 */
314 tcp_simple_retransmit(sk);
315 } /* else let the usual retransmit timer handle it */
316}
317
318/*
319 * This routine is called by the ICMP module when it gets some
320 * sort of error condition. If err < 0 then the socket should
321 * be closed and the error returned to the user. If err > 0
322 * it's just the icmp type << 8 | icmp code. After adjustment
323 * header points to the first 8 bytes of the tcp header. We need
324 * to find the appropriate port.
325 *
326 * The locking strategy used here is very "optimistic". When
327 * someone else accesses the socket the ICMP is just dropped
328 * and for some paths there is no check at all.
329 * A more general error queue to queue errors for later handling
330 * is probably better.
331 *
332 */
333
334void tcp_v4_err(struct sk_buff *skb, u32 info)
335{
336 struct iphdr *iph = (struct iphdr *)skb->data;
337 struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
338 struct tcp_sock *tp;
339 struct inet_sock *inet;
88c7664f
ACM
340 const int type = icmp_hdr(skb)->type;
341 const int code = icmp_hdr(skb)->code;
1da177e4
LT
342 struct sock *sk;
343 __u32 seq;
344 int err;
fd54d716 345 struct net *net = dev_net(skb->dev);
1da177e4
LT
346
347 if (skb->len < (iph->ihl << 2) + 8) {
dcfc23ca 348 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
1da177e4
LT
349 return;
350 }
351
fd54d716 352 sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
c67499c0 353 iph->saddr, th->source, inet_iif(skb));
1da177e4 354 if (!sk) {
dcfc23ca 355 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
1da177e4
LT
356 return;
357 }
358 if (sk->sk_state == TCP_TIME_WAIT) {
9469c7b4 359 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
360 return;
361 }
362
363 bh_lock_sock(sk);
364 /* If too many ICMPs get dropped on busy
365 * servers this needs to be solved differently.
366 */
367 if (sock_owned_by_user(sk))
de0744af 368 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
1da177e4
LT
369
370 if (sk->sk_state == TCP_CLOSE)
371 goto out;
372
373 tp = tcp_sk(sk);
374 seq = ntohl(th->seq);
375 if (sk->sk_state != TCP_LISTEN &&
376 !between(seq, tp->snd_una, tp->snd_nxt)) {
de0744af 377 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
1da177e4
LT
378 goto out;
379 }
380
381 switch (type) {
382 case ICMP_SOURCE_QUENCH:
383 /* Just silently ignore these. */
384 goto out;
385 case ICMP_PARAMETERPROB:
386 err = EPROTO;
387 break;
388 case ICMP_DEST_UNREACH:
389 if (code > NR_ICMP_UNREACH)
390 goto out;
391
392 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
393 if (!sock_owned_by_user(sk))
394 do_pmtu_discovery(sk, iph, info);
395 goto out;
396 }
397
398 err = icmp_err_convert[code].errno;
399 break;
400 case ICMP_TIME_EXCEEDED:
401 err = EHOSTUNREACH;
402 break;
403 default:
404 goto out;
405 }
406
407 switch (sk->sk_state) {
60236fdd 408 struct request_sock *req, **prev;
1da177e4
LT
409 case TCP_LISTEN:
410 if (sock_owned_by_user(sk))
411 goto out;
412
463c84b9
ACM
413 req = inet_csk_search_req(sk, &prev, th->dest,
414 iph->daddr, iph->saddr);
1da177e4
LT
415 if (!req)
416 goto out;
417
418 /* ICMPs are not backlogged, hence we cannot get
419 an established socket here.
420 */
421 BUG_TRAP(!req->sk);
422
2e6599cb 423 if (seq != tcp_rsk(req)->snt_isn) {
de0744af 424 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
1da177e4
LT
425 goto out;
426 }
427
428 /*
429 * Still in SYN_RECV, just remove it silently.
430 * There is no good way to pass the error to the newly
431 * created socket, and POSIX does not want network
432 * errors returned from accept().
433 */
463c84b9 434 inet_csk_reqsk_queue_drop(sk, req, prev);
1da177e4
LT
435 goto out;
436
437 case TCP_SYN_SENT:
438 case TCP_SYN_RECV: /* Cannot happen.
 439 It can, f.e., if SYNs crossed.
440 */
441 if (!sock_owned_by_user(sk)) {
1da177e4
LT
442 sk->sk_err = err;
443
444 sk->sk_error_report(sk);
445
446 tcp_done(sk);
447 } else {
448 sk->sk_err_soft = err;
449 }
450 goto out;
451 }
452
453 /* If we've already connected we will keep trying
454 * until we time out, or the user gives up.
455 *
 456 * rfc1122 4.2.3.9 allows us to consider as hard errors
 457 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
 458 * but it is obsoleted by pmtu discovery).
 459 *
 460 * Note that in the modern internet, where routing is unreliable
 461 * and in each dark corner broken firewalls sit, sending random
 462 * errors ordered by their masters, even these two messages finally lose
 463 * their original sense (even Linux sends invalid PORT_UNREACHs)
464 *
465 * Now we are in compliance with RFCs.
466 * --ANK (980905)
467 */
468
469 inet = inet_sk(sk);
470 if (!sock_owned_by_user(sk) && inet->recverr) {
471 sk->sk_err = err;
472 sk->sk_error_report(sk);
473 } else { /* Only an error on timeout */
474 sk->sk_err_soft = err;
475 }
476
477out:
478 bh_unlock_sock(sk);
479 sock_put(sk);
480}
481
482/* This routine computes an IPv4 TCP checksum. */
8292a17a 483void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
1da177e4
LT
484{
485 struct inet_sock *inet = inet_sk(sk);
aa8223c7 486 struct tcphdr *th = tcp_hdr(skb);
1da177e4 487
84fa7933 488 if (skb->ip_summed == CHECKSUM_PARTIAL) {
ba7808ea
FD
489 th->check = ~tcp_v4_check(len, inet->saddr,
490 inet->daddr, 0);
663ead3b 491 skb->csum_start = skb_transport_header(skb) - skb->head;
ff1dcadb 492 skb->csum_offset = offsetof(struct tcphdr, check);
1da177e4 493 } else {
ba7808ea 494 th->check = tcp_v4_check(len, inet->saddr, inet->daddr,
1da177e4
LT
495 csum_partial((char *)th,
496 th->doff << 2,
497 skb->csum));
498 }
499}
500
a430a43d
HX
501int tcp_v4_gso_send_check(struct sk_buff *skb)
502{
eddc9ec5 503 const struct iphdr *iph;
a430a43d
HX
504 struct tcphdr *th;
505
506 if (!pskb_may_pull(skb, sizeof(*th)))
507 return -EINVAL;
508
eddc9ec5 509 iph = ip_hdr(skb);
aa8223c7 510 th = tcp_hdr(skb);
a430a43d
HX
511
512 th->check = 0;
ba7808ea 513 th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0);
663ead3b 514 skb->csum_start = skb_transport_header(skb) - skb->head;
ff1dcadb 515 skb->csum_offset = offsetof(struct tcphdr, check);
84fa7933 516 skb->ip_summed = CHECKSUM_PARTIAL;
a430a43d
HX
517 return 0;
518}
519
1da177e4
LT
520/*
521 * This routine will send an RST to the other tcp.
522 *
523 * Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
524 * for reset.
525 * Answer: if a packet caused RST, it is not for a socket
526 * existing in our system, if it is matched to a socket,
527 * it is just duplicate segment or bug in other side's TCP.
 528 * So we build the reply based only on parameters
 529 * that arrived with the segment.
530 * Exception: precedence violation. We do not implement it in any case.
531 */
532
cfb6eeb4 533static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
1da177e4 534{
aa8223c7 535 struct tcphdr *th = tcp_hdr(skb);
cfb6eeb4
YH
536 struct {
537 struct tcphdr th;
538#ifdef CONFIG_TCP_MD5SIG
714e85be 539 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
cfb6eeb4
YH
540#endif
541 } rep;
1da177e4 542 struct ip_reply_arg arg;
cfb6eeb4
YH
543#ifdef CONFIG_TCP_MD5SIG
544 struct tcp_md5sig_key *key;
545#endif
a86b1e30 546 struct net *net;
1da177e4
LT
547
548 /* Never send a reset in response to a reset. */
549 if (th->rst)
550 return;
551
ee6b9673 552 if (skb->rtable->rt_type != RTN_LOCAL)
1da177e4
LT
553 return;
554
555 /* Swap the send and the receive. */
cfb6eeb4
YH
556 memset(&rep, 0, sizeof(rep));
557 rep.th.dest = th->source;
558 rep.th.source = th->dest;
559 rep.th.doff = sizeof(struct tcphdr) / 4;
560 rep.th.rst = 1;
1da177e4
LT
561
562 if (th->ack) {
cfb6eeb4 563 rep.th.seq = th->ack_seq;
1da177e4 564 } else {
cfb6eeb4
YH
565 rep.th.ack = 1;
566 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
567 skb->len - (th->doff << 2));
1da177e4
LT
568 }
569
7174259e 570 memset(&arg, 0, sizeof(arg));
cfb6eeb4
YH
571 arg.iov[0].iov_base = (unsigned char *)&rep;
572 arg.iov[0].iov_len = sizeof(rep.th);
573
574#ifdef CONFIG_TCP_MD5SIG
eddc9ec5 575 key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
cfb6eeb4
YH
576 if (key) {
577 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
578 (TCPOPT_NOP << 16) |
579 (TCPOPT_MD5SIG << 8) |
580 TCPOLEN_MD5SIG);
581 /* Update length and the length the header thinks exists */
582 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
583 rep.th.doff = arg.iov[0].iov_len / 4;
584
49a72dfb
AL
585 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
586 key, ip_hdr(skb)->daddr,
587 ip_hdr(skb)->saddr, &rep.th);
cfb6eeb4
YH
588 }
589#endif
eddc9ec5
ACM
590 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
591 ip_hdr(skb)->saddr, /* XXX */
1da177e4
LT
592 sizeof(struct tcphdr), IPPROTO_TCP, 0);
593 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
594
a86b1e30
PE
595 net = dev_net(skb->dst->dev);
596 ip_send_reply(net->ipv4.tcp_sock, skb,
7feb49c8 597 &arg, arg.iov[0].iov_len);
1da177e4 598
63231bdd
PE
599 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
600 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
1da177e4
LT
601}
602
 603/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
 604 outside socket context, is ugly, certainly. What can I do?
605 */
606
9501f972
YH
607static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
608 u32 win, u32 ts, int oif,
609 struct tcp_md5sig_key *key)
1da177e4 610{
aa8223c7 611 struct tcphdr *th = tcp_hdr(skb);
1da177e4
LT
612 struct {
613 struct tcphdr th;
714e85be 614 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
cfb6eeb4 615#ifdef CONFIG_TCP_MD5SIG
714e85be 616 + (TCPOLEN_MD5SIG_ALIGNED >> 2)
cfb6eeb4
YH
617#endif
618 ];
1da177e4
LT
619 } rep;
620 struct ip_reply_arg arg;
a86b1e30 621 struct net *net = dev_net(skb->dev);
1da177e4
LT
622
623 memset(&rep.th, 0, sizeof(struct tcphdr));
7174259e 624 memset(&arg, 0, sizeof(arg));
1da177e4
LT
625
626 arg.iov[0].iov_base = (unsigned char *)&rep;
627 arg.iov[0].iov_len = sizeof(rep.th);
628 if (ts) {
cfb6eeb4
YH
629 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
630 (TCPOPT_TIMESTAMP << 8) |
631 TCPOLEN_TIMESTAMP);
632 rep.opt[1] = htonl(tcp_time_stamp);
633 rep.opt[2] = htonl(ts);
cb48cfe8 634 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
1da177e4
LT
635 }
636
637 /* Swap the send and the receive. */
638 rep.th.dest = th->source;
639 rep.th.source = th->dest;
640 rep.th.doff = arg.iov[0].iov_len / 4;
641 rep.th.seq = htonl(seq);
642 rep.th.ack_seq = htonl(ack);
643 rep.th.ack = 1;
644 rep.th.window = htons(win);
645
cfb6eeb4 646#ifdef CONFIG_TCP_MD5SIG
cfb6eeb4
YH
647 if (key) {
648 int offset = (ts) ? 3 : 0;
649
650 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
651 (TCPOPT_NOP << 16) |
652 (TCPOPT_MD5SIG << 8) |
653 TCPOLEN_MD5SIG);
654 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
655 rep.th.doff = arg.iov[0].iov_len/4;
656
49a72dfb
AL
657 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
658 key, ip_hdr(skb)->daddr,
659 ip_hdr(skb)->saddr, &rep.th);
cfb6eeb4
YH
660 }
661#endif
eddc9ec5
ACM
662 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
663 ip_hdr(skb)->saddr, /* XXX */
1da177e4
LT
664 arg.iov[0].iov_len, IPPROTO_TCP, 0);
665 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
9501f972
YH
666 if (oif)
667 arg.bound_dev_if = oif;
1da177e4 668
a86b1e30 669 ip_send_reply(net->ipv4.tcp_sock, skb,
7feb49c8 670 &arg, arg.iov[0].iov_len);
1da177e4 671
63231bdd 672 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
1da177e4
LT
673}
674
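/* Echo an ACK on behalf of a TIME-WAIT socket and drop the
 * timewait reference taken by the caller.
 */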
675static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
676{
8feaf0c0 677 struct inet_timewait_sock *tw = inet_twsk(sk);
cfb6eeb4 678 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1da177e4 679
9501f972 680 tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
7174259e 681 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
9501f972
YH
682 tcptw->tw_ts_recent,
683 tw->tw_bound_dev_if,
684 tcp_twsk_md5_key(tcptw)
685 );
1da177e4 686
8feaf0c0 687 inet_twsk_put(tw);
1da177e4
LT
688}
689
7174259e
ACM
690static void tcp_v4_reqsk_send_ack(struct sk_buff *skb,
691 struct request_sock *req)
1da177e4 692{
9501f972 693 tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
cfb6eeb4 694 tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
9501f972
YH
695 req->ts_recent,
696 0,
697 tcp_v4_md5_do_lookup(skb->sk, ip_hdr(skb)->daddr));
1da177e4
LT
698}
699
1da177e4 700/*
9bf1d83e 701 * Send a SYN-ACK after having received a SYN.
60236fdd 702 * This still operates on a request_sock only, not on a big
1da177e4
LT
703 * socket.
704 */
fd80eb94
DL
705static int __tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
706 struct dst_entry *dst)
1da177e4 707{
2e6599cb 708 const struct inet_request_sock *ireq = inet_rsk(req);
1da177e4
LT
709 int err = -1;
710 struct sk_buff * skb;
711
712 /* First, grab a route. */
463c84b9 713 if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
fd80eb94 714 return -1;
1da177e4
LT
715
716 skb = tcp_make_synack(sk, dst, req);
717
718 if (skb) {
aa8223c7 719 struct tcphdr *th = tcp_hdr(skb);
1da177e4 720
ba7808ea 721 th->check = tcp_v4_check(skb->len,
2e6599cb
ACM
722 ireq->loc_addr,
723 ireq->rmt_addr,
1da177e4
LT
724 csum_partial((char *)th, skb->len,
725 skb->csum));
726
2e6599cb
ACM
727 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
728 ireq->rmt_addr,
729 ireq->opt);
b9df3cb8 730 err = net_xmit_eval(err);
1da177e4
LT
731 }
732
1da177e4
LT
733 dst_release(dst);
734 return err;
735}
736
fd80eb94
DL
737static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req)
738{
739 return __tcp_v4_send_synack(sk, req, NULL);
740}
741
1da177e4 742/*
60236fdd 743 * IPv4 request_sock destructor.
1da177e4 744 */
60236fdd 745static void tcp_v4_reqsk_destructor(struct request_sock *req)
1da177e4 746{
a51482bd 747 kfree(inet_rsk(req)->opt);
1da177e4
LT
748}
749
80e40daa 750#ifdef CONFIG_SYN_COOKIES
40efc6fa 751static void syn_flood_warning(struct sk_buff *skb)
1da177e4
LT
752{
753 static unsigned long warntime;
754
755 if (time_after(jiffies, (warntime + HZ * 60))) {
756 warntime = jiffies;
757 printk(KERN_INFO
758 "possible SYN flooding on port %d. Sending cookies.\n",
aa8223c7 759 ntohs(tcp_hdr(skb)->dest));
1da177e4
LT
760 }
761}
80e40daa 762#endif
1da177e4
LT
763
764/*
60236fdd 765 * Save and compile IPv4 options into the request_sock if needed.
1da177e4 766 */
40efc6fa
SH
767static struct ip_options *tcp_v4_save_options(struct sock *sk,
768 struct sk_buff *skb)
1da177e4
LT
769{
770 struct ip_options *opt = &(IPCB(skb)->opt);
771 struct ip_options *dopt = NULL;
772
773 if (opt && opt->optlen) {
774 int opt_size = optlength(opt);
775 dopt = kmalloc(opt_size, GFP_ATOMIC);
776 if (dopt) {
777 if (ip_options_echo(dopt, skb)) {
778 kfree(dopt);
779 dopt = NULL;
780 }
781 }
782 }
783 return dopt;
784}
785
cfb6eeb4
YH
786#ifdef CONFIG_TCP_MD5SIG
787/*
788 * RFC2385 MD5 checksumming requires a mapping of
789 * IP address->MD5 Key.
790 * We need to maintain these in the sk structure.
791 */
792
793/* Find the Key structure for an address. */
7174259e
ACM
794static struct tcp_md5sig_key *
795 tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
cfb6eeb4
YH
796{
797 struct tcp_sock *tp = tcp_sk(sk);
798 int i;
799
800 if (!tp->md5sig_info || !tp->md5sig_info->entries4)
801 return NULL;
802 for (i = 0; i < tp->md5sig_info->entries4; i++) {
803 if (tp->md5sig_info->keys4[i].addr == addr)
f8ab18d2 804 return &tp->md5sig_info->keys4[i].base;
cfb6eeb4
YH
805 }
806 return NULL;
807}
808
809struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
810 struct sock *addr_sk)
811{
812 return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->daddr);
813}
814
815EXPORT_SYMBOL(tcp_v4_md5_lookup);
816
f5b99bcd
AB
817static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
818 struct request_sock *req)
cfb6eeb4
YH
819{
820 return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
821}
822
823/* This can be called on a newly created socket, from other files */
824int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
825 u8 *newkey, u8 newkeylen)
826{
827 /* Add Key to the list */
b0a713e9 828 struct tcp_md5sig_key *key;
cfb6eeb4
YH
829 struct tcp_sock *tp = tcp_sk(sk);
830 struct tcp4_md5sig_key *keys;
831
b0a713e9 832 key = tcp_v4_md5_do_lookup(sk, addr);
cfb6eeb4
YH
833 if (key) {
834 /* Pre-existing entry - just update that one. */
b0a713e9
MD
835 kfree(key->key);
836 key->key = newkey;
837 key->keylen = newkeylen;
cfb6eeb4 838 } else {
f6685938
ACM
839 struct tcp_md5sig_info *md5sig;
840
cfb6eeb4 841 if (!tp->md5sig_info) {
f6685938
ACM
842 tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
843 GFP_ATOMIC);
cfb6eeb4
YH
844 if (!tp->md5sig_info) {
845 kfree(newkey);
846 return -ENOMEM;
847 }
3d7dbeac 848 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
cfb6eeb4
YH
849 }
850 if (tcp_alloc_md5sig_pool() == NULL) {
851 kfree(newkey);
852 return -ENOMEM;
853 }
f6685938
ACM
854 md5sig = tp->md5sig_info;
855
856 if (md5sig->alloced4 == md5sig->entries4) {
857 keys = kmalloc((sizeof(*keys) *
e905a9ed 858 (md5sig->entries4 + 1)), GFP_ATOMIC);
cfb6eeb4
YH
859 if (!keys) {
860 kfree(newkey);
861 tcp_free_md5sig_pool();
862 return -ENOMEM;
863 }
864
f6685938
ACM
865 if (md5sig->entries4)
866 memcpy(keys, md5sig->keys4,
867 sizeof(*keys) * md5sig->entries4);
cfb6eeb4
YH
868
869 /* Free old key list, and reference new one */
a80cc20d 870 kfree(md5sig->keys4);
f6685938
ACM
871 md5sig->keys4 = keys;
872 md5sig->alloced4++;
cfb6eeb4 873 }
f6685938 874 md5sig->entries4++;
f8ab18d2
DM
875 md5sig->keys4[md5sig->entries4 - 1].addr = addr;
876 md5sig->keys4[md5sig->entries4 - 1].base.key = newkey;
877 md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
cfb6eeb4
YH
878 }
879 return 0;
880}
881
882EXPORT_SYMBOL(tcp_v4_md5_do_add);
883
884static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
885 u8 *newkey, u8 newkeylen)
886{
887 return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->daddr,
888 newkey, newkeylen);
889}
890
891int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
892{
893 struct tcp_sock *tp = tcp_sk(sk);
894 int i;
895
896 for (i = 0; i < tp->md5sig_info->entries4; i++) {
897 if (tp->md5sig_info->keys4[i].addr == addr) {
898 /* Free the key */
f8ab18d2 899 kfree(tp->md5sig_info->keys4[i].base.key);
cfb6eeb4
YH
900 tp->md5sig_info->entries4--;
901
902 if (tp->md5sig_info->entries4 == 0) {
903 kfree(tp->md5sig_info->keys4);
904 tp->md5sig_info->keys4 = NULL;
8228a18d 905 tp->md5sig_info->alloced4 = 0;
7174259e 906 } else if (tp->md5sig_info->entries4 != i) {
cfb6eeb4 907 /* Need to do some manipulation */
354faf09
YH
908 memmove(&tp->md5sig_info->keys4[i],
909 &tp->md5sig_info->keys4[i+1],
910 (tp->md5sig_info->entries4 - i) *
911 sizeof(struct tcp4_md5sig_key));
cfb6eeb4
YH
912 }
913 tcp_free_md5sig_pool();
914 return 0;
915 }
916 }
917 return -ENOENT;
918}
919
920EXPORT_SYMBOL(tcp_v4_md5_do_del);
921
7174259e 922static void tcp_v4_clear_md5_list(struct sock *sk)
cfb6eeb4
YH
923{
924 struct tcp_sock *tp = tcp_sk(sk);
925
 926 /* Free each key, then the set of keys,
927 * the crypto element, and then decrement our
928 * hold on the last resort crypto.
929 */
930 if (tp->md5sig_info->entries4) {
931 int i;
932 for (i = 0; i < tp->md5sig_info->entries4; i++)
f8ab18d2 933 kfree(tp->md5sig_info->keys4[i].base.key);
cfb6eeb4
YH
934 tp->md5sig_info->entries4 = 0;
935 tcp_free_md5sig_pool();
936 }
937 if (tp->md5sig_info->keys4) {
938 kfree(tp->md5sig_info->keys4);
939 tp->md5sig_info->keys4 = NULL;
940 tp->md5sig_info->alloced4 = 0;
941 }
942}
943
7174259e
ACM
944static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
945 int optlen)
cfb6eeb4
YH
946{
947 struct tcp_md5sig cmd;
948 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
949 u8 *newkey;
950
951 if (optlen < sizeof(cmd))
952 return -EINVAL;
953
7174259e 954 if (copy_from_user(&cmd, optval, sizeof(cmd)))
cfb6eeb4
YH
955 return -EFAULT;
956
957 if (sin->sin_family != AF_INET)
958 return -EINVAL;
959
960 if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
961 if (!tcp_sk(sk)->md5sig_info)
962 return -ENOENT;
963 return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
964 }
965
966 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
967 return -EINVAL;
968
969 if (!tcp_sk(sk)->md5sig_info) {
970 struct tcp_sock *tp = tcp_sk(sk);
7174259e 971 struct tcp_md5sig_info *p = kzalloc(sizeof(*p), GFP_KERNEL);
cfb6eeb4 972
cfb6eeb4
YH
973 if (!p)
974 return -EINVAL;
975
976 tp->md5sig_info = p;
3d7dbeac 977 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
cfb6eeb4
YH
978 }
979
f6685938 980 newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
cfb6eeb4
YH
981 if (!newkey)
982 return -ENOMEM;
cfb6eeb4
YH
983 return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
984 newkey, cmd.tcpm_keylen);
985}
986
49a72dfb
AL
987static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
988 __be32 daddr, __be32 saddr, int nbytes)
cfb6eeb4 989{
cfb6eeb4 990 struct tcp4_pseudohdr *bp;
49a72dfb 991 struct scatterlist sg;
cfb6eeb4
YH
992
993 bp = &hp->md5_blk.ip4;
cfb6eeb4
YH
994
995 /*
49a72dfb 996 * 1. the TCP pseudo-header (in the order: source IP address,
cfb6eeb4
YH
997 * destination IP address, zero-padded protocol number, and
998 * segment length)
999 */
1000 bp->saddr = saddr;
1001 bp->daddr = daddr;
1002 bp->pad = 0;
076fb722 1003 bp->protocol = IPPROTO_TCP;
49a72dfb 1004 bp->len = cpu_to_be16(nbytes);
c7da57a1 1005
49a72dfb
AL
1006 sg_init_one(&sg, bp, sizeof(*bp));
1007 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1008}
1009
1010static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
1011 __be32 daddr, __be32 saddr, struct tcphdr *th)
1012{
1013 struct tcp_md5sig_pool *hp;
1014 struct hash_desc *desc;
1015
1016 hp = tcp_get_md5sig_pool();
1017 if (!hp)
1018 goto clear_hash_noput;
1019 desc = &hp->md5_desc;
1020
1021 if (crypto_hash_init(desc))
1022 goto clear_hash;
1023 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1024 goto clear_hash;
1025 if (tcp_md5_hash_header(hp, th))
1026 goto clear_hash;
1027 if (tcp_md5_hash_key(hp, key))
1028 goto clear_hash;
1029 if (crypto_hash_final(desc, md5_hash))
cfb6eeb4
YH
1030 goto clear_hash;
1031
cfb6eeb4 1032 tcp_put_md5sig_pool();
cfb6eeb4 1033 return 0;
49a72dfb 1034
cfb6eeb4
YH
1035clear_hash:
1036 tcp_put_md5sig_pool();
1037clear_hash_noput:
1038 memset(md5_hash, 0, 16);
49a72dfb 1039 return 1;
cfb6eeb4
YH
1040}
1041
49a72dfb
AL
1042int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1043 struct sock *sk, struct request_sock *req,
1044 struct sk_buff *skb)
cfb6eeb4 1045{
49a72dfb
AL
1046 struct tcp_md5sig_pool *hp;
1047 struct hash_desc *desc;
1048 struct tcphdr *th = tcp_hdr(skb);
cfb6eeb4
YH
1049 __be32 saddr, daddr;
1050
1051 if (sk) {
1052 saddr = inet_sk(sk)->saddr;
1053 daddr = inet_sk(sk)->daddr;
49a72dfb
AL
1054 } else if (req) {
1055 saddr = inet_rsk(req)->loc_addr;
1056 daddr = inet_rsk(req)->rmt_addr;
cfb6eeb4 1057 } else {
49a72dfb
AL
1058 const struct iphdr *iph = ip_hdr(skb);
1059 saddr = iph->saddr;
1060 daddr = iph->daddr;
cfb6eeb4 1061 }
49a72dfb
AL
1062
1063 hp = tcp_get_md5sig_pool();
1064 if (!hp)
1065 goto clear_hash_noput;
1066 desc = &hp->md5_desc;
1067
1068 if (crypto_hash_init(desc))
1069 goto clear_hash;
1070
1071 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1072 goto clear_hash;
1073 if (tcp_md5_hash_header(hp, th))
1074 goto clear_hash;
1075 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1076 goto clear_hash;
1077 if (tcp_md5_hash_key(hp, key))
1078 goto clear_hash;
1079 if (crypto_hash_final(desc, md5_hash))
1080 goto clear_hash;
1081
1082 tcp_put_md5sig_pool();
1083 return 0;
1084
1085clear_hash:
1086 tcp_put_md5sig_pool();
1087clear_hash_noput:
1088 memset(md5_hash, 0, 16);
1089 return 1;
cfb6eeb4
YH
1090}
1091
49a72dfb 1092EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
cfb6eeb4 1093
7174259e 1094static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
cfb6eeb4
YH
1095{
1096 /*
1097 * This gets called for each TCP segment that arrives
1098 * so we want to be efficient.
1099 * We have 3 drop cases:
1100 * o No MD5 hash and one expected.
1101 * o MD5 hash and we're not expecting one.
 1102 * o MD5 hash and it's wrong.
1103 */
1104 __u8 *hash_location = NULL;
1105 struct tcp_md5sig_key *hash_expected;
eddc9ec5 1106 const struct iphdr *iph = ip_hdr(skb);
aa8223c7 1107 struct tcphdr *th = tcp_hdr(skb);
cfb6eeb4 1108 int genhash;
cfb6eeb4
YH
1109 unsigned char newhash[16];
1110
1111 hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
7d5d5525 1112 hash_location = tcp_parse_md5sig_option(th);
cfb6eeb4 1113
cfb6eeb4
YH
1114 /* We've parsed the options - do we have a hash? */
1115 if (!hash_expected && !hash_location)
1116 return 0;
1117
1118 if (hash_expected && !hash_location) {
a9fc00cc 1119 LIMIT_NETDEBUG(KERN_INFO "MD5 Hash expected but NOT found "
cfb6eeb4 1120 "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n",
7174259e
ACM
1121 NIPQUAD(iph->saddr), ntohs(th->source),
1122 NIPQUAD(iph->daddr), ntohs(th->dest));
cfb6eeb4
YH
1123 return 1;
1124 }
1125
1126 if (!hash_expected && hash_location) {
7174259e 1127 LIMIT_NETDEBUG(KERN_INFO "MD5 Hash NOT expected but found "
cfb6eeb4 1128 "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n",
7174259e
ACM
1129 NIPQUAD(iph->saddr), ntohs(th->source),
1130 NIPQUAD(iph->daddr), ntohs(th->dest));
cfb6eeb4
YH
1131 return 1;
1132 }
1133
1134 /* Okay, so this is hash_expected and hash_location -
1135 * so we need to calculate the checksum.
1136 */
49a72dfb
AL
1137 genhash = tcp_v4_md5_hash_skb(newhash,
1138 hash_expected,
1139 NULL, NULL, skb);
cfb6eeb4
YH
1140
1141 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1142 if (net_ratelimit()) {
1143 printk(KERN_INFO "MD5 Hash failed for "
1144 "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)%s\n",
7174259e
ACM
1145 NIPQUAD(iph->saddr), ntohs(th->source),
1146 NIPQUAD(iph->daddr), ntohs(th->dest),
cfb6eeb4 1147 genhash ? " tcp_v4_calc_md5_hash failed" : "");
cfb6eeb4
YH
1148 }
1149 return 1;
1150 }
1151 return 0;
1152}
1153
1154#endif
1155
72a3effa 1156struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1da177e4 1157 .family = PF_INET,
2e6599cb 1158 .obj_size = sizeof(struct tcp_request_sock),
1da177e4 1159 .rtx_syn_ack = tcp_v4_send_synack,
60236fdd
ACM
1160 .send_ack = tcp_v4_reqsk_send_ack,
1161 .destructor = tcp_v4_reqsk_destructor,
1da177e4
LT
1162 .send_reset = tcp_v4_send_reset,
1163};
1164
cfb6eeb4 1165#ifdef CONFIG_TCP_MD5SIG
b6332e6c 1166static struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
cfb6eeb4 1167 .md5_lookup = tcp_v4_reqsk_md5_lookup,
cfb6eeb4 1168};
b6332e6c 1169#endif
cfb6eeb4 1170
6d6ee43e
ACM
1171static struct timewait_sock_ops tcp_timewait_sock_ops = {
1172 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1173 .twsk_unique = tcp_twsk_unique,
cfb6eeb4 1174 .twsk_destructor= tcp_twsk_destructor,
6d6ee43e
ACM
1175};
1176
1da177e4
LT
1177int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1178{
2e6599cb 1179 struct inet_request_sock *ireq;
1da177e4 1180 struct tcp_options_received tmp_opt;
60236fdd 1181 struct request_sock *req;
eddc9ec5
ACM
1182 __be32 saddr = ip_hdr(skb)->saddr;
1183 __be32 daddr = ip_hdr(skb)->daddr;
1da177e4
LT
1184 __u32 isn = TCP_SKB_CB(skb)->when;
1185 struct dst_entry *dst = NULL;
1186#ifdef CONFIG_SYN_COOKIES
1187 int want_cookie = 0;
1188#else
1189#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
1190#endif
1191
 1192 /* Never answer to SYNs sent to broadcast or multicast */
ee6b9673 1193 if (skb->rtable->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1da177e4
LT
1194 goto drop;
1195
1196 /* TW buckets are converted to open requests without
 1197 * limitations; they conserve resources and the peer is
 1198 * evidently a real one.
1199 */
463c84b9 1200 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1da177e4
LT
1201#ifdef CONFIG_SYN_COOKIES
1202 if (sysctl_tcp_syncookies) {
1203 want_cookie = 1;
1204 } else
1205#endif
1206 goto drop;
1207 }
1208
1209 /* Accept backlog is full. If we have already queued enough
1210 * of warm entries in syn queue, drop request. It is better than
1211 * clogging syn queue with openreqs with exponentially increasing
1212 * timeout.
1213 */
463c84b9 1214 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1da177e4
LT
1215 goto drop;
1216
ce4a7d0d 1217 req = inet_reqsk_alloc(&tcp_request_sock_ops);
1da177e4
LT
1218 if (!req)
1219 goto drop;
1220
cfb6eeb4
YH
1221#ifdef CONFIG_TCP_MD5SIG
1222 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1223#endif
1224
1da177e4
LT
1225 tcp_clear_options(&tmp_opt);
1226 tmp_opt.mss_clamp = 536;
1227 tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss;
1228
1229 tcp_parse_options(skb, &tmp_opt, 0);
1230
4dfc2817 1231 if (want_cookie && !tmp_opt.saw_tstamp)
1da177e4 1232 tcp_clear_options(&tmp_opt);
1da177e4
LT
1233
1234 if (tmp_opt.saw_tstamp && !tmp_opt.rcv_tsval) {
 1235 /* Some OSes (unknown ones, but I see them on a web server, which
 1236 * contains information interesting only for windows'
 1237 * users) do not send their stamp in SYN. It is an easy case.
1238 * We simply do not advertise TS support.
1239 */
1240 tmp_opt.saw_tstamp = 0;
1241 tmp_opt.tstamp_ok = 0;
1242 }
1243 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1244
1245 tcp_openreq_init(req, &tmp_opt, skb);
1246
4237c75c
VY
1247 if (security_inet_conn_request(sk, skb, req))
1248 goto drop_and_free;
1249
2e6599cb
ACM
1250 ireq = inet_rsk(req);
1251 ireq->loc_addr = daddr;
1252 ireq->rmt_addr = saddr;
1253 ireq->opt = tcp_v4_save_options(sk, skb);
1da177e4 1254 if (!want_cookie)
aa8223c7 1255 TCP_ECN_create_request(req, tcp_hdr(skb));
1da177e4
LT
1256
1257 if (want_cookie) {
1258#ifdef CONFIG_SYN_COOKIES
1259 syn_flood_warning(skb);
4dfc2817 1260 req->cookie_ts = tmp_opt.tstamp_ok;
1da177e4
LT
1261#endif
1262 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1263 } else if (!isn) {
1264 struct inet_peer *peer = NULL;
1265
1266 /* VJ's idea. We save last timestamp seen
1267 * from the destination in peer table, when entering
1268 * state TIME-WAIT, and check against it before
1269 * accepting new connection request.
1270 *
1271 * If "isn" is not zero, this request hit alive
1272 * timewait bucket, so that all the necessary checks
1273 * are made in the function processing timewait state.
1274 */
1275 if (tmp_opt.saw_tstamp &&
295ff7ed 1276 tcp_death_row.sysctl_tw_recycle &&
463c84b9 1277 (dst = inet_csk_route_req(sk, req)) != NULL &&
1da177e4
LT
1278 (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
1279 peer->v4daddr == saddr) {
9d729f72 1280 if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
1da177e4
LT
1281 (s32)(peer->tcp_ts - req->ts_recent) >
1282 TCP_PAWS_WINDOW) {
de0744af 1283 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
7cd04fa7 1284 goto drop_and_release;
1da177e4
LT
1285 }
1286 }
1287 /* Kill the following clause, if you dislike this way. */
1288 else if (!sysctl_tcp_syncookies &&
463c84b9 1289 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1da177e4
LT
1290 (sysctl_max_syn_backlog >> 2)) &&
1291 (!peer || !peer->tcp_ts_stamp) &&
1292 (!dst || !dst_metric(dst, RTAX_RTT))) {
1293 /* Without syncookies last quarter of
1294 * backlog is filled with destinations,
1295 * proven to be alive.
1296 * It means that we continue to communicate
1297 * to destinations, already remembered
1298 * to the moment of synflood.
1299 */
64ce2073 1300 LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open "
a7d632b6 1301 "request from " NIPQUAD_FMT "/%u\n",
64ce2073 1302 NIPQUAD(saddr),
aa8223c7 1303 ntohs(tcp_hdr(skb)->source));
7cd04fa7 1304 goto drop_and_release;
1da177e4
LT
1305 }
1306
a94f723d 1307 isn = tcp_v4_init_sequence(skb);
1da177e4 1308 }
2e6599cb 1309 tcp_rsk(req)->snt_isn = isn;
1da177e4 1310
7cd04fa7 1311 if (__tcp_v4_send_synack(sk, req, dst) || want_cookie)
1da177e4
LT
1312 goto drop_and_free;
1313
7cd04fa7 1314 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1da177e4
LT
1315 return 0;
1316
7cd04fa7
DL
1317drop_and_release:
1318 dst_release(dst);
1da177e4 1319drop_and_free:
60236fdd 1320 reqsk_free(req);
1da177e4 1321drop:
1da177e4
LT
1322 return 0;
1323}
1324
1325
1326/*
1327 * The three way handshake has completed - we got a valid synack -
1328 * now create the new socket.
1329 */
1330struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
60236fdd 1331 struct request_sock *req,
1da177e4
LT
1332 struct dst_entry *dst)
1333{
2e6599cb 1334 struct inet_request_sock *ireq;
1da177e4
LT
1335 struct inet_sock *newinet;
1336 struct tcp_sock *newtp;
1337 struct sock *newsk;
cfb6eeb4
YH
1338#ifdef CONFIG_TCP_MD5SIG
1339 struct tcp_md5sig_key *key;
1340#endif
1da177e4
LT
1341
1342 if (sk_acceptq_is_full(sk))
1343 goto exit_overflow;
1344
463c84b9 1345 if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
1da177e4
LT
1346 goto exit;
1347
1348 newsk = tcp_create_openreq_child(sk, req, skb);
1349 if (!newsk)
1350 goto exit;
1351
bcd76111 1352 newsk->sk_gso_type = SKB_GSO_TCPV4;
6cbb0df7 1353 sk_setup_caps(newsk, dst);
1da177e4
LT
1354
1355 newtp = tcp_sk(newsk);
1356 newinet = inet_sk(newsk);
2e6599cb
ACM
1357 ireq = inet_rsk(req);
1358 newinet->daddr = ireq->rmt_addr;
1359 newinet->rcv_saddr = ireq->loc_addr;
1360 newinet->saddr = ireq->loc_addr;
1361 newinet->opt = ireq->opt;
1362 ireq->opt = NULL;
463c84b9 1363 newinet->mc_index = inet_iif(skb);
eddc9ec5 1364 newinet->mc_ttl = ip_hdr(skb)->ttl;
d83d8461 1365 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1da177e4 1366 if (newinet->opt)
d83d8461 1367 inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
1da177e4
LT
1368 newinet->id = newtp->write_seq ^ jiffies;
1369
5d424d5a 1370 tcp_mtup_init(newsk);
1da177e4
LT
1371 tcp_sync_mss(newsk, dst_mtu(dst));
1372 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1373 tcp_initialize_rcv_mss(newsk);
1374
cfb6eeb4
YH
1375#ifdef CONFIG_TCP_MD5SIG
1376 /* Copy over the MD5 key from the original socket */
1377 if ((key = tcp_v4_md5_do_lookup(sk, newinet->daddr)) != NULL) {
1378 /*
1379 * We're using one, so create a matching key
1380 * on the newsk structure. If we fail to get
1381 * memory, then we end up not copying the key
1382 * across. Shucks.
1383 */
f6685938
ACM
1384 char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
1385 if (newkey != NULL)
cfb6eeb4
YH
1386 tcp_v4_md5_do_add(newsk, inet_sk(sk)->daddr,
1387 newkey, key->keylen);
49a72dfb 1388 newsk->sk_route_caps &= ~NETIF_F_GSO_MASK;
cfb6eeb4
YH
1389 }
1390#endif
1391
ab1e0a13
ACM
1392 __inet_hash_nolisten(newsk);
1393 __inet_inherit_port(sk, newsk);
1da177e4
LT
1394
1395 return newsk;
1396
1397exit_overflow:
de0744af 1398 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1da177e4 1399exit:
de0744af 1400 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1da177e4
LT
1401 dst_release(dst);
1402 return NULL;
1403}
1404
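/* Handle a segment arriving on a listening socket: look for a matching
 * request sock first, then for an already established child socket, and
 * finally fall back to syncookie validation of a bare ACK.
 */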
1405static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1406{
aa8223c7 1407 struct tcphdr *th = tcp_hdr(skb);
eddc9ec5 1408 const struct iphdr *iph = ip_hdr(skb);
1da177e4 1409 struct sock *nsk;
60236fdd 1410 struct request_sock **prev;
1da177e4 1411 /* Find possible connection requests. */
463c84b9
ACM
1412 struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1413 iph->saddr, iph->daddr);
1da177e4
LT
1414 if (req)
1415 return tcp_check_req(sk, skb, req, prev);
1416
3b1e0a65 1417 nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
c67499c0 1418 th->source, iph->daddr, th->dest, inet_iif(skb));
1da177e4
LT
1419
1420 if (nsk) {
1421 if (nsk->sk_state != TCP_TIME_WAIT) {
1422 bh_lock_sock(nsk);
1423 return nsk;
1424 }
9469c7b4 1425 inet_twsk_put(inet_twsk(nsk));
1da177e4
LT
1426 return NULL;
1427 }
1428
1429#ifdef CONFIG_SYN_COOKIES
1430 if (!th->rst && !th->syn && th->ack)
1431 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1432#endif
1433 return sk;
1434}
1435
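/* Verify the checksum of an incoming segment when the hardware has
 * already summed it; otherwise set up skb->csum for deferred
 * verification (short packets are checked immediately). Returns
 * non-zero when the checksum is known to be bad.
 */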
b51655b9 1436static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
1da177e4 1437{
eddc9ec5
ACM
1438 const struct iphdr *iph = ip_hdr(skb);
1439
84fa7933 1440 if (skb->ip_summed == CHECKSUM_COMPLETE) {
eddc9ec5
ACM
1441 if (!tcp_v4_check(skb->len, iph->saddr,
1442 iph->daddr, skb->csum)) {
fb286bb2 1443 skb->ip_summed = CHECKSUM_UNNECESSARY;
1da177e4 1444 return 0;
fb286bb2 1445 }
1da177e4 1446 }
fb286bb2 1447
eddc9ec5 1448 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
fb286bb2
HX
1449 skb->len, IPPROTO_TCP, 0);
1450
1da177e4 1451 if (skb->len <= 76) {
fb286bb2 1452 return __skb_checksum_complete(skb);
1da177e4
LT
1453 }
1454 return 0;
1455}
1456
1457
 1458/* The socket must have its spinlock held when we get
1459 * here.
1460 *
1461 * We have a potential double-lock case here, so even when
1462 * doing backlog processing we use the BH locking scheme.
1463 * This is because we cannot sleep with the original spinlock
1464 * held.
1465 */
1466int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1467{
cfb6eeb4
YH
1468 struct sock *rsk;
1469#ifdef CONFIG_TCP_MD5SIG
1470 /*
1471 * We really want to reject the packet as early as possible
1472 * if:
 1473 * o We're expecting an MD5'd packet and there is no MD5 tcp option
1474 * o There is an MD5 option and we're not expecting one
1475 */
7174259e 1476 if (tcp_v4_inbound_md5_hash(sk, skb))
cfb6eeb4
YH
1477 goto discard;
1478#endif
1479
1da177e4
LT
1480 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1481 TCP_CHECK_TIMER(sk);
aa8223c7 1482 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
cfb6eeb4 1483 rsk = sk;
1da177e4 1484 goto reset;
cfb6eeb4 1485 }
1da177e4
LT
1486 TCP_CHECK_TIMER(sk);
1487 return 0;
1488 }
1489
ab6a5bb6 1490 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1da177e4
LT
1491 goto csum_err;
1492
1493 if (sk->sk_state == TCP_LISTEN) {
1494 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1495 if (!nsk)
1496 goto discard;
1497
1498 if (nsk != sk) {
cfb6eeb4
YH
1499 if (tcp_child_process(sk, nsk, skb)) {
1500 rsk = nsk;
1da177e4 1501 goto reset;
cfb6eeb4 1502 }
1da177e4
LT
1503 return 0;
1504 }
1505 }
1506
1507 TCP_CHECK_TIMER(sk);
aa8223c7 1508 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
cfb6eeb4 1509 rsk = sk;
1da177e4 1510 goto reset;
cfb6eeb4 1511 }
1da177e4
LT
1512 TCP_CHECK_TIMER(sk);
1513 return 0;
1514
1515reset:
cfb6eeb4 1516 tcp_v4_send_reset(rsk, skb);
1da177e4
LT
1517discard:
1518 kfree_skb(skb);
1519 /* Be careful here. If this function gets more complicated and
1520 * gcc suffers from register pressure on the x86, sk (in %ebx)
1521 * might be destroyed here. This current version compiles correctly,
1522 * but you have been warned.
1523 */
1524 return 0;
1525
1526csum_err:
63231bdd 1527 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1da177e4
LT
1528 goto discard;
1529}
1530
1531/*
1532 * From tcp_input.c
1533 */
1534
1535int tcp_v4_rcv(struct sk_buff *skb)
1536{
eddc9ec5 1537 const struct iphdr *iph;
1da177e4
LT
1538 struct tcphdr *th;
1539 struct sock *sk;
1540 int ret;
a86b1e30 1541 struct net *net = dev_net(skb->dev);
1da177e4
LT
1542
1543 if (skb->pkt_type != PACKET_HOST)
1544 goto discard_it;
1545
1546 /* Count it even if it's bad */
63231bdd 1547 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1da177e4
LT
1548
1549 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1550 goto discard_it;
1551
aa8223c7 1552 th = tcp_hdr(skb);
1da177e4
LT
1553
1554 if (th->doff < sizeof(struct tcphdr) / 4)
1555 goto bad_packet;
1556 if (!pskb_may_pull(skb, th->doff * 4))
1557 goto discard_it;
1558
1559 /* An explanation is required here, I think.
1560 * Packet length and doff are validated by header prediction,
caa20d9a 1561 * provided case of th->doff==0 is eliminated.
1da177e4 1562 * So, we defer the checks. */
60476372 1563 if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
1da177e4
LT
1564 goto bad_packet;
1565
aa8223c7 1566 th = tcp_hdr(skb);
eddc9ec5 1567 iph = ip_hdr(skb);
1da177e4
LT
1568 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1569 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1570 skb->len - th->doff * 4);
1571 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1572 TCP_SKB_CB(skb)->when = 0;
eddc9ec5 1573 TCP_SKB_CB(skb)->flags = iph->tos;
1da177e4
LT
1574 TCP_SKB_CB(skb)->sacked = 0;
1575
a86b1e30 1576 sk = __inet_lookup(net, &tcp_hashinfo, iph->saddr,
c67499c0 1577 th->source, iph->daddr, th->dest, inet_iif(skb));
1da177e4
LT
1578 if (!sk)
1579 goto no_tcp_socket;
1580
1581process:
1582 if (sk->sk_state == TCP_TIME_WAIT)
1583 goto do_time_wait;
1584
1585 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1586 goto discard_and_relse;
b59c2701 1587 nf_reset(skb);
1da177e4 1588
fda9ef5d 1589 if (sk_filter(sk, skb))
1da177e4
LT
1590 goto discard_and_relse;
1591
1592 skb->dev = NULL;
1593
c6366184 1594 bh_lock_sock_nested(sk);
1da177e4
LT
1595 ret = 0;
1596 if (!sock_owned_by_user(sk)) {
1a2449a8
CL
1597#ifdef CONFIG_NET_DMA
1598 struct tcp_sock *tp = tcp_sk(sk);
1599 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1600 tp->ucopy.dma_chan = get_softnet_dma();
1601 if (tp->ucopy.dma_chan)
1da177e4 1602 ret = tcp_v4_do_rcv(sk, skb);
1a2449a8
CL
1603 else
1604#endif
1605 {
1606 if (!tcp_prequeue(sk, skb))
1607 ret = tcp_v4_do_rcv(sk, skb);
1608 }
1da177e4
LT
1609 } else
1610 sk_add_backlog(sk, skb);
1611 bh_unlock_sock(sk);
1612
1613 sock_put(sk);
1614
1615 return ret;
1616
1617no_tcp_socket:
1618 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1619 goto discard_it;
1620
1621 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1622bad_packet:
63231bdd 1623 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1da177e4 1624 } else {
cfb6eeb4 1625 tcp_v4_send_reset(NULL, skb);
1da177e4
LT
1626 }
1627
1628discard_it:
1629 /* Discard frame. */
1630 kfree_skb(skb);
e905a9ed 1631 return 0;
1da177e4
LT
1632
1633discard_and_relse:
1634 sock_put(sk);
1635 goto discard_it;
1636
1637do_time_wait:
1638 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
9469c7b4 1639 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1640 goto discard_it;
1641 }
1642
1643 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
63231bdd 1644 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
9469c7b4 1645 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1646 goto discard_it;
1647 }
9469c7b4 1648 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1da177e4 1649 case TCP_TW_SYN: {
c346dca1 1650 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
c67499c0 1651 &tcp_hashinfo,
eddc9ec5 1652 iph->daddr, th->dest,
463c84b9 1653 inet_iif(skb));
1da177e4 1654 if (sk2) {
9469c7b4
YH
1655 inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
1656 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1657 sk = sk2;
1658 goto process;
1659 }
1660 /* Fall through to ACK */
1661 }
1662 case TCP_TW_ACK:
1663 tcp_v4_timewait_ack(sk, skb);
1664 break;
1665 case TCP_TW_RST:
1666 goto no_tcp_socket;
1667 case TCP_TW_SUCCESS:;
1668 }
1669 goto discard_it;
1670}
1671
1da177e4
LT
1672/* VJ's idea. Save last timestamp seen from this destination
1673 * and hold it at least for normal timewait interval to use for duplicate
1674 * segment detection in subsequent connections, before they enter synchronized
1675 * state.
1676 */
1677
1678int tcp_v4_remember_stamp(struct sock *sk)
1679{
1680 struct inet_sock *inet = inet_sk(sk);
1681 struct tcp_sock *tp = tcp_sk(sk);
1682 struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
1683 struct inet_peer *peer = NULL;
1684 int release_it = 0;
1685
1686 if (!rt || rt->rt_dst != inet->daddr) {
1687 peer = inet_getpeer(inet->daddr, 1);
1688 release_it = 1;
1689 } else {
1690 if (!rt->peer)
1691 rt_bind_peer(rt, 1);
1692 peer = rt->peer;
1693 }
1694
1695 if (peer) {
1696 if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
9d729f72 1697 (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
1da177e4
LT
1698 peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) {
1699 peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp;
1700 peer->tcp_ts = tp->rx_opt.ts_recent;
1701 }
1702 if (release_it)
1703 inet_putpeer(peer);
1704 return 1;
1705 }
1706
1707 return 0;
1708}
1709
8feaf0c0 1710int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
1da177e4 1711{
8feaf0c0 1712 struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);
1da177e4
LT
1713
1714 if (peer) {
8feaf0c0
ACM
1715 const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
1716
1717 if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
9d729f72 1718 (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
8feaf0c0
ACM
1719 peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) {
1720 peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp;
1721 peer->tcp_ts = tcptw->tw_ts_recent;
1da177e4
LT
1722 }
1723 inet_putpeer(peer);
1724 return 1;
1725 }
1726
1727 return 0;
1728}
1729
8292a17a 1730struct inet_connection_sock_af_ops ipv4_specific = {
543d9cfe
ACM
1731 .queue_xmit = ip_queue_xmit,
1732 .send_check = tcp_v4_send_check,
1733 .rebuild_header = inet_sk_rebuild_header,
1734 .conn_request = tcp_v4_conn_request,
1735 .syn_recv_sock = tcp_v4_syn_recv_sock,
1736 .remember_stamp = tcp_v4_remember_stamp,
1737 .net_header_len = sizeof(struct iphdr),
1738 .setsockopt = ip_setsockopt,
1739 .getsockopt = ip_getsockopt,
1740 .addr2sockaddr = inet_csk_addr2sockaddr,
1741 .sockaddr_len = sizeof(struct sockaddr_in),
ab1e0a13 1742 .bind_conflict = inet_csk_bind_conflict,
3fdadf7d 1743#ifdef CONFIG_COMPAT
543d9cfe
ACM
1744 .compat_setsockopt = compat_ip_setsockopt,
1745 .compat_getsockopt = compat_ip_getsockopt,
3fdadf7d 1746#endif
1da177e4
LT
1747};
1748
cfb6eeb4 1749#ifdef CONFIG_TCP_MD5SIG
b6332e6c 1750static struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
cfb6eeb4 1751 .md5_lookup = tcp_v4_md5_lookup,
49a72dfb 1752 .calc_md5_hash = tcp_v4_md5_hash_skb,
cfb6eeb4
YH
1753 .md5_add = tcp_v4_md5_add_func,
1754 .md5_parse = tcp_v4_parse_md5_keys,
cfb6eeb4 1755};
b6332e6c 1756#endif
cfb6eeb4 1757
1da177e4
LT
1758/* NOTE: A lot of things set to zero explicitly by call to
1759 * sk_alloc() so need not be done here.
1760 */
1761static int tcp_v4_init_sock(struct sock *sk)
1762{
6687e988 1763 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
1764 struct tcp_sock *tp = tcp_sk(sk);
1765
1766 skb_queue_head_init(&tp->out_of_order_queue);
1767 tcp_init_xmit_timers(sk);
1768 tcp_prequeue_init(tp);
1769
6687e988 1770 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1da177e4
LT
1771 tp->mdev = TCP_TIMEOUT_INIT;
1772
1773 /* So many TCP implementations out there (incorrectly) count the
1774 * initial SYN frame in their delayed-ACK and congestion control
1775 * algorithms that we must have the following bandaid to talk
1776 * efficiently to them. -DaveM
1777 */
1778 tp->snd_cwnd = 2;
1779
1780 /* See draft-stevens-tcpca-spec-01 for discussion of the
1781 * initialization of these values.
1782 */
1783 tp->snd_ssthresh = 0x7fffffff; /* Infinity */
1784 tp->snd_cwnd_clamp = ~0;
c1b4a7e6 1785 tp->mss_cache = 536;
1da177e4
LT
1786
1787 tp->reordering = sysctl_tcp_reordering;
6687e988 1788 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1da177e4
LT
1789
1790 sk->sk_state = TCP_CLOSE;
1791
1792 sk->sk_write_space = sk_stream_write_space;
1793 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1794
8292a17a 1795 icsk->icsk_af_ops = &ipv4_specific;
d83d8461 1796 icsk->icsk_sync_mss = tcp_sync_mss;
cfb6eeb4
YH
1797#ifdef CONFIG_TCP_MD5SIG
1798 tp->af_specific = &tcp_sock_ipv4_specific;
1799#endif
1da177e4
LT
1800
1801 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1802 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1803
1804 atomic_inc(&tcp_sockets_allocated);
1805
1806 return 0;
1807}
1808
7d06b2e0 1809void tcp_v4_destroy_sock(struct sock *sk)
1da177e4
LT
1810{
1811 struct tcp_sock *tp = tcp_sk(sk);
1812
1813 tcp_clear_xmit_timers(sk);
1814
6687e988 1815 tcp_cleanup_congestion_control(sk);
317a76f9 1816
1da177e4 1817	/* Clean up the write buffer. */
fe067e8a 1818 tcp_write_queue_purge(sk);
1da177e4
LT
1819
1820 /* Cleans up our, hopefully empty, out_of_order_queue. */
e905a9ed 1821 __skb_queue_purge(&tp->out_of_order_queue);
1da177e4 1822
cfb6eeb4
YH
1823#ifdef CONFIG_TCP_MD5SIG
1824 /* Clean up the MD5 key list, if any */
1825 if (tp->md5sig_info) {
1826 tcp_v4_clear_md5_list(sk);
1827 kfree(tp->md5sig_info);
1828 tp->md5sig_info = NULL;
1829 }
1830#endif
1831
1a2449a8
CL
1832#ifdef CONFIG_NET_DMA
1833 /* Cleans up our sk_async_wait_queue */
e905a9ed 1834 __skb_queue_purge(&sk->sk_async_wait_queue);
1a2449a8
CL
1835#endif
1836
1da177e4
LT
 1837	/* Clean up the prequeue; it should already be empty. */
1838 __skb_queue_purge(&tp->ucopy.prequeue);
1839
1840 /* Clean up a referenced TCP bind bucket. */
463c84b9 1841 if (inet_csk(sk)->icsk_bind_hash)
ab1e0a13 1842 inet_put_port(sk);
1da177e4
LT
1843
1844 /*
1845 * If sendmsg cached page exists, toss it.
1846 */
1847 if (sk->sk_sndmsg_page) {
1848 __free_page(sk->sk_sndmsg_page);
1849 sk->sk_sndmsg_page = NULL;
1850 }
1851
1852 atomic_dec(&tcp_sockets_allocated);
1da177e4
LT
1853}
1854
1855EXPORT_SYMBOL(tcp_v4_destroy_sock);
1856
1857#ifdef CONFIG_PROC_FS
1858/* Proc filesystem TCP sock list dumping. */
1859
8feaf0c0 1860static inline struct inet_timewait_sock *tw_head(struct hlist_head *head)
1da177e4
LT
1861{
1862 return hlist_empty(head) ? NULL :
8feaf0c0 1863 list_entry(head->first, struct inet_timewait_sock, tw_node);
1da177e4
LT
1864}
1865
8feaf0c0 1866static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
1da177e4
LT
1867{
1868 return tw->tw_node.next ?
1869 hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
1870}
1871
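/* Advance the /proc/net/tcp iterator over listening sockets.  Each listening
 * socket is followed by the open requests (SYN_RECV) hanging off its accept
 * queue, walked under syn_wait_lock.
 */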
1872static void *listening_get_next(struct seq_file *seq, void *cur)
1873{
463c84b9 1874 struct inet_connection_sock *icsk;
1da177e4
LT
1875 struct hlist_node *node;
1876 struct sock *sk = cur;
1877 struct tcp_iter_state* st = seq->private;
a4146b1b 1878 struct net *net = seq_file_net(seq);
1da177e4
LT
1879
1880 if (!sk) {
1881 st->bucket = 0;
6e04e021 1882 sk = sk_head(&tcp_hashinfo.listening_hash[0]);
1da177e4
LT
1883 goto get_sk;
1884 }
1885
1886 ++st->num;
1887
1888 if (st->state == TCP_SEQ_STATE_OPENREQ) {
60236fdd 1889 struct request_sock *req = cur;
1da177e4 1890
72a3effa 1891 icsk = inet_csk(st->syn_wait_sk);
1da177e4
LT
1892 req = req->dl_next;
1893 while (1) {
1894 while (req) {
f40c8174 1895 if (req->rsk_ops->family == st->family &&
878628fb 1896 net_eq(sock_net(req->sk), net)) {
1da177e4
LT
1897 cur = req;
1898 goto out;
1899 }
1900 req = req->dl_next;
1901 }
72a3effa 1902 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
1da177e4
LT
1903 break;
1904get_req:
463c84b9 1905 req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
1da177e4
LT
1906 }
1907 sk = sk_next(st->syn_wait_sk);
1908 st->state = TCP_SEQ_STATE_LISTENING;
463c84b9 1909 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1da177e4 1910 } else {
e905a9ed 1911 icsk = inet_csk(sk);
463c84b9
ACM
1912 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1913 if (reqsk_queue_len(&icsk->icsk_accept_queue))
1da177e4 1914 goto start_req;
463c84b9 1915 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1da177e4
LT
1916 sk = sk_next(sk);
1917 }
1918get_sk:
1919 sk_for_each_from(sk, node) {
878628fb 1920 if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
1da177e4
LT
1921 cur = sk;
1922 goto out;
1923 }
e905a9ed 1924 icsk = inet_csk(sk);
463c84b9
ACM
1925 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1926 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
1da177e4
LT
1927start_req:
1928 st->uid = sock_i_uid(sk);
1929 st->syn_wait_sk = sk;
1930 st->state = TCP_SEQ_STATE_OPENREQ;
1931 st->sbucket = 0;
1932 goto get_req;
1933 }
463c84b9 1934 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1da177e4 1935 }
0f7ff927 1936 if (++st->bucket < INET_LHTABLE_SIZE) {
6e04e021 1937 sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]);
1da177e4
LT
1938 goto get_sk;
1939 }
1940 cur = NULL;
1941out:
1942 return cur;
1943}
1944
1945static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1946{
1947 void *rc = listening_get_next(seq, NULL);
1948
1949 while (rc && *pos) {
1950 rc = listening_get_next(seq, rc);
1951 --*pos;
1952 }
1953 return rc;
1954}
1955
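/* Find the first established socket of the requested family, scanning each
 * ehash bucket's normal chain and then its time-wait chain.
 */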
1956static void *established_get_first(struct seq_file *seq)
1957{
1958 struct tcp_iter_state* st = seq->private;
a4146b1b 1959 struct net *net = seq_file_net(seq);
1da177e4
LT
1960 void *rc = NULL;
1961
6e04e021 1962 for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
1da177e4
LT
1963 struct sock *sk;
1964 struct hlist_node *node;
8feaf0c0 1965 struct inet_timewait_sock *tw;
230140cf 1966 rwlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1da177e4 1967
230140cf 1968 read_lock_bh(lock);
6e04e021 1969 sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
f40c8174 1970 if (sk->sk_family != st->family ||
878628fb 1971 !net_eq(sock_net(sk), net)) {
1da177e4
LT
1972 continue;
1973 }
1974 rc = sk;
1975 goto out;
1976 }
1977 st->state = TCP_SEQ_STATE_TIME_WAIT;
8feaf0c0 1978 inet_twsk_for_each(tw, node,
dbca9b27 1979 &tcp_hashinfo.ehash[st->bucket].twchain) {
28518fc1 1980 if (tw->tw_family != st->family ||
878628fb 1981 !net_eq(twsk_net(tw), net)) {
1da177e4
LT
1982 continue;
1983 }
1984 rc = tw;
1985 goto out;
1986 }
230140cf 1987 read_unlock_bh(lock);
1da177e4
LT
1988 st->state = TCP_SEQ_STATE_ESTABLISHED;
1989 }
1990out:
1991 return rc;
1992}
1993
1994static void *established_get_next(struct seq_file *seq, void *cur)
1995{
1996 struct sock *sk = cur;
8feaf0c0 1997 struct inet_timewait_sock *tw;
1da177e4
LT
1998 struct hlist_node *node;
1999 struct tcp_iter_state* st = seq->private;
a4146b1b 2000 struct net *net = seq_file_net(seq);
1da177e4
LT
2001
2002 ++st->num;
2003
2004 if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2005 tw = cur;
2006 tw = tw_next(tw);
2007get_tw:
878628fb 2008 while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
1da177e4
LT
2009 tw = tw_next(tw);
2010 }
2011 if (tw) {
2012 cur = tw;
2013 goto out;
2014 }
230140cf 2015 read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1da177e4
LT
2016 st->state = TCP_SEQ_STATE_ESTABLISHED;
2017
6e04e021 2018 if (++st->bucket < tcp_hashinfo.ehash_size) {
230140cf 2019 read_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
6e04e021 2020 sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
1da177e4
LT
2021 } else {
2022 cur = NULL;
2023 goto out;
2024 }
2025 } else
2026 sk = sk_next(sk);
2027
2028 sk_for_each_from(sk, node) {
878628fb 2029 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
1da177e4
LT
2030 goto found;
2031 }
2032
2033 st->state = TCP_SEQ_STATE_TIME_WAIT;
dbca9b27 2034 tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
1da177e4
LT
2035 goto get_tw;
2036found:
2037 cur = sk;
2038out:
2039 return cur;
2040}
2041
2042static void *established_get_idx(struct seq_file *seq, loff_t pos)
2043{
2044 void *rc = established_get_first(seq);
2045
2046 while (rc && pos) {
2047 rc = established_get_next(seq, rc);
2048 --pos;
7174259e 2049 }
1da177e4
LT
2050 return rc;
2051}
2052
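/* seq_file plumbing: tcp_get_idx() positions the iterator at entry @pos,
 * walking listening sockets first and then the established/time-wait tables.
 */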
2053static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2054{
2055 void *rc;
2056 struct tcp_iter_state* st = seq->private;
2057
f3f05f70 2058 inet_listen_lock(&tcp_hashinfo);
1da177e4
LT
2059 st->state = TCP_SEQ_STATE_LISTENING;
2060 rc = listening_get_idx(seq, &pos);
2061
2062 if (!rc) {
f3f05f70 2063 inet_listen_unlock(&tcp_hashinfo);
1da177e4
LT
2064 st->state = TCP_SEQ_STATE_ESTABLISHED;
2065 rc = established_get_idx(seq, pos);
2066 }
2067
2068 return rc;
2069}
2070
2071static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2072{
2073 struct tcp_iter_state* st = seq->private;
2074 st->state = TCP_SEQ_STATE_LISTENING;
2075 st->num = 0;
2076 return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2077}
2078
2079static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2080{
2081 void *rc = NULL;
2082 struct tcp_iter_state* st;
2083
2084 if (v == SEQ_START_TOKEN) {
2085 rc = tcp_get_idx(seq, 0);
2086 goto out;
2087 }
2088 st = seq->private;
2089
2090 switch (st->state) {
2091 case TCP_SEQ_STATE_OPENREQ:
2092 case TCP_SEQ_STATE_LISTENING:
2093 rc = listening_get_next(seq, v);
2094 if (!rc) {
f3f05f70 2095 inet_listen_unlock(&tcp_hashinfo);
1da177e4
LT
2096 st->state = TCP_SEQ_STATE_ESTABLISHED;
2097 rc = established_get_first(seq);
2098 }
2099 break;
2100 case TCP_SEQ_STATE_ESTABLISHED:
2101 case TCP_SEQ_STATE_TIME_WAIT:
2102 rc = established_get_next(seq, v);
2103 break;
2104 }
2105out:
2106 ++*pos;
2107 return rc;
2108}
2109
2110static void tcp_seq_stop(struct seq_file *seq, void *v)
2111{
2112 struct tcp_iter_state* st = seq->private;
2113
2114 switch (st->state) {
2115 case TCP_SEQ_STATE_OPENREQ:
2116 if (v) {
463c84b9
ACM
2117 struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2118 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1da177e4
LT
2119 }
2120 case TCP_SEQ_STATE_LISTENING:
2121 if (v != SEQ_START_TOKEN)
f3f05f70 2122 inet_listen_unlock(&tcp_hashinfo);
1da177e4
LT
2123 break;
2124 case TCP_SEQ_STATE_TIME_WAIT:
2125 case TCP_SEQ_STATE_ESTABLISHED:
2126 if (v)
230140cf 2127 read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1da177e4
LT
2128 break;
2129 }
2130}
2131
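/* Open a per-namespace seq_file and remember which address family to show. */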
2132static int tcp_seq_open(struct inode *inode, struct file *file)
2133{
2134 struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
1da177e4 2135 struct tcp_iter_state *s;
52d6f3f1 2136 int err;
1da177e4 2137
52d6f3f1
DL
2138 err = seq_open_net(inode, file, &afinfo->seq_ops,
2139 sizeof(struct tcp_iter_state));
2140 if (err < 0)
2141 return err;
f40c8174 2142
52d6f3f1 2143 s = ((struct seq_file *)file->private_data)->private;
1da177e4 2144 s->family = afinfo->family;
f40c8174
DL
2145 return 0;
2146}
2147
6f8b13bc 2148int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
1da177e4
LT
2149{
2150 int rc = 0;
2151 struct proc_dir_entry *p;
2152
68fcadd1
DL
2153 afinfo->seq_fops.open = tcp_seq_open;
2154 afinfo->seq_fops.read = seq_read;
2155 afinfo->seq_fops.llseek = seq_lseek;
2156 afinfo->seq_fops.release = seq_release_net;
7174259e 2157
9427c4b3
DL
2158 afinfo->seq_ops.start = tcp_seq_start;
2159 afinfo->seq_ops.next = tcp_seq_next;
2160 afinfo->seq_ops.stop = tcp_seq_stop;
2161
84841c3c
DL
2162 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2163 &afinfo->seq_fops, afinfo);
2164 if (!p)
1da177e4
LT
2165 rc = -ENOMEM;
2166 return rc;
2167}
2168
6f8b13bc 2169void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
1da177e4 2170{
6f8b13bc 2171 proc_net_remove(net, afinfo->name);
1da177e4
LT
2172}
2173
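/* Formatting helpers for one /proc/net/tcp record: get_openreq4() for a
 * pending connection request, get_tcp4_sock() for a full socket and
 * get_timewait4_sock() for a TIME-WAIT socket.
 */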
60236fdd 2174static void get_openreq4(struct sock *sk, struct request_sock *req,
5e659e4c 2175 struct seq_file *f, int i, int uid, int *len)
1da177e4 2176{
2e6599cb 2177 const struct inet_request_sock *ireq = inet_rsk(req);
1da177e4
LT
2178 int ttd = req->expires - jiffies;
2179
5e659e4c
PE
2180 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2181 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n",
1da177e4 2182 i,
2e6599cb 2183 ireq->loc_addr,
1da177e4 2184 ntohs(inet_sk(sk)->sport),
2e6599cb
ACM
2185 ireq->rmt_addr,
2186 ntohs(ireq->rmt_port),
1da177e4
LT
2187 TCP_SYN_RECV,
2188 0, 0, /* could print option size, but that is af dependent. */
2189 1, /* timers active (only the expire timer) */
2190 jiffies_to_clock_t(ttd),
2191 req->retrans,
2192 uid,
 2193 		0, /* non-standard timer */
2194 0, /* open_requests have no inode */
2195 atomic_read(&sk->sk_refcnt),
5e659e4c
PE
2196 req,
2197 len);
1da177e4
LT
2198}
2199
5e659e4c 2200static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
1da177e4
LT
2201{
2202 int timer_active;
2203 unsigned long timer_expires;
cf4c6bf8
IJ
2204 struct tcp_sock *tp = tcp_sk(sk);
2205 const struct inet_connection_sock *icsk = inet_csk(sk);
2206 struct inet_sock *inet = inet_sk(sk);
714e85be
AV
2207 __be32 dest = inet->daddr;
2208 __be32 src = inet->rcv_saddr;
1da177e4
LT
2209 __u16 destp = ntohs(inet->dport);
2210 __u16 srcp = ntohs(inet->sport);
2211
463c84b9 2212 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1da177e4 2213 timer_active = 1;
463c84b9
ACM
2214 timer_expires = icsk->icsk_timeout;
2215 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1da177e4 2216 timer_active = 4;
463c84b9 2217 timer_expires = icsk->icsk_timeout;
cf4c6bf8 2218 } else if (timer_pending(&sk->sk_timer)) {
1da177e4 2219 timer_active = 2;
cf4c6bf8 2220 timer_expires = sk->sk_timer.expires;
1da177e4
LT
2221 } else {
2222 timer_active = 0;
2223 timer_expires = jiffies;
2224 }
2225
5e659e4c 2226 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
7be87351 2227 "%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n",
cf4c6bf8 2228 i, src, srcp, dest, destp, sk->sk_state,
47da8ee6 2229 tp->write_seq - tp->snd_una,
cf4c6bf8 2230 sk->sk_state == TCP_LISTEN ? sk->sk_ack_backlog :
7174259e 2231 (tp->rcv_nxt - tp->copied_seq),
1da177e4
LT
2232 timer_active,
2233 jiffies_to_clock_t(timer_expires - jiffies),
463c84b9 2234 icsk->icsk_retransmits,
cf4c6bf8 2235 sock_i_uid(sk),
6687e988 2236 icsk->icsk_probes_out,
cf4c6bf8
IJ
2237 sock_i_ino(sk),
2238 atomic_read(&sk->sk_refcnt), sk,
7be87351
SH
2239 jiffies_to_clock_t(icsk->icsk_rto),
2240 jiffies_to_clock_t(icsk->icsk_ack.ato),
463c84b9 2241 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1da177e4 2242 tp->snd_cwnd,
5e659e4c
PE
2243 tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh,
2244 len);
1da177e4
LT
2245}
2246
7174259e 2247static void get_timewait4_sock(struct inet_timewait_sock *tw,
5e659e4c 2248 struct seq_file *f, int i, int *len)
1da177e4 2249{
23f33c2d 2250 __be32 dest, src;
1da177e4
LT
2251 __u16 destp, srcp;
2252 int ttd = tw->tw_ttd - jiffies;
2253
2254 if (ttd < 0)
2255 ttd = 0;
2256
2257 dest = tw->tw_daddr;
2258 src = tw->tw_rcv_saddr;
2259 destp = ntohs(tw->tw_dport);
2260 srcp = ntohs(tw->tw_sport);
2261
5e659e4c
PE
2262 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2263 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
1da177e4
LT
2264 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2265 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
5e659e4c 2266 atomic_read(&tw->tw_refcnt), tw, len);
1da177e4
LT
2267}
2268
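/* Every record is padded with spaces to TMPSZ - 1 characters (plus '\n'),
 * so /proc/net/tcp consists of fixed-width lines.
 */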
2269#define TMPSZ 150
2270
2271static int tcp4_seq_show(struct seq_file *seq, void *v)
2272{
2273 struct tcp_iter_state* st;
5e659e4c 2274 int len;
1da177e4
LT
2275
2276 if (v == SEQ_START_TOKEN) {
2277 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2278 " sl local_address rem_address st tx_queue "
2279 "rx_queue tr tm->when retrnsmt uid timeout "
2280 "inode");
2281 goto out;
2282 }
2283 st = seq->private;
2284
2285 switch (st->state) {
2286 case TCP_SEQ_STATE_LISTENING:
2287 case TCP_SEQ_STATE_ESTABLISHED:
5e659e4c 2288 get_tcp4_sock(v, seq, st->num, &len);
1da177e4
LT
2289 break;
2290 case TCP_SEQ_STATE_OPENREQ:
5e659e4c 2291 get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
1da177e4
LT
2292 break;
2293 case TCP_SEQ_STATE_TIME_WAIT:
5e659e4c 2294 get_timewait4_sock(v, seq, st->num, &len);
1da177e4
LT
2295 break;
2296 }
5e659e4c 2297 seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
1da177e4
LT
2298out:
2299 return 0;
2300}
2301
1da177e4 2302static struct tcp_seq_afinfo tcp4_seq_afinfo = {
1da177e4
LT
2303 .name = "tcp",
2304 .family = AF_INET,
5f4472c5
DL
2305 .seq_fops = {
2306 .owner = THIS_MODULE,
2307 },
9427c4b3
DL
2308 .seq_ops = {
2309 .show = tcp4_seq_show,
2310 },
1da177e4
LT
2311};
2312
757764f6
PE
2313static int tcp4_proc_init_net(struct net *net)
2314{
2315 return tcp_proc_register(net, &tcp4_seq_afinfo);
2316}
2317
2318static void tcp4_proc_exit_net(struct net *net)
2319{
2320 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2321}
2322
2323static struct pernet_operations tcp4_net_ops = {
2324 .init = tcp4_proc_init_net,
2325 .exit = tcp4_proc_exit_net,
2326};
2327
1da177e4
LT
2328int __init tcp4_proc_init(void)
2329{
757764f6 2330 return register_pernet_subsys(&tcp4_net_ops);
1da177e4
LT
2331}
2332
2333void tcp4_proc_exit(void)
2334{
757764f6 2335 unregister_pernet_subsys(&tcp4_net_ops);
1da177e4
LT
2336}
2337#endif /* CONFIG_PROC_FS */
2338
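/* The struct proto instance registered for IPv4 TCP: the generic socket
 * code reaches the TCP implementation through these operations.
 */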
2339struct proto tcp_prot = {
2340 .name = "TCP",
2341 .owner = THIS_MODULE,
2342 .close = tcp_close,
2343 .connect = tcp_v4_connect,
2344 .disconnect = tcp_disconnect,
463c84b9 2345 .accept = inet_csk_accept,
1da177e4
LT
2346 .ioctl = tcp_ioctl,
2347 .init = tcp_v4_init_sock,
2348 .destroy = tcp_v4_destroy_sock,
2349 .shutdown = tcp_shutdown,
2350 .setsockopt = tcp_setsockopt,
2351 .getsockopt = tcp_getsockopt,
1da177e4
LT
2352 .recvmsg = tcp_recvmsg,
2353 .backlog_rcv = tcp_v4_do_rcv,
ab1e0a13
ACM
2354 .hash = inet_hash,
2355 .unhash = inet_unhash,
2356 .get_port = inet_csk_get_port,
1da177e4
LT
2357 .enter_memory_pressure = tcp_enter_memory_pressure,
2358 .sockets_allocated = &tcp_sockets_allocated,
0a5578cf 2359 .orphan_count = &tcp_orphan_count,
1da177e4
LT
2360 .memory_allocated = &tcp_memory_allocated,
2361 .memory_pressure = &tcp_memory_pressure,
2362 .sysctl_mem = sysctl_tcp_mem,
2363 .sysctl_wmem = sysctl_tcp_wmem,
2364 .sysctl_rmem = sysctl_tcp_rmem,
2365 .max_header = MAX_TCP_HEADER,
2366 .obj_size = sizeof(struct tcp_sock),
6d6ee43e 2367 .twsk_prot = &tcp_timewait_sock_ops,
60236fdd 2368 .rsk_prot = &tcp_request_sock_ops,
39d8cda7 2369 .h.hashinfo = &tcp_hashinfo,
543d9cfe
ACM
2370#ifdef CONFIG_COMPAT
2371 .compat_setsockopt = compat_tcp_setsockopt,
2372 .compat_getsockopt = compat_tcp_getsockopt,
2373#endif
1da177e4
LT
2374};
2375
046ee902
DL
2376
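/* Per-namespace setup: create the TCP control socket used for sending
 * resets and ACKs that do not belong to any local socket.
 */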
2377static int __net_init tcp_sk_init(struct net *net)
2378{
2379 return inet_ctl_sock_create(&net->ipv4.tcp_sock,
2380 PF_INET, SOCK_RAW, IPPROTO_TCP, net);
2381}
2382
2383static void __net_exit tcp_sk_exit(struct net *net)
2384{
2385 inet_ctl_sock_destroy(net->ipv4.tcp_sock);
2386}
2387
2388static struct pernet_operations __net_initdata tcp_sk_ops = {
2389 .init = tcp_sk_init,
2390 .exit = tcp_sk_exit,
2391};
2392
9b0f976f 2393void __init tcp_v4_init(void)
1da177e4 2394{
046ee902 2395 if (register_pernet_device(&tcp_sk_ops))
1da177e4 2396 panic("Failed to create the TCP control socket.\n");
1da177e4
LT
2397}
2398
2399EXPORT_SYMBOL(ipv4_specific);
1da177e4 2400EXPORT_SYMBOL(tcp_hashinfo);
1da177e4 2401EXPORT_SYMBOL(tcp_prot);
1da177e4
LT
2402EXPORT_SYMBOL(tcp_v4_conn_request);
2403EXPORT_SYMBOL(tcp_v4_connect);
2404EXPORT_SYMBOL(tcp_v4_do_rcv);
1da177e4
LT
2405EXPORT_SYMBOL(tcp_v4_remember_stamp);
2406EXPORT_SYMBOL(tcp_v4_send_check);
2407EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
2408
2409#ifdef CONFIG_PROC_FS
2410EXPORT_SYMBOL(tcp_proc_register);
2411EXPORT_SYMBOL(tcp_proc_unregister);
2412#endif
1da177e4 2413EXPORT_SYMBOL(sysctl_tcp_low_latency);
1da177e4 2414