]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - net/ipv4/tcp.c
[ICSK]: Introduce inet_csk_clone
[mirror_ubuntu-bionic-kernel.git] / net / ipv4 / tcp.c
CommitLineData
1da177e4
LT
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Implementation of the Transmission Control Protocol(TCP).
7 *
8 * Version: $Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
9 *
02c30a84 10 * Authors: Ross Biro
1da177e4
LT
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
14 * Florian La Roche, <flla@stud.uni-sb.de>
15 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
16 * Linus Torvalds, <torvalds@cs.helsinki.fi>
17 * Alan Cox, <gw4pts@gw4pts.ampr.org>
18 * Matthew Dillon, <dillon@apollo.west.oic.com>
19 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
20 * Jorge Cwik, <jorge@laser.satlink.net>
21 *
22 * Fixes:
23 * Alan Cox : Numerous verify_area() calls
24 * Alan Cox : Set the ACK bit on a reset
25 * Alan Cox : Stopped it crashing if it closed while
26 * sk->inuse=1 and was trying to connect
27 * (tcp_err()).
28 * Alan Cox : All icmp error handling was broken
29 * pointers passed where wrong and the
30 * socket was looked up backwards. Nobody
31 * tested any icmp error code obviously.
32 * Alan Cox : tcp_err() now handled properly. It
33 * wakes people on errors. poll
34 * behaves and the icmp error race
35 * has gone by moving it into sock.c
36 * Alan Cox : tcp_send_reset() fixed to work for
37 * everything not just packets for
38 * unknown sockets.
39 * Alan Cox : tcp option processing.
40 * Alan Cox : Reset tweaked (still not 100%) [Had
41 * syn rule wrong]
42 * Herp Rosmanith : More reset fixes
43 * Alan Cox : No longer acks invalid rst frames.
44 * Acking any kind of RST is right out.
45 * Alan Cox : Sets an ignore me flag on an rst
46 * receive otherwise odd bits of prattle
47 * escape still
48 * Alan Cox : Fixed another acking RST frame bug.
49 * Should stop LAN workplace lockups.
50 * Alan Cox : Some tidyups using the new skb list
51 * facilities
52 * Alan Cox : sk->keepopen now seems to work
53 * Alan Cox : Pulls options out correctly on accepts
54 * Alan Cox : Fixed assorted sk->rqueue->next errors
55 * Alan Cox : PSH doesn't end a TCP read. Switched a
56 * bit to skb ops.
57 * Alan Cox : Tidied tcp_data to avoid a potential
58 * nasty.
59 * Alan Cox : Added some better commenting, as the
60 * tcp is hard to follow
61 * Alan Cox : Removed incorrect check for 20 * psh
62 * Michael O'Reilly : ack < copied bug fix.
63 * Johannes Stille : Misc tcp fixes (not all in yet).
64 * Alan Cox : FIN with no memory -> CRASH
65 * Alan Cox : Added socket option proto entries.
66 * Also added awareness of them to accept.
67 * Alan Cox : Added TCP options (SOL_TCP)
68 * Alan Cox : Switched wakeup calls to callbacks,
69 * so the kernel can layer network
70 * sockets.
71 * Alan Cox : Use ip_tos/ip_ttl settings.
72 * Alan Cox : Handle FIN (more) properly (we hope).
73 * Alan Cox : RST frames sent on unsynchronised
74 * state ack error.
75 * Alan Cox : Put in missing check for SYN bit.
76 * Alan Cox : Added tcp_select_window() aka NET2E
77 * window non shrink trick.
78 * Alan Cox : Added a couple of small NET2E timer
79 * fixes
80 * Charles Hedrick : TCP fixes
81 * Toomas Tamm : TCP window fixes
82 * Alan Cox : Small URG fix to rlogin ^C ack fight
83 * Charles Hedrick : Rewrote most of it to actually work
84 * Linus : Rewrote tcp_read() and URG handling
85 * completely
86 * Gerhard Koerting: Fixed some missing timer handling
87 * Matthew Dillon : Reworked TCP machine states as per RFC
88 * Gerhard Koerting: PC/TCP workarounds
89 * Adam Caldwell : Assorted timer/timing errors
90 * Matthew Dillon : Fixed another RST bug
91 * Alan Cox : Move to kernel side addressing changes.
92 * Alan Cox : Beginning work on TCP fastpathing
93 * (not yet usable)
94 * Arnt Gulbrandsen: Turbocharged tcp_check() routine.
95 * Alan Cox : TCP fast path debugging
96 * Alan Cox : Window clamping
97 * Michael Riepe : Bug in tcp_check()
98 * Matt Dillon : More TCP improvements and RST bug fixes
99 * Matt Dillon : Yet more small nasties remove from the
100 * TCP code (Be very nice to this man if
101 * tcp finally works 100%) 8)
102 * Alan Cox : BSD accept semantics.
103 * Alan Cox : Reset on closedown bug.
104 * Peter De Schrijver : ENOTCONN check missing in tcp_sendto().
105 * Michael Pall : Handle poll() after URG properly in
106 * all cases.
107 * Michael Pall : Undo the last fix in tcp_read_urg()
108 * (multi URG PUSH broke rlogin).
109 * Michael Pall : Fix the multi URG PUSH problem in
110 * tcp_readable(), poll() after URG
111 * works now.
112 * Michael Pall : recv(...,MSG_OOB) never blocks in the
113 * BSD api.
114 * Alan Cox : Changed the semantics of sk->socket to
115 * fix a race and a signal problem with
116 * accept() and async I/O.
117 * Alan Cox : Relaxed the rules on tcp_sendto().
118 * Yury Shevchuk : Really fixed accept() blocking problem.
119 * Craig I. Hagan : Allow for BSD compatible TIME_WAIT for
120 * clients/servers which listen in on
121 * fixed ports.
122 * Alan Cox : Cleaned the above up and shrank it to
123 * a sensible code size.
124 * Alan Cox : Self connect lockup fix.
125 * Alan Cox : No connect to multicast.
126 * Ross Biro : Close unaccepted children on master
127 * socket close.
128 * Alan Cox : Reset tracing code.
129 * Alan Cox : Spurious resets on shutdown.
130 * Alan Cox : Giant 15 minute/60 second timer error
131 * Alan Cox : Small whoops in polling before an
132 * accept.
133 * Alan Cox : Kept the state trace facility since
134 * it's handy for debugging.
135 * Alan Cox : More reset handler fixes.
136 * Alan Cox : Started rewriting the code based on
137 * the RFC's for other useful protocol
138 * references see: Comer, KA9Q NOS, and
139 * for a reference on the difference
140 * between specifications and how BSD
141 * works see the 4.4lite source.
142 * A.N.Kuznetsov : Don't time wait on completion of tidy
143 * close.
144 * Linus Torvalds : Fin/Shutdown & copied_seq changes.
145 * Linus Torvalds : Fixed BSD port reuse to work first syn
146 * Alan Cox : Reimplemented timers as per the RFC
147 * and using multiple timers for sanity.
148 * Alan Cox : Small bug fixes, and a lot of new
149 * comments.
150 * Alan Cox : Fixed dual reader crash by locking
151 * the buffers (much like datagram.c)
152 * Alan Cox : Fixed stuck sockets in probe. A probe
153 * now gets fed up of retrying without
154 * (even a no space) answer.
155 * Alan Cox : Extracted closing code better
156 * Alan Cox : Fixed the closing state machine to
157 * resemble the RFC.
158 * Alan Cox : More 'per spec' fixes.
159 * Jorge Cwik : Even faster checksumming.
160 * Alan Cox : tcp_data() doesn't ack illegal PSH
161 * only frames. At least one pc tcp stack
162 * generates them.
163 * Alan Cox : Cache last socket.
164 * Alan Cox : Per route irtt.
165 * Matt Day : poll()->select() match BSD precisely on error
166 * Alan Cox : New buffers
167 * Marc Tamsky : Various sk->prot->retransmits and
168 * sk->retransmits misupdating fixed.
169 * Fixed tcp_write_timeout: stuck close,
170 * and TCP syn retries gets used now.
171 * Mark Yarvis : In tcp_read_wakeup(), don't send an
172 * ack if state is TCP_CLOSED.
173 * Alan Cox : Look up device on a retransmit - routes may
174 * change. Doesn't yet cope with MSS shrink right
175 * but it's a start!
176 * Marc Tamsky : Closing in closing fixes.
177 * Mike Shaver : RFC1122 verifications.
178 * Alan Cox : rcv_saddr errors.
179 * Alan Cox : Block double connect().
180 * Alan Cox : Small hooks for enSKIP.
181 * Alexey Kuznetsov: Path MTU discovery.
182 * Alan Cox : Support soft errors.
183 * Alan Cox : Fix MTU discovery pathological case
184 * when the remote claims no mtu!
185 * Marc Tamsky : TCP_CLOSE fix.
186 * Colin (G3TNE) : Send a reset on syn ack replies in
187 * window but wrong (fixes NT lpd problems)
188 * Pedro Roque : Better TCP window handling, delayed ack.
189 * Joerg Reuter : No modification of locked buffers in
190 * tcp_do_retransmit()
191 * Eric Schenk : Changed receiver side silly window
192 * avoidance algorithm to BSD style
193 * algorithm. This doubles throughput
194 * against machines running Solaris,
195 * and seems to result in general
196 * improvement.
197 * Stefan Magdalinski : adjusted tcp_readable() to fix FIONREAD
198 * Willy Konynenberg : Transparent proxying support.
199 * Mike McLagan : Routing by source
200 * Keith Owens : Do proper merging with partial SKB's in
201 * tcp_do_sendmsg to avoid burstiness.
202 * Eric Schenk : Fix fast close down bug with
203 * shutdown() followed by close().
204 * Andi Kleen : Make poll agree with SIGIO
205 * Salvatore Sanfilippo : Support SO_LINGER with linger == 1 and
206 * lingertime == 0 (RFC 793 ABORT Call)
207 * Hirokazu Takahashi : Use copy_from_user() instead of
208 * csum_and_copy_from_user() if possible.
209 *
210 * This program is free software; you can redistribute it and/or
211 * modify it under the terms of the GNU General Public License
212 * as published by the Free Software Foundation; either version
213 * 2 of the License, or(at your option) any later version.
214 *
215 * Description of States:
216 *
217 * TCP_SYN_SENT sent a connection request, waiting for ack
218 *
219 * TCP_SYN_RECV received a connection request, sent ack,
220 * waiting for final ack in three-way handshake.
221 *
222 * TCP_ESTABLISHED connection established
223 *
224 * TCP_FIN_WAIT1 our side has shutdown, waiting to complete
225 * transmission of remaining buffered data
226 *
227 * TCP_FIN_WAIT2 all buffered data sent, waiting for remote
228 * to shutdown
229 *
230 * TCP_CLOSING both sides have shutdown but we still have
231 * data we have to finish sending
232 *
233 * TCP_TIME_WAIT timeout to catch resent junk before entering
234 * closed, can only be entered from FIN_WAIT2
235 * or CLOSING. Required because the other end
236 * may not have gotten our last ACK causing it
237 * to retransmit the data packet (which we ignore)
238 *
239 * TCP_CLOSE_WAIT remote side has shutdown and is waiting for
240 * us to finish writing our data and to shutdown
241 * (we have to close() to move on to LAST_ACK)
242 *
243 * TCP_LAST_ACK out side has shutdown after remote has
244 * shutdown. There may still be data in our
245 * buffer that we have to finish sending
246 *
247 * TCP_CLOSE socket is finished
248 */
249
250#include <linux/config.h>
251#include <linux/module.h>
252#include <linux/types.h>
253#include <linux/fcntl.h>
254#include <linux/poll.h>
255#include <linux/init.h>
256#include <linux/smp_lock.h>
257#include <linux/fs.h>
258#include <linux/random.h>
259#include <linux/bootmem.h>
260
261#include <net/icmp.h>
262#include <net/tcp.h>
263#include <net/xfrm.h>
264#include <net/ip.h>
265
266
267#include <asm/uaccess.h>
268#include <asm/ioctls.h>
269
270int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
271
272DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics);
273
1da177e4
LT
274atomic_t tcp_orphan_count = ATOMIC_INIT(0);
275
276int sysctl_tcp_mem[3];
277int sysctl_tcp_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
278int sysctl_tcp_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };
279
280EXPORT_SYMBOL(sysctl_tcp_mem);
281EXPORT_SYMBOL(sysctl_tcp_rmem);
282EXPORT_SYMBOL(sysctl_tcp_wmem);
283
284atomic_t tcp_memory_allocated; /* Current allocated memory. */
285atomic_t tcp_sockets_allocated; /* Current number of TCP sockets. */
286
287EXPORT_SYMBOL(tcp_memory_allocated);
288EXPORT_SYMBOL(tcp_sockets_allocated);
289
290/*
291 * Pressure flag: try to collapse.
292 * Technical note: it is used by multiple contexts non atomically.
293 * All the sk_stream_mem_schedule() is of this nature: accounting
294 * is strict, actions are advisory and have some latency.
295 */
296int tcp_memory_pressure;
297
298EXPORT_SYMBOL(tcp_memory_pressure);
299
300void tcp_enter_memory_pressure(void)
301{
302 if (!tcp_memory_pressure) {
303 NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
304 tcp_memory_pressure = 1;
305 }
306}
307
308EXPORT_SYMBOL(tcp_enter_memory_pressure);
309
310/*
311 * LISTEN is a special case for poll..
312 */
313static __inline__ unsigned int tcp_listen_poll(struct sock *sk,
314 poll_table *wait)
315{
463c84b9 316 return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ? (POLLIN | POLLRDNORM) : 0;
1da177e4
LT
317}
318
319/*
320 * Wait for a TCP event.
321 *
322 * Note that we don't need to lock the socket, as the upper poll layers
323 * take care of normal races (between the test and the event) and we don't
324 * go look at any of the socket buffers directly.
325 */
326unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
327{
328 unsigned int mask;
329 struct sock *sk = sock->sk;
330 struct tcp_sock *tp = tcp_sk(sk);
331
332 poll_wait(file, sk->sk_sleep, wait);
333 if (sk->sk_state == TCP_LISTEN)
334 return tcp_listen_poll(sk, wait);
335
336 /* Socket is not locked. We are protected from async events
337 by poll logic and correct handling of state changes
338 made by another threads is impossible in any case.
339 */
340
341 mask = 0;
342 if (sk->sk_err)
343 mask = POLLERR;
344
345 /*
346 * POLLHUP is certainly not done right. But poll() doesn't
347 * have a notion of HUP in just one direction, and for a
348 * socket the read side is more interesting.
349 *
350 * Some poll() documentation says that POLLHUP is incompatible
351 * with the POLLOUT/POLLWR flags, so somebody should check this
352 * all. But careful, it tends to be safer to return too many
353 * bits than too few, and you can easily break real applications
354 * if you don't tell them that something has hung up!
355 *
356 * Check-me.
357 *
358 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
359 * our fs/select.c). It means that after we received EOF,
360 * poll always returns immediately, making impossible poll() on write()
361 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
362 * if and only if shutdown has been made in both directions.
363 * Actually, it is interesting to look how Solaris and DUX
364 * solve this dilemma. I would prefer, if PULLHUP were maskable,
365 * then we could set it on SND_SHUTDOWN. BTW examples given
366 * in Stevens' books assume exactly this behaviour, it explains
367 * why PULLHUP is incompatible with POLLOUT. --ANK
368 *
369 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
370 * blocking on fresh not-connected or disconnected socket. --ANK
371 */
372 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
373 mask |= POLLHUP;
374 if (sk->sk_shutdown & RCV_SHUTDOWN)
375 mask |= POLLIN | POLLRDNORM;
376
377 /* Connected? */
378 if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
379 /* Potential race condition. If read of tp below will
380 * escape above sk->sk_state, we can be illegally awaken
381 * in SYN_* states. */
382 if ((tp->rcv_nxt != tp->copied_seq) &&
383 (tp->urg_seq != tp->copied_seq ||
384 tp->rcv_nxt != tp->copied_seq + 1 ||
385 sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
386 mask |= POLLIN | POLLRDNORM;
387
388 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
389 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
390 mask |= POLLOUT | POLLWRNORM;
391 } else { /* send SIGIO later */
392 set_bit(SOCK_ASYNC_NOSPACE,
393 &sk->sk_socket->flags);
394 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
395
396 /* Race breaker. If space is freed after
397 * wspace test but before the flags are set,
398 * IO signal will be lost.
399 */
400 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
401 mask |= POLLOUT | POLLWRNORM;
402 }
403 }
404
405 if (tp->urg_data & TCP_URG_VALID)
406 mask |= POLLPRI;
407 }
408 return mask;
409}
410
411int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
412{
413 struct tcp_sock *tp = tcp_sk(sk);
414 int answ;
415
416 switch (cmd) {
417 case SIOCINQ:
418 if (sk->sk_state == TCP_LISTEN)
419 return -EINVAL;
420
421 lock_sock(sk);
422 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
423 answ = 0;
424 else if (sock_flag(sk, SOCK_URGINLINE) ||
425 !tp->urg_data ||
426 before(tp->urg_seq, tp->copied_seq) ||
427 !before(tp->urg_seq, tp->rcv_nxt)) {
428 answ = tp->rcv_nxt - tp->copied_seq;
429
430 /* Subtract 1, if FIN is in queue. */
431 if (answ && !skb_queue_empty(&sk->sk_receive_queue))
432 answ -=
433 ((struct sk_buff *)sk->sk_receive_queue.prev)->h.th->fin;
434 } else
435 answ = tp->urg_seq - tp->copied_seq;
436 release_sock(sk);
437 break;
438 case SIOCATMARK:
439 answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
440 break;
441 case SIOCOUTQ:
442 if (sk->sk_state == TCP_LISTEN)
443 return -EINVAL;
444
445 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
446 answ = 0;
447 else
448 answ = tp->write_seq - tp->snd_una;
449 break;
450 default:
451 return -ENOIOCTLCMD;
452 };
453
454 return put_user(answ, (int __user *)arg);
455}
456
457
458int tcp_listen_start(struct sock *sk)
459{
460 struct inet_sock *inet = inet_sk(sk);
463c84b9
ACM
461 struct inet_connection_sock *icsk = inet_csk(sk);
462 int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, TCP_SYNQ_HSIZE);
0e87506f
ACM
463
464 if (rc != 0)
465 return rc;
1da177e4
LT
466
467 sk->sk_max_ack_backlog = 0;
468 sk->sk_ack_backlog = 0;
463c84b9 469 inet_csk_delack_init(sk);
1da177e4 470
1da177e4
LT
471 /* There is race window here: we announce ourselves listening,
472 * but this transition is still not validated by get_port().
473 * It is OK, because this socket enters to hash table only
474 * after validation is complete.
475 */
476 sk->sk_state = TCP_LISTEN;
477 if (!sk->sk_prot->get_port(sk, inet->num)) {
478 inet->sport = htons(inet->num);
479
480 sk_dst_reset(sk);
481 sk->sk_prot->hash(sk);
482
483 return 0;
484 }
485
486 sk->sk_state = TCP_CLOSE;
463c84b9 487 __reqsk_queue_destroy(&icsk->icsk_accept_queue);
1da177e4
LT
488 return -EADDRINUSE;
489}
490
491/*
492 * This routine closes sockets which have been at least partially
493 * opened, but not yet accepted.
494 */
495
496static void tcp_listen_stop (struct sock *sk)
497{
463c84b9 498 struct inet_connection_sock *icsk = inet_csk(sk);
0e87506f 499 struct request_sock *acc_req;
60236fdd 500 struct request_sock *req;
1da177e4 501
463c84b9 502 inet_csk_delete_keepalive_timer(sk);
1da177e4
LT
503
504 /* make all the listen_opt local to us */
463c84b9 505 acc_req = reqsk_queue_yank_acceptq(&icsk->icsk_accept_queue);
1da177e4 506
83e3609e
ACM
507 /* Following specs, it would be better either to send FIN
508 * (and enter FIN-WAIT-1, it is normal close)
509 * or to send active reset (abort).
510 * Certainly, it is pretty dangerous while synflood, but it is
511 * bad justification for our negligence 8)
512 * To be honest, we are not able to make either
513 * of the variants now. --ANK
514 */
463c84b9 515 reqsk_queue_destroy(&icsk->icsk_accept_queue);
1da177e4
LT
516
517 while ((req = acc_req) != NULL) {
518 struct sock *child = req->sk;
519
520 acc_req = req->dl_next;
521
522 local_bh_disable();
523 bh_lock_sock(child);
524 BUG_TRAP(!sock_owned_by_user(child));
525 sock_hold(child);
526
527 tcp_disconnect(child, O_NONBLOCK);
528
529 sock_orphan(child);
530
531 atomic_inc(&tcp_orphan_count);
532
533 tcp_destroy_sock(child);
534
535 bh_unlock_sock(child);
536 local_bh_enable();
537 sock_put(child);
538
539 sk_acceptq_removed(sk);
60236fdd 540 __reqsk_free(req);
1da177e4
LT
541 }
542 BUG_TRAP(!sk->sk_ack_backlog);
543}
544
545static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
546{
547 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
548 tp->pushed_seq = tp->write_seq;
549}
550
551static inline int forced_push(struct tcp_sock *tp)
552{
553 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
554}
555
556static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
557 struct sk_buff *skb)
558{
559 skb->csum = 0;
560 TCP_SKB_CB(skb)->seq = tp->write_seq;
561 TCP_SKB_CB(skb)->end_seq = tp->write_seq;
562 TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
563 TCP_SKB_CB(skb)->sacked = 0;
564 skb_header_release(skb);
565 __skb_queue_tail(&sk->sk_write_queue, skb);
566 sk_charge_skb(sk, skb);
567 if (!sk->sk_send_head)
568 sk->sk_send_head = skb;
89ebd197 569 if (tp->nonagle & TCP_NAGLE_PUSH)
1da177e4
LT
570 tp->nonagle &= ~TCP_NAGLE_PUSH;
571}
572
573static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
574 struct sk_buff *skb)
575{
576 if (flags & MSG_OOB) {
577 tp->urg_mode = 1;
578 tp->snd_up = tp->write_seq;
579 TCP_SKB_CB(skb)->sacked |= TCPCB_URG;
580 }
581}
582
583static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags,
584 int mss_now, int nonagle)
585{
586 if (sk->sk_send_head) {
587 struct sk_buff *skb = sk->sk_write_queue.prev;
588 if (!(flags & MSG_MORE) || forced_push(tp))
589 tcp_mark_push(tp, skb);
590 tcp_mark_urg(tp, flags, skb);
591 __tcp_push_pending_frames(sk, tp, mss_now,
592 (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
593 }
594}
595
596static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
597 size_t psize, int flags)
598{
599 struct tcp_sock *tp = tcp_sk(sk);
c1b4a7e6 600 int mss_now, size_goal;
1da177e4
LT
601 int err;
602 ssize_t copied;
603 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
604
605 /* Wait for a connection to finish. */
606 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
607 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
608 goto out_err;
609
610 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
611
612 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
c1b4a7e6 613 size_goal = tp->xmit_size_goal;
1da177e4
LT
614 copied = 0;
615
616 err = -EPIPE;
617 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
618 goto do_error;
619
620 while (psize > 0) {
621 struct sk_buff *skb = sk->sk_write_queue.prev;
622 struct page *page = pages[poffset / PAGE_SIZE];
623 int copy, i, can_coalesce;
624 int offset = poffset % PAGE_SIZE;
625 int size = min_t(size_t, psize, PAGE_SIZE - offset);
626
c1b4a7e6 627 if (!sk->sk_send_head || (copy = size_goal - skb->len) <= 0) {
1da177e4
LT
628new_segment:
629 if (!sk_stream_memory_free(sk))
630 goto wait_for_sndbuf;
631
632 skb = sk_stream_alloc_pskb(sk, 0, 0,
633 sk->sk_allocation);
634 if (!skb)
635 goto wait_for_memory;
636
637 skb_entail(sk, tp, skb);
c1b4a7e6 638 copy = size_goal;
1da177e4
LT
639 }
640
641 if (copy > size)
642 copy = size;
643
644 i = skb_shinfo(skb)->nr_frags;
645 can_coalesce = skb_can_coalesce(skb, i, page, offset);
646 if (!can_coalesce && i >= MAX_SKB_FRAGS) {
647 tcp_mark_push(tp, skb);
648 goto new_segment;
649 }
650 if (sk->sk_forward_alloc < copy &&
651 !sk_stream_mem_schedule(sk, copy, 0))
652 goto wait_for_memory;
653
654 if (can_coalesce) {
655 skb_shinfo(skb)->frags[i - 1].size += copy;
656 } else {
657 get_page(page);
658 skb_fill_page_desc(skb, i, page, offset, copy);
659 }
660
661 skb->len += copy;
662 skb->data_len += copy;
663 skb->truesize += copy;
664 sk->sk_wmem_queued += copy;
665 sk->sk_forward_alloc -= copy;
666 skb->ip_summed = CHECKSUM_HW;
667 tp->write_seq += copy;
668 TCP_SKB_CB(skb)->end_seq += copy;
669 skb_shinfo(skb)->tso_segs = 0;
670
671 if (!copied)
672 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
673
674 copied += copy;
675 poffset += copy;
676 if (!(psize -= copy))
677 goto out;
678
c1b4a7e6 679 if (skb->len < mss_now || (flags & MSG_OOB))
1da177e4
LT
680 continue;
681
682 if (forced_push(tp)) {
683 tcp_mark_push(tp, skb);
684 __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
685 } else if (skb == sk->sk_send_head)
686 tcp_push_one(sk, mss_now);
687 continue;
688
689wait_for_sndbuf:
690 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
691wait_for_memory:
692 if (copied)
693 tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
694
695 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
696 goto do_error;
697
698 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
c1b4a7e6 699 size_goal = tp->xmit_size_goal;
1da177e4
LT
700 }
701
702out:
703 if (copied)
704 tcp_push(sk, tp, flags, mss_now, tp->nonagle);
705 return copied;
706
707do_error:
708 if (copied)
709 goto out;
710out_err:
711 return sk_stream_error(sk, flags, err);
712}
713
714ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
715 size_t size, int flags)
716{
717 ssize_t res;
718 struct sock *sk = sock->sk;
719
720#define TCP_ZC_CSUM_FLAGS (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
721
722 if (!(sk->sk_route_caps & NETIF_F_SG) ||
723 !(sk->sk_route_caps & TCP_ZC_CSUM_FLAGS))
724 return sock_no_sendpage(sock, page, offset, size, flags);
725
726#undef TCP_ZC_CSUM_FLAGS
727
728 lock_sock(sk);
729 TCP_CHECK_TIMER(sk);
730 res = do_tcp_sendpages(sk, &page, offset, size, flags);
731 TCP_CHECK_TIMER(sk);
732 release_sock(sk);
733 return res;
734}
735
736#define TCP_PAGE(sk) (sk->sk_sndmsg_page)
737#define TCP_OFF(sk) (sk->sk_sndmsg_off)
738
739static inline int select_size(struct sock *sk, struct tcp_sock *tp)
740{
c1b4a7e6 741 int tmp = tp->mss_cache;
1da177e4 742
b4e26f5e
DM
743 if (sk->sk_route_caps & NETIF_F_SG) {
744 if (sk->sk_route_caps & NETIF_F_TSO)
745 tmp = 0;
746 else {
747 int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
748
749 if (tmp >= pgbreak &&
750 tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
751 tmp = pgbreak;
752 }
753 }
1da177e4 754
1da177e4
LT
755 return tmp;
756}
757
758int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
759 size_t size)
760{
761 struct iovec *iov;
762 struct tcp_sock *tp = tcp_sk(sk);
763 struct sk_buff *skb;
764 int iovlen, flags;
c1b4a7e6 765 int mss_now, size_goal;
1da177e4
LT
766 int err, copied;
767 long timeo;
768
769 lock_sock(sk);
770 TCP_CHECK_TIMER(sk);
771
772 flags = msg->msg_flags;
773 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
774
775 /* Wait for a connection to finish. */
776 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
777 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
778 goto out_err;
779
780 /* This should be in poll */
781 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
782
783 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
c1b4a7e6 784 size_goal = tp->xmit_size_goal;
1da177e4
LT
785
786 /* Ok commence sending. */
787 iovlen = msg->msg_iovlen;
788 iov = msg->msg_iov;
789 copied = 0;
790
791 err = -EPIPE;
792 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
793 goto do_error;
794
795 while (--iovlen >= 0) {
796 int seglen = iov->iov_len;
797 unsigned char __user *from = iov->iov_base;
798
799 iov++;
800
801 while (seglen > 0) {
802 int copy;
803
804 skb = sk->sk_write_queue.prev;
805
806 if (!sk->sk_send_head ||
c1b4a7e6 807 (copy = size_goal - skb->len) <= 0) {
1da177e4
LT
808
809new_segment:
810 /* Allocate new segment. If the interface is SG,
811 * allocate skb fitting to single page.
812 */
813 if (!sk_stream_memory_free(sk))
814 goto wait_for_sndbuf;
815
816 skb = sk_stream_alloc_pskb(sk, select_size(sk, tp),
817 0, sk->sk_allocation);
818 if (!skb)
819 goto wait_for_memory;
820
821 /*
822 * Check whether we can use HW checksum.
823 */
824 if (sk->sk_route_caps &
825 (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM |
826 NETIF_F_HW_CSUM))
827 skb->ip_summed = CHECKSUM_HW;
828
829 skb_entail(sk, tp, skb);
c1b4a7e6 830 copy = size_goal;
1da177e4
LT
831 }
832
833 /* Try to append data to the end of skb. */
834 if (copy > seglen)
835 copy = seglen;
836
837 /* Where to copy to? */
838 if (skb_tailroom(skb) > 0) {
839 /* We have some space in skb head. Superb! */
840 if (copy > skb_tailroom(skb))
841 copy = skb_tailroom(skb);
842 if ((err = skb_add_data(skb, from, copy)) != 0)
843 goto do_fault;
844 } else {
845 int merge = 0;
846 int i = skb_shinfo(skb)->nr_frags;
847 struct page *page = TCP_PAGE(sk);
848 int off = TCP_OFF(sk);
849
850 if (skb_can_coalesce(skb, i, page, off) &&
851 off != PAGE_SIZE) {
852 /* We can extend the last page
853 * fragment. */
854 merge = 1;
855 } else if (i == MAX_SKB_FRAGS ||
856 (!i &&
857 !(sk->sk_route_caps & NETIF_F_SG))) {
858 /* Need to add new fragment and cannot
859 * do this because interface is non-SG,
860 * or because all the page slots are
861 * busy. */
862 tcp_mark_push(tp, skb);
863 goto new_segment;
864 } else if (page) {
1da177e4
LT
865 if (off == PAGE_SIZE) {
866 put_page(page);
867 TCP_PAGE(sk) = page = NULL;
868 }
869 }
870
871 if (!page) {
872 /* Allocate new cache page. */
873 if (!(page = sk_stream_alloc_page(sk)))
874 goto wait_for_memory;
875 off = 0;
876 }
877
878 if (copy > PAGE_SIZE - off)
879 copy = PAGE_SIZE - off;
880
881 /* Time to copy data. We are close to
882 * the end! */
883 err = skb_copy_to_page(sk, from, skb, page,
884 off, copy);
885 if (err) {
886 /* If this page was new, give it to the
887 * socket so it does not get leaked.
888 */
889 if (!TCP_PAGE(sk)) {
890 TCP_PAGE(sk) = page;
891 TCP_OFF(sk) = 0;
892 }
893 goto do_error;
894 }
895
896 /* Update the skb. */
897 if (merge) {
898 skb_shinfo(skb)->frags[i - 1].size +=
899 copy;
900 } else {
901 skb_fill_page_desc(skb, i, page, off, copy);
902 if (TCP_PAGE(sk)) {
903 get_page(page);
904 } else if (off + copy < PAGE_SIZE) {
905 get_page(page);
906 TCP_PAGE(sk) = page;
907 }
908 }
909
910 TCP_OFF(sk) = off + copy;
911 }
912
913 if (!copied)
914 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
915
916 tp->write_seq += copy;
917 TCP_SKB_CB(skb)->end_seq += copy;
918 skb_shinfo(skb)->tso_segs = 0;
919
920 from += copy;
921 copied += copy;
922 if ((seglen -= copy) == 0 && iovlen == 0)
923 goto out;
924
c1b4a7e6 925 if (skb->len < mss_now || (flags & MSG_OOB))
1da177e4
LT
926 continue;
927
928 if (forced_push(tp)) {
929 tcp_mark_push(tp, skb);
930 __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
931 } else if (skb == sk->sk_send_head)
932 tcp_push_one(sk, mss_now);
933 continue;
934
935wait_for_sndbuf:
936 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
937wait_for_memory:
938 if (copied)
939 tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
940
941 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
942 goto do_error;
943
944 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
c1b4a7e6 945 size_goal = tp->xmit_size_goal;
1da177e4
LT
946 }
947 }
948
949out:
950 if (copied)
951 tcp_push(sk, tp, flags, mss_now, tp->nonagle);
952 TCP_CHECK_TIMER(sk);
953 release_sock(sk);
954 return copied;
955
956do_fault:
957 if (!skb->len) {
958 if (sk->sk_send_head == skb)
959 sk->sk_send_head = NULL;
8728b834 960 __skb_unlink(skb, &sk->sk_write_queue);
1da177e4
LT
961 sk_stream_free_skb(sk, skb);
962 }
963
964do_error:
965 if (copied)
966 goto out;
967out_err:
968 err = sk_stream_error(sk, flags, err);
969 TCP_CHECK_TIMER(sk);
970 release_sock(sk);
971 return err;
972}
973
974/*
975 * Handle reading urgent data. BSD has very simple semantics for
976 * this, no blocking and very strange errors 8)
977 */
978
979static int tcp_recv_urg(struct sock *sk, long timeo,
980 struct msghdr *msg, int len, int flags,
981 int *addr_len)
982{
983 struct tcp_sock *tp = tcp_sk(sk);
984
985 /* No URG data to read. */
986 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
987 tp->urg_data == TCP_URG_READ)
988 return -EINVAL; /* Yes this is right ! */
989
990 if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
991 return -ENOTCONN;
992
993 if (tp->urg_data & TCP_URG_VALID) {
994 int err = 0;
995 char c = tp->urg_data;
996
997 if (!(flags & MSG_PEEK))
998 tp->urg_data = TCP_URG_READ;
999
1000 /* Read urgent data. */
1001 msg->msg_flags |= MSG_OOB;
1002
1003 if (len > 0) {
1004 if (!(flags & MSG_TRUNC))
1005 err = memcpy_toiovec(msg->msg_iov, &c, 1);
1006 len = 1;
1007 } else
1008 msg->msg_flags |= MSG_TRUNC;
1009
1010 return err ? -EFAULT : len;
1011 }
1012
1013 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
1014 return 0;
1015
1016 /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
1017 * the available implementations agree in this case:
1018 * this call should never block, independent of the
1019 * blocking state of the socket.
1020 * Mike <pall@rz.uni-karlsruhe.de>
1021 */
1022 return -EAGAIN;
1023}
1024
1025/* Clean up the receive buffer for full frames taken by the user,
1026 * then send an ACK if necessary. COPIED is the number of bytes
1027 * tcp_recvmsg has given to the user so far, it speeds up the
1028 * calculation of whether or not we must ACK for the sake of
1029 * a window update.
1030 */
1031static void cleanup_rbuf(struct sock *sk, int copied)
1032{
1033 struct tcp_sock *tp = tcp_sk(sk);
1034 int time_to_ack = 0;
1035
1036#if TCP_DEBUG
1037 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1038
1039 BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
1040#endif
1041
463c84b9
ACM
1042 if (inet_csk_ack_scheduled(sk)) {
1043 const struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
1044 /* Delayed ACKs frequently hit locked sockets during bulk
1045 * receive. */
463c84b9 1046 if (icsk->icsk_ack.blocked ||
1da177e4 1047 /* Once-per-two-segments ACK was not sent by tcp_input.c */
463c84b9 1048 tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
1da177e4
LT
1049 /*
1050 * If this read emptied read buffer, we send ACK, if
1051 * connection is not bidirectional, user drained
1052 * receive buffer and there was a small segment
1053 * in queue.
1054 */
463c84b9
ACM
1055 (copied > 0 && (icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
1056 !icsk->icsk_ack.pingpong && !atomic_read(&sk->sk_rmem_alloc)))
1da177e4
LT
1057 time_to_ack = 1;
1058 }
1059
1060 /* We send an ACK if we can now advertise a non-zero window
1061 * which has been raised "significantly".
1062 *
1063 * Even if window raised up to infinity, do not send window open ACK
1064 * in states, where we will not receive more. It is useless.
1065 */
1066 if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
1067 __u32 rcv_window_now = tcp_receive_window(tp);
1068
1069 /* Optimize, __tcp_select_window() is not cheap. */
1070 if (2*rcv_window_now <= tp->window_clamp) {
1071 __u32 new_window = __tcp_select_window(sk);
1072
1073 /* Send ACK now, if this read freed lots of space
1074 * in our buffer. Certainly, new_window is new window.
1075 * We can advertise it now, if it is not less than current one.
1076 * "Lots" means "at least twice" here.
1077 */
1078 if (new_window && new_window >= 2 * rcv_window_now)
1079 time_to_ack = 1;
1080 }
1081 }
1082 if (time_to_ack)
1083 tcp_send_ack(sk);
1084}
1085
1086static void tcp_prequeue_process(struct sock *sk)
1087{
1088 struct sk_buff *skb;
1089 struct tcp_sock *tp = tcp_sk(sk);
1090
b03efcfb 1091 NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);
1da177e4
LT
1092
1093 /* RX process wants to run with disabled BHs, though it is not
1094 * necessary */
1095 local_bh_disable();
1096 while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
1097 sk->sk_backlog_rcv(sk, skb);
1098 local_bh_enable();
1099
1100 /* Clear memory counter. */
1101 tp->ucopy.memory = 0;
1102}
1103
1104static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1105{
1106 struct sk_buff *skb;
1107 u32 offset;
1108
1109 skb_queue_walk(&sk->sk_receive_queue, skb) {
1110 offset = seq - TCP_SKB_CB(skb)->seq;
1111 if (skb->h.th->syn)
1112 offset--;
1113 if (offset < skb->len || skb->h.th->fin) {
1114 *off = offset;
1115 return skb;
1116 }
1117 }
1118 return NULL;
1119}
1120
1121/*
1122 * This routine provides an alternative to tcp_recvmsg() for routines
1123 * that would like to handle copying from skbuffs directly in 'sendfile'
1124 * fashion.
1125 * Note:
1126 * - It is assumed that the socket was locked by the caller.
1127 * - The routine does not block.
1128 * - At present, there is no support for reading OOB data
1129 * or for 'peeking' the socket using this routine
1130 * (although both would be easy to implement).
1131 */
1132int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1133 sk_read_actor_t recv_actor)
1134{
1135 struct sk_buff *skb;
1136 struct tcp_sock *tp = tcp_sk(sk);
1137 u32 seq = tp->copied_seq;
1138 u32 offset;
1139 int copied = 0;
1140
1141 if (sk->sk_state == TCP_LISTEN)
1142 return -ENOTCONN;
1143 while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
1144 if (offset < skb->len) {
1145 size_t used, len;
1146
1147 len = skb->len - offset;
1148 /* Stop reading if we hit a patch of urgent data */
1149 if (tp->urg_data) {
1150 u32 urg_offset = tp->urg_seq - seq;
1151 if (urg_offset < len)
1152 len = urg_offset;
1153 if (!len)
1154 break;
1155 }
1156 used = recv_actor(desc, skb, offset, len);
1157 if (used <= len) {
1158 seq += used;
1159 copied += used;
1160 offset += used;
1161 }
1162 if (offset != skb->len)
1163 break;
1164 }
1165 if (skb->h.th->fin) {
1166 sk_eat_skb(sk, skb);
1167 ++seq;
1168 break;
1169 }
1170 sk_eat_skb(sk, skb);
1171 if (!desc->count)
1172 break;
1173 }
1174 tp->copied_seq = seq;
1175
1176 tcp_rcv_space_adjust(sk);
1177
1178 /* Clean up data we have read: This will do ACK frames. */
1179 if (copied)
1180 cleanup_rbuf(sk, copied);
1181 return copied;
1182}
1183
1184/*
1185 * This routine copies from a sock struct into the user buffer.
1186 *
1187 * Technical note: in 2.3 we work on _locked_ socket, so that
1188 * tricks with *seq access order and skb->users are not required.
1189 * Probably, code can be easily improved even more.
1190 */
1191
1192int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1193 size_t len, int nonblock, int flags, int *addr_len)
1194{
1195 struct tcp_sock *tp = tcp_sk(sk);
1196 int copied = 0;
1197 u32 peek_seq;
1198 u32 *seq;
1199 unsigned long used;
1200 int err;
1201 int target; /* Read at least this many bytes */
1202 long timeo;
1203 struct task_struct *user_recv = NULL;
1204
1205 lock_sock(sk);
1206
1207 TCP_CHECK_TIMER(sk);
1208
1209 err = -ENOTCONN;
1210 if (sk->sk_state == TCP_LISTEN)
1211 goto out;
1212
1213 timeo = sock_rcvtimeo(sk, nonblock);
1214
1215 /* Urgent data needs to be handled specially. */
1216 if (flags & MSG_OOB)
1217 goto recv_urg;
1218
1219 seq = &tp->copied_seq;
1220 if (flags & MSG_PEEK) {
1221 peek_seq = tp->copied_seq;
1222 seq = &peek_seq;
1223 }
1224
1225 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1226
1227 do {
1228 struct sk_buff *skb;
1229 u32 offset;
1230
1231 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
1232 if (tp->urg_data && tp->urg_seq == *seq) {
1233 if (copied)
1234 break;
1235 if (signal_pending(current)) {
1236 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
1237 break;
1238 }
1239 }
1240
1241 /* Next get a buffer. */
1242
1243 skb = skb_peek(&sk->sk_receive_queue);
1244 do {
1245 if (!skb)
1246 break;
1247
1248 /* Now that we have two receive queues this
1249 * shouldn't happen.
1250 */
1251 if (before(*seq, TCP_SKB_CB(skb)->seq)) {
1252 printk(KERN_INFO "recvmsg bug: copied %X "
1253 "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
1254 break;
1255 }
1256 offset = *seq - TCP_SKB_CB(skb)->seq;
1257 if (skb->h.th->syn)
1258 offset--;
1259 if (offset < skb->len)
1260 goto found_ok_skb;
1261 if (skb->h.th->fin)
1262 goto found_fin_ok;
1263 BUG_TRAP(flags & MSG_PEEK);
1264 skb = skb->next;
1265 } while (skb != (struct sk_buff *)&sk->sk_receive_queue);
1266
1267 /* Well, if we have backlog, try to process it now yet. */
1268
1269 if (copied >= target && !sk->sk_backlog.tail)
1270 break;
1271
1272 if (copied) {
1273 if (sk->sk_err ||
1274 sk->sk_state == TCP_CLOSE ||
1275 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1276 !timeo ||
1277 signal_pending(current) ||
1278 (flags & MSG_PEEK))
1279 break;
1280 } else {
1281 if (sock_flag(sk, SOCK_DONE))
1282 break;
1283
1284 if (sk->sk_err) {
1285 copied = sock_error(sk);
1286 break;
1287 }
1288
1289 if (sk->sk_shutdown & RCV_SHUTDOWN)
1290 break;
1291
1292 if (sk->sk_state == TCP_CLOSE) {
1293 if (!sock_flag(sk, SOCK_DONE)) {
1294 /* This occurs when user tries to read
1295 * from never connected socket.
1296 */
1297 copied = -ENOTCONN;
1298 break;
1299 }
1300 break;
1301 }
1302
1303 if (!timeo) {
1304 copied = -EAGAIN;
1305 break;
1306 }
1307
1308 if (signal_pending(current)) {
1309 copied = sock_intr_errno(timeo);
1310 break;
1311 }
1312 }
1313
1314 cleanup_rbuf(sk, copied);
1315
7df55125 1316 if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
1da177e4
LT
1317 /* Install new reader */
1318 if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
1319 user_recv = current;
1320 tp->ucopy.task = user_recv;
1321 tp->ucopy.iov = msg->msg_iov;
1322 }
1323
1324 tp->ucopy.len = len;
1325
1326 BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
1327 (flags & (MSG_PEEK | MSG_TRUNC)));
1328
1329 /* Ugly... If prequeue is not empty, we have to
1330 * process it before releasing socket, otherwise
1331 * order will be broken at second iteration.
1332 * More elegant solution is required!!!
1333 *
1334 * Look: we have the following (pseudo)queues:
1335 *
1336 * 1. packets in flight
1337 * 2. backlog
1338 * 3. prequeue
1339 * 4. receive_queue
1340 *
1341 * Each queue can be processed only if the next ones
1342 * are empty. At this point we have empty receive_queue.
1343 * But prequeue _can_ be not empty after 2nd iteration,
1344 * when we jumped to start of loop because backlog
1345 * processing added something to receive_queue.
1346 * We cannot release_sock(), because backlog contains
1347 * packets arrived _after_ prequeued ones.
1348 *
1349 * Shortly, algorithm is clear --- to process all
1350 * the queues in order. We could make it more directly,
1351 * requeueing packets from backlog to prequeue, if
1352 * is not empty. It is more elegant, but eats cycles,
1353 * unfortunately.
1354 */
b03efcfb 1355 if (!skb_queue_empty(&tp->ucopy.prequeue))
1da177e4
LT
1356 goto do_prequeue;
1357
1358 /* __ Set realtime policy in scheduler __ */
1359 }
1360
1361 if (copied >= target) {
1362 /* Do not sleep, just process backlog. */
1363 release_sock(sk);
1364 lock_sock(sk);
1365 } else
1366 sk_wait_data(sk, &timeo);
1367
1368 if (user_recv) {
1369 int chunk;
1370
1371 /* __ Restore normal policy in scheduler __ */
1372
1373 if ((chunk = len - tp->ucopy.len) != 0) {
1374 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
1375 len -= chunk;
1376 copied += chunk;
1377 }
1378
1379 if (tp->rcv_nxt == tp->copied_seq &&
b03efcfb 1380 !skb_queue_empty(&tp->ucopy.prequeue)) {
1da177e4
LT
1381do_prequeue:
1382 tcp_prequeue_process(sk);
1383
1384 if ((chunk = len - tp->ucopy.len) != 0) {
1385 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1386 len -= chunk;
1387 copied += chunk;
1388 }
1389 }
1390 }
1391 if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
1392 if (net_ratelimit())
1393 printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
1394 current->comm, current->pid);
1395 peek_seq = tp->copied_seq;
1396 }
1397 continue;
1398
1399 found_ok_skb:
1400 /* Ok so how much can we use? */
1401 used = skb->len - offset;
1402 if (len < used)
1403 used = len;
1404
1405 /* Do we have urgent data here? */
1406 if (tp->urg_data) {
1407 u32 urg_offset = tp->urg_seq - *seq;
1408 if (urg_offset < used) {
1409 if (!urg_offset) {
1410 if (!sock_flag(sk, SOCK_URGINLINE)) {
1411 ++*seq;
1412 offset++;
1413 used--;
1414 if (!used)
1415 goto skip_copy;
1416 }
1417 } else
1418 used = urg_offset;
1419 }
1420 }
1421
1422 if (!(flags & MSG_TRUNC)) {
1423 err = skb_copy_datagram_iovec(skb, offset,
1424 msg->msg_iov, used);
1425 if (err) {
1426 /* Exception. Bailout! */
1427 if (!copied)
1428 copied = -EFAULT;
1429 break;
1430 }
1431 }
1432
1433 *seq += used;
1434 copied += used;
1435 len -= used;
1436
1437 tcp_rcv_space_adjust(sk);
1438
1439skip_copy:
1440 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
1441 tp->urg_data = 0;
1442 tcp_fast_path_check(sk, tp);
1443 }
1444 if (used + offset < skb->len)
1445 continue;
1446
1447 if (skb->h.th->fin)
1448 goto found_fin_ok;
1449 if (!(flags & MSG_PEEK))
1450 sk_eat_skb(sk, skb);
1451 continue;
1452
1453 found_fin_ok:
1454 /* Process the FIN. */
1455 ++*seq;
1456 if (!(flags & MSG_PEEK))
1457 sk_eat_skb(sk, skb);
1458 break;
1459 } while (len > 0);
1460
1461 if (user_recv) {
b03efcfb 1462 if (!skb_queue_empty(&tp->ucopy.prequeue)) {
1da177e4
LT
1463 int chunk;
1464
1465 tp->ucopy.len = copied > 0 ? len : 0;
1466
1467 tcp_prequeue_process(sk);
1468
1469 if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
1470 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1471 len -= chunk;
1472 copied += chunk;
1473 }
1474 }
1475
1476 tp->ucopy.task = NULL;
1477 tp->ucopy.len = 0;
1478 }
1479
1480 /* According to UNIX98, msg_name/msg_namelen are ignored
1481 * on connected socket. I was just happy when found this 8) --ANK
1482 */
1483
1484 /* Clean up data we have read: This will do ACK frames. */
1485 cleanup_rbuf(sk, copied);
1486
1487 TCP_CHECK_TIMER(sk);
1488 release_sock(sk);
1489 return copied;
1490
1491out:
1492 TCP_CHECK_TIMER(sk);
1493 release_sock(sk);
1494 return err;
1495
1496recv_urg:
1497 err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
1498 goto out;
1499}
1500
1501/*
1502 * State processing on a close. This implements the state shift for
1503 * sending our FIN frame. Note that we only send a FIN for some
1504 * states. A shutdown() may have already sent the FIN, or we may be
1505 * closed.
1506 */
1507
1508static unsigned char new_state[16] = {
1509 /* current state: new state: action: */
1510 /* (Invalid) */ TCP_CLOSE,
1511 /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1512 /* TCP_SYN_SENT */ TCP_CLOSE,
1513 /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1514 /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1,
1515 /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2,
1516 /* TCP_TIME_WAIT */ TCP_CLOSE,
1517 /* TCP_CLOSE */ TCP_CLOSE,
1518 /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN,
1519 /* TCP_LAST_ACK */ TCP_LAST_ACK,
1520 /* TCP_LISTEN */ TCP_CLOSE,
1521 /* TCP_CLOSING */ TCP_CLOSING,
1522};
1523
1524static int tcp_close_state(struct sock *sk)
1525{
1526 int next = (int)new_state[sk->sk_state];
1527 int ns = next & TCP_STATE_MASK;
1528
1529 tcp_set_state(sk, ns);
1530
1531 return next & TCP_ACTION_FIN;
1532}
1533
1534/*
1535 * Shutdown the sending side of a connection. Much like close except
1536 * that we don't receive shut down or set_sock_flag(sk, SOCK_DEAD).
1537 */
1538
1539void tcp_shutdown(struct sock *sk, int how)
1540{
1541 /* We need to grab some memory, and put together a FIN,
1542 * and then put it into the queue to be sent.
1543 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
1544 */
1545 if (!(how & SEND_SHUTDOWN))
1546 return;
1547
1548 /* If we've already sent a FIN, or it's a closed state, skip this. */
1549 if ((1 << sk->sk_state) &
1550 (TCPF_ESTABLISHED | TCPF_SYN_SENT |
1551 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
1552 /* Clear out any half completed packets. FIN if needed. */
1553 if (tcp_close_state(sk))
1554 tcp_send_fin(sk);
1555 }
1556}
1557
1558/*
1559 * At this point, there should be no process reference to this
1560 * socket, and thus no user references at all. Therefore we
1561 * can assume the socket waitqueue is inactive and nobody will
1562 * try to jump onto it.
1563 */
1564void tcp_destroy_sock(struct sock *sk)
1565{
1566 BUG_TRAP(sk->sk_state == TCP_CLOSE);
1567 BUG_TRAP(sock_flag(sk, SOCK_DEAD));
1568
1569 /* It cannot be in hash table! */
1570 BUG_TRAP(sk_unhashed(sk));
1571
1572 /* If it has not 0 inet_sk(sk)->num, it must be bound */
463c84b9 1573 BUG_TRAP(!inet_sk(sk)->num || inet_csk(sk)->icsk_bind_hash);
1da177e4
LT
1574
1575 sk->sk_prot->destroy(sk);
1576
1577 sk_stream_kill_queues(sk);
1578
1579 xfrm_sk_free_policy(sk);
1580
e6848976 1581 sk_refcnt_debug_release(sk);
1da177e4
LT
1582
1583 atomic_dec(&tcp_orphan_count);
1584 sock_put(sk);
1585}
1586
1587void tcp_close(struct sock *sk, long timeout)
1588{
1589 struct sk_buff *skb;
1590 int data_was_unread = 0;
1591
1592 lock_sock(sk);
1593 sk->sk_shutdown = SHUTDOWN_MASK;
1594
1595 if (sk->sk_state == TCP_LISTEN) {
1596 tcp_set_state(sk, TCP_CLOSE);
1597
1598 /* Special case. */
1599 tcp_listen_stop(sk);
1600
1601 goto adjudge_to_death;
1602 }
1603
1604 /* We need to flush the recv. buffs. We do this only on the
1605 * descriptor close, not protocol-sourced closes, because the
1606 * reader process may not have drained the data yet!
1607 */
1608 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
1609 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
1610 skb->h.th->fin;
1611 data_was_unread += len;
1612 __kfree_skb(skb);
1613 }
1614
1615 sk_stream_mem_reclaim(sk);
1616
1617 /* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
1618 * 3.10, we send a RST here because data was lost. To
1619 * witness the awful effects of the old behavior of always
1620 * doing a FIN, run an older 2.1.x kernel or 2.0.x, start
1621 * a bulk GET in an FTP client, suspend the process, wait
1622 * for the client to advertise a zero window, then kill -9
1623 * the FTP client, wheee... Note: timeout is always zero
1624 * in such a case.
1625 */
1626 if (data_was_unread) {
1627 /* Unread data was tossed, zap the connection. */
1628 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
1629 tcp_set_state(sk, TCP_CLOSE);
1630 tcp_send_active_reset(sk, GFP_KERNEL);
1631 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
1632 /* Check zero linger _after_ checking for unread data. */
1633 sk->sk_prot->disconnect(sk, 0);
1634 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
1635 } else if (tcp_close_state(sk)) {
1636 /* We FIN if the application ate all the data before
1637 * zapping the connection.
1638 */
1639
1640 /* RED-PEN. Formally speaking, we have broken TCP state
1641 * machine. State transitions:
1642 *
1643 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
1644 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
1645 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
1646 *
1647 * are legal only when FIN has been sent (i.e. in window),
1648 * rather than queued out of window. Purists blame.
1649 *
1650 * F.e. "RFC state" is ESTABLISHED,
1651 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
1652 *
1653 * The visible declinations are that sometimes
1654 * we enter time-wait state, when it is not required really
1655 * (harmless), do not send active resets, when they are
1656 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
1657 * they look as CLOSING or LAST_ACK for Linux)
1658 * Probably, I missed some more holelets.
1659 * --ANK
1660 */
1661 tcp_send_fin(sk);
1662 }
1663
1664 sk_stream_wait_close(sk, timeout);
1665
1666adjudge_to_death:
1667 /* It is the last release_sock in its life. It will remove backlog. */
1668 release_sock(sk);
1669
1670
1671 /* Now socket is owned by kernel and we acquire BH lock
1672 to finish close. No need to check for user refs.
1673 */
1674 local_bh_disable();
1675 bh_lock_sock(sk);
1676 BUG_TRAP(!sock_owned_by_user(sk));
1677
1678 sock_hold(sk);
1679 sock_orphan(sk);
1680
1681 /* This is a (useful) BSD violating of the RFC. There is a
1682 * problem with TCP as specified in that the other end could
1683 * keep a socket open forever with no application left this end.
1684 * We use a 3 minute timeout (about the same as BSD) then kill
1685 * our end. If they send after that then tough - BUT: long enough
1686 * that we won't make the old 4*rto = almost no time - whoops
1687 * reset mistake.
1688 *
1689 * Nope, it was not mistake. It is really desired behaviour
1690 * f.e. on http servers, when such sockets are useless, but
1691 * consume significant resources. Let's do it with special
1692 * linger2 option. --ANK
1693 */
1694
1695 if (sk->sk_state == TCP_FIN_WAIT2) {
1696 struct tcp_sock *tp = tcp_sk(sk);
1697 if (tp->linger2 < 0) {
1698 tcp_set_state(sk, TCP_CLOSE);
1699 tcp_send_active_reset(sk, GFP_ATOMIC);
1700 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
1701 } else {
463c84b9 1702 const int tmo = tcp_fin_time(sk);
1da177e4
LT
1703
1704 if (tmo > TCP_TIMEWAIT_LEN) {
463c84b9 1705 inet_csk_reset_keepalive_timer(sk, tcp_fin_time(sk));
1da177e4
LT
1706 } else {
1707 atomic_inc(&tcp_orphan_count);
1708 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
1709 goto out;
1710 }
1711 }
1712 }
1713 if (sk->sk_state != TCP_CLOSE) {
1714 sk_stream_mem_reclaim(sk);
1715 if (atomic_read(&tcp_orphan_count) > sysctl_tcp_max_orphans ||
1716 (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
1717 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
1718 if (net_ratelimit())
1719 printk(KERN_INFO "TCP: too many of orphaned "
1720 "sockets\n");
1721 tcp_set_state(sk, TCP_CLOSE);
1722 tcp_send_active_reset(sk, GFP_ATOMIC);
1723 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
1724 }
1725 }
1726 atomic_inc(&tcp_orphan_count);
1727
1728 if (sk->sk_state == TCP_CLOSE)
1729 tcp_destroy_sock(sk);
1730 /* Otherwise, socket is reprieved until protocol close. */
1731
1732out:
1733 bh_unlock_sock(sk);
1734 local_bh_enable();
1735 sock_put(sk);
1736}
1737
1738/* These states need RST on ABORT according to RFC793 */
1739
1740static inline int tcp_need_reset(int state)
1741{
1742 return (1 << state) &
1743 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
1744 TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
1745}
1746
1747int tcp_disconnect(struct sock *sk, int flags)
1748{
1749 struct inet_sock *inet = inet_sk(sk);
463c84b9 1750 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
1751 struct tcp_sock *tp = tcp_sk(sk);
1752 int err = 0;
1753 int old_state = sk->sk_state;
1754
1755 if (old_state != TCP_CLOSE)
1756 tcp_set_state(sk, TCP_CLOSE);
1757
1758 /* ABORT function of RFC793 */
1759 if (old_state == TCP_LISTEN) {
1760 tcp_listen_stop(sk);
1761 } else if (tcp_need_reset(old_state) ||
1762 (tp->snd_nxt != tp->write_seq &&
1763 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
1764 /* The last check adjusts for discrepance of Linux wrt. RFC
1765 * states
1766 */
1767 tcp_send_active_reset(sk, gfp_any());
1768 sk->sk_err = ECONNRESET;
1769 } else if (old_state == TCP_SYN_SENT)
1770 sk->sk_err = ECONNRESET;
1771
1772 tcp_clear_xmit_timers(sk);
1773 __skb_queue_purge(&sk->sk_receive_queue);
1774 sk_stream_writequeue_purge(sk);
1775 __skb_queue_purge(&tp->out_of_order_queue);
1776
1777 inet->dport = 0;
1778
1779 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
1780 inet_reset_saddr(sk);
1781
1782 sk->sk_shutdown = 0;
1783 sock_reset_flag(sk, SOCK_DONE);
1784 tp->srtt = 0;
1785 if ((tp->write_seq += tp->max_window + 2) == 0)
1786 tp->write_seq = 1;
463c84b9 1787 icsk->icsk_backoff = 0;
1da177e4
LT
1788 tp->snd_cwnd = 2;
1789 tp->probes_out = 0;
1790 tp->packets_out = 0;
1791 tp->snd_ssthresh = 0x7fffffff;
1792 tp->snd_cwnd_cnt = 0;
1793 tcp_set_ca_state(tp, TCP_CA_Open);
1794 tcp_clear_retrans(tp);
463c84b9 1795 inet_csk_delack_init(sk);
1da177e4
LT
1796 sk->sk_send_head = NULL;
1797 tp->rx_opt.saw_tstamp = 0;
1798 tcp_sack_reset(&tp->rx_opt);
1799 __sk_dst_reset(sk);
1800
463c84b9 1801 BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
1da177e4
LT
1802
1803 sk->sk_error_report(sk);
1804 return err;
1805}
1806
1da177e4
LT
1807/*
1808 * Socket option code for TCP.
1809 */
1810int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
1811 int optlen)
1812{
1813 struct tcp_sock *tp = tcp_sk(sk);
463c84b9 1814 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
1815 int val;
1816 int err = 0;
1817
1818 if (level != SOL_TCP)
1819 return tp->af_specific->setsockopt(sk, level, optname,
1820 optval, optlen);
1821
5f8ef48d
SH
1822 /* This is a string value all the others are int's */
1823 if (optname == TCP_CONGESTION) {
1824 char name[TCP_CA_NAME_MAX];
1825
1826 if (optlen < 1)
1827 return -EINVAL;
1828
1829 val = strncpy_from_user(name, optval,
1830 min(TCP_CA_NAME_MAX-1, optlen));
1831 if (val < 0)
1832 return -EFAULT;
1833 name[val] = 0;
1834
1835 lock_sock(sk);
1836 err = tcp_set_congestion_control(tp, name);
1837 release_sock(sk);
1838 return err;
1839 }
1840
1da177e4
LT
1841 if (optlen < sizeof(int))
1842 return -EINVAL;
1843
1844 if (get_user(val, (int __user *)optval))
1845 return -EFAULT;
1846
1847 lock_sock(sk);
1848
1849 switch (optname) {
1850 case TCP_MAXSEG:
1851 /* Values greater than interface MTU won't take effect. However
1852 * at the point when this call is done we typically don't yet
1853 * know which interface is going to be used */
1854 if (val < 8 || val > MAX_TCP_WINDOW) {
1855 err = -EINVAL;
1856 break;
1857 }
1858 tp->rx_opt.user_mss = val;
1859 break;
1860
1861 case TCP_NODELAY:
1862 if (val) {
1863 /* TCP_NODELAY is weaker than TCP_CORK, so that
1864 * this option on corked socket is remembered, but
1865 * it is not activated until cork is cleared.
1866 *
1867 * However, when TCP_NODELAY is set we make
1868 * an explicit push, which overrides even TCP_CORK
1869 * for currently queued segments.
1870 */
1871 tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
1872 tcp_push_pending_frames(sk, tp);
1873 } else {
1874 tp->nonagle &= ~TCP_NAGLE_OFF;
1875 }
1876 break;
1877
1878 case TCP_CORK:
1879 /* When set indicates to always queue non-full frames.
1880 * Later the user clears this option and we transmit
1881 * any pending partial frames in the queue. This is
1882 * meant to be used alongside sendfile() to get properly
1883 * filled frames when the user (for example) must write
1884 * out headers with a write() call first and then use
1885 * sendfile to send out the data parts.
1886 *
1887 * TCP_CORK can be set together with TCP_NODELAY and it is
1888 * stronger than TCP_NODELAY.
1889 */
1890 if (val) {
1891 tp->nonagle |= TCP_NAGLE_CORK;
1892 } else {
1893 tp->nonagle &= ~TCP_NAGLE_CORK;
1894 if (tp->nonagle&TCP_NAGLE_OFF)
1895 tp->nonagle |= TCP_NAGLE_PUSH;
1896 tcp_push_pending_frames(sk, tp);
1897 }
1898 break;
1899
1900 case TCP_KEEPIDLE:
1901 if (val < 1 || val > MAX_TCP_KEEPIDLE)
1902 err = -EINVAL;
1903 else {
1904 tp->keepalive_time = val * HZ;
1905 if (sock_flag(sk, SOCK_KEEPOPEN) &&
1906 !((1 << sk->sk_state) &
1907 (TCPF_CLOSE | TCPF_LISTEN))) {
1908 __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
1909 if (tp->keepalive_time > elapsed)
1910 elapsed = tp->keepalive_time - elapsed;
1911 else
1912 elapsed = 0;
463c84b9 1913 inet_csk_reset_keepalive_timer(sk, elapsed);
1da177e4
LT
1914 }
1915 }
1916 break;
1917 case TCP_KEEPINTVL:
1918 if (val < 1 || val > MAX_TCP_KEEPINTVL)
1919 err = -EINVAL;
1920 else
1921 tp->keepalive_intvl = val * HZ;
1922 break;
1923 case TCP_KEEPCNT:
1924 if (val < 1 || val > MAX_TCP_KEEPCNT)
1925 err = -EINVAL;
1926 else
1927 tp->keepalive_probes = val;
1928 break;
1929 case TCP_SYNCNT:
1930 if (val < 1 || val > MAX_TCP_SYNCNT)
1931 err = -EINVAL;
1932 else
463c84b9 1933 icsk->icsk_syn_retries = val;
1da177e4
LT
1934 break;
1935
1936 case TCP_LINGER2:
1937 if (val < 0)
1938 tp->linger2 = -1;
1939 else if (val > sysctl_tcp_fin_timeout / HZ)
1940 tp->linger2 = 0;
1941 else
1942 tp->linger2 = val * HZ;
1943 break;
1944
1945 case TCP_DEFER_ACCEPT:
1946 tp->defer_accept = 0;
1947 if (val > 0) {
1948 /* Translate value in seconds to number of
1949 * retransmits */
1950 while (tp->defer_accept < 32 &&
1951 val > ((TCP_TIMEOUT_INIT / HZ) <<
1952 tp->defer_accept))
1953 tp->defer_accept++;
1954 tp->defer_accept++;
1955 }
1956 break;
1957
1958 case TCP_WINDOW_CLAMP:
1959 if (!val) {
1960 if (sk->sk_state != TCP_CLOSE) {
1961 err = -EINVAL;
1962 break;
1963 }
1964 tp->window_clamp = 0;
1965 } else
1966 tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
1967 SOCK_MIN_RCVBUF / 2 : val;
1968 break;
1969
1970 case TCP_QUICKACK:
1971 if (!val) {
463c84b9 1972 icsk->icsk_ack.pingpong = 1;
1da177e4 1973 } else {
463c84b9 1974 icsk->icsk_ack.pingpong = 0;
1da177e4
LT
1975 if ((1 << sk->sk_state) &
1976 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
463c84b9
ACM
1977 inet_csk_ack_scheduled(sk)) {
1978 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
1da177e4
LT
1979 cleanup_rbuf(sk, 1);
1980 if (!(val & 1))
463c84b9 1981 icsk->icsk_ack.pingpong = 1;
1da177e4
LT
1982 }
1983 }
1984 break;
1985
1986 default:
1987 err = -ENOPROTOOPT;
1988 break;
1989 };
1990 release_sock(sk);
1991 return err;
1992}
1993
1994/* Return information about state of tcp endpoint in API format. */
1995void tcp_get_info(struct sock *sk, struct tcp_info *info)
1996{
1997 struct tcp_sock *tp = tcp_sk(sk);
463c84b9 1998 const struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
1999 u32 now = tcp_time_stamp;
2000
2001 memset(info, 0, sizeof(*info));
2002
2003 info->tcpi_state = sk->sk_state;
2004 info->tcpi_ca_state = tp->ca_state;
463c84b9 2005 info->tcpi_retransmits = icsk->icsk_retransmits;
1da177e4 2006 info->tcpi_probes = tp->probes_out;
463c84b9 2007 info->tcpi_backoff = icsk->icsk_backoff;
1da177e4
LT
2008
2009 if (tp->rx_opt.tstamp_ok)
2010 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
2011 if (tp->rx_opt.sack_ok)
2012 info->tcpi_options |= TCPI_OPT_SACK;
2013 if (tp->rx_opt.wscale_ok) {
2014 info->tcpi_options |= TCPI_OPT_WSCALE;
2015 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
2016 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
2017 }
2018
2019 if (tp->ecn_flags&TCP_ECN_OK)
2020 info->tcpi_options |= TCPI_OPT_ECN;
2021
463c84b9
ACM
2022 info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
2023 info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
c1b4a7e6 2024 info->tcpi_snd_mss = tp->mss_cache;
463c84b9 2025 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
1da177e4
LT
2026
2027 info->tcpi_unacked = tp->packets_out;
2028 info->tcpi_sacked = tp->sacked_out;
2029 info->tcpi_lost = tp->lost_out;
2030 info->tcpi_retrans = tp->retrans_out;
2031 info->tcpi_fackets = tp->fackets_out;
2032
2033 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
463c84b9 2034 info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
1da177e4
LT
2035 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
2036
2037 info->tcpi_pmtu = tp->pmtu_cookie;
2038 info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
2039 info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
2040 info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
2041 info->tcpi_snd_ssthresh = tp->snd_ssthresh;
2042 info->tcpi_snd_cwnd = tp->snd_cwnd;
2043 info->tcpi_advmss = tp->advmss;
2044 info->tcpi_reordering = tp->reordering;
2045
2046 info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
2047 info->tcpi_rcv_space = tp->rcvq_space.space;
2048
2049 info->tcpi_total_retrans = tp->total_retrans;
2050}
2051
2052EXPORT_SYMBOL_GPL(tcp_get_info);
2053
2054int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2055 int __user *optlen)
2056{
2057 struct tcp_sock *tp = tcp_sk(sk);
2058 int val, len;
2059
2060 if (level != SOL_TCP)
2061 return tp->af_specific->getsockopt(sk, level, optname,
2062 optval, optlen);
2063
2064 if (get_user(len, optlen))
2065 return -EFAULT;
2066
2067 len = min_t(unsigned int, len, sizeof(int));
2068
2069 if (len < 0)
2070 return -EINVAL;
2071
2072 switch (optname) {
2073 case TCP_MAXSEG:
c1b4a7e6 2074 val = tp->mss_cache;
1da177e4
LT
2075 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
2076 val = tp->rx_opt.user_mss;
2077 break;
2078 case TCP_NODELAY:
2079 val = !!(tp->nonagle&TCP_NAGLE_OFF);
2080 break;
2081 case TCP_CORK:
2082 val = !!(tp->nonagle&TCP_NAGLE_CORK);
2083 break;
2084 case TCP_KEEPIDLE:
2085 val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
2086 break;
2087 case TCP_KEEPINTVL:
2088 val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
2089 break;
2090 case TCP_KEEPCNT:
2091 val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
2092 break;
2093 case TCP_SYNCNT:
463c84b9 2094 val = inet_csk(sk)->icsk_syn_retries ? : sysctl_tcp_syn_retries;
1da177e4
LT
2095 break;
2096 case TCP_LINGER2:
2097 val = tp->linger2;
2098 if (val >= 0)
2099 val = (val ? : sysctl_tcp_fin_timeout) / HZ;
2100 break;
2101 case TCP_DEFER_ACCEPT:
2102 val = !tp->defer_accept ? 0 : ((TCP_TIMEOUT_INIT / HZ) <<
2103 (tp->defer_accept - 1));
2104 break;
2105 case TCP_WINDOW_CLAMP:
2106 val = tp->window_clamp;
2107 break;
2108 case TCP_INFO: {
2109 struct tcp_info info;
2110
2111 if (get_user(len, optlen))
2112 return -EFAULT;
2113
2114 tcp_get_info(sk, &info);
2115
2116 len = min_t(unsigned int, len, sizeof(info));
2117 if (put_user(len, optlen))
2118 return -EFAULT;
2119 if (copy_to_user(optval, &info, len))
2120 return -EFAULT;
2121 return 0;
2122 }
2123 case TCP_QUICKACK:
463c84b9 2124 val = !inet_csk(sk)->icsk_ack.pingpong;
1da177e4 2125 break;
5f8ef48d
SH
2126
2127 case TCP_CONGESTION:
2128 if (get_user(len, optlen))
2129 return -EFAULT;
2130 len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
2131 if (put_user(len, optlen))
2132 return -EFAULT;
2133 if (copy_to_user(optval, tp->ca_ops->name, len))
2134 return -EFAULT;
2135 return 0;
1da177e4
LT
2136 default:
2137 return -ENOPROTOOPT;
2138 };
2139
2140 if (put_user(len, optlen))
2141 return -EFAULT;
2142 if (copy_to_user(optval, &val, len))
2143 return -EFAULT;
2144 return 0;
2145}
2146
2147
2148extern void __skb_cb_too_small_for_tcp(int, int);
5f8ef48d 2149extern struct tcp_congestion_ops tcp_reno;
1da177e4
LT
2150
2151static __initdata unsigned long thash_entries;
2152static int __init set_thash_entries(char *str)
2153{
2154 if (!str)
2155 return 0;
2156 thash_entries = simple_strtoul(str, &str, 0);
2157 return 1;
2158}
2159__setup("thash_entries=", set_thash_entries);
2160
2161void __init tcp_init(void)
2162{
2163 struct sk_buff *skb = NULL;
2164 int order, i;
2165
2166 if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
2167 __skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
2168 sizeof(skb->cb));
2169
6e04e021
ACM
2170 tcp_hashinfo.bind_bucket_cachep =
2171 kmem_cache_create("tcp_bind_bucket",
2172 sizeof(struct inet_bind_bucket), 0,
2173 SLAB_HWCACHE_ALIGN, NULL, NULL);
2174 if (!tcp_hashinfo.bind_bucket_cachep)
1da177e4
LT
2175 panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");
2176
1da177e4
LT
2177 /* Size and allocate the main established and bind bucket
2178 * hash tables.
2179 *
2180 * The methodology is similar to that of the buffer cache.
2181 */
6e04e021 2182 tcp_hashinfo.ehash =
1da177e4 2183 alloc_large_system_hash("TCP established",
0f7ff927 2184 sizeof(struct inet_ehash_bucket),
1da177e4
LT
2185 thash_entries,
2186 (num_physpages >= 128 * 1024) ?
2187 (25 - PAGE_SHIFT) :
2188 (27 - PAGE_SHIFT),
2189 HASH_HIGHMEM,
6e04e021 2190 &tcp_hashinfo.ehash_size,
1da177e4
LT
2191 NULL,
2192 0);
6e04e021
ACM
2193 tcp_hashinfo.ehash_size = (1 << tcp_hashinfo.ehash_size) >> 1;
2194 for (i = 0; i < (tcp_hashinfo.ehash_size << 1); i++) {
2195 rwlock_init(&tcp_hashinfo.ehash[i].lock);
2196 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
1da177e4
LT
2197 }
2198
6e04e021 2199 tcp_hashinfo.bhash =
1da177e4 2200 alloc_large_system_hash("TCP bind",
0f7ff927 2201 sizeof(struct inet_bind_hashbucket),
6e04e021 2202 tcp_hashinfo.ehash_size,
1da177e4
LT
2203 (num_physpages >= 128 * 1024) ?
2204 (25 - PAGE_SHIFT) :
2205 (27 - PAGE_SHIFT),
2206 HASH_HIGHMEM,
6e04e021 2207 &tcp_hashinfo.bhash_size,
1da177e4
LT
2208 NULL,
2209 64 * 1024);
6e04e021
ACM
2210 tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
2211 for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
2212 spin_lock_init(&tcp_hashinfo.bhash[i].lock);
2213 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
1da177e4
LT
2214 }
2215
2216 /* Try to be a bit smarter and adjust defaults depending
2217 * on available memory.
2218 */
2219 for (order = 0; ((1 << order) << PAGE_SHIFT) <
6e04e021 2220 (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
1da177e4
LT
2221 order++)
2222 ;
e7626486 2223 if (order >= 4) {
1da177e4
LT
2224 sysctl_local_port_range[0] = 32768;
2225 sysctl_local_port_range[1] = 61000;
2226 sysctl_tcp_max_tw_buckets = 180000;
2227 sysctl_tcp_max_orphans = 4096 << (order - 4);
2228 sysctl_max_syn_backlog = 1024;
2229 } else if (order < 3) {
2230 sysctl_local_port_range[0] = 1024 * (3 - order);
2231 sysctl_tcp_max_tw_buckets >>= (3 - order);
2232 sysctl_tcp_max_orphans >>= (3 - order);
2233 sysctl_max_syn_backlog = 128;
2234 }
6e04e021 2235 tcp_hashinfo.port_rover = sysctl_local_port_range[0] - 1;
1da177e4
LT
2236
2237 sysctl_tcp_mem[0] = 768 << order;
2238 sysctl_tcp_mem[1] = 1024 << order;
2239 sysctl_tcp_mem[2] = 1536 << order;
2240
2241 if (order < 3) {
2242 sysctl_tcp_wmem[2] = 64 * 1024;
2243 sysctl_tcp_rmem[0] = PAGE_SIZE;
2244 sysctl_tcp_rmem[1] = 43689;
2245 sysctl_tcp_rmem[2] = 2 * 43689;
2246 }
2247
2248 printk(KERN_INFO "TCP: Hash tables configured "
2249 "(established %d bind %d)\n",
6e04e021 2250 tcp_hashinfo.ehash_size << 1, tcp_hashinfo.bhash_size);
317a76f9
SH
2251
2252 tcp_register_congestion_control(&tcp_reno);
1da177e4
LT
2253}
2254
1da177e4
LT
2255EXPORT_SYMBOL(tcp_close);
2256EXPORT_SYMBOL(tcp_destroy_sock);
2257EXPORT_SYMBOL(tcp_disconnect);
2258EXPORT_SYMBOL(tcp_getsockopt);
2259EXPORT_SYMBOL(tcp_ioctl);
1da177e4
LT
2260EXPORT_SYMBOL(tcp_poll);
2261EXPORT_SYMBOL(tcp_read_sock);
2262EXPORT_SYMBOL(tcp_recvmsg);
2263EXPORT_SYMBOL(tcp_sendmsg);
2264EXPORT_SYMBOL(tcp_sendpage);
2265EXPORT_SYMBOL(tcp_setsockopt);
2266EXPORT_SYMBOL(tcp_shutdown);
2267EXPORT_SYMBOL(tcp_statistics);