/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken
 *					pointers passed where wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive otherwise odd bits of prattle
 *					escape still
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *		Michael O'Reilly:	ack < copied bug fix.
 *		Johannes Stille	:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *		Charles Hedrick	:	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick	:	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					completely
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon	:	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties removed from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan	:	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING. Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown. There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/crypto.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/netdma.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>

int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;

DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;

atomic_t tcp_orphan_count = ATOMIC_INIT(0);

EXPORT_SYMBOL_GPL(tcp_orphan_count);

int sysctl_tcp_mem[3] __read_mostly;
int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;

EXPORT_SYMBOL(sysctl_tcp_mem);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);

atomic_t tcp_memory_allocated;	/* Current allocated memory. */
atomic_t tcp_sockets_allocated;	/* Current number of TCP sockets. */

EXPORT_SYMBOL(tcp_memory_allocated);
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All the sk_stream_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
int tcp_memory_pressure __read_mostly;

EXPORT_SYMBOL(tcp_memory_pressure);

void tcp_enter_memory_pressure(void)
{
	if (!tcp_memory_pressure) {
		NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
		tcp_memory_pressure = 1;
	}
}

EXPORT_SYMBOL(tcp_enter_memory_pressure);

/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;
	struct tcp_sock *tp = tcp_sk(sk);

	poll_wait(file, sk->sk_sleep, wait);
	if (sk->sk_state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = POLLERR;

	/*
	 * POLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that POLLHUP is incompatible
	 * with the POLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making impossible poll() on write()
	 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
	 * if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look how Solaris and DUX
	 * solve this dilemma. I would prefer, if POLLHUP were maskable,
	 * then we could set it on SND_SHUTDOWN. BTW examples given
	 * in Stevens' books assume exactly this behaviour, it explains
	 * why POLLHUP is incompatible with POLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		/* Potential race condition. If read of tp below will
		 * escape above sk->sk_state, we can be illegally awakened
		 * in SYN_* states. */
		if ((tp->rcv_nxt != tp->copied_seq) &&
		    (tp->urg_seq != tp->copied_seq ||
		     tp->rcv_nxt != tp->copied_seq + 1 ||
		     sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}

		if (tp->urg_data & TCP_URG_VALID)
			mask |= POLLPRI;
	}
	return mask;
}

int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else if (sock_flag(sk, SOCK_URGINLINE) ||
			 !tp->urg_data ||
			 before(tp->urg_seq, tp->copied_seq) ||
			 !before(tp->urg_seq, tp->rcv_nxt)) {
			answ = tp->rcv_nxt - tp->copied_seq;

			/* Subtract 1, if FIN is in queue. */
			if (answ && !skb_queue_empty(&sk->sk_receive_queue))
				answ -=
		       tcp_hdr((struct sk_buff *)sk->sk_receive_queue.prev)->fin;
		} else
			answ = tp->urg_seq - tp->copied_seq;
		release_sock(sk);
		break;
	case SIOCATMARK:
		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_una;
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return put_user(answ, (int __user *)arg);
}
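
/*
 * Editor's note (illustrative, not part of the original source): these
 * ioctls are reached via ioctl(2) on a TCP socket. A minimal userspace
 * sketch of querying the queues:
 *
 *	int inq, outq;
 *	if (ioctl(fd, SIOCINQ, &inq) == 0)	 // bytes readable right now
 *		...
 *	if (ioctl(fd, SIOCOUTQ, &outq) == 0)	 // bytes written, not acked
 *		...
 *
 * SIOCINQ is the same value as FIONREAD, and SIOCATMARK is what backs
 * sockatmark(3).
 */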

static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
	tp->pushed_seq = tp->write_seq;
}

static inline int forced_push(struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}
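
/*
 * Editor's note (illustrative): forced_push() asks "has more than half of
 * the largest window the peer ever advertised been queued since the last
 * PSH?". E.g. with max_window = 64K, once write_seq runs more than 32K
 * past pushed_seq the next segment is marked PSH and pushed out even if
 * the caller hinted MSG_MORE.
 */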

static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	skb->csum = 0;
	tcb->seq = tcb->end_seq = tp->write_seq;
	tcb->flags = TCPCB_FLAG_ACK;
	tcb->sacked = 0;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk_charge_skb(sk, skb);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
				struct sk_buff *skb)
{
	if (flags & MSG_OOB) {
		tp->urg_mode = 1;
		tp->snd_up = tp->write_seq;
		TCP_SKB_CB(skb)->sacked |= TCPCB_URG;
	}
}

static inline void tcp_push(struct sock *sk, int flags, int mss_now,
			    int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_send_head(sk)) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		if (!(flags & MSG_MORE) || forced_push(tp))
			tcp_mark_push(tp, skb);
		tcp_mark_urg(tp, flags, skb);
		__tcp_push_pending_frames(sk, mss_now,
					  (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
	}
}

static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
				size_t psize, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now, size_goal;
	int err;
	ssize_t copied;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_current_mss(sk, !(flags & MSG_OOB));
	size_goal = tp->xmit_size_goal;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (psize > 0) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		struct page *page = pages[poffset / PAGE_SIZE];
		int copy, i, can_coalesce;
		int offset = poffset % PAGE_SIZE;
		int size = min_t(size_t, psize, PAGE_SIZE - offset);

		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
new_segment:
			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;

			skb = sk_stream_alloc_pskb(sk, 0, 0,
						   sk->sk_allocation);
			if (!skb)
				goto wait_for_memory;

			skb_entail(sk, skb);
			copy = size_goal;
		}

		if (copy > size)
			copy = size;

		i = skb_shinfo(skb)->nr_frags;
		can_coalesce = skb_can_coalesce(skb, i, page, offset);
		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
			tcp_mark_push(tp, skb);
			goto new_segment;
		}
		if (!sk_stream_wmem_schedule(sk, copy))
			goto wait_for_memory;

		if (can_coalesce) {
			skb_shinfo(skb)->frags[i - 1].size += copy;
		} else {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, copy);
		}

		skb->len += copy;
		skb->data_len += copy;
		skb->truesize += copy;
		sk->sk_wmem_queued += copy;
		sk->sk_forward_alloc -= copy;
		skb->ip_summed = CHECKSUM_PARTIAL;
		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;
		skb_shinfo(skb)->gso_segs = 0;

		if (!copied)
			TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

		copied += copy;
		poffset += copy;
		if (!(psize -= copy))
			goto out;

		if (skb->len < mss_now || (flags & MSG_OOB))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == tcp_send_head(sk))
			tcp_push_one(sk, mss_now);
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		if (copied)
			tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
			goto do_error;

		mss_now = tcp_current_mss(sk, !(flags & MSG_OOB));
		size_goal = tp->xmit_size_goal;
	}

out:
	if (copied)
		tcp_push(sk, flags, mss_now, tp->nonagle);
	return copied;

do_error:
	if (copied)
		goto out;
out_err:
	return sk_stream_error(sk, flags, err);
}

ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
		     size_t size, int flags)
{
	ssize_t res;
	struct sock *sk = sock->sk;

	if (!(sk->sk_route_caps & NETIF_F_SG) ||
	    !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
		return sock_no_sendpage(sock, page, offset, size, flags);

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);
	res = do_tcp_sendpages(sk, &page, offset, size, flags);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return res;
}
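
/*
 * Editor's note (illustrative): the zero-copy path is only taken when the
 * route's device supports scatter-gather and can checksum the payload in
 * hardware; the page contents never reach the linear skb head, so the
 * stack cannot cheaply checksum them itself. Otherwise sock_no_sendpage()
 * falls back to an ordinary copying send of the page contents.
 */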

#define TCP_PAGE(sk)	(sk->sk_sndmsg_page)
#define TCP_OFF(sk)	(sk->sk_sndmsg_off)

static inline int select_size(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int tmp = tp->mss_cache;

	if (sk->sk_route_caps & NETIF_F_SG) {
		if (sk_can_gso(sk))
			tmp = 0;
		else {
			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

			if (tmp >= pgbreak &&
			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
				tmp = pgbreak;
		}
	}

	return tmp;
}
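
/*
 * Editor's note (illustrative, an interpretation): select_size() picks how
 * much linear "head" room a fresh skb gets. With SG hardware that can also
 * GSO it returns 0, pushing all payload into page fragments; with SG-only
 * hardware the head is capped at the page break so a full-sized segment
 * still fits within MAX_SKB_FRAGS fragments; without SG the whole MSS must
 * live in the linear area.
 */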

int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t size)
{
	struct iovec *iov;
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int iovlen, flags;
	int mss_now, size_goal;
	int err, copied;
	long timeo;

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);

	flags = msg->msg_flags;
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	/* This should be in poll */
	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_current_mss(sk, !(flags & MSG_OOB));
	size_goal = tp->xmit_size_goal;

	/* Ok commence sending. */
	iovlen = msg->msg_iovlen;
	iov = msg->msg_iov;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (--iovlen >= 0) {
		int seglen = iov->iov_len;
		unsigned char __user *from = iov->iov_base;

		iov++;

		while (seglen > 0) {
			int copy;

			skb = tcp_write_queue_tail(sk);

			if (!tcp_send_head(sk) ||
			    (copy = size_goal - skb->len) <= 0) {

new_segment:
				/* Allocate new segment. If the interface is SG,
				 * allocate skb fitting to single page.
				 */
				if (!sk_stream_memory_free(sk))
					goto wait_for_sndbuf;

				skb = sk_stream_alloc_pskb(sk, select_size(sk),
							   0, sk->sk_allocation);
				if (!skb)
					goto wait_for_memory;

				/*
				 * Check whether we can use HW checksum.
				 */
				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
					skb->ip_summed = CHECKSUM_PARTIAL;

				skb_entail(sk, skb);
				copy = size_goal;
			}

			/* Try to append data to the end of skb. */
			if (copy > seglen)
				copy = seglen;

			/* Where to copy to? */
			if (skb_tailroom(skb) > 0) {
				/* We have some space in skb head. Superb! */
				if (copy > skb_tailroom(skb))
					copy = skb_tailroom(skb);
				if ((err = skb_add_data(skb, from, copy)) != 0)
					goto do_fault;
			} else {
				int merge = 0;
				int i = skb_shinfo(skb)->nr_frags;
				struct page *page = TCP_PAGE(sk);
				int off = TCP_OFF(sk);

				if (skb_can_coalesce(skb, i, page, off) &&
				    off != PAGE_SIZE) {
					/* We can extend the last page
					 * fragment. */
					merge = 1;
				} else if (i == MAX_SKB_FRAGS ||
					   (!i &&
					   !(sk->sk_route_caps & NETIF_F_SG))) {
					/* Need to add new fragment and cannot
					 * do this because interface is non-SG,
					 * or because all the page slots are
					 * busy. */
					tcp_mark_push(tp, skb);
					goto new_segment;
				} else if (page) {
					if (off == PAGE_SIZE) {
						put_page(page);
						TCP_PAGE(sk) = page = NULL;
						off = 0;
					}
				} else
					off = 0;

				if (copy > PAGE_SIZE - off)
					copy = PAGE_SIZE - off;

				if (!sk_stream_wmem_schedule(sk, copy))
					goto wait_for_memory;

				if (!page) {
					/* Allocate new cache page. */
					if (!(page = sk_stream_alloc_page(sk)))
						goto wait_for_memory;
				}

				/* Time to copy data. We are close to
				 * the end! */
				err = skb_copy_to_page(sk, from, skb, page,
						       off, copy);
				if (err) {
					/* If this page was new, give it to the
					 * socket so it does not get leaked.
					 */
					if (!TCP_PAGE(sk)) {
						TCP_PAGE(sk) = page;
						TCP_OFF(sk) = 0;
					}
					goto do_error;
				}

				/* Update the skb. */
				if (merge) {
					skb_shinfo(skb)->frags[i - 1].size +=
									copy;
				} else {
					skb_fill_page_desc(skb, i, page, off, copy);
					if (TCP_PAGE(sk)) {
						get_page(page);
					} else if (off + copy < PAGE_SIZE) {
						get_page(page);
						TCP_PAGE(sk) = page;
					}
				}

				TCP_OFF(sk) = off + copy;
			}

			if (!copied)
				TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

			tp->write_seq += copy;
			TCP_SKB_CB(skb)->end_seq += copy;
			skb_shinfo(skb)->gso_segs = 0;

			from += copy;
			copied += copy;
			if ((seglen -= copy) == 0 && iovlen == 0)
				goto out;

			if (skb->len < mss_now || (flags & MSG_OOB))
				continue;

			if (forced_push(tp)) {
				tcp_mark_push(tp, skb);
				__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
			} else if (skb == tcp_send_head(sk))
				tcp_push_one(sk, mss_now);
			continue;

wait_for_sndbuf:
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
			if (copied)
				tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
				goto do_error;

			mss_now = tcp_current_mss(sk, !(flags & MSG_OOB));
			size_goal = tp->xmit_size_goal;
		}
	}

out:
	if (copied)
		tcp_push(sk, flags, mss_now, tp->nonagle);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

do_fault:
	if (!skb->len) {
		tcp_unlink_write_queue(skb, sk);
		/* It is the one place in all of TCP, except connection
		 * reset, where we can be unlinking the send_head.
		 */
		tcp_check_send_head(sk, skb);
		sk_stream_free_skb(sk, skb);
	}

do_error:
	if (copied)
		goto out;
out_err:
	err = sk_stream_error(sk, flags, err);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;
}
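
/*
 * Editor's note (illustrative): the loop above grows each skb up to
 * size_goal (tp->xmit_size_goal, a TSO-friendly multiple of the MSS)
 * before starting the next one: bytes land in the linear head while it
 * has tailroom, then spill into the per-socket staging page
 * (TCP_PAGE/TCP_OFF) as page fragments, coalescing with the previous
 * fragment when the new bytes are contiguous with it.
 */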

/*
 *	Handle reading urgent data. BSD has very simple semantics for
 *	this, no blocking and very strange errors 8)
 */

static int tcp_recv_urg(struct sock *sk, long timeo,
			struct msghdr *msg, int len, int flags,
			int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* No URG data to read. */
	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
	    tp->urg_data == TCP_URG_READ)
		return -EINVAL;	/* Yes this is right ! */

	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
		return -ENOTCONN;

	if (tp->urg_data & TCP_URG_VALID) {
		int err = 0;
		char c = tp->urg_data;

		if (!(flags & MSG_PEEK))
			tp->urg_data = TCP_URG_READ;

		/* Read urgent data. */
		msg->msg_flags |= MSG_OOB;

		if (len > 0) {
			if (!(flags & MSG_TRUNC))
				err = memcpy_toiovec(msg->msg_iov, &c, 1);
			len = 1;
		} else
			msg->msg_flags |= MSG_TRUNC;

		return err ? -EFAULT : len;
	}

	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
		return 0;

	/* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
	 * the available implementations agree in this case:
	 * this call should never block, independent of the
	 * blocking state of the socket.
	 * Mike <pall@rz.uni-karlsruhe.de>
	 */
	return -EAGAIN;
}
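
/*
 * Editor's note (illustrative): the userspace counterpart is
 *
 *	char c;
 *	ssize_t n = recv(fd, &c, 1, MSG_OOB);
 *
 * which yields the one urgent byte, fails with EINVAL under SO_OOBINLINE
 * (the byte is then delivered in the normal data stream instead), and
 * fails with EAGAIN when no urgent byte has arrived yet, even on a
 * blocking socket, as the comment above stresses.
 */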

/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary. COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far, it speeds up the
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
void tcp_cleanup_rbuf(struct sock *sk, int copied)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int time_to_ack = 0;

#if TCP_DEBUG
	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

	BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
#endif

	if (inet_csk_ack_scheduled(sk)) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		   /* Delayed ACKs frequently hit locked sockets during bulk
		    * receive. */
		if (icsk->icsk_ack.blocked ||
		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
		    /*
		     * If this read emptied read buffer, we send ACK, if
		     * connection is not bidirectional, user drained
		     * receive buffer and there was a small segment
		     * in queue.
		     */
		    (copied > 0 &&
		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
		       !icsk->icsk_ack.pingpong)) &&
		      !atomic_read(&sk->sk_rmem_alloc)))
			time_to_ack = 1;
	}

	/* We send an ACK if we can now advertise a non-zero window
	 * which has been raised "significantly".
	 *
	 * Even if window raised up to infinity, do not send window open ACK
	 * in states, where we will not receive more. It is useless.
	 */
	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		__u32 rcv_window_now = tcp_receive_window(tp);

		/* Optimize, __tcp_select_window() is not cheap. */
		if (2*rcv_window_now <= tp->window_clamp) {
			__u32 new_window = __tcp_select_window(sk);

			/* Send ACK now, if this read freed lots of space
			 * in our buffer. Certainly, new_window is new window.
			 * We can advertise it now, if it is not less than current one.
			 * "Lots" means "at least twice" here.
			 */
			if (new_window && new_window >= 2 * rcv_window_now)
				time_to_ack = 1;
		}
	}
	if (time_to_ack)
		tcp_send_ack(sk);
}

static void tcp_prequeue_process(struct sock *sk)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);

	NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);

	/* RX process wants to run with disabled BHs, though it is not
	 * necessary */
	local_bh_disable();
	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
		sk->sk_backlog_rcv(sk, skb);
	local_bh_enable();

	/* Clear memory counter. */
	tp->ucopy.memory = 0;
}

static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
	struct sk_buff *skb;
	u32 offset;

	skb_queue_walk(&sk->sk_receive_queue, skb) {
		offset = seq - TCP_SKB_CB(skb)->seq;
		if (tcp_hdr(skb)->syn)
			offset--;
		if (offset < skb->len || tcp_hdr(skb)->fin) {
			*off = offset;
			return skb;
		}
	}
	return NULL;
}

/*
 * This routine provides an alternative to tcp_recvmsg() for routines
 * that would like to handle copying from skbuffs directly in 'sendfile'
 * fashion.
 * Note:
 *	- It is assumed that the socket was locked by the caller.
 *	- The routine does not block.
 *	- At present, there is no support for reading OOB data
 *	  or for 'peeking' the socket using this routine
 *	  (although both would be easy to implement).
 */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 seq = tp->copied_seq;
	u32 offset;
	int copied = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -ENOTCONN;
	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
		if (offset < skb->len) {
			size_t len;
			int used;

			len = skb->len - offset;
			/* Stop reading if we hit a patch of urgent data */
			if (tp->urg_data) {
				u32 urg_offset = tp->urg_seq - seq;
				if (urg_offset < len)
					len = urg_offset;
				if (!len)
					break;
			}
			used = recv_actor(desc, skb, offset, len);
			if (used < 0) {
				if (!copied)
					copied = used;
				break;
			} else if (used <= len) {
				seq += used;
				copied += used;
				offset += used;
			}
			if (offset != skb->len)
				break;
		}
		if (tcp_hdr(skb)->fin) {
			sk_eat_skb(sk, skb, 0);
			++seq;
			break;
		}
		sk_eat_skb(sk, skb, 0);
		if (!desc->count)
			break;
	}
	tp->copied_seq = seq;

	tcp_rcv_space_adjust(sk);

	/* Clean up data we have read: This will do ACK frames. */
	if (copied > 0)
		tcp_cleanup_rbuf(sk, copied);
	return copied;
}
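
/*
 * Editor's note (illustrative): recv_actor lets in-kernel consumers (the
 * sunrpc transport in net/sunrpc/xprtsock.c, for example) pull payload
 * straight from the queued skbs without staging it through an iovec;
 * desc->count is the actor's way of saying how much more it wants.
 */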

/*
 *	This routine copies from a sock struct into the user buffer.
 *
 *	Technical note: in 2.3 we work on _locked_ socket, so that
 *	tricks with *seq access order and skb->users are not required.
 *	Probably, code can be easily improved even more.
 */

int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len, int nonblock, int flags, int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int copied = 0;
	u32 peek_seq;
	u32 *seq;
	unsigned long used;
	int err;
	int target;		/* Read at least this many bytes */
	long timeo;
	struct task_struct *user_recv = NULL;
	int copied_early = 0;
	int available = 0;
	struct sk_buff *skb;

	lock_sock(sk);

	TCP_CHECK_TIMER(sk);

	err = -ENOTCONN;
	if (sk->sk_state == TCP_LISTEN)
		goto out;

	timeo = sock_rcvtimeo(sk, nonblock);

	/* Urgent data needs to be handled specially. */
	if (flags & MSG_OOB)
		goto recv_urg;

	seq = &tp->copied_seq;
	if (flags & MSG_PEEK) {
		peek_seq = tp->copied_seq;
		seq = &peek_seq;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

#ifdef CONFIG_NET_DMA
	tp->ucopy.dma_chan = NULL;
	preempt_disable();
	skb = skb_peek_tail(&sk->sk_receive_queue);
	if (skb)
		available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
	if ((available < target) &&
	    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
	    !sysctl_tcp_low_latency && __get_cpu_var(softnet_data).net_dma) {
		preempt_enable_no_resched();
		tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len);
	} else
		preempt_enable_no_resched();
#endif

	do {
		u32 offset;

		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
		if (tp->urg_data && tp->urg_seq == *seq) {
			if (copied)
				break;
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
				break;
			}
		}

		/* Next get a buffer. */

		skb = skb_peek(&sk->sk_receive_queue);
		do {
			if (!skb)
				break;

			/* Now that we have two receive queues this
			 * shouldn't happen.
			 */
			if (before(*seq, TCP_SKB_CB(skb)->seq)) {
				printk(KERN_INFO "recvmsg bug: copied %X "
				       "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
				break;
			}
			offset = *seq - TCP_SKB_CB(skb)->seq;
			if (tcp_hdr(skb)->syn)
				offset--;
			if (offset < skb->len)
				goto found_ok_skb;
			if (tcp_hdr(skb)->fin)
				goto found_fin_ok;
			BUG_TRAP(flags & MSG_PEEK);
			skb = skb->next;
		} while (skb != (struct sk_buff *)&sk->sk_receive_queue);

		/* Well, if we have backlog, try to process it now. */

		if (copied >= target && !sk->sk_backlog.tail)
			break;

		if (copied) {
			if (sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current) ||
			    (flags & MSG_PEEK))
				break;
		} else {
			if (sock_flag(sk, SOCK_DONE))
				break;

			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}

			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			if (sk->sk_state == TCP_CLOSE) {
				if (!sock_flag(sk, SOCK_DONE)) {
					/* This occurs when user tries to read
					 * from never connected socket.
					 */
					copied = -ENOTCONN;
					break;
				}
				break;
			}

			if (!timeo) {
				copied = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		tcp_cleanup_rbuf(sk, copied);

		if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
			/* Install new reader */
			if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
				user_recv = current;
				tp->ucopy.task = user_recv;
				tp->ucopy.iov = msg->msg_iov;
			}

			tp->ucopy.len = len;

			BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
				 (flags & (MSG_PEEK | MSG_TRUNC)));

			/* Ugly... If prequeue is not empty, we have to
			 * process it before releasing socket, otherwise
			 * order will be broken at second iteration.
			 * More elegant solution is required!!!
			 *
			 * Look: we have the following (pseudo)queues:
			 *
			 * 1. packets in flight
			 * 2. backlog
			 * 3. prequeue
			 * 4. receive_queue
			 *
			 * Each queue can be processed only if the next ones
			 * are empty. At this point we have empty receive_queue.
			 * But prequeue _can_ be not empty after 2nd iteration,
			 * when we jumped to start of loop because backlog
			 * processing added something to receive_queue.
			 * We cannot release_sock(), because backlog contains
			 * packets arrived _after_ prequeued ones.
			 *
			 * Shortly, algorithm is clear --- to process all
			 * the queues in order. We could make it more directly,
			 * requeueing packets from backlog to prequeue, if it
			 * is not empty. It is more elegant, but eats cycles,
			 * unfortunately.
			 */
			if (!skb_queue_empty(&tp->ucopy.prequeue))
				goto do_prequeue;

			/* __ Set realtime policy in scheduler __ */
		}

		if (copied >= target) {
			/* Do not sleep, just process backlog. */
			release_sock(sk);
			lock_sock(sk);
		} else
			sk_wait_data(sk, &timeo);

#ifdef CONFIG_NET_DMA
		tp->ucopy.wakeup = 0;
#endif

		if (user_recv) {
			int chunk;

			/* __ Restore normal policy in scheduler __ */

			if ((chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
				len -= chunk;
				copied += chunk;
			}

			if (tp->rcv_nxt == tp->copied_seq &&
			    !skb_queue_empty(&tp->ucopy.prequeue)) {
do_prequeue:
				tcp_prequeue_process(sk);

				if ((chunk = len - tp->ucopy.len) != 0) {
					NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
					len -= chunk;
					copied += chunk;
				}
			}
		}
		if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
			if (net_ratelimit())
				printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
				       current->comm, current->pid);
			peek_seq = tp->copied_seq;
		}
		continue;

	found_ok_skb:
		/* Ok so how much can we use? */
		used = skb->len - offset;
		if (len < used)
			used = len;

		/* Do we have urgent data here? */
		if (tp->urg_data) {
			u32 urg_offset = tp->urg_seq - *seq;
			if (urg_offset < used) {
				if (!urg_offset) {
					if (!sock_flag(sk, SOCK_URGINLINE)) {
						++*seq;
						offset++;
						used--;
						if (!used)
							goto skip_copy;
					}
				} else
					used = urg_offset;
			}
		}

		if (!(flags & MSG_TRUNC)) {
#ifdef CONFIG_NET_DMA
			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
				tp->ucopy.dma_chan = get_softnet_dma();

			if (tp->ucopy.dma_chan) {
				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
					tp->ucopy.dma_chan, skb, offset,
					msg->msg_iov, used,
					tp->ucopy.pinned_list);

				if (tp->ucopy.dma_cookie < 0) {

					printk(KERN_ALERT "dma_cookie < 0\n");

					/* Exception. Bailout! */
					if (!copied)
						copied = -EFAULT;
					break;
				}
				if ((offset + used) == skb->len)
					copied_early = 1;

			} else
#endif
			{
				err = skb_copy_datagram_iovec(skb, offset,
							      msg->msg_iov, used);
				if (err) {
					/* Exception. Bailout! */
					if (!copied)
						copied = -EFAULT;
					break;
				}
			}
		}

		*seq += used;
		copied += used;
		len -= used;

		tcp_rcv_space_adjust(sk);

skip_copy:
		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
			tp->urg_data = 0;
			tcp_fast_path_check(sk);
		}
		if (used + offset < skb->len)
			continue;

		if (tcp_hdr(skb)->fin)
			goto found_fin_ok;
		if (!(flags & MSG_PEEK)) {
			sk_eat_skb(sk, skb, copied_early);
			copied_early = 0;
		}
		continue;

	found_fin_ok:
		/* Process the FIN. */
		++*seq;
		if (!(flags & MSG_PEEK)) {
			sk_eat_skb(sk, skb, copied_early);
			copied_early = 0;
		}
		break;
	} while (len > 0);

	if (user_recv) {
		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
			int chunk;

			tp->ucopy.len = copied > 0 ? len : 0;

			tcp_prequeue_process(sk);

			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
				len -= chunk;
				copied += chunk;
			}
		}

		tp->ucopy.task = NULL;
		tp->ucopy.len = 0;
	}

#ifdef CONFIG_NET_DMA
	if (tp->ucopy.dma_chan) {
		dma_cookie_t done, used;

		dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);

		while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
						 tp->ucopy.dma_cookie, &done,
						 &used) == DMA_IN_PROGRESS) {
			/* do partial cleanup of sk_async_wait_queue */
			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
			       (dma_async_is_complete(skb->dma_cookie, done,
						      used) == DMA_SUCCESS)) {
				__skb_dequeue(&sk->sk_async_wait_queue);
				kfree_skb(skb);
			}
		}

		/* Safe to free early-copied skbs now */
		__skb_queue_purge(&sk->sk_async_wait_queue);
		dma_chan_put(tp->ucopy.dma_chan);
		tp->ucopy.dma_chan = NULL;
	}
	if (tp->ucopy.pinned_list) {
		dma_unpin_iovec_pages(tp->ucopy.pinned_list);
		tp->ucopy.pinned_list = NULL;
	}
#endif

	/* According to UNIX98, msg_name/msg_namelen are ignored
	 * on connected socket. I was just happy when found this 8) --ANK
	 */

	/* Clean up data we have read: This will do ACK frames. */
	tcp_cleanup_rbuf(sk, copied);

	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

out:
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;

recv_urg:
	err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
	goto out;
}
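
/*
 * Editor's note (illustrative): under CONFIG_NET_DMA the user copy may be
 * offloaded to an I/OAT DMA engine. Skbs whose payload might still be in
 * flight on the engine are parked on sk_async_wait_queue and are only
 * freed, in the epilogue above, once dma_async_memcpy_complete() reports
 * their cookies as done.
 */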

/*
 *	State processing on a close. This implements the state shift for
 *	sending our FIN frame. Note that we only send a FIN for some
 *	states. A shutdown() may have already sent the FIN, or we may be
 *	closed.
 */

static const unsigned char new_state[16] = {
  /* current state:        new state:      action:	*/
  /* (Invalid)		*/ TCP_CLOSE,
  /* TCP_ESTABLISHED	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_SYN_SENT	*/ TCP_CLOSE,
  /* TCP_SYN_RECV	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_FIN_WAIT1	*/ TCP_FIN_WAIT1,
  /* TCP_FIN_WAIT2	*/ TCP_FIN_WAIT2,
  /* TCP_TIME_WAIT	*/ TCP_CLOSE,
  /* TCP_CLOSE		*/ TCP_CLOSE,
  /* TCP_CLOSE_WAIT	*/ TCP_LAST_ACK | TCP_ACTION_FIN,
  /* TCP_LAST_ACK	*/ TCP_LAST_ACK,
  /* TCP_LISTEN		*/ TCP_CLOSE,
  /* TCP_CLOSING	*/ TCP_CLOSING,
};

static int tcp_close_state(struct sock *sk)
{
	int next = (int)new_state[sk->sk_state];
	int ns = next & TCP_STATE_MASK;

	tcp_set_state(sk, ns);

	return next & TCP_ACTION_FIN;
}
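
/*
 * Editor's note (illustrative): each entry packs the successor state in
 * TCP_STATE_MASK plus an optional TCP_ACTION_FIN bit. Closing an
 * ESTABLISHED socket, say, looks up
 * new_state[TCP_ESTABLISHED] == TCP_FIN_WAIT1 | TCP_ACTION_FIN,
 * so tcp_close_state() moves the socket to FIN_WAIT1 and tells the
 * caller a FIN must be sent.
 */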

/*
 *	Shutdown the sending side of a connection. Much like close except
 *	that we don't receive shut down or set_sock_flag(sk, SOCK_DEAD).
 */

void tcp_shutdown(struct sock *sk, int how)
{
	/*	We need to grab some memory, and put together a FIN,
	 *	and then put it into the queue to be sent.
	 *		Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
	 */
	if (!(how & SEND_SHUTDOWN))
		return;

	/* If we've already sent a FIN, or it's a closed state, skip this. */
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
		/* Clear out any half completed packets.  FIN if needed. */
		if (tcp_close_state(sk))
			tcp_send_fin(sk);
	}
}

void tcp_close(struct sock *sk, long timeout)
{
	struct sk_buff *skb;
	int data_was_unread = 0;
	int state;

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == TCP_LISTEN) {
		tcp_set_state(sk, TCP_CLOSE);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	/*  We need to flush the recv. buffs.  We do this only on the
	 *  descriptor close, not protocol-sourced closes, because the
	 *  reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
			  tcp_hdr(skb)->fin;
		data_was_unread += len;
		__kfree_skb(skb);
	}

	sk_stream_mem_reclaim(sk);

	/* As outlined in RFC 2525, section 2.17, we send a RST here because
	 * data was lost. To witness the awful effects of the old behavior of
	 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
	 * GET in an FTP client, suspend the process, wait for the client to
	 * advertise a zero window, then kill -9 the FTP client, wheee...
	 * Note: timeout is always zero in such a case.
	 */
	if (data_was_unread) {
		/* Unread data was tossed, zap the connection. */
		NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
		tcp_set_state(sk, TCP_CLOSE);
		tcp_send_active_reset(sk, GFP_KERNEL);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
		NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
	} else if (tcp_close_state(sk)) {
		/* We FIN if the application ate all the data before
		 * zapping the connection.
		 */

		/* RED-PEN. Formally speaking, we have broken TCP state
		 * machine. State transitions:
		 *
		 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
		 * TCP_SYN_RECV	-> TCP_FIN_WAIT1 (forget it, it's impossible)
		 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
		 *
		 * are legal only when FIN has been sent (i.e. in window),
		 * rather than queued out of window. Purists blame.
		 *
		 * F.e. "RFC state" is ESTABLISHED,
		 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
		 *
		 * The visible declinations are that sometimes
		 * we enter time-wait state, when it is not required really
		 * (harmless), do not send active resets, when they are
		 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
		 * they look as CLOSING or LAST_ACK for Linux)
		 * Probably, I missed some more holelets.
		 * 						--ANK
		 */
		tcp_send_fin(sk);
	}

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);
	atomic_inc(sk->sk_prot->orphan_count);

	/* It is the last release_sock in its life. It will remove backlog. */
	release_sock(sk);


	/* Now socket is owned by kernel and we acquire BH lock
	   to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	BUG_TRAP(!sock_owned_by_user(sk));

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
		goto out;

	/*	This is a (useful) BSD violation of the RFC. There is a
	 *	problem with TCP as specified in that the other end could
	 *	keep a socket open forever with no application left this end.
	 *	We use a 3 minute timeout (about the same as BSD) then kill
	 *	our end. If they send after that then tough - BUT: long enough
	 *	that we won't make the old 4*rto = almost no time - whoops
	 *	reset mistake.
	 *
	 *	Nope, it was not mistake. It is really desired behaviour
	 *	f.e. on http servers, when such sockets are useless, but
	 *	consume significant resources. Let's do it with special
	 *	linger2	option.					--ANK
	 */

	if (sk->sk_state == TCP_FIN_WAIT2) {
		struct tcp_sock *tp = tcp_sk(sk);
		if (tp->linger2 < 0) {
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
		} else {
			const int tmo = tcp_fin_time(sk);

			if (tmo > TCP_TIMEWAIT_LEN) {
				inet_csk_reset_keepalive_timer(sk,
						tmo - TCP_TIMEWAIT_LEN);
			} else {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
	}
	if (sk->sk_state != TCP_CLOSE) {
		sk_stream_mem_reclaim(sk);
		if (tcp_too_many_orphans(sk,
				atomic_read(sk->sk_prot->orphan_count))) {
			if (net_ratelimit())
				printk(KERN_INFO "TCP: too many orphaned "
				       "sockets\n");
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
		}
	}

	if (sk->sk_state == TCP_CLOSE)
		inet_csk_destroy_sock(sk);
	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}
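
/*
 * Editor's note (illustrative): the zero-linger branch above is what an
 * abortive close from userspace relies on:
 *
 *	struct linger l = { .l_onoff = 1, .l_linger = 0 };
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
 *	close(fd);	// connection is reset; no FIN, no TIME_WAIT
 *
 * i.e. the RFC 793 ABORT call credited to Salvatore Sanfilippo in the
 * header of this file.
 */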

/* These states need RST on ABORT according to RFC793 */

static inline int tcp_need_reset(int state)
{
	return (1 << state) &
	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
}

int tcp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;
	int old_state = sk->sk_state;

	if (old_state != TCP_CLOSE)
		tcp_set_state(sk, TCP_CLOSE);

	/* ABORT function of RFC793 */
	if (old_state == TCP_LISTEN) {
		inet_csk_listen_stop(sk);
	} else if (tcp_need_reset(old_state) ||
		   (tp->snd_nxt != tp->write_seq &&
		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
		/* The last check adjusts for discrepancy of Linux wrt. RFC
		 * states
		 */
		tcp_send_active_reset(sk, gfp_any());
		sk->sk_err = ECONNRESET;
	} else if (old_state == TCP_SYN_SENT)
		sk->sk_err = ECONNRESET;

	tcp_clear_xmit_timers(sk);
	__skb_queue_purge(&sk->sk_receive_queue);
	tcp_write_queue_purge(sk);
	__skb_queue_purge(&tp->out_of_order_queue);
#ifdef CONFIG_NET_DMA
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	inet->dport = 0;

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);
	tp->srtt = 0;
	if ((tp->write_seq += tp->max_window + 2) == 0)
		tp->write_seq = 1;
	icsk->icsk_backoff = 0;
	tp->snd_cwnd = 2;
	icsk->icsk_probes_out = 0;
	tp->packets_out = 0;
	tp->snd_ssthresh = 0x7fffffff;
	tp->snd_cwnd_cnt = 0;
	tp->bytes_acked = 0;
	tcp_set_ca_state(sk, TCP_CA_Open);
	tcp_clear_retrans(tp);
	inet_csk_delack_init(sk);
	tcp_init_send_head(sk);
	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
	__sk_dst_reset(sk);

	BUG_TRAP(!inet->num || icsk->icsk_bind_hash);

	sk->sk_error_report(sk);
	return err;
}
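
/*
 * Editor's note (illustrative): tcp_disconnect() is what backs
 * connect(fd, {AF_UNSPEC}, ...) on a TCP socket: the old connection is
 * aborted if need be and the socket is scrubbed back to a reusable
 * TCP_CLOSE state: timers, queues, congestion state and negotiated
 * options all reset.
 */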
1777
1da177e4
LT
1778/*
1779 * Socket option code for TCP.
1780 */
3fdadf7d
DM
1781static int do_tcp_setsockopt(struct sock *sk, int level,
1782 int optname, char __user *optval, int optlen)
1da177e4
LT
1783{
1784 struct tcp_sock *tp = tcp_sk(sk);
463c84b9 1785 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
1786 int val;
1787 int err = 0;
1788
5f8ef48d
SH
1789 /* This is a string value all the others are int's */
1790 if (optname == TCP_CONGESTION) {
1791 char name[TCP_CA_NAME_MAX];
1792
1793 if (optlen < 1)
1794 return -EINVAL;
1795
1796 val = strncpy_from_user(name, optval,
1797 min(TCP_CA_NAME_MAX-1, optlen));
1798 if (val < 0)
1799 return -EFAULT;
1800 name[val] = 0;
1801
1802 lock_sock(sk);
6687e988 1803 err = tcp_set_congestion_control(sk, name);
5f8ef48d
SH
1804 release_sock(sk);
1805 return err;
1806 }
1807
1da177e4
LT
1808 if (optlen < sizeof(int))
1809 return -EINVAL;
1810
1811 if (get_user(val, (int __user *)optval))
1812 return -EFAULT;
1813
1814 lock_sock(sk);
1815
1816 switch (optname) {
1817 case TCP_MAXSEG:
1818 /* Values greater than the interface MTU won't take effect. However,
1819 * at the point when this call is made we typically don't yet
1820 * know which interface is going to be used. */
1821 if (val < 8 || val > MAX_TCP_WINDOW) {
1822 err = -EINVAL;
1823 break;
1824 }
1825 tp->rx_opt.user_mss = val;
1826 break;
1827
1828 case TCP_NODELAY:
1829 if (val) {
1830 /* TCP_NODELAY is weaker than TCP_CORK, so this
1831 * option set on a corked socket is remembered, but
1832 * it is not activated until the cork is cleared.
1833 *
1834 * However, when TCP_NODELAY is set we make
1835 * an explicit push, which overrides even TCP_CORK
1836 * for currently queued segments.
1837 */
1838 tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
9e412ba7 1839 tcp_push_pending_frames(sk);
1da177e4
LT
1840 } else {
1841 tp->nonagle &= ~TCP_NAGLE_OFF;
1842 }
1843 break;
1844
1845 case TCP_CORK:
1846 /* When set, this indicates that non-full frames should always be queued.
1847 * Later the user clears this option and we transmit
1848 * any pending partial frames in the queue. This is
1849 * meant to be used alongside sendfile() to get properly
1850 * filled frames when the user (for example) must write
1851 * out headers with a write() call first and then use
1852 * sendfile to send out the data parts.
1853 *
1854 * TCP_CORK can be set together with TCP_NODELAY and it is
1855 * stronger than TCP_NODELAY.
1856 */
1857 if (val) {
1858 tp->nonagle |= TCP_NAGLE_CORK;
1859 } else {
1860 tp->nonagle &= ~TCP_NAGLE_CORK;
1861 if (tp->nonagle&TCP_NAGLE_OFF)
1862 tp->nonagle |= TCP_NAGLE_PUSH;
9e412ba7 1863 tcp_push_pending_frames(sk);
1da177e4
LT
1864 }
1865 break;
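/* Illustrative userspace sketch (not part of this file; fd, file_fd,
 * header and the lengths are assumed set up elsewhere): the
 * header-then-sendfile pattern the comment above describes.
 *
 *	int on = 1, off = 0;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
 *	write(fd, header, header_len);		// queued, frame held open
 *	sendfile(fd, file_fd, NULL, file_len);	// body fills full frames
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
 *						// uncork: flush partial frame
 */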
1866
1867 case TCP_KEEPIDLE:
1868 if (val < 1 || val > MAX_TCP_KEEPIDLE)
1869 err = -EINVAL;
1870 else {
1871 tp->keepalive_time = val * HZ;
1872 if (sock_flag(sk, SOCK_KEEPOPEN) &&
1873 !((1 << sk->sk_state) &
1874 (TCPF_CLOSE | TCPF_LISTEN))) {
1875 __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
1876 if (tp->keepalive_time > elapsed)
1877 elapsed = tp->keepalive_time - elapsed;
1878 else
1879 elapsed = 0;
463c84b9 1880 inet_csk_reset_keepalive_timer(sk, elapsed);
1da177e4
LT
1881 }
1882 }
1883 break;
1884 case TCP_KEEPINTVL:
1885 if (val < 1 || val > MAX_TCP_KEEPINTVL)
1886 err = -EINVAL;
1887 else
1888 tp->keepalive_intvl = val * HZ;
1889 break;
1890 case TCP_KEEPCNT:
1891 if (val < 1 || val > MAX_TCP_KEEPCNT)
1892 err = -EINVAL;
1893 else
1894 tp->keepalive_probes = val;
1895 break;
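/* Illustrative userspace sketch (not part of this file): the three
 * options above only matter once SO_KEEPALIVE is enabled on the socket.
 *
 *	int on = 1, idle = 60, intvl = 10, cnt = 5;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
 */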
1896 case TCP_SYNCNT:
1897 if (val < 1 || val > MAX_TCP_SYNCNT)
1898 err = -EINVAL;
1899 else
463c84b9 1900 icsk->icsk_syn_retries = val;
1da177e4
LT
1901 break;
1902
1903 case TCP_LINGER2:
1904 if (val < 0)
1905 tp->linger2 = -1;
1906 else if (val > sysctl_tcp_fin_timeout / HZ)
1907 tp->linger2 = 0;
1908 else
1909 tp->linger2 = val * HZ;
1910 break;
1911
1912 case TCP_DEFER_ACCEPT:
295f7324 1913 icsk->icsk_accept_queue.rskq_defer_accept = 0;
1da177e4
LT
1914 if (val > 0) {
1915 /* Translate the value in seconds into a
1916 * number of retransmits. */
295f7324 1917 while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
1da177e4 1918 val > ((TCP_TIMEOUT_INIT / HZ) <<
295f7324
ACM
1919 icsk->icsk_accept_queue.rskq_defer_accept))
1920 icsk->icsk_accept_queue.rskq_defer_accept++;
1921 icsk->icsk_accept_queue.rskq_defer_accept++;
1da177e4
LT
1922 }
1923 break;
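/* Worked example (assuming TCP_TIMEOUT_INIT/HZ == 3, as defined in this
 * tree): for val = 10 seconds the loop runs while 10 > (3 << defer), so
 * defer advances 0 -> 1 -> 2 (10 > 3 and 10 > 6, but not 10 > 12), and
 * the final increment leaves rskq_defer_accept = 3 retransmits.
 */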
1924
1925 case TCP_WINDOW_CLAMP:
1926 if (!val) {
1927 if (sk->sk_state != TCP_CLOSE) {
1928 err = -EINVAL;
1929 break;
1930 }
1931 tp->window_clamp = 0;
1932 } else
1933 tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
1934 SOCK_MIN_RCVBUF / 2 : val;
1935 break;
1936
1937 case TCP_QUICKACK:
1938 if (!val) {
463c84b9 1939 icsk->icsk_ack.pingpong = 1;
1da177e4 1940 } else {
463c84b9 1941 icsk->icsk_ack.pingpong = 0;
1da177e4
LT
1942 if ((1 << sk->sk_state) &
1943 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
463c84b9
ACM
1944 inet_csk_ack_scheduled(sk)) {
1945 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
0e4b4992 1946 tcp_cleanup_rbuf(sk, 1);
1da177e4 1947 if (!(val & 1))
463c84b9 1948 icsk->icsk_ack.pingpong = 1;
1da177e4
LT
1949 }
1950 }
1951 break;
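/* Illustrative userspace sketch (not part of this file): as the
 * pingpong handling above shows, TCP_QUICKACK is not a permanent mode;
 * callers typically re-arm it around individual reads.
 *
 *	int one = 1;
 *
 *	recv(fd, buf, sizeof(buf), 0);
 *	setsockopt(fd, IPPROTO_TCP, TCP_QUICKACK, &one, sizeof(one));
 */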
1952
cfb6eeb4
YH
1953#ifdef CONFIG_TCP_MD5SIG
1954 case TCP_MD5SIG:
1955 /* Read the IP->Key mappings from userspace */
1956 err = tp->af_specific->md5_parse(sk, optval, optlen);
1957 break;
1958#endif
1959
1da177e4
LT
1960 default:
1961 err = -ENOPROTOOPT;
1962 break;
3ff50b79
SH
1963 }
1964
1da177e4
LT
1965 release_sock(sk);
1966 return err;
1967}
1968
3fdadf7d
DM
1969int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
1970 int optlen)
1971{
1972 struct inet_connection_sock *icsk = inet_csk(sk);
1973
1974 if (level != SOL_TCP)
1975 return icsk->icsk_af_ops->setsockopt(sk, level, optname,
1976 optval, optlen);
1977 return do_tcp_setsockopt(sk, level, optname, optval, optlen);
1978}
1979
1980#ifdef CONFIG_COMPAT
543d9cfe
ACM
1981int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
1982 char __user *optval, int optlen)
3fdadf7d 1983{
dec73ff0
ACM
1984 if (level != SOL_TCP)
1985 return inet_csk_compat_setsockopt(sk, level, optname,
1986 optval, optlen);
3fdadf7d
DM
1987 return do_tcp_setsockopt(sk, level, optname, optval, optlen);
1988}
543d9cfe
ACM
1989
1990EXPORT_SYMBOL(compat_tcp_setsockopt);
3fdadf7d
DM
1991#endif
1992
1da177e4
LT
1993/* Return information about state of tcp endpoint in API format. */
1994void tcp_get_info(struct sock *sk, struct tcp_info *info)
1995{
1996 struct tcp_sock *tp = tcp_sk(sk);
463c84b9 1997 const struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
1998 u32 now = tcp_time_stamp;
1999
2000 memset(info, 0, sizeof(*info));
2001
2002 info->tcpi_state = sk->sk_state;
6687e988 2003 info->tcpi_ca_state = icsk->icsk_ca_state;
463c84b9 2004 info->tcpi_retransmits = icsk->icsk_retransmits;
6687e988 2005 info->tcpi_probes = icsk->icsk_probes_out;
463c84b9 2006 info->tcpi_backoff = icsk->icsk_backoff;
1da177e4
LT
2007
2008 if (tp->rx_opt.tstamp_ok)
2009 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
2010 if (tp->rx_opt.sack_ok)
2011 info->tcpi_options |= TCPI_OPT_SACK;
2012 if (tp->rx_opt.wscale_ok) {
2013 info->tcpi_options |= TCPI_OPT_WSCALE;
2014 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
2015 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
e905a9ed 2016 }
1da177e4
LT
2017
2018 if (tp->ecn_flags&TCP_ECN_OK)
2019 info->tcpi_options |= TCPI_OPT_ECN;
2020
463c84b9
ACM
2021 info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
2022 info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
c1b4a7e6 2023 info->tcpi_snd_mss = tp->mss_cache;
463c84b9 2024 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
1da177e4
LT
2025
2026 info->tcpi_unacked = tp->packets_out;
2027 info->tcpi_sacked = tp->sacked_out;
2028 info->tcpi_lost = tp->lost_out;
2029 info->tcpi_retrans = tp->retrans_out;
2030 info->tcpi_fackets = tp->fackets_out;
2031
2032 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
463c84b9 2033 info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
1da177e4
LT
2034 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
2035
d83d8461 2036 info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
1da177e4
LT
2037 info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
2038 info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
2039 info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
2040 info->tcpi_snd_ssthresh = tp->snd_ssthresh;
2041 info->tcpi_snd_cwnd = tp->snd_cwnd;
2042 info->tcpi_advmss = tp->advmss;
2043 info->tcpi_reordering = tp->reordering;
2044
2045 info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
2046 info->tcpi_rcv_space = tp->rcvq_space.space;
2047
2048 info->tcpi_total_retrans = tp->total_retrans;
2049}
2050
2051EXPORT_SYMBOL_GPL(tcp_get_info);
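/* Illustrative userspace sketch (not part of this file): the structure
 * filled in above is what getsockopt(TCP_INFO) hands back.
 *
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *	#include <sys/socket.h>
 *
 *	static int get_srtt_usec(int fd, unsigned int *srtt)
 *	{
 *		struct tcp_info info;
 *		socklen_t len = sizeof(info);
 *
 *		if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len))
 *			return -1;
 *		*srtt = info.tcpi_rtt;	// smoothed RTT in microseconds
 *		return 0;
 *	}
 */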
2052
3fdadf7d
DM
2053static int do_tcp_getsockopt(struct sock *sk, int level,
2054 int optname, char __user *optval, int __user *optlen)
1da177e4 2055{
295f7324 2056 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
2057 struct tcp_sock *tp = tcp_sk(sk);
2058 int val, len;
2059
1da177e4
LT
2060 if (get_user(len, optlen))
2061 return -EFAULT;
2062
2063 len = min_t(unsigned int, len, sizeof(int));
2064
2065 if (len < 0)
2066 return -EINVAL;
2067
2068 switch (optname) {
2069 case TCP_MAXSEG:
c1b4a7e6 2070 val = tp->mss_cache;
1da177e4
LT
2071 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
2072 val = tp->rx_opt.user_mss;
2073 break;
2074 case TCP_NODELAY:
2075 val = !!(tp->nonagle&TCP_NAGLE_OFF);
2076 break;
2077 case TCP_CORK:
2078 val = !!(tp->nonagle&TCP_NAGLE_CORK);
2079 break;
2080 case TCP_KEEPIDLE:
2081 val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
2082 break;
2083 case TCP_KEEPINTVL:
2084 val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
2085 break;
2086 case TCP_KEEPCNT:
2087 val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
2088 break;
2089 case TCP_SYNCNT:
295f7324 2090 val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
1da177e4
LT
2091 break;
2092 case TCP_LINGER2:
2093 val = tp->linger2;
2094 if (val >= 0)
2095 val = (val ? : sysctl_tcp_fin_timeout) / HZ;
2096 break;
2097 case TCP_DEFER_ACCEPT:
295f7324
ACM
2098 val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
2099 ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
1da177e4
LT
2100 break;
2101 case TCP_WINDOW_CLAMP:
2102 val = tp->window_clamp;
2103 break;
2104 case TCP_INFO: {
2105 struct tcp_info info;
2106
2107 if (get_user(len, optlen))
2108 return -EFAULT;
2109
2110 tcp_get_info(sk, &info);
2111
2112 len = min_t(unsigned int, len, sizeof(info));
2113 if (put_user(len, optlen))
2114 return -EFAULT;
2115 if (copy_to_user(optval, &info, len))
2116 return -EFAULT;
2117 return 0;
2118 }
2119 case TCP_QUICKACK:
295f7324 2120 val = !icsk->icsk_ack.pingpong;
1da177e4 2121 break;
5f8ef48d
SH
2122
2123 case TCP_CONGESTION:
2124 if (get_user(len, optlen))
2125 return -EFAULT;
2126 len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
2127 if (put_user(len, optlen))
2128 return -EFAULT;
6687e988 2129 if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
5f8ef48d
SH
2130 return -EFAULT;
2131 return 0;
1da177e4
LT
2132 default:
2133 return -ENOPROTOOPT;
3ff50b79 2134 }
1da177e4
LT
2135
2136 if (put_user(len, optlen))
2137 return -EFAULT;
2138 if (copy_to_user(optval, &val, len))
2139 return -EFAULT;
2140 return 0;
2141}
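/* Illustrative userspace sketch (not part of this file): reading back
 * the string-valued TCP_CONGESTION option handled above.
 *
 *	char name[16];			// TCP_CA_NAME_MAX
 *	socklen_t len = sizeof(name);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, &len) == 0)
 *		printf("congestion control: %.*s\n", (int)len, name);
 */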
2142
3fdadf7d
DM
2143int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2144 int __user *optlen)
2145{
2146 struct inet_connection_sock *icsk = inet_csk(sk);
2147
2148 if (level != SOL_TCP)
2149 return icsk->icsk_af_ops->getsockopt(sk, level, optname,
2150 optval, optlen);
2151 return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2152}
2153
2154#ifdef CONFIG_COMPAT
543d9cfe
ACM
2155int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
2156 char __user *optval, int __user *optlen)
3fdadf7d 2157{
dec73ff0
ACM
2158 if (level != SOL_TCP)
2159 return inet_csk_compat_getsockopt(sk, level, optname,
2160 optval, optlen);
3fdadf7d
DM
2161 return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2162}
543d9cfe
ACM
2163
2164EXPORT_SYMBOL(compat_tcp_getsockopt);
3fdadf7d 2165#endif
1da177e4 2166
576a30eb 2167struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
f4c50d99
HX
2168{
2169 struct sk_buff *segs = ERR_PTR(-EINVAL);
2170 struct tcphdr *th;
2171 unsigned thlen;
2172 unsigned int seq;
d3bc23e7 2173 __be32 delta;
f4c50d99
HX
2174 unsigned int oldlen;
2175 unsigned int len;
2176
2177 if (!pskb_may_pull(skb, sizeof(*th)))
2178 goto out;
2179
aa8223c7 2180 th = tcp_hdr(skb);
f4c50d99
HX
2181 thlen = th->doff * 4;
2182 if (thlen < sizeof(*th))
2183 goto out;
2184
2185 if (!pskb_may_pull(skb, thlen))
2186 goto out;
2187
0718bcc0 2188 oldlen = (u16)~skb->len;
f4c50d99
HX
2189 __skb_pull(skb, thlen);
2190
3820c3f3
HX
2191 if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
2192 /* Packet is from an untrusted source; reset gso_segs. */
bbcf467d
HX
2193 int type = skb_shinfo(skb)->gso_type;
2194 int mss;
2195
2196 if (unlikely(type &
2197 ~(SKB_GSO_TCPV4 |
2198 SKB_GSO_DODGY |
2199 SKB_GSO_TCP_ECN |
2200 SKB_GSO_TCPV6 |
2201 0) ||
2202 !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
2203 goto out;
3820c3f3 2204
bbcf467d 2205 mss = skb_shinfo(skb)->gso_size;
3820c3f3
HX
2206 skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss;
2207
2208 segs = NULL;
2209 goto out;
2210 }
2211
576a30eb 2212 segs = skb_segment(skb, features);
f4c50d99
HX
2213 if (IS_ERR(segs))
2214 goto out;
2215
2216 len = skb_shinfo(skb)->gso_size;
0718bcc0 2217 delta = htonl(oldlen + (thlen + len));
f4c50d99
HX
2218
2219 skb = segs;
aa8223c7 2220 th = tcp_hdr(skb);
f4c50d99
HX
2221 seq = ntohl(th->seq);
2222
2223 do {
2224 th->fin = th->psh = 0;
2225
d3bc23e7
AV
2226 th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
2227 (__force u32)delta));
84fa7933 2228 if (skb->ip_summed != CHECKSUM_PARTIAL)
9c70220b
ACM
2229 th->check =
2230 csum_fold(csum_partial(skb_transport_header(skb),
2231 thlen, skb->csum));
f4c50d99
HX
2232
2233 seq += len;
2234 skb = skb->next;
aa8223c7 2235 th = tcp_hdr(skb);
f4c50d99
HX
2236
2237 th->seq = htonl(seq);
2238 th->cwr = 0;
2239 } while (skb->next);
2240
27a884dc 2241 delta = htonl(oldlen + (skb->tail - skb->transport_header) +
9c70220b 2242 skb->data_len);
d3bc23e7
AV
2243 th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
2244 (__force u32)delta));
84fa7933 2245 if (skb->ip_summed != CHECKSUM_PARTIAL)
9c70220b
ACM
2246 th->check = csum_fold(csum_partial(skb_transport_header(skb),
2247 thlen, skb->csum));
f4c50d99
HX
2248
2249out:
2250 return segs;
2251}
adcfc7d0 2252EXPORT_SYMBOL(tcp_tso_segment);
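/* Illustrative sketch (not part of this file): the delta arithmetic
 * above is the RFC 1624 incremental update HC' = ~(~HC + ~m + m'), with
 * m the old TCP length (oldlen holds its ones' complement) and m' the
 * per-segment length thlen + len. For a single 16-bit field in plain C:
 *
 *	static unsigned short csum_update16(unsigned short check,
 *					    unsigned short old,
 *					    unsigned short new)
 *	{
 *		unsigned long sum;
 *
 *		sum = (unsigned short)~check + (unsigned short)~old + new;
 *		sum = (sum & 0xffff) + (sum >> 16);	// fold carry
 *		sum = (sum & 0xffff) + (sum >> 16);	// fold again
 *		return (unsigned short)~sum;
 *	}
 */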
f4c50d99 2253
cfb6eeb4
YH
2254#ifdef CONFIG_TCP_MD5SIG
2255static unsigned long tcp_md5sig_users;
2256static struct tcp_md5sig_pool **tcp_md5sig_pool;
2257static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
2258
2259static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
2260{
2261 int cpu;
2262 for_each_possible_cpu(cpu) {
2263 struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
2264 if (p) {
2265 if (p->md5_desc.tfm)
2266 crypto_free_hash(p->md5_desc.tfm);
2267 kfree(p);
2268 p = NULL;
2269 }
2270 }
2271 free_percpu(pool);
2272}
2273
2274void tcp_free_md5sig_pool(void)
2275{
2276 struct tcp_md5sig_pool **pool = NULL;
2277
2c4f6219 2278 spin_lock_bh(&tcp_md5sig_pool_lock);
cfb6eeb4
YH
2279 if (--tcp_md5sig_users == 0) {
2280 pool = tcp_md5sig_pool;
2281 tcp_md5sig_pool = NULL;
2282 }
2c4f6219 2283 spin_unlock_bh(&tcp_md5sig_pool_lock);
cfb6eeb4
YH
2284 if (pool)
2285 __tcp_free_md5sig_pool(pool);
2286}
2287
2288EXPORT_SYMBOL(tcp_free_md5sig_pool);
2289
f5b99bcd 2290static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
cfb6eeb4
YH
2291{
2292 int cpu;
2293 struct tcp_md5sig_pool **pool;
2294
2295 pool = alloc_percpu(struct tcp_md5sig_pool *);
2296 if (!pool)
2297 return NULL;
2298
2299 for_each_possible_cpu(cpu) {
2300 struct tcp_md5sig_pool *p;
2301 struct crypto_hash *hash;
2302
2303 p = kzalloc(sizeof(*p), GFP_KERNEL);
2304 if (!p)
2305 goto out_free;
2306 *per_cpu_ptr(pool, cpu) = p;
2307
2308 hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
2309 if (!hash || IS_ERR(hash))
2310 goto out_free;
2311
2312 p->md5_desc.tfm = hash;
2313 }
2314 return pool;
2315out_free:
2316 __tcp_free_md5sig_pool(pool);
2317 return NULL;
2318}
2319
2320struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
2321{
2322 struct tcp_md5sig_pool **pool;
2323 int alloc = 0;
2324
2325retry:
2c4f6219 2326 spin_lock_bh(&tcp_md5sig_pool_lock);
cfb6eeb4
YH
2327 pool = tcp_md5sig_pool;
2328 if (tcp_md5sig_users++ == 0) {
2329 alloc = 1;
2c4f6219 2330 spin_unlock_bh(&tcp_md5sig_pool_lock);
cfb6eeb4
YH
2331 } else if (!pool) {
2332 tcp_md5sig_users--;
2c4f6219 2333 spin_unlock_bh(&tcp_md5sig_pool_lock);
cfb6eeb4
YH
2334 cpu_relax();
2335 goto retry;
2336 } else
2c4f6219 2337 spin_unlock_bh(&tcp_md5sig_pool_lock);
cfb6eeb4
YH
2338
2339 if (alloc) {
2340 /* We cannot hold the spinlock here because the allocation may sleep. */
2341 struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
2c4f6219 2342 spin_lock_bh(&tcp_md5sig_pool_lock);
cfb6eeb4
YH
2343 if (!p) {
2344 tcp_md5sig_users--;
2c4f6219 2345 spin_unlock_bh(&tcp_md5sig_pool_lock);
cfb6eeb4
YH
2346 return NULL;
2347 }
2348 pool = tcp_md5sig_pool;
2349 if (pool) {
2350 /* oops, it has already been assigned. */
2c4f6219 2351 spin_unlock_bh(&tcp_md5sig_pool_lock);
cfb6eeb4
YH
2352 __tcp_free_md5sig_pool(p);
2353 } else {
2354 tcp_md5sig_pool = pool = p;
2c4f6219 2355 spin_unlock_bh(&tcp_md5sig_pool_lock);
cfb6eeb4
YH
2356 }
2357 }
2358 return pool;
2359}
2360
2361EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
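/* Usage sketch (hedged; inferred only from the functions in this file):
 * the pool is reference counted. The first MD5 key added calls
 * tcp_alloc_md5sig_pool(); per-packet processing then borrows the
 * per-CPU entry and returns it:
 *
 *	struct tcp_md5sig_pool *p;
 *
 *	p = __tcp_get_md5sig_pool(smp_processor_id());
 *	if (p) {
 *		// ... hash the segment via p->md5_desc ...
 *		__tcp_put_md5sig_pool();
 *	}
 *
 * tcp_free_md5sig_pool() drops the last reference and frees the
 * per-CPU crypto contexts.
 */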
2362
2363struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
2364{
2365 struct tcp_md5sig_pool **p;
2c4f6219 2366 spin_lock_bh(&tcp_md5sig_pool_lock);
cfb6eeb4
YH
2367 p = tcp_md5sig_pool;
2368 if (p)
2369 tcp_md5sig_users++;
2c4f6219 2370 spin_unlock_bh(&tcp_md5sig_pool_lock);
cfb6eeb4
YH
2371 return (p ? *per_cpu_ptr(p, cpu) : NULL);
2372}
2373
2374EXPORT_SYMBOL(__tcp_get_md5sig_pool);
2375
6931ba7c
DM
2376void __tcp_put_md5sig_pool(void)
2377{
2378 tcp_free_md5sig_pool();
cfb6eeb4
YH
2379}
2380
2381EXPORT_SYMBOL(__tcp_put_md5sig_pool);
2382#endif
2383
4ac02bab
AK
2384void tcp_done(struct sock *sk)
2385{
2386 if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
2387 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
2388
2389 tcp_set_state(sk, TCP_CLOSE);
2390 tcp_clear_xmit_timers(sk);
2391
2392 sk->sk_shutdown = SHUTDOWN_MASK;
2393
2394 if (!sock_flag(sk, SOCK_DEAD))
2395 sk->sk_state_change(sk);
2396 else
2397 inet_csk_destroy_sock(sk);
2398}
2399EXPORT_SYMBOL_GPL(tcp_done);
2400
1da177e4 2401extern void __skb_cb_too_small_for_tcp(int, int);
5f8ef48d 2402extern struct tcp_congestion_ops tcp_reno;
1da177e4
LT
2403
2404static __initdata unsigned long thash_entries;
2405static int __init set_thash_entries(char *str)
2406{
2407 if (!str)
2408 return 0;
2409 thash_entries = simple_strtoul(str, &str, 0);
2410 return 1;
2411}
2412__setup("thash_entries=", set_thash_entries);
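/* Usage note (illustrative): the established-hash size can be pinned
 * from the kernel command line, e.g. booting with
 *
 *	thash_entries=131072
 *
 * Otherwise tcp_init() below sizes the table from available memory.
 */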
2413
2414void __init tcp_init(void)
2415{
2416 struct sk_buff *skb = NULL;
7b4f4b5e
JH
2417 unsigned long limit;
2418 int order, i, max_share;
1da177e4
LT
2419
2420 if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
2421 __skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
2422 sizeof(skb->cb));
2423
6e04e021
ACM
2424 tcp_hashinfo.bind_bucket_cachep =
2425 kmem_cache_create("tcp_bind_bucket",
2426 sizeof(struct inet_bind_bucket), 0,
e5d679f3 2427 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
1da177e4 2428
1da177e4
LT
2429 /* Size and allocate the main established and bind bucket
2430 * hash tables.
2431 *
2432 * The methodology is similar to that of the buffer cache.
2433 */
6e04e021 2434 tcp_hashinfo.ehash =
1da177e4 2435 alloc_large_system_hash("TCP established",
0f7ff927 2436 sizeof(struct inet_ehash_bucket),
1da177e4
LT
2437 thash_entries,
2438 (num_physpages >= 128 * 1024) ?
18955cfc 2439 13 : 15,
9e950efa 2440 0,
6e04e021 2441 &tcp_hashinfo.ehash_size,
1da177e4
LT
2442 NULL,
2443 0);
dbca9b27
ED
2444 tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
2445 for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
6e04e021
ACM
2446 rwlock_init(&tcp_hashinfo.ehash[i].lock);
2447 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
dbca9b27 2448 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].twchain);
1da177e4
LT
2449 }
2450
6e04e021 2451 tcp_hashinfo.bhash =
1da177e4 2452 alloc_large_system_hash("TCP bind",
0f7ff927 2453 sizeof(struct inet_bind_hashbucket),
6e04e021 2454 tcp_hashinfo.ehash_size,
1da177e4 2455 (num_physpages >= 128 * 1024) ?
18955cfc 2456 13 : 15,
9e950efa 2457 0,
6e04e021 2458 &tcp_hashinfo.bhash_size,
1da177e4
LT
2459 NULL,
2460 64 * 1024);
6e04e021
ACM
2461 tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
2462 for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
2463 spin_lock_init(&tcp_hashinfo.bhash[i].lock);
2464 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
1da177e4
LT
2465 }
2466
2467 /* Try to be a bit smarter and adjust defaults depending
2468 * on available memory.
2469 */
2470 for (order = 0; ((1 << order) << PAGE_SHIFT) <
6e04e021 2471 (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
1da177e4
LT
2472 order++)
2473 ;
e7626486 2474 if (order >= 4) {
295ff7ed 2475 tcp_death_row.sysctl_max_tw_buckets = 180000;
1da177e4
LT
2476 sysctl_tcp_max_orphans = 4096 << (order - 4);
2477 sysctl_max_syn_backlog = 1024;
2478 } else if (order < 3) {
295ff7ed 2479 tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
1da177e4
LT
2480 sysctl_tcp_max_orphans >>= (3 - order);
2481 sysctl_max_syn_backlog = 128;
2482 }
1da177e4 2483
53cdcc04
JH
2484 /* Set the pressure threshold to be a fraction of global memory that
2485 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
2486 * memory, with a floor of 128 pages.
2487 */
2488 limit = min(nr_all_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
2489 limit = (limit * (nr_all_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
2490 limit = max(limit, 128UL);
2491 sysctl_tcp_mem[0] = limit / 4 * 3;
2492 sysctl_tcp_mem[1] = limit;
52bf376c 2493 sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
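/* Worked example (assuming 4 KiB pages and 1 GiB of memory, i.e.
 * nr_all_pages == 262144): limit = min(262144, 65536) >> 8 = 256, then
 * limit = (256 * 1024) >> 1 = 131072 pages, so tcp_mem becomes
 * { 98304, 131072, 196608 } pages.
 */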
1da177e4 2494
53cdcc04 2495 /* Set per-socket limits to no more than 1/128 the pressure threshold */
7b4f4b5e
JH
2496 limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
2497 max_share = min(4UL*1024*1024, limit);
2498
2499 sysctl_tcp_wmem[0] = SK_STREAM_MEM_QUANTUM;
2500 sysctl_tcp_wmem[1] = 16*1024;
2501 sysctl_tcp_wmem[2] = max(64*1024, max_share);
2502
2503 sysctl_tcp_rmem[0] = SK_STREAM_MEM_QUANTUM;
2504 sysctl_tcp_rmem[1] = 87380;
2505 sysctl_tcp_rmem[2] = max(87380, max_share);
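/* Continuing the worked example above: limit = 131072 << (12 - 7) =
 * 4194304 bytes, so max_share = min(4 MiB, 4 MiB) = 4 MiB and both
 * tcp_wmem[2] and tcp_rmem[2] end up at 4 MiB.
 */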
1da177e4
LT
2506
2507 printk(KERN_INFO "TCP: Hash tables configured "
2508 "(established %d bind %d)\n",
dbca9b27 2509 tcp_hashinfo.ehash_size, tcp_hashinfo.bhash_size);
317a76f9
SH
2510
2511 tcp_register_congestion_control(&tcp_reno);
1da177e4
LT
2512}
2513
1da177e4 2514EXPORT_SYMBOL(tcp_close);
1da177e4
LT
2515EXPORT_SYMBOL(tcp_disconnect);
2516EXPORT_SYMBOL(tcp_getsockopt);
2517EXPORT_SYMBOL(tcp_ioctl);
1da177e4
LT
2518EXPORT_SYMBOL(tcp_poll);
2519EXPORT_SYMBOL(tcp_read_sock);
2520EXPORT_SYMBOL(tcp_recvmsg);
2521EXPORT_SYMBOL(tcp_sendmsg);
2522EXPORT_SYMBOL(tcp_sendpage);
2523EXPORT_SYMBOL(tcp_setsockopt);
2524EXPORT_SYMBOL(tcp_shutdown);
2525EXPORT_SYMBOL(tcp_statistics);