/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken;
 *					pointers passed were wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore-me flag on an rst
 *					receive, otherwise odd bits of prattle
 *					still escape
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *		Michael O'Reilly:	ack < copied bug fix.
 *		Johannes Stille	:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non-shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *		Charles Hedrick	:	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick	:	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					completely
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon	:	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties removed from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan	:	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's; for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING.  Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown.  There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */
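
/*
 * Illustrative sketch (not part of the kernel build): how the states above
 * map onto an ordinary userspace client.  The system calls are standard
 * POSIX; the state annotations are what this file's state machine would
 * report (e.g. via ss(8) or /proc/net/tcp) at each step.
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	// TCP_CLOSE: fresh socket, no connection yet
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	// SYN sent -> TCP_SYN_SENT; on SYN+ACK then ACK -> TCP_ESTABLISHED
 *	write(fd, "hello", 5);
 *	shutdown(fd, SHUT_WR);
 *	// our FIN sent -> TCP_FIN_WAIT1, then TCP_FIN_WAIT2 once ACKed
 *	close(fd);
 *	// after the peer's FIN -> TCP_TIME_WAIT, finally TCP_CLOSE
 */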

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/inet_diag.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/time.h>
#include <linux/slab.h>

#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/sock.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <net/busy_poll.h>

int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;

int sysctl_tcp_min_tso_segs __read_mostly = 2;

int sysctl_tcp_autocorking __read_mostly = 1;

struct percpu_counter tcp_orphan_count;
EXPORT_SYMBOL_GPL(tcp_orphan_count);

long sysctl_tcp_mem[3] __read_mostly;
int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;

EXPORT_SYMBOL(sysctl_tcp_mem);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);

atomic_long_t tcp_memory_allocated;	/* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);

/*
 * Current number of TCP sockets.
 */
struct percpu_counter tcp_sockets_allocated;
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * TCP splice context
 */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All the __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
int tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL(tcp_memory_pressure);

void tcp_enter_memory_pressure(struct sock *sk)
{
	if (!tcp_memory_pressure) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
		tcp_memory_pressure = 1;
	}
}
EXPORT_SYMBOL(tcp_enter_memory_pressure);

/* Convert seconds to retransmits based on initial and max timeout */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
	u8 res = 0;

	if (seconds > 0) {
		int period = timeout;

		res = 1;
		while (seconds > period && res < 255) {
			res++;
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return res;
}

/* Convert retransmits to seconds based on initial and max timeout */
static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
	int period = 0;

	if (retrans > 0) {
		period = timeout;
		while (--retrans) {
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return period;
}
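
/*
 * Worked example for the two converters above (a sketch; the units are
 * whatever the caller passes, typically jiffies): with timeout = 1 and
 * rto_max = 16, secs_to_retrans(7, 1, 16) accumulates periods 1, 3 (1+2)
 * and 7 (3+4), then stops and returns 3, since three retransmits with a
 * doubling timeout cover 7 time units.  retrans_to_secs(3, 1, 16) walks
 * the same doubling sequence (1 + 2 + 4) and returns 7, so the two
 * functions invert each other on exact period boundaries.
 */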

/* Address-family independent initialization for a tcp_sock.
 *
 * NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
void tcp_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	__skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);
	INIT_LIST_HEAD(&tp->tsq_node);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = TCP_INIT_CWND;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = sysctl_tcp_reordering;
	tcp_enable_early_retrans(tp);
	tcp_assign_congestion_control(sk);

	tp->tsoffset = 0;

	sk->sk_state = TCP_CLOSE;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_sync_mss = tcp_sync_mss;

	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	local_bh_disable();
	sock_update_memcg(sk);
	sk_sockets_allocated_inc(sk);
	local_bh_enable();
}
EXPORT_SYMBOL(tcp_init_sock);

static void tcp_tx_timestamp(struct sock *sk, struct sk_buff *skb)
{
	if (sk->sk_tsflags) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);

		sock_tx_timestamp(sk, &shinfo->tx_flags);
		if (shinfo->tx_flags & SKBTX_ANY_TSTAMP)
			shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
	}
}

/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;
	const struct tcp_sock *tp = tcp_sk(sk);

	sock_rps_record_flow(sk);

	sock_poll_wait(file, sk_sleep(sk), wait);
	if (sk->sk_state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by the poll logic; correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;

	/*
	 * POLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that POLLHUP is incompatible
	 * with the POLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1. POLLHUP is an _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making it impossible to
	 * poll() for write() in state CLOSE_WAIT. One solution is
	 * evident --- to set POLLHUP if and only if shutdown has been
	 * made in both directions. Actually, it is interesting to look
	 * at how Solaris and DUX solve this dilemma. I would prefer,
	 * if POLLHUP were maskable, to set it on SND_SHUTDOWN. BTW the
	 * examples given in Stevens' books assume exactly this
	 * behaviour, which explains why POLLHUP is incompatible with
	 * POLLOUT.	--ANK
	 *
	 * NOTE. The check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on a fresh not-connected or disconnected socket. --ANK
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected or passive Fast Open socket? */
	if (sk->sk_state != TCP_SYN_SENT &&
	    (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk)) {
		int target = sock_rcvlowat(sk, 0, INT_MAX);

		if (tp->urg_seq == tp->copied_seq &&
		    !sock_flag(sk, SOCK_URGINLINE) &&
		    tp->urg_data)
			target++;

		/* Potential race condition. If the read of tp below escapes
		 * above sk->sk_state, we can be illegally awakened
		 * in SYN_* states. */
		if (tp->rcv_nxt - tp->copied_seq >= target)
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_is_writeable(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * the wspace test but before the flags are
				 * set, the IO signal will be lost. Memory
				 * barrier pairs with the input side.
				 */
				smp_mb__after_atomic();
				if (sk_stream_is_writeable(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		} else
			mask |= POLLOUT | POLLWRNORM;

		if (tp->urg_data & TCP_URG_VALID)
			mask |= POLLPRI;
	}
	/* This barrier is coupled with smp_wmb() in tcp_reset() */
	smp_rmb();
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;

	return mask;
}
EXPORT_SYMBOL(tcp_poll);
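
/*
 * Userspace sketch (standard poll(2) API) of consuming the mask computed
 * above: POLLIN fires once sock_rcvlowat() bytes are readable or the peer
 * sent a FIN (RCV_SHUTDOWN), POLLOUT once sk_stream_is_writeable(), and
 * POLLERR is reported even when not requested in .events.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
 *
 *	if (poll(&pfd, 1, 5000) > 0) {
 *		if (pfd.revents & POLLIN)
 *			read(fd, buf, sizeof(buf));
 *		if (pfd.revents & (POLLERR | POLLHUP))
 *			handle_disconnect(fd);	// hypothetical helper
 *	}
 */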

int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;
	bool slow;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		slow = lock_sock_fast(sk);
		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else if (sock_flag(sk, SOCK_URGINLINE) ||
			 !tp->urg_data ||
			 before(tp->urg_seq, tp->copied_seq) ||
			 !before(tp->urg_seq, tp->rcv_nxt)) {

			answ = tp->rcv_nxt - tp->copied_seq;

			/* Subtract 1, if FIN was received */
			if (answ && sock_flag(sk, SOCK_DONE))
				answ--;
		} else
			answ = tp->urg_seq - tp->copied_seq;
		unlock_sock_fast(sk, slow);
		break;
	case SIOCATMARK:
		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_una;
		break;
	case SIOCOUTQNSD:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_nxt;
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return put_user(answ, (int __user *)arg);
}
EXPORT_SYMBOL(tcp_ioctl);
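
/*
 * Userspace sketch for the ioctls handled above (standard <sys/ioctl.h>
 * and <linux/sockios.h> interfaces): SIOCINQ reports unread bytes in the
 * receive queue, SIOCOUTQ unacked bytes in the send queue, and
 * SIOCOUTQNSD the portion not yet sent at all.
 *
 *	int inq, outq;
 *
 *	ioctl(fd, SIOCINQ, &inq);	// a.k.a. FIONREAD
 *	ioctl(fd, SIOCOUTQ, &outq);	// a.k.a. TIOCOUTQ
 *	printf("%d to read, %d in flight\n", inq, outq);
 */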

static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
	tp->pushed_seq = tp->write_seq;
}

static inline bool forced_push(const struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

static void skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	skb->csum = 0;
	tcb->seq = tcb->end_seq = tp->write_seq;
	tcb->tcp_flags = TCPHDR_ACK;
	tcb->sacked = 0;
	__skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
{
	if (flags & MSG_OOB)
		tp->snd_up = tp->write_seq;
}

/* If a not-yet-filled skb is pushed, do not send it if
 * we have data packets in Qdisc or NIC queues:
 * TX completion will happen shortly, which gives a chance
 * to coalesce future sendmsg() payload into this skb, without
 * need for a timer, and with no latency trade off.
 * As packets containing data payload have a bigger truesize
 * than pure ACK (dataless) packets, the last check prevents
 * autocorking if we only have an ACK in the Qdisc/NIC queues,
 * or if TX completion was delayed after we processed the ACK packet.
 */
static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
				int size_goal)
{
	return skb->len < size_goal &&
	       sysctl_tcp_autocorking &&
	       skb != tcp_write_queue_head(sk) &&
	       atomic_read(&sk->sk_wmem_alloc) > skb->truesize;
}

static void tcp_push(struct sock *sk, int flags, int mss_now,
		     int nonagle, int size_goal)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	if (!tcp_send_head(sk))
		return;

	skb = tcp_write_queue_tail(sk);
	if (!(flags & MSG_MORE) || forced_push(tp))
		tcp_mark_push(tp, skb);

	tcp_mark_urg(tp, flags);

	if (tcp_should_autocork(sk, skb, size_goal)) {

		/* avoid atomic op if TSQ_THROTTLED bit is already set */
		if (!test_bit(TSQ_THROTTLED, &tp->tsq_flags)) {
			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
			set_bit(TSQ_THROTTLED, &tp->tsq_flags);
		}
		/* It is possible TX completion already happened
		 * before we set TSQ_THROTTLED.
		 */
		if (atomic_read(&sk->sk_wmem_alloc) > skb->truesize)
			return;
	}

	if (flags & MSG_MORE)
		nonagle = TCP_NAGLE_CORK;

	__tcp_push_pending_frames(sk, mss_now, nonagle);
}
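
/*
 * The MSG_MORE handling above corks the connection much like the TCP_CORK
 * socket option.  A userspace sketch (MSG_MORE is the standard send(2)
 * flag):
 *
 *	send(fd, header, hdr_len, MSG_MORE);	// held back, more coming
 *	send(fd, body, body_len, 0);		// uncorks, pushes full frames
 *
 * tcp_push() treats MSG_MORE as TCP_NAGLE_CORK, so the header is coalesced
 * with the body instead of going out as a runt segment.
 */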

static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
				unsigned int offset, size_t len)
{
	struct tcp_splice_state *tss = rd_desc->arg.data;
	int ret;

	ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
			      tss->flags);
	if (ret > 0)
		rd_desc->count -= ret;
	return ret;
}

static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
	/* Store TCP splice context information in read_descriptor_t. */
	read_descriptor_t rd_desc = {
		.arg.data = tss,
		.count = tss->len,
	};

	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}

/**
 * tcp_splice_read - splice data from TCP socket to a pipe
 * @sock:	socket to splice from
 * @ppos:	position (not valid)
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
 *
 **/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct tcp_splice_state tss = {
		.pipe = pipe,
		.len = len,
		.flags = flags,
	};
	long timeo;
	ssize_t spliced;
	int ret;

	sock_rps_record_flow(sk);
	/*
	 * We can't seek on a socket input
	 */
	if (unlikely(*ppos))
		return -ESPIPE;

	ret = spliced = 0;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
	while (tss.len) {
		ret = __tcp_splice_read(sk, &tss);
		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				ret = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				/*
				 * This occurs when the user tries to read
				 * from a never-connected socket.
				 */
				if (!sock_flag(sk, SOCK_DONE))
					ret = -ENOTCONN;
				break;
			}
			if (!timeo) {
				ret = -EAGAIN;
				break;
			}
			sk_wait_data(sk, &timeo);
			if (signal_pending(current)) {
				ret = sock_intr_errno(timeo);
				break;
			}
			continue;
		}
		tss.len -= ret;
		spliced += ret;

		if (!timeo)
			break;
		release_sock(sk);
		lock_sock(sk);

		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current))
			break;
	}

	release_sock(sk);

	if (spliced)
		return spliced;

	return ret;
}
EXPORT_SYMBOL(tcp_splice_read);
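
/*
 * Userspace sketch of driving tcp_splice_read() via splice(2): move bytes
 * from a connected TCP socket into a pipe without copying through user
 * memory, then on to another fd.
 *
 *	int pfd[2];
 *	ssize_t n;
 *
 *	pipe(pfd);
 *	n = splice(sock_fd, NULL, pfd[1], NULL, 4096,
 *		   SPLICE_F_MOVE | SPLICE_F_MORE);
 *	if (n > 0)
 *		splice(pfd[0], NULL, out_fd, NULL, n, SPLICE_F_MOVE);
 */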

struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
				    bool force_schedule)
{
	struct sk_buff *skb;

	/* The TCP header must be at least 32-bit aligned. */
	size = ALIGN(size, 4);

	if (unlikely(tcp_under_memory_pressure(sk)))
		sk_mem_reclaim_partial(sk);

	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
	if (likely(skb)) {
		bool mem_scheduled;

		if (force_schedule) {
			mem_scheduled = true;
			sk_forced_mem_schedule(sk, skb->truesize);
		} else {
			mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
		}
		if (likely(mem_scheduled)) {
			skb_reserve(skb, sk->sk_prot->max_header);
			/*
			 * Make sure that we have exactly size bytes
			 * available to the caller, no more, no less.
			 */
			skb->reserved_tailroom = skb->end - skb->tail - size;
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure(sk);
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}

static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
				       int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 new_size_goal, size_goal;

	if (!large_allowed || !sk_can_gso(sk))
		return mss_now;

	/* Note: tcp_tso_autosize() will eventually split this later */
	new_size_goal = sk->sk_gso_max_size - 1 - MAX_TCP_HEADER;
	new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal);

	/* We try hard to avoid divides here */
	size_goal = tp->gso_segs * mss_now;
	if (unlikely(new_size_goal < size_goal ||
		     new_size_goal >= size_goal + mss_now)) {
		tp->gso_segs = min_t(u16, new_size_goal / mss_now,
				     sk->sk_gso_max_segs);
		size_goal = tp->gso_segs * mss_now;
	}

	return max(size_goal, mss_now);
}

static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
{
	int mss_now;

	mss_now = tcp_current_mss(sk);
	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));

	return mss_now;
}

static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
				size_t size, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now, size_goal;
	int err;
	ssize_t copied;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. One exception is TCP Fast Open
	 * (passive side) where data is allowed to be sent before a connection
	 * is fully established.
	 */
	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
	    !tcp_passive_fastopen(sk)) {
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;
	}

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_send_mss(sk, &size_goal, flags);
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto out_err;

	while (size > 0) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		int copy, i;
		bool can_coalesce;

		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
new_segment:
			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;

			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
						  skb_queue_empty(&sk->sk_write_queue));
			if (!skb)
				goto wait_for_memory;

			skb_entail(sk, skb);
			copy = size_goal;
		}

		if (copy > size)
			copy = size;

		i = skb_shinfo(skb)->nr_frags;
		can_coalesce = skb_can_coalesce(skb, i, page, offset);
		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
			tcp_mark_push(tp, skb);
			goto new_segment;
		}
		if (!sk_wmem_schedule(sk, copy))
			goto wait_for_memory;

		if (can_coalesce) {
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
		} else {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, copy);
		}
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;

		skb->len += copy;
		skb->data_len += copy;
		skb->truesize += copy;
		sk->sk_wmem_queued += copy;
		sk_mem_charge(sk, copy);
		skb->ip_summed = CHECKSUM_PARTIAL;
		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;
		tcp_skb_pcount_set(skb, 0);

		if (!copied)
			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;

		copied += copy;
		offset += copy;
		if (!(size -= copy)) {
			tcp_tx_timestamp(sk, skb);
			goto out;
		}

		if (skb->len < size_goal || (flags & MSG_OOB))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == tcp_send_head(sk))
			tcp_push_one(sk, mss_now);
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		tcp_push(sk, flags & ~MSG_MORE, mss_now,
			 TCP_NAGLE_PUSH, size_goal);

		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
			goto do_error;

		mss_now = tcp_send_mss(sk, &size_goal, flags);
	}

out:
	if (copied && !(flags & MSG_SENDPAGE_NOTLAST))
		tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
	return copied;

do_error:
	if (copied)
		goto out;
out_err:
	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
		sk->sk_write_space(sk);
	return sk_stream_error(sk, flags, err);
}

int tcp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
	ssize_t res;

	if (!(sk->sk_route_caps & NETIF_F_SG) ||
	    !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
		return sock_no_sendpage(sk->sk_socket, page, offset, size,
					flags);

	lock_sock(sk);
	res = do_tcp_sendpages(sk, page, offset, size, flags);
	release_sock(sk);
	return res;
}
EXPORT_SYMBOL(tcp_sendpage);
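
/*
 * tcp_sendpage() is what a zero-copy sendfile(2) to a TCP socket lands on
 * when the route supports SG and checksum offload (otherwise the fallback
 * above copies through sock_no_sendpage()).  Userspace sketch using the
 * standard <sys/sendfile.h> interface:
 *
 *	off_t off = 0;
 *	struct stat st;
 *
 *	fstat(file_fd, &st);
 *	while (off < st.st_size)
 *		if (sendfile(sock_fd, file_fd, &off, st.st_size - off) < 0)
 *			break;		// check errno (EAGAIN etc.)
 */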

static inline int select_size(const struct sock *sk, bool sg)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	int tmp = tp->mss_cache;

	if (sg) {
		if (sk_can_gso(sk)) {
			/* Small frames won't use a full page:
			 * Payload will immediately follow the tcp header.
			 */
			tmp = SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
		} else {
			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

			if (tmp >= pgbreak &&
			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
				tmp = pgbreak;
		}
	}

	return tmp;
}

void tcp_free_fastopen_req(struct tcp_sock *tp)
{
	if (tp->fastopen_req) {
		kfree(tp->fastopen_req);
		tp->fastopen_req = NULL;
	}
}

static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
				int *copied, size_t size)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int err, flags;

	if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
		return -EOPNOTSUPP;
	if (tp->fastopen_req)
		return -EALREADY; /* Another Fast Open is in progress */

	tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
				   sk->sk_allocation);
	if (unlikely(!tp->fastopen_req))
		return -ENOBUFS;
	tp->fastopen_req->data = msg;
	tp->fastopen_req->size = size;

	flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
	err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
				    msg->msg_namelen, flags);
	*copied = tp->fastopen_req->copied;
	tcp_free_fastopen_req(tp);
	return err;
}
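
/*
 * Client-side sketch of the Fast Open path above (MSG_FASTOPEN is the
 * standard sendmsg/sendto flag; the client bit of net.ipv4.tcp_fastopen,
 * i.e. TFO_CLIENT_ENABLE, must be set).  Data rides on the SYN; no prior
 * connect() call is made.
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	ssize_t n = sendto(fd, req, req_len, MSG_FASTOPEN,
 *			   (struct sockaddr *)&addr, sizeof(addr));
 *	// without a cached TFO cookie this degrades to a normal
 *	// handshake and returns once the data is queued
 */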

int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int flags, err, copied = 0;
	int mss_now = 0, size_goal, copied_syn = 0;
	bool sg;
	long timeo;

	lock_sock(sk);

	flags = msg->msg_flags;
	if (flags & MSG_FASTOPEN) {
		err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
		if (err == -EINPROGRESS && copied_syn > 0)
			goto out;
		else if (err)
			goto out_err;
	}

	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. One exception is TCP Fast Open
	 * (passive side) where data is allowed to be sent before a connection
	 * is fully established.
	 */
	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
	    !tcp_passive_fastopen(sk)) {
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto do_error;
	}

	if (unlikely(tp->repair)) {
		if (tp->repair_queue == TCP_RECV_QUEUE) {
			copied = tcp_send_rcvq(sk, msg, size);
			goto out_nopush;
		}

		err = -EINVAL;
		if (tp->repair_queue == TCP_NO_QUEUE)
			goto out_err;

		/* 'common' sending to sendq */
	}

	/* This should be in poll */
	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_send_mss(sk, &size_goal, flags);

	/* Ok, commence sending. */
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto out_err;

	sg = !!(sk->sk_route_caps & NETIF_F_SG);

	while (msg_data_left(msg)) {
		int copy = 0;
		int max = size_goal;

		skb = tcp_write_queue_tail(sk);
		if (tcp_send_head(sk)) {
			if (skb->ip_summed == CHECKSUM_NONE)
				max = mss_now;
			copy = max - skb->len;
		}

		if (copy <= 0) {
new_segment:
			/* Allocate a new segment. If the interface is SG,
			 * allocate an skb fitting a single page.
			 */
			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;

			skb = sk_stream_alloc_skb(sk,
						  select_size(sk, sg),
						  sk->sk_allocation,
						  skb_queue_empty(&sk->sk_write_queue));
			if (!skb)
				goto wait_for_memory;

			/*
			 * Check whether we can use HW checksum.
			 */
			if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
				skb->ip_summed = CHECKSUM_PARTIAL;

			skb_entail(sk, skb);
			copy = size_goal;
			max = size_goal;

			/* All packets are restored as if they have
			 * already been sent. skb_mstamp isn't set to
			 * avoid wrong rtt estimation.
			 */
			if (tp->repair)
				TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED;
		}

		/* Try to append data to the end of skb. */
		if (copy > msg_data_left(msg))
			copy = msg_data_left(msg);

		/* Where to copy to? */
		if (skb_availroom(skb) > 0) {
			/* We have some space in skb head. Superb! */
			copy = min_t(int, copy, skb_availroom(skb));
			err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy);
			if (err)
				goto do_fault;
		} else {
			bool merge = true;
			int i = skb_shinfo(skb)->nr_frags;
			struct page_frag *pfrag = sk_page_frag(sk);

			if (!sk_page_frag_refill(sk, pfrag))
				goto wait_for_memory;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				if (i == MAX_SKB_FRAGS || !sg) {
					tcp_mark_push(tp, skb);
					goto new_segment;
				}
				merge = false;
			}

			copy = min_t(int, copy, pfrag->size - pfrag->offset);

			if (!sk_wmem_schedule(sk, copy))
				goto wait_for_memory;

			err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
						       pfrag->page,
						       pfrag->offset,
						       copy);
			if (err)
				goto do_error;

			/* Update the skb. */
			if (merge) {
				skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			} else {
				skb_fill_page_desc(skb, i, pfrag->page,
						   pfrag->offset, copy);
				get_page(pfrag->page);
			}
			pfrag->offset += copy;
		}

		if (!copied)
			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;

		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;
		tcp_skb_pcount_set(skb, 0);

		copied += copy;
		if (!msg_data_left(msg)) {
			tcp_tx_timestamp(sk, skb);
			goto out;
		}

		if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == tcp_send_head(sk))
			tcp_push_one(sk, mss_now);
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		if (copied)
			tcp_push(sk, flags & ~MSG_MORE, mss_now,
				 TCP_NAGLE_PUSH, size_goal);

		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
			goto do_error;

		mss_now = tcp_send_mss(sk, &size_goal, flags);
	}

out:
	if (copied)
		tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
out_nopush:
	release_sock(sk);
	return copied + copied_syn;

do_fault:
	if (!skb->len) {
		tcp_unlink_write_queue(skb, sk);
		/* It is the one place in all of TCP, except connection
		 * reset, where we can be unlinking the send_head.
		 */
		tcp_check_send_head(sk, skb);
		sk_wmem_free_skb(sk, skb);
	}

do_error:
	if (copied + copied_syn)
		goto out;
out_err:
	err = sk_stream_error(sk, flags, err);
	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
		sk->sk_write_space(sk);
	release_sock(sk);
	return err;
}
EXPORT_SYMBOL(tcp_sendmsg);

/*
 *	Handle reading urgent data. BSD has very simple semantics for
 *	this, no blocking and very strange errors 8)
 */

static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* No URG data to read. */
	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
	    tp->urg_data == TCP_URG_READ)
		return -EINVAL;	/* Yes, this is right! */

	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
		return -ENOTCONN;

	if (tp->urg_data & TCP_URG_VALID) {
		int err = 0;
		char c = tp->urg_data;

		if (!(flags & MSG_PEEK))
			tp->urg_data = TCP_URG_READ;

		/* Read urgent data. */
		msg->msg_flags |= MSG_OOB;

		if (len > 0) {
			if (!(flags & MSG_TRUNC))
				err = memcpy_to_msg(msg, &c, 1);
			len = 1;
		} else
			msg->msg_flags |= MSG_TRUNC;

		return err ? -EFAULT : len;
	}

	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
		return 0;

	/* Fixed the recv(..., MSG_OOB) behaviour.  BSD docs and
	 * the available implementations agree in this case:
	 * this call should never block, independent of the
	 * blocking state of the socket.
	 * Mike <pall@rz.uni-karlsruhe.de>
	 */
	return -EAGAIN;
}
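
/*
 * Userspace sketch matching the semantics above: reading the single byte
 * of urgent data with recv(2).  The call never blocks; with no urgent
 * byte pending it fails with EAGAIN regardless of O_NONBLOCK.
 *
 *	char oob;
 *	ssize_t n = recv(fd, &oob, 1, MSG_OOB);
 *	if (n == 1)
 *		handle_urgent(oob);	// hypothetical handler
 *	else if (n < 0 && errno == EINVAL)
 *		;			// urgent byte already consumed,
 *					// or SO_OOBINLINE is set
 */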

static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
{
	struct sk_buff *skb;
	int copied = 0, err = 0;

	/* XXX -- need to support SO_PEEK_OFF */

	skb_queue_walk(&sk->sk_write_queue, skb) {
		err = skb_copy_datagram_msg(skb, 0, msg, skb->len);
		if (err)
			break;

		copied += skb->len;
	}

	return err ?: copied;
}

/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary.  COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far, it speeds up the
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
static void tcp_cleanup_rbuf(struct sock *sk, int copied)
{
	struct tcp_sock *tp = tcp_sk(sk);
	bool time_to_ack = false;

	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

	WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
	     "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
	     tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);

	if (inet_csk_ack_scheduled(sk)) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		/* Delayed ACKs frequently hit locked sockets during bulk
		 * receive. */
		if (icsk->icsk_ack.blocked ||
		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
		    /*
		     * If this read emptied the read buffer, we send an ACK
		     * when the connection is not bidirectional: the user
		     * drained the receive buffer and there was a small
		     * segment in the queue.
		     */
		    (copied > 0 &&
		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
		       !icsk->icsk_ack.pingpong)) &&
		      !atomic_read(&sk->sk_rmem_alloc)))
			time_to_ack = true;
	}

	/* We send an ACK if we can now advertise a non-zero window
	 * which has been raised "significantly".
	 *
	 * Even if the window was raised up to infinity, do not send a
	 * window-opening ACK in states where we will not receive more.
	 * It is useless.
	 */
	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		__u32 rcv_window_now = tcp_receive_window(tp);

		/* Optimize, __tcp_select_window() is not cheap. */
		if (2*rcv_window_now <= tp->window_clamp) {
			__u32 new_window = __tcp_select_window(sk);

			/* Send an ACK now if this read freed lots of space
			 * in our buffer.  We can advertise the new window
			 * now if it is not less than the current one.
			 * "Lots" means "at least twice" here.
			 */
			if (new_window && new_window >= 2 * rcv_window_now)
				time_to_ack = true;
		}
	}
	if (time_to_ack)
		tcp_send_ack(sk);
}

static void tcp_prequeue_process(struct sock *sk)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);

	NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);

	/* RX process wants to run with disabled BHs, though it is not
	 * necessary */
	local_bh_disable();
	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
		sk_backlog_rcv(sk, skb);
	local_bh_enable();

	/* Clear memory counter. */
	tp->ucopy.memory = 0;
}

static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
	struct sk_buff *skb;
	u32 offset;

	while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
		offset = seq - TCP_SKB_CB(skb)->seq;
		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
			offset--;
		if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) {
			*off = offset;
			return skb;
		}
		/* This looks weird, but this can happen if TCP collapsing
		 * split a fat GRO packet, while we released the socket lock
		 * in skb_splice_bits()
		 */
		sk_eat_skb(sk, skb);
	}
	return NULL;
}

/*
 * This routine provides an alternative to tcp_recvmsg() for routines
 * that would like to handle copying from skbuffs directly in 'sendfile'
 * fashion.
 * Note:
 *	- It is assumed that the socket was locked by the caller.
 *	- The routine does not block.
 *	- At present, there is no support for reading OOB data
 *	  or for 'peeking' the socket using this routine
 *	  (although both would be easy to implement).
 */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 seq = tp->copied_seq;
	u32 offset;
	int copied = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -ENOTCONN;
	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
		if (offset < skb->len) {
			int used;
			size_t len;

			len = skb->len - offset;
			/* Stop reading if we hit a patch of urgent data */
			if (tp->urg_data) {
				u32 urg_offset = tp->urg_seq - seq;
				if (urg_offset < len)
					len = urg_offset;
				if (!len)
					break;
			}
			used = recv_actor(desc, skb, offset, len);
			if (used <= 0) {
				if (!copied)
					copied = used;
				break;
			} else if (used <= len) {
				seq += used;
				copied += used;
				offset += used;
			}
			/* If recv_actor drops the lock (e.g. TCP splice
			 * receive) the skb pointer might be invalid when
			 * getting here: tcp_collapse might have deleted it
			 * while aggregating skbs from the socket queue.
			 */
			skb = tcp_recv_skb(sk, seq - 1, &offset);
			if (!skb)
				break;
			/* TCP coalescing might have appended data to the skb.
			 * Try to splice more frags
			 */
			if (offset + 1 != skb->len)
				continue;
		}
		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
			sk_eat_skb(sk, skb);
			++seq;
			break;
		}
		sk_eat_skb(sk, skb);
		if (!desc->count)
			break;
		tp->copied_seq = seq;
	}
	tp->copied_seq = seq;

	tcp_rcv_space_adjust(sk);

	/* Clean up data we have read: This will do ACK frames. */
	if (copied > 0) {
		tcp_recv_skb(sk, seq, &offset);
		tcp_cleanup_rbuf(sk, copied);
	}
	return copied;
}
EXPORT_SYMBOL(tcp_read_sock);
1555 | ||
1556 | /* | |
1557 | * This routine copies from a sock struct into the user buffer. | |
1558 | * | |
1559 | * Technical note: in 2.3 we work on _locked_ socket, so that | |
1560 | * tricks with *seq access order and skb->users are not required. | |
1561 | * Probably, code can be easily improved even more. | |
1562 | */ | |
1563 | ||
1564 | int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, | |
1565 | int flags, int *addr_len) | |
1566 | { | |
1567 | struct tcp_sock *tp = tcp_sk(sk); | |
1568 | int copied = 0; | |
1569 | u32 peek_seq; | |
1570 | u32 *seq; | |
1571 | unsigned long used; | |
1572 | int err; | |
1573 | int target; /* Read at least this many bytes */ | |
1574 | long timeo; | |
1575 | struct task_struct *user_recv = NULL; | |
1576 | struct sk_buff *skb; | |
1577 | u32 urg_hole = 0; | |
1578 | ||
1579 | if (unlikely(flags & MSG_ERRQUEUE)) | |
1580 | return inet_recv_error(sk, msg, len, addr_len); | |
1581 | ||
1582 | if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) && | |
1583 | (sk->sk_state == TCP_ESTABLISHED)) | |
1584 | sk_busy_loop(sk, nonblock); | |
1585 | ||
1586 | lock_sock(sk); | |
1587 | ||
1588 | err = -ENOTCONN; | |
1589 | if (sk->sk_state == TCP_LISTEN) | |
1590 | goto out; | |
1591 | ||
1592 | timeo = sock_rcvtimeo(sk, nonblock); | |
1593 | ||
1594 | /* Urgent data needs to be handled specially. */ | |
1595 | if (flags & MSG_OOB) | |
1596 | goto recv_urg; | |
1597 | ||
1598 | if (unlikely(tp->repair)) { | |
1599 | err = -EPERM; | |
1600 | if (!(flags & MSG_PEEK)) | |
1601 | goto out; | |
1602 | ||
1603 | if (tp->repair_queue == TCP_SEND_QUEUE) | |
1604 | goto recv_sndq; | |
1605 | ||
1606 | err = -EINVAL; | |
1607 | if (tp->repair_queue == TCP_NO_QUEUE) | |
1608 | goto out; | |
1609 | ||
1610 | /* 'common' recv queue MSG_PEEK-ing */ | |
1611 | } | |
1612 | ||
1613 | seq = &tp->copied_seq; | |
1614 | if (flags & MSG_PEEK) { | |
1615 | peek_seq = tp->copied_seq; | |
1616 | seq = &peek_seq; | |
1617 | } | |
1618 | ||
1619 | target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); | |
1620 | ||
1621 | do { | |
1622 | u32 offset; | |
1623 | ||
1624 | /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */ | |
1625 | if (tp->urg_data && tp->urg_seq == *seq) { | |
1626 | if (copied) | |
1627 | break; | |
1628 | if (signal_pending(current)) { | |
1629 | copied = timeo ? sock_intr_errno(timeo) : -EAGAIN; | |
1630 | break; | |
1631 | } | |
1632 | } | |
1633 | ||
1634 | /* Next get a buffer. */ | |
1635 | ||
1636 | skb_queue_walk(&sk->sk_receive_queue, skb) { | |
1637 | /* Now that we have two receive queues this | |
1638 | * shouldn't happen. | |
1639 | */ | |
1640 | if (WARN(before(*seq, TCP_SKB_CB(skb)->seq), | |
1641 | "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n", | |
1642 | *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, | |
1643 | flags)) | |
1644 | break; | |
1645 | ||
1646 | offset = *seq - TCP_SKB_CB(skb)->seq; | |
1647 | if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) | |
1648 | offset--; | |
1649 | if (offset < skb->len) | |
1650 | goto found_ok_skb; | |
1651 | if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) | |
1652 | goto found_fin_ok; | |
1653 | WARN(!(flags & MSG_PEEK), | |
1654 | "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n", | |
1655 | *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags); | |
1656 | } | |
1657 | ||
1658 | /* Well, if we have backlog, try to process it now. */ | |
1659 | ||
1660 | if (copied >= target && !sk->sk_backlog.tail) | |
1661 | break; | |
1662 | ||
1663 | if (copied) { | |
1664 | if (sk->sk_err || | |
1665 | sk->sk_state == TCP_CLOSE || | |
1666 | (sk->sk_shutdown & RCV_SHUTDOWN) || | |
1667 | !timeo || | |
1668 | signal_pending(current)) | |
1669 | break; | |
1670 | } else { | |
1671 | if (sock_flag(sk, SOCK_DONE)) | |
1672 | break; | |
1673 | ||
1674 | if (sk->sk_err) { | |
1675 | copied = sock_error(sk); | |
1676 | break; | |
1677 | } | |
1678 | ||
1679 | if (sk->sk_shutdown & RCV_SHUTDOWN) | |
1680 | break; | |
1681 | ||
1682 | if (sk->sk_state == TCP_CLOSE) { | |
1683 | if (!sock_flag(sk, SOCK_DONE)) { | |
1684 | /* This occurs when the user tries to read | |
1685 | * from a socket that was never connected. | |
1686 | */ | |
1687 | copied = -ENOTCONN; | |
1688 | break; | |
1689 | } | |
1690 | break; | |
1691 | } | |
1692 | ||
1693 | if (!timeo) { | |
1694 | copied = -EAGAIN; | |
1695 | break; | |
1696 | } | |
1697 | ||
1698 | if (signal_pending(current)) { | |
1699 | copied = sock_intr_errno(timeo); | |
1700 | break; | |
1701 | } | |
1702 | } | |
1703 | ||
1704 | tcp_cleanup_rbuf(sk, copied); | |
1705 | ||
1706 | if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) { | |
1707 | /* Install new reader */ | |
1708 | if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) { | |
1709 | user_recv = current; | |
1710 | tp->ucopy.task = user_recv; | |
1711 | tp->ucopy.msg = msg; | |
1712 | } | |
1713 | ||
1714 | tp->ucopy.len = len; | |
1715 | ||
1716 | WARN_ON(tp->copied_seq != tp->rcv_nxt && | |
1717 | !(flags & (MSG_PEEK | MSG_TRUNC))); | |
1718 | ||
1719 | /* Ugly... If the prequeue is not empty, we have to | |
1720 | * process it before releasing the socket, otherwise | |
1721 | * the order will be broken on the second iteration. | |
1722 | * A more elegant solution is required! | |
1723 | * | |
1724 | * Look: we have the following (pseudo)queues: | |
1725 | * | |
1726 | * 1. packets in flight | |
1727 | * 2. backlog | |
1728 | * 3. prequeue | |
1729 | * 4. receive_queue | |
1730 | * | |
1731 | * Each queue can be processed only if the next ones | |
1732 | * are empty. At this point we have empty receive_queue. | |
1733 | * But the prequeue _can_ be non-empty after the 2nd iteration, | |
1734 | * when we jumped to the start of the loop because backlog | |
1735 | * processing added something to receive_queue. | |
1736 | * We cannot release_sock(), because the backlog contains | |
1737 | * packets that arrived _after_ the prequeued ones. | |
1738 | * | |
1739 | * In short, the algorithm is clear --- process all | |
1740 | * the queues in order. We could do this more directly, | |
1741 | * requeueing packets from the backlog to the prequeue when | |
1742 | * the latter is not empty. That is more elegant, but eats | |
1743 | * cycles, unfortunately. | |
1744 | */ | |
1745 | if (!skb_queue_empty(&tp->ucopy.prequeue)) | |
1746 | goto do_prequeue; | |
1747 | ||
1748 | /* __ Set realtime policy in scheduler __ */ | |
1749 | } | |
1750 | ||
1751 | if (copied >= target) { | |
1752 | /* Do not sleep, just process backlog. */ | |
1753 | release_sock(sk); | |
1754 | lock_sock(sk); | |
1755 | } else | |
1756 | sk_wait_data(sk, &timeo); | |
1757 | ||
1758 | if (user_recv) { | |
1759 | int chunk; | |
1760 | ||
1761 | /* __ Restore normal policy in scheduler __ */ | |
1762 | ||
1763 | if ((chunk = len - tp->ucopy.len) != 0) { | |
1764 | NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk); | |
1765 | len -= chunk; | |
1766 | copied += chunk; | |
1767 | } | |
1768 | ||
1769 | if (tp->rcv_nxt == tp->copied_seq && | |
1770 | !skb_queue_empty(&tp->ucopy.prequeue)) { | |
1771 | do_prequeue: | |
1772 | tcp_prequeue_process(sk); | |
1773 | ||
1774 | if ((chunk = len - tp->ucopy.len) != 0) { | |
1775 | NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); | |
1776 | len -= chunk; | |
1777 | copied += chunk; | |
1778 | } | |
1779 | } | |
1780 | } | |
1781 | if ((flags & MSG_PEEK) && | |
1782 | (peek_seq - copied - urg_hole != tp->copied_seq)) { | |
1783 | net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n", | |
1784 | current->comm, | |
1785 | task_pid_nr(current)); | |
1786 | peek_seq = tp->copied_seq; | |
1787 | } | |
1788 | continue; | |
1789 | ||
1790 | found_ok_skb: | |
1791 | /* Ok so how much can we use? */ | |
1792 | used = skb->len - offset; | |
1793 | if (len < used) | |
1794 | used = len; | |
1795 | ||
1796 | /* Do we have urgent data here? */ | |
1797 | if (tp->urg_data) { | |
1798 | u32 urg_offset = tp->urg_seq - *seq; | |
1799 | if (urg_offset < used) { | |
1800 | if (!urg_offset) { | |
1801 | if (!sock_flag(sk, SOCK_URGINLINE)) { | |
1802 | ++*seq; | |
1803 | urg_hole++; | |
1804 | offset++; | |
1805 | used--; | |
1806 | if (!used) | |
1807 | goto skip_copy; | |
1808 | } | |
1809 | } else | |
1810 | used = urg_offset; | |
1811 | } | |
1812 | } | |
1813 | ||
1814 | if (!(flags & MSG_TRUNC)) { | |
1815 | err = skb_copy_datagram_msg(skb, offset, msg, used); | |
1816 | if (err) { | |
1817 | /* Exception. Bailout! */ | |
1818 | if (!copied) | |
1819 | copied = -EFAULT; | |
1820 | break; | |
1821 | } | |
1822 | } | |
1823 | ||
1824 | *seq += used; | |
1825 | copied += used; | |
1826 | len -= used; | |
1827 | ||
1828 | tcp_rcv_space_adjust(sk); | |
1829 | ||
1830 | skip_copy: | |
1831 | if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) { | |
1832 | tp->urg_data = 0; | |
1833 | tcp_fast_path_check(sk); | |
1834 | } | |
1835 | if (used + offset < skb->len) | |
1836 | continue; | |
1837 | ||
1838 | if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) | |
1839 | goto found_fin_ok; | |
1840 | if (!(flags & MSG_PEEK)) | |
1841 | sk_eat_skb(sk, skb); | |
1842 | continue; | |
1843 | ||
1844 | found_fin_ok: | |
1845 | /* Process the FIN. */ | |
1846 | ++*seq; | |
1847 | if (!(flags & MSG_PEEK)) | |
1848 | sk_eat_skb(sk, skb); | |
1849 | break; | |
1850 | } while (len > 0); | |
1851 | ||
1852 | if (user_recv) { | |
1853 | if (!skb_queue_empty(&tp->ucopy.prequeue)) { | |
1854 | int chunk; | |
1855 | ||
1856 | tp->ucopy.len = copied > 0 ? len : 0; | |
1857 | ||
1858 | tcp_prequeue_process(sk); | |
1859 | ||
1860 | if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) { | |
1861 | NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); | |
1862 | len -= chunk; | |
1863 | copied += chunk; | |
1864 | } | |
1865 | } | |
1866 | ||
1867 | tp->ucopy.task = NULL; | |
1868 | tp->ucopy.len = 0; | |
1869 | } | |
1870 | ||
1871 | /* According to UNIX98, msg_name/msg_namelen are ignored | |
1872 | * on a connected socket. I was just happy when I found this 8) --ANK | |
1873 | */ | |
1874 | ||
1875 | /* Clean up data we have read: this may send ACK frames. */ | |
1876 | tcp_cleanup_rbuf(sk, copied); | |
1877 | ||
1878 | release_sock(sk); | |
1879 | return copied; | |
1880 | ||
1881 | out: | |
1882 | release_sock(sk); | |
1883 | return err; | |
1884 | ||
1885 | recv_urg: | |
1886 | err = tcp_recv_urg(sk, msg, len, flags); | |
1887 | goto out; | |
1888 | ||
1889 | recv_sndq: | |
1890 | err = tcp_peek_sndq(sk, msg, len); | |
1891 | goto out; | |
1892 | } | |
1893 | EXPORT_SYMBOL(tcp_recvmsg); | |
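/*
 * Userspace view of the MSG_PEEK path above (a hedged sketch, not part
 * of the kernel): peeking advances only the local peek_seq copy, never
 * tp->copied_seq, so a subsequent plain recv() returns the same bytes.
 */
#include <sys/types.h>
#include <sys/socket.h>

static ssize_t peek_then_read(int fd, char *buf, size_t len)
{
	ssize_t n = recv(fd, buf, len, MSG_PEEK);	/* data stays queued */

	if (n <= 0)
		return n;
	return recv(fd, buf, (size_t)n, 0);		/* now consume it */
}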
1894 | ||
1895 | void tcp_set_state(struct sock *sk, int state) | |
1896 | { | |
1897 | int oldstate = sk->sk_state; | |
1898 | ||
1899 | switch (state) { | |
1900 | case TCP_ESTABLISHED: | |
1901 | if (oldstate != TCP_ESTABLISHED) | |
1902 | TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); | |
1903 | break; | |
1904 | ||
1905 | case TCP_CLOSE: | |
1906 | if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED) | |
1907 | TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS); | |
1908 | ||
1909 | sk->sk_prot->unhash(sk); | |
1910 | if (inet_csk(sk)->icsk_bind_hash && | |
1911 | !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) | |
1912 | inet_put_port(sk); | |
1913 | /* fall through */ | |
1914 | default: | |
1915 | if (oldstate == TCP_ESTABLISHED) | |
1916 | TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); | |
1917 | } | |
1918 | ||
1919 | /* Change state AFTER socket is unhashed to avoid closed | |
1920 | * socket sitting in hash tables. | |
1921 | */ | |
1922 | sk->sk_state = state; | |
1923 | ||
1924 | #ifdef STATE_TRACE | |
1925 | SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]); | |
1926 | #endif | |
1927 | } | |
1928 | EXPORT_SYMBOL_GPL(tcp_set_state); | |
1929 | ||
1930 | /* | |
1931 | * State processing on a close. This implements the state shift for | |
1932 | * sending our FIN frame. Note that we only send a FIN for some | |
1933 | * states. A shutdown() may have already sent the FIN, or we may be | |
1934 | * closed. | |
1935 | */ | |
1936 | ||
1937 | static const unsigned char new_state[16] = { | |
1938 | /* current state: new state: action: */ | |
1939 | [0 /* (Invalid) */] = TCP_CLOSE, | |
1940 | [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, | |
1941 | [TCP_SYN_SENT] = TCP_CLOSE, | |
1942 | [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, | |
1943 | [TCP_FIN_WAIT1] = TCP_FIN_WAIT1, | |
1944 | [TCP_FIN_WAIT2] = TCP_FIN_WAIT2, | |
1945 | [TCP_TIME_WAIT] = TCP_CLOSE, | |
1946 | [TCP_CLOSE] = TCP_CLOSE, | |
1947 | [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN, | |
1948 | [TCP_LAST_ACK] = TCP_LAST_ACK, | |
1949 | [TCP_LISTEN] = TCP_CLOSE, | |
1950 | [TCP_CLOSING] = TCP_CLOSING, | |
1951 | [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen! */ | |
1952 | }; | |
1953 | ||
1954 | static int tcp_close_state(struct sock *sk) | |
1955 | { | |
1956 | int next = (int)new_state[sk->sk_state]; | |
1957 | int ns = next & TCP_STATE_MASK; | |
1958 | ||
1959 | tcp_set_state(sk, ns); | |
1960 | ||
1961 | return next & TCP_ACTION_FIN; | |
1962 | } | |
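/*
 * Worked example of the table above: closing an ESTABLISHED socket reads
 * new_state[TCP_ESTABLISHED] == TCP_FIN_WAIT1 | TCP_ACTION_FIN, so
 * tcp_close_state() moves the socket to TCP_FIN_WAIT1 and returns
 * nonzero, telling the caller (tcp_shutdown()/tcp_close()) to send a FIN.
 */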
1963 | ||
1964 | /* | |
1965 | * Shutdown the sending side of a connection. Much like close except | |
1966 | * that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD). | |
1967 | */ | |
1968 | ||
1969 | void tcp_shutdown(struct sock *sk, int how) | |
1970 | { | |
1971 | /* We need to grab some memory, and put together a FIN, | |
1972 | * and then put it into the queue to be sent. | |
1973 | * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92. | |
1974 | */ | |
1975 | if (!(how & SEND_SHUTDOWN)) | |
1976 | return; | |
1977 | ||
1978 | /* If we've already sent a FIN, or it's a closed state, skip this. */ | |
1979 | if ((1 << sk->sk_state) & | |
1980 | (TCPF_ESTABLISHED | TCPF_SYN_SENT | | |
1981 | TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) { | |
1982 | /* Clear out any half completed packets. FIN if needed. */ | |
1983 | if (tcp_close_state(sk)) | |
1984 | tcp_send_fin(sk); | |
1985 | } | |
1986 | } | |
1987 | EXPORT_SYMBOL(tcp_shutdown); | |
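/*
 * Userspace sketch of the half-close implemented above (hypothetical
 * helper, not from this file): SHUT_WR sends our FIN but still lets us
 * drain whatever the peer keeps sending.
 */
#include <sys/socket.h>
#include <unistd.h>

static void half_close_and_drain(int fd)
{
	char buf[4096];

	shutdown(fd, SHUT_WR);			/* our FIN goes out */
	while (read(fd, buf, sizeof(buf)) > 0)	/* read until peer's FIN */
		;
	close(fd);
}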
1988 | ||
1989 | bool tcp_check_oom(struct sock *sk, int shift) | |
1990 | { | |
1991 | bool too_many_orphans, out_of_socket_memory; | |
1992 | ||
1993 | too_many_orphans = tcp_too_many_orphans(sk, shift); | |
1994 | out_of_socket_memory = tcp_out_of_memory(sk); | |
1995 | ||
1996 | if (too_many_orphans) | |
1997 | net_info_ratelimited("too many orphaned sockets\n"); | |
1998 | if (out_of_socket_memory) | |
1999 | net_info_ratelimited("out of memory -- consider tuning tcp_mem\n"); | |
2000 | return too_many_orphans || out_of_socket_memory; | |
2001 | } | |
2002 | ||
2003 | void tcp_close(struct sock *sk, long timeout) | |
2004 | { | |
2005 | struct sk_buff *skb; | |
2006 | int data_was_unread = 0; | |
2007 | int state; | |
2008 | ||
2009 | lock_sock(sk); | |
2010 | sk->sk_shutdown = SHUTDOWN_MASK; | |
2011 | ||
2012 | if (sk->sk_state == TCP_LISTEN) { | |
2013 | tcp_set_state(sk, TCP_CLOSE); | |
2014 | ||
2015 | /* Special case. */ | |
2016 | inet_csk_listen_stop(sk); | |
2017 | ||
2018 | goto adjudge_to_death; | |
2019 | } | |
2020 | ||
2021 | /* We need to flush the recv. buffs. We do this only on the | |
2022 | * descriptor close, not protocol-sourced closes, because the | |
2023 | * reader process may not have drained the data yet! | |
2024 | */ | |
2025 | while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { | |
2026 | u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq; | |
2027 | ||
2028 | if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) | |
2029 | len--; | |
2030 | data_was_unread += len; | |
2031 | __kfree_skb(skb); | |
2032 | } | |
2033 | ||
2034 | sk_mem_reclaim(sk); | |
2035 | ||
2036 | /* If the socket has already been reset (e.g. in tcp_reset()) - kill it. */ | |
2037 | if (sk->sk_state == TCP_CLOSE) | |
2038 | goto adjudge_to_death; | |
2039 | ||
2040 | /* As outlined in RFC 2525, section 2.17, we send a RST here because | |
2041 | * data was lost. To witness the awful effects of the old behavior of | |
2042 | * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk | |
2043 | * GET in an FTP client, suspend the process, wait for the client to | |
2044 | * advertise a zero window, then kill -9 the FTP client, wheee... | |
2045 | * Note: timeout is always zero in such a case. | |
2046 | */ | |
2047 | if (unlikely(tcp_sk(sk)->repair)) { | |
2048 | sk->sk_prot->disconnect(sk, 0); | |
2049 | } else if (data_was_unread) { | |
2050 | /* Unread data was tossed, zap the connection. */ | |
2051 | NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); | |
2052 | tcp_set_state(sk, TCP_CLOSE); | |
2053 | tcp_send_active_reset(sk, sk->sk_allocation); | |
2054 | } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { | |
2055 | /* Check zero linger _after_ checking for unread data. */ | |
2056 | sk->sk_prot->disconnect(sk, 0); | |
2057 | NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA); | |
2058 | } else if (tcp_close_state(sk)) { | |
2059 | /* We FIN if the application ate all the data before | |
2060 | * zapping the connection. | |
2061 | */ | |
2062 | ||
2063 | /* RED-PEN. Formally speaking, we have broken TCP state | |
2064 | * machine. State transitions: | |
2065 | * | |
2066 | * TCP_ESTABLISHED -> TCP_FIN_WAIT1 | |
2067 | * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible) | |
2068 | * TCP_CLOSE_WAIT -> TCP_LAST_ACK | |
2069 | * | |
2070 | * are legal only when FIN has been sent (i.e. in window), | |
2071 | * rather than queued out of window. Purists would complain. | |
2072 | * | |
2073 | * F.e. "RFC state" is ESTABLISHED, | |
2074 | * if Linux state is FIN-WAIT-1, but FIN is still not sent. | |
2075 | * | |
2076 | * The visible deviations are that we sometimes | |
2077 | * enter the time-wait state when it is not really required | |
2078 | * (harmless), and do not send active resets when the specs | |
2079 | * require them (TCP_ESTABLISHED and TCP_CLOSE_WAIT, which | |
2080 | * look like CLOSING or LAST_ACK to Linux). | |
2081 | * Probably I missed some more holes. | |
2082 | * --ANK | |
2083 | * XXX (TFO) - To start off we don't support SYN+ACK+FIN | |
2084 | * in a single packet! (May consider it later but will | |
2085 | * probably need API support or TCP_CORK SYN-ACK until | |
2086 | * data is written and socket is closed.) | |
2087 | */ | |
2088 | tcp_send_fin(sk); | |
2089 | } | |
2090 | ||
2091 | sk_stream_wait_close(sk, timeout); | |
2092 | ||
2093 | adjudge_to_death: | |
2094 | state = sk->sk_state; | |
2095 | sock_hold(sk); | |
2096 | sock_orphan(sk); | |
2097 | ||
2098 | /* It is the last release_sock in its life. It will remove backlog. */ | |
2099 | release_sock(sk); | |
2100 | ||
2101 | ||
2102 | /* Now the socket is owned by the kernel and we acquire the BH lock | |
2103 | * to finish the close. No need to check for user refs. | |
2104 | */ | |
2105 | local_bh_disable(); | |
2106 | bh_lock_sock(sk); | |
2107 | WARN_ON(sock_owned_by_user(sk)); | |
2108 | ||
2109 | percpu_counter_inc(sk->sk_prot->orphan_count); | |
2110 | ||
2111 | /* Have we already been destroyed by a softirq or backlog? */ | |
2112 | if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) | |
2113 | goto out; | |
2114 | ||
2115 | /* This is a (useful) BSD violation of the RFC. There is a | |
2116 | * problem with TCP as specified: the other end could | |
2117 | * keep a socket open forever with no application left at this end. | |
2118 | * We use a 1 minute timeout (about the same as BSD) and then kill | |
2119 | * our end. If they send after that then tough - BUT: it is long | |
2120 | * enough that we won't repeat the old "4*rto = almost no time - | |
2121 | * whoops, reset" mistake. | |
2122 | * | |
2123 | * Nope, it was not a mistake. It is really the desired behaviour, | |
2124 | * e.g. on http servers, where such sockets are useless but | |
2125 | * consume significant resources. Let's do it with the special | |
2126 | * linger2 option. --ANK | |
2127 | */ | |
2128 | ||
2129 | if (sk->sk_state == TCP_FIN_WAIT2) { | |
2130 | struct tcp_sock *tp = tcp_sk(sk); | |
2131 | if (tp->linger2 < 0) { | |
2132 | tcp_set_state(sk, TCP_CLOSE); | |
2133 | tcp_send_active_reset(sk, GFP_ATOMIC); | |
2134 | NET_INC_STATS_BH(sock_net(sk), | |
2135 | LINUX_MIB_TCPABORTONLINGER); | |
2136 | } else { | |
2137 | const int tmo = tcp_fin_time(sk); | |
2138 | ||
2139 | if (tmo > TCP_TIMEWAIT_LEN) { | |
2140 | inet_csk_reset_keepalive_timer(sk, | |
2141 | tmo - TCP_TIMEWAIT_LEN); | |
2142 | } else { | |
2143 | tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); | |
2144 | goto out; | |
2145 | } | |
2146 | } | |
2147 | } | |
2148 | if (sk->sk_state != TCP_CLOSE) { | |
2149 | sk_mem_reclaim(sk); | |
2150 | if (tcp_check_oom(sk, 0)) { | |
2151 | tcp_set_state(sk, TCP_CLOSE); | |
2152 | tcp_send_active_reset(sk, GFP_ATOMIC); | |
2153 | NET_INC_STATS_BH(sock_net(sk), | |
2154 | LINUX_MIB_TCPABORTONMEMORY); | |
2155 | } | |
2156 | } | |
2157 | ||
2158 | if (sk->sk_state == TCP_CLOSE) { | |
2159 | struct request_sock *req = tcp_sk(sk)->fastopen_rsk; | |
2160 | /* We could get here with a non-NULL req if the socket is | |
2161 | * aborted (e.g., closed with unread data) before 3WHS | |
2162 | * finishes. | |
2163 | */ | |
2164 | if (req) | |
2165 | reqsk_fastopen_remove(sk, req, false); | |
2166 | inet_csk_destroy_sock(sk); | |
2167 | } | |
2168 | /* Otherwise, socket is reprieved until protocol close. */ | |
2169 | ||
2170 | out: | |
2171 | bh_unlock_sock(sk); | |
2172 | local_bh_enable(); | |
2173 | sock_put(sk); | |
2174 | } | |
2175 | EXPORT_SYMBOL(tcp_close); | |
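/*
 * Userspace sketch of the zero-linger branch above (hypothetical helper):
 * SO_LINGER with l_linger == 0 makes close() take the disconnect path and
 * reset the connection instead of lingering in FIN-WAIT/TIME-WAIT.
 */
#include <sys/socket.h>
#include <unistd.h>

static void abortive_close(int fd)
{
	struct linger lin = { .l_onoff = 1, .l_linger = 0 };

	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin));
	close(fd);	/* connection is reset rather than FINed */
}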
2176 | ||
2177 | /* These states need RST on ABORT according to RFC793 */ | |
2178 | ||
2179 | static inline bool tcp_need_reset(int state) | |
2180 | { | |
2181 | return (1 << state) & | |
2182 | (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 | | |
2183 | TCPF_FIN_WAIT2 | TCPF_SYN_RECV); | |
2184 | } | |
2185 | ||
2186 | int tcp_disconnect(struct sock *sk, int flags) | |
2187 | { | |
2188 | struct inet_sock *inet = inet_sk(sk); | |
2189 | struct inet_connection_sock *icsk = inet_csk(sk); | |
2190 | struct tcp_sock *tp = tcp_sk(sk); | |
2191 | int err = 0; | |
2192 | int old_state = sk->sk_state; | |
2193 | ||
2194 | if (old_state != TCP_CLOSE) | |
2195 | tcp_set_state(sk, TCP_CLOSE); | |
2196 | ||
2197 | /* ABORT function of RFC793 */ | |
2198 | if (old_state == TCP_LISTEN) { | |
2199 | inet_csk_listen_stop(sk); | |
2200 | } else if (unlikely(tp->repair)) { | |
2201 | sk->sk_err = ECONNABORTED; | |
2202 | } else if (tcp_need_reset(old_state) || | |
2203 | (tp->snd_nxt != tp->write_seq && | |
2204 | (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) { | |
2205 | /* The last check adjusts for the discrepancy between Linux | |
2206 | * and RFC states. | |
2207 | */ | |
2208 | tcp_send_active_reset(sk, gfp_any()); | |
2209 | sk->sk_err = ECONNRESET; | |
2210 | } else if (old_state == TCP_SYN_SENT) | |
2211 | sk->sk_err = ECONNRESET; | |
2212 | ||
2213 | tcp_clear_xmit_timers(sk); | |
2214 | __skb_queue_purge(&sk->sk_receive_queue); | |
2215 | tcp_write_queue_purge(sk); | |
2216 | __skb_queue_purge(&tp->out_of_order_queue); | |
2217 | ||
2218 | inet->inet_dport = 0; | |
2219 | ||
2220 | if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) | |
2221 | inet_reset_saddr(sk); | |
2222 | ||
2223 | sk->sk_shutdown = 0; | |
2224 | sock_reset_flag(sk, SOCK_DONE); | |
2225 | tp->srtt_us = 0; | |
2226 | if ((tp->write_seq += tp->max_window + 2) == 0) | |
2227 | tp->write_seq = 1; | |
2228 | icsk->icsk_backoff = 0; | |
2229 | tp->snd_cwnd = 2; | |
2230 | icsk->icsk_probes_out = 0; | |
2231 | tp->packets_out = 0; | |
2232 | tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; | |
2233 | tp->snd_cwnd_cnt = 0; | |
2234 | tp->window_clamp = 0; | |
2235 | tcp_set_ca_state(sk, TCP_CA_Open); | |
2236 | tcp_clear_retrans(tp); | |
2237 | inet_csk_delack_init(sk); | |
2238 | tcp_init_send_head(sk); | |
2239 | memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); | |
2240 | __sk_dst_reset(sk); | |
2241 | ||
2242 | WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); | |
2243 | ||
2244 | sk->sk_error_report(sk); | |
2245 | return err; | |
2246 | } | |
2247 | EXPORT_SYMBOL(tcp_disconnect); | |
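/*
 * Userspace sketch (hypothetical helper): connect() with AF_UNSPEC is the
 * usual way to reach tcp_disconnect() on a connected TCP socket and
 * dissolve the association.
 */
#include <sys/socket.h>
#include <string.h>

static int tcp_dissolve(int fd)
{
	struct sockaddr sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_family = AF_UNSPEC;
	return connect(fd, &sa, sizeof(sa));
}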
2248 | ||
2249 | void tcp_sock_destruct(struct sock *sk) | |
2250 | { | |
2251 | inet_sock_destruct(sk); | |
2252 | ||
2253 | kfree(inet_csk(sk)->icsk_accept_queue.fastopenq); | |
2254 | } | |
2255 | ||
2256 | static inline bool tcp_can_repair_sock(const struct sock *sk) | |
2257 | { | |
2258 | return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && | |
2259 | ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED)); | |
2260 | } | |
2261 | ||
2262 | static int tcp_repair_options_est(struct tcp_sock *tp, | |
2263 | struct tcp_repair_opt __user *optbuf, unsigned int len) | |
2264 | { | |
2265 | struct tcp_repair_opt opt; | |
2266 | ||
2267 | while (len >= sizeof(opt)) { | |
2268 | if (copy_from_user(&opt, optbuf, sizeof(opt))) | |
2269 | return -EFAULT; | |
2270 | ||
2271 | optbuf++; | |
2272 | len -= sizeof(opt); | |
2273 | ||
2274 | switch (opt.opt_code) { | |
2275 | case TCPOPT_MSS: | |
2276 | tp->rx_opt.mss_clamp = opt.opt_val; | |
2277 | break; | |
2278 | case TCPOPT_WINDOW: | |
2279 | { | |
2280 | u16 snd_wscale = opt.opt_val & 0xFFFF; | |
2281 | u16 rcv_wscale = opt.opt_val >> 16; | |
2282 | ||
2283 | if (snd_wscale > 14 || rcv_wscale > 14) | |
2284 | return -EFBIG; | |
2285 | ||
2286 | tp->rx_opt.snd_wscale = snd_wscale; | |
2287 | tp->rx_opt.rcv_wscale = rcv_wscale; | |
2288 | tp->rx_opt.wscale_ok = 1; | |
2289 | } | |
2290 | break; | |
2291 | case TCPOPT_SACK_PERM: | |
2292 | if (opt.opt_val != 0) | |
2293 | return -EINVAL; | |
2294 | ||
2295 | tp->rx_opt.sack_ok |= TCP_SACK_SEEN; | |
2296 | if (sysctl_tcp_fack) | |
2297 | tcp_enable_fack(tp); | |
2298 | break; | |
2299 | case TCPOPT_TIMESTAMP: | |
2300 | if (opt.opt_val != 0) | |
2301 | return -EINVAL; | |
2302 | ||
2303 | tp->rx_opt.tstamp_ok = 1; | |
2304 | break; | |
2305 | } | |
2306 | } | |
2307 | ||
2308 | return 0; | |
2309 | } | |
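/*
 * Hedged userspace sketch of the repair interface consumed above (CRIU
 * style, hypothetical helper). Assumes the TCP_REPAIR* constants and
 * queue identifiers from <linux/tcp.h> and CAP_NET_ADMIN; error handling
 * is elided.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tcp.h>

static void restore_snd_seq(int fd, unsigned int snd_seq)
{
	int on = 1, q = TCP_SEND_QUEUE;

	setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &on, sizeof(on));
	setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_QUEUE, &q, sizeof(q));
	setsockopt(fd, IPPROTO_TCP, TCP_QUEUE_SEQ, &snd_seq, sizeof(snd_seq));
}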
2310 | ||
2311 | /* | |
2312 | * Socket option code for TCP. | |
2313 | */ | |
2314 | static int do_tcp_setsockopt(struct sock *sk, int level, | |
2315 | int optname, char __user *optval, unsigned int optlen) | |
2316 | { | |
2317 | struct tcp_sock *tp = tcp_sk(sk); | |
2318 | struct inet_connection_sock *icsk = inet_csk(sk); | |
2319 | int val; | |
2320 | int err = 0; | |
2321 | ||
2322 | /* These are data/string values, all the others are ints */ | |
2323 | switch (optname) { | |
2324 | case TCP_CONGESTION: { | |
2325 | char name[TCP_CA_NAME_MAX]; | |
2326 | ||
2327 | if (optlen < 1) | |
2328 | return -EINVAL; | |
2329 | ||
2330 | val = strncpy_from_user(name, optval, | |
2331 | min_t(long, TCP_CA_NAME_MAX-1, optlen)); | |
2332 | if (val < 0) | |
2333 | return -EFAULT; | |
2334 | name[val] = 0; | |
2335 | ||
2336 | lock_sock(sk); | |
2337 | err = tcp_set_congestion_control(sk, name); | |
2338 | release_sock(sk); | |
2339 | return err; | |
2340 | } | |
2341 | default: | |
2342 | /* fallthru */ | |
2343 | break; | |
2344 | } | |
2345 | ||
2346 | if (optlen < sizeof(int)) | |
2347 | return -EINVAL; | |
2348 | ||
2349 | if (get_user(val, (int __user *)optval)) | |
2350 | return -EFAULT; | |
2351 | ||
2352 | lock_sock(sk); | |
2353 | ||
2354 | switch (optname) { | |
2355 | case TCP_MAXSEG: | |
2356 | /* Values greater than the interface MTU won't take effect. However, | |
2357 | * at the point when this call is made we typically don't yet | |
2358 | * know which interface is going to be used. */ | |
2359 | if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) { | |
2360 | err = -EINVAL; | |
2361 | break; | |
2362 | } | |
2363 | tp->rx_opt.user_mss = val; | |
2364 | break; | |
2365 | ||
2366 | case TCP_NODELAY: | |
2367 | if (val) { | |
2368 | /* TCP_NODELAY is weaker than TCP_CORK, so that | |
2369 | * this option on a corked socket is remembered, but | |
2370 | * it is not activated until the cork is cleared. | |
2371 | * | |
2372 | * However, when TCP_NODELAY is set we make | |
2373 | * an explicit push, which overrides even TCP_CORK | |
2374 | * for currently queued segments. | |
2375 | */ | |
2376 | tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH; | |
2377 | tcp_push_pending_frames(sk); | |
2378 | } else { | |
2379 | tp->nonagle &= ~TCP_NAGLE_OFF; | |
2380 | } | |
2381 | break; | |
2382 | ||
2383 | case TCP_THIN_LINEAR_TIMEOUTS: | |
2384 | if (val < 0 || val > 1) | |
2385 | err = -EINVAL; | |
2386 | else | |
2387 | tp->thin_lto = val; | |
2388 | break; | |
2389 | ||
2390 | case TCP_THIN_DUPACK: | |
2391 | if (val < 0 || val > 1) | |
2392 | err = -EINVAL; | |
2393 | else { | |
2394 | tp->thin_dupack = val; | |
2395 | if (tp->thin_dupack) | |
2396 | tcp_disable_early_retrans(tp); | |
2397 | } | |
2398 | break; | |
2399 | ||
2400 | case TCP_REPAIR: | |
2401 | if (!tcp_can_repair_sock(sk)) | |
2402 | err = -EPERM; | |
2403 | else if (val == 1) { | |
2404 | tp->repair = 1; | |
2405 | sk->sk_reuse = SK_FORCE_REUSE; | |
2406 | tp->repair_queue = TCP_NO_QUEUE; | |
2407 | } else if (val == 0) { | |
2408 | tp->repair = 0; | |
2409 | sk->sk_reuse = SK_NO_REUSE; | |
2410 | tcp_send_window_probe(sk); | |
2411 | } else | |
2412 | err = -EINVAL; | |
2413 | ||
2414 | break; | |
2415 | ||
2416 | case TCP_REPAIR_QUEUE: | |
2417 | if (!tp->repair) | |
2418 | err = -EPERM; | |
2419 | else if (val < TCP_QUEUES_NR) | |
2420 | tp->repair_queue = val; | |
2421 | else | |
2422 | err = -EINVAL; | |
2423 | break; | |
2424 | ||
2425 | case TCP_QUEUE_SEQ: | |
2426 | if (sk->sk_state != TCP_CLOSE) | |
2427 | err = -EPERM; | |
2428 | else if (tp->repair_queue == TCP_SEND_QUEUE) | |
2429 | tp->write_seq = val; | |
2430 | else if (tp->repair_queue == TCP_RECV_QUEUE) | |
2431 | tp->rcv_nxt = val; | |
2432 | else | |
2433 | err = -EINVAL; | |
2434 | break; | |
2435 | ||
2436 | case TCP_REPAIR_OPTIONS: | |
2437 | if (!tp->repair) | |
2438 | err = -EINVAL; | |
2439 | else if (sk->sk_state == TCP_ESTABLISHED) | |
2440 | err = tcp_repair_options_est(tp, | |
2441 | (struct tcp_repair_opt __user *)optval, | |
2442 | optlen); | |
2443 | else | |
2444 | err = -EPERM; | |
2445 | break; | |
2446 | ||
2447 | case TCP_CORK: | |
2448 | /* When set, this indicates that non-full frames should always be queued. | |
2449 | * Later the user clears this option and we transmit | |
2450 | * any pending partial frames in the queue. This is | |
2451 | * meant to be used alongside sendfile() to get properly | |
2452 | * filled frames when the user (for example) must write | |
2453 | * out headers with a write() call first and then use | |
2454 | * sendfile to send out the data parts. | |
2455 | * | |
2456 | * TCP_CORK can be set together with TCP_NODELAY and it is | |
2457 | * stronger than TCP_NODELAY. | |
2458 | */ | |
2459 | if (val) { | |
2460 | tp->nonagle |= TCP_NAGLE_CORK; | |
2461 | } else { | |
2462 | tp->nonagle &= ~TCP_NAGLE_CORK; | |
2463 | if (tp->nonagle&TCP_NAGLE_OFF) | |
2464 | tp->nonagle |= TCP_NAGLE_PUSH; | |
2465 | tcp_push_pending_frames(sk); | |
2466 | } | |
2467 | break; | |
2468 | ||
2469 | case TCP_KEEPIDLE: | |
2470 | if (val < 1 || val > MAX_TCP_KEEPIDLE) | |
2471 | err = -EINVAL; | |
2472 | else { | |
2473 | tp->keepalive_time = val * HZ; | |
2474 | if (sock_flag(sk, SOCK_KEEPOPEN) && | |
2475 | !((1 << sk->sk_state) & | |
2476 | (TCPF_CLOSE | TCPF_LISTEN))) { | |
2477 | u32 elapsed = keepalive_time_elapsed(tp); | |
2478 | if (tp->keepalive_time > elapsed) | |
2479 | elapsed = tp->keepalive_time - elapsed; | |
2480 | else | |
2481 | elapsed = 0; | |
2482 | inet_csk_reset_keepalive_timer(sk, elapsed); | |
2483 | } | |
2484 | } | |
2485 | break; | |
2486 | case TCP_KEEPINTVL: | |
2487 | if (val < 1 || val > MAX_TCP_KEEPINTVL) | |
2488 | err = -EINVAL; | |
2489 | else | |
2490 | tp->keepalive_intvl = val * HZ; | |
2491 | break; | |
2492 | case TCP_KEEPCNT: | |
2493 | if (val < 1 || val > MAX_TCP_KEEPCNT) | |
2494 | err = -EINVAL; | |
2495 | else | |
2496 | tp->keepalive_probes = val; | |
2497 | break; | |
2498 | case TCP_SYNCNT: | |
2499 | if (val < 1 || val > MAX_TCP_SYNCNT) | |
2500 | err = -EINVAL; | |
2501 | else | |
2502 | icsk->icsk_syn_retries = val; | |
2503 | break; | |
2504 | ||
2505 | case TCP_SAVE_SYN: | |
2506 | if (val < 0 || val > 1) | |
2507 | err = -EINVAL; | |
2508 | else | |
2509 | tp->save_syn = val; | |
2510 | break; | |
2511 | ||
2512 | case TCP_LINGER2: | |
2513 | if (val < 0) | |
2514 | tp->linger2 = -1; | |
2515 | else if (val > sysctl_tcp_fin_timeout / HZ) | |
2516 | tp->linger2 = 0; | |
2517 | else | |
2518 | tp->linger2 = val * HZ; | |
2519 | break; | |
2520 | ||
2521 | case TCP_DEFER_ACCEPT: | |
2522 | /* Translate value in seconds to number of retransmits */ | |
2523 | icsk->icsk_accept_queue.rskq_defer_accept = | |
2524 | secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ, | |
2525 | TCP_RTO_MAX / HZ); | |
2526 | break; | |
2527 | ||
2528 | case TCP_WINDOW_CLAMP: | |
2529 | if (!val) { | |
2530 | if (sk->sk_state != TCP_CLOSE) { | |
2531 | err = -EINVAL; | |
2532 | break; | |
2533 | } | |
2534 | tp->window_clamp = 0; | |
2535 | } else | |
2536 | tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ? | |
2537 | SOCK_MIN_RCVBUF / 2 : val; | |
2538 | break; | |
2539 | ||
2540 | case TCP_QUICKACK: | |
2541 | if (!val) { | |
2542 | icsk->icsk_ack.pingpong = 1; | |
2543 | } else { | |
2544 | icsk->icsk_ack.pingpong = 0; | |
2545 | if ((1 << sk->sk_state) & | |
2546 | (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && | |
2547 | inet_csk_ack_scheduled(sk)) { | |
2548 | icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; | |
2549 | tcp_cleanup_rbuf(sk, 1); | |
2550 | if (!(val & 1)) | |
2551 | icsk->icsk_ack.pingpong = 1; | |
2552 | } | |
2553 | } | |
2554 | break; | |
2555 | ||
2556 | #ifdef CONFIG_TCP_MD5SIG | |
2557 | case TCP_MD5SIG: | |
2558 | /* Read the IP->Key mappings from userspace */ | |
2559 | err = tp->af_specific->md5_parse(sk, optval, optlen); | |
2560 | break; | |
2561 | #endif | |
2562 | case TCP_USER_TIMEOUT: | |
2563 | /* Cap the max time in ms TCP will retry or probe the window | |
2564 | * before giving up and aborting (ETIMEDOUT) a connection. | |
2565 | */ | |
2566 | if (val < 0) | |
2567 | err = -EINVAL; | |
2568 | else | |
2569 | icsk->icsk_user_timeout = msecs_to_jiffies(val); | |
2570 | break; | |
2571 | ||
2572 | case TCP_FASTOPEN: | |
2573 | if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE | | |
2574 | TCPF_LISTEN))) | |
2575 | err = fastopen_init_queue(sk, val); | |
2576 | else | |
2577 | err = -EINVAL; | |
2578 | break; | |
2579 | case TCP_TIMESTAMP: | |
2580 | if (!tp->repair) | |
2581 | err = -EPERM; | |
2582 | else | |
2583 | tp->tsoffset = val - tcp_time_stamp; | |
2584 | break; | |
2585 | case TCP_NOTSENT_LOWAT: | |
2586 | tp->notsent_lowat = val; | |
2587 | sk->sk_write_space(sk); | |
2588 | break; | |
2589 | default: | |
2590 | err = -ENOPROTOOPT; | |
2591 | break; | |
2592 | } | |
2593 | ||
2594 | release_sock(sk); | |
2595 | return err; | |
2596 | } | |
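/*
 * Userspace sketch of the TCP_CORK pattern described in the case above
 * (hypothetical helper, error handling elided): cork, write the header,
 * sendfile() the body, then uncork so the final partial frame is pushed.
 */
#include <sys/socket.h>
#include <sys/sendfile.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static void corked_response(int sock, int filefd, const void *hdr,
			    size_t hdrlen, size_t filelen)
{
	int on = 1, off = 0;

	setsockopt(sock, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
	send(sock, hdr, hdrlen, 0);		/* queued, not yet pushed */
	sendfile(sock, filefd, NULL, filelen);	/* fills full-sized frames */
	setsockopt(sock, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
}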
2597 | ||
2598 | int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, | |
2599 | unsigned int optlen) | |
2600 | { | |
2601 | const struct inet_connection_sock *icsk = inet_csk(sk); | |
2602 | ||
2603 | if (level != SOL_TCP) | |
2604 | return icsk->icsk_af_ops->setsockopt(sk, level, optname, | |
2605 | optval, optlen); | |
2606 | return do_tcp_setsockopt(sk, level, optname, optval, optlen); | |
2607 | } | |
2608 | EXPORT_SYMBOL(tcp_setsockopt); | |
2609 | ||
2610 | #ifdef CONFIG_COMPAT | |
2611 | int compat_tcp_setsockopt(struct sock *sk, int level, int optname, | |
2612 | char __user *optval, unsigned int optlen) | |
2613 | { | |
2614 | if (level != SOL_TCP) | |
2615 | return inet_csk_compat_setsockopt(sk, level, optname, | |
2616 | optval, optlen); | |
2617 | return do_tcp_setsockopt(sk, level, optname, optval, optlen); | |
2618 | } | |
2619 | EXPORT_SYMBOL(compat_tcp_setsockopt); | |
2620 | #endif | |
2621 | ||
2622 | /* Return information about state of tcp endpoint in API format. */ | |
2623 | void tcp_get_info(struct sock *sk, struct tcp_info *info) | |
2624 | { | |
2625 | const struct tcp_sock *tp = tcp_sk(sk); | |
2626 | const struct inet_connection_sock *icsk = inet_csk(sk); | |
2627 | u32 now = tcp_time_stamp; | |
2628 | u32 rate; | |
2629 | ||
2630 | memset(info, 0, sizeof(*info)); | |
2631 | ||
2632 | info->tcpi_state = sk->sk_state; | |
2633 | info->tcpi_ca_state = icsk->icsk_ca_state; | |
2634 | info->tcpi_retransmits = icsk->icsk_retransmits; | |
2635 | info->tcpi_probes = icsk->icsk_probes_out; | |
2636 | info->tcpi_backoff = icsk->icsk_backoff; | |
2637 | ||
2638 | if (tp->rx_opt.tstamp_ok) | |
2639 | info->tcpi_options |= TCPI_OPT_TIMESTAMPS; | |
2640 | if (tcp_is_sack(tp)) | |
2641 | info->tcpi_options |= TCPI_OPT_SACK; | |
2642 | if (tp->rx_opt.wscale_ok) { | |
2643 | info->tcpi_options |= TCPI_OPT_WSCALE; | |
2644 | info->tcpi_snd_wscale = tp->rx_opt.snd_wscale; | |
2645 | info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale; | |
2646 | } | |
2647 | ||
2648 | if (tp->ecn_flags & TCP_ECN_OK) | |
2649 | info->tcpi_options |= TCPI_OPT_ECN; | |
2650 | if (tp->ecn_flags & TCP_ECN_SEEN) | |
2651 | info->tcpi_options |= TCPI_OPT_ECN_SEEN; | |
2652 | if (tp->syn_data_acked) | |
2653 | info->tcpi_options |= TCPI_OPT_SYN_DATA; | |
2654 | ||
2655 | info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto); | |
2656 | info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato); | |
2657 | info->tcpi_snd_mss = tp->mss_cache; | |
2658 | info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; | |
2659 | ||
2660 | if (sk->sk_state == TCP_LISTEN) { | |
2661 | info->tcpi_unacked = sk->sk_ack_backlog; | |
2662 | info->tcpi_sacked = sk->sk_max_ack_backlog; | |
2663 | } else { | |
2664 | info->tcpi_unacked = tp->packets_out; | |
2665 | info->tcpi_sacked = tp->sacked_out; | |
2666 | } | |
2667 | info->tcpi_lost = tp->lost_out; | |
2668 | info->tcpi_retrans = tp->retrans_out; | |
2669 | info->tcpi_fackets = tp->fackets_out; | |
2670 | ||
2671 | info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); | |
2672 | info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime); | |
2673 | info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); | |
2674 | ||
2675 | info->tcpi_pmtu = icsk->icsk_pmtu_cookie; | |
2676 | info->tcpi_rcv_ssthresh = tp->rcv_ssthresh; | |
2677 | info->tcpi_rtt = tp->srtt_us >> 3; | |
2678 | info->tcpi_rttvar = tp->mdev_us >> 2; | |
2679 | info->tcpi_snd_ssthresh = tp->snd_ssthresh; | |
2680 | info->tcpi_snd_cwnd = tp->snd_cwnd; | |
2681 | info->tcpi_advmss = tp->advmss; | |
2682 | info->tcpi_reordering = tp->reordering; | |
2683 | ||
2684 | info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3; | |
2685 | info->tcpi_rcv_space = tp->rcvq_space.space; | |
2686 | ||
2687 | info->tcpi_total_retrans = tp->total_retrans; | |
2688 | ||
2689 | rate = READ_ONCE(sk->sk_pacing_rate); | |
2690 | info->tcpi_pacing_rate = rate != ~0U ? rate : ~0ULL; | |
2691 | ||
2692 | rate = READ_ONCE(sk->sk_max_pacing_rate); | |
2693 | info->tcpi_max_pacing_rate = rate != ~0U ? rate : ~0ULL; | |
2694 | ||
2695 | spin_lock_bh(&sk->sk_lock.slock); | |
2696 | info->tcpi_bytes_acked = tp->bytes_acked; | |
2697 | info->tcpi_bytes_received = tp->bytes_received; | |
2698 | spin_unlock_bh(&sk->sk_lock.slock); | |
2699 | } | |
2700 | EXPORT_SYMBOL_GPL(tcp_get_info); | |
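/*
 * Userspace counterpart of tcp_get_info() (a hedged sketch, hypothetical
 * helper): reads the smoothed RTT this function exports, tcpi_rtt, in
 * microseconds (srtt_us >> 3 above).
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static unsigned int query_rtt_us(int fd)
{
	struct tcp_info ti;
	socklen_t len = sizeof(ti);

	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) < 0)
		return 0;
	return ti.tcpi_rtt;
}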
2701 | ||
2702 | static int do_tcp_getsockopt(struct sock *sk, int level, | |
2703 | int optname, char __user *optval, int __user *optlen) | |
2704 | { | |
2705 | struct inet_connection_sock *icsk = inet_csk(sk); | |
2706 | struct tcp_sock *tp = tcp_sk(sk); | |
2707 | int val, len; | |
2708 | ||
2709 | if (get_user(len, optlen)) | |
2710 | return -EFAULT; | |
2711 | ||
2712 | len = min_t(unsigned int, len, sizeof(int)); | |
2713 | ||
2714 | if (len < 0) | |
2715 | return -EINVAL; | |
2716 | ||
2717 | switch (optname) { | |
2718 | case TCP_MAXSEG: | |
2719 | val = tp->mss_cache; | |
2720 | if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) | |
2721 | val = tp->rx_opt.user_mss; | |
2722 | if (tp->repair) | |
2723 | val = tp->rx_opt.mss_clamp; | |
2724 | break; | |
2725 | case TCP_NODELAY: | |
2726 | val = !!(tp->nonagle&TCP_NAGLE_OFF); | |
2727 | break; | |
2728 | case TCP_CORK: | |
2729 | val = !!(tp->nonagle&TCP_NAGLE_CORK); | |
2730 | break; | |
2731 | case TCP_KEEPIDLE: | |
2732 | val = keepalive_time_when(tp) / HZ; | |
2733 | break; | |
2734 | case TCP_KEEPINTVL: | |
2735 | val = keepalive_intvl_when(tp) / HZ; | |
2736 | break; | |
2737 | case TCP_KEEPCNT: | |
2738 | val = keepalive_probes(tp); | |
2739 | break; | |
2740 | case TCP_SYNCNT: | |
2741 | val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; | |
2742 | break; | |
2743 | case TCP_LINGER2: | |
2744 | val = tp->linger2; | |
2745 | if (val >= 0) | |
2746 | val = (val ? : sysctl_tcp_fin_timeout) / HZ; | |
2747 | break; | |
2748 | case TCP_DEFER_ACCEPT: | |
2749 | val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept, | |
2750 | TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ); | |
2751 | break; | |
2752 | case TCP_WINDOW_CLAMP: | |
2753 | val = tp->window_clamp; | |
2754 | break; | |
2755 | case TCP_INFO: { | |
2756 | struct tcp_info info; | |
2757 | ||
2758 | if (get_user(len, optlen)) | |
2759 | return -EFAULT; | |
2760 | ||
2761 | tcp_get_info(sk, &info); | |
2762 | ||
2763 | len = min_t(unsigned int, len, sizeof(info)); | |
2764 | if (put_user(len, optlen)) | |
2765 | return -EFAULT; | |
2766 | if (copy_to_user(optval, &info, len)) | |
2767 | return -EFAULT; | |
2768 | return 0; | |
2769 | } | |
2770 | case TCP_CC_INFO: { | |
2771 | const struct tcp_congestion_ops *ca_ops; | |
2772 | union tcp_cc_info info; | |
2773 | size_t sz = 0; | |
2774 | int attr; | |
2775 | ||
2776 | if (get_user(len, optlen)) | |
2777 | return -EFAULT; | |
2778 | ||
2779 | ca_ops = icsk->icsk_ca_ops; | |
2780 | if (ca_ops && ca_ops->get_info) | |
2781 | sz = ca_ops->get_info(sk, ~0U, &attr, &info); | |
2782 | ||
2783 | len = min_t(unsigned int, len, sz); | |
2784 | if (put_user(len, optlen)) | |
2785 | return -EFAULT; | |
2786 | if (copy_to_user(optval, &info, len)) | |
2787 | return -EFAULT; | |
2788 | return 0; | |
2789 | } | |
2790 | case TCP_QUICKACK: | |
2791 | val = !icsk->icsk_ack.pingpong; | |
2792 | break; | |
2793 | ||
2794 | case TCP_CONGESTION: | |
2795 | if (get_user(len, optlen)) | |
2796 | return -EFAULT; | |
2797 | len = min_t(unsigned int, len, TCP_CA_NAME_MAX); | |
2798 | if (put_user(len, optlen)) | |
2799 | return -EFAULT; | |
2800 | if (copy_to_user(optval, icsk->icsk_ca_ops->name, len)) | |
2801 | return -EFAULT; | |
2802 | return 0; | |
2803 | ||
2804 | case TCP_THIN_LINEAR_TIMEOUTS: | |
2805 | val = tp->thin_lto; | |
2806 | break; | |
2807 | case TCP_THIN_DUPACK: | |
2808 | val = tp->thin_dupack; | |
2809 | break; | |
2810 | ||
2811 | case TCP_REPAIR: | |
2812 | val = tp->repair; | |
2813 | break; | |
2814 | ||
2815 | case TCP_REPAIR_QUEUE: | |
2816 | if (tp->repair) | |
2817 | val = tp->repair_queue; | |
2818 | else | |
2819 | return -EINVAL; | |
2820 | break; | |
2821 | ||
2822 | case TCP_QUEUE_SEQ: | |
2823 | if (tp->repair_queue == TCP_SEND_QUEUE) | |
2824 | val = tp->write_seq; | |
2825 | else if (tp->repair_queue == TCP_RECV_QUEUE) | |
2826 | val = tp->rcv_nxt; | |
2827 | else | |
2828 | return -EINVAL; | |
2829 | break; | |
2830 | ||
2831 | case TCP_USER_TIMEOUT: | |
2832 | val = jiffies_to_msecs(icsk->icsk_user_timeout); | |
2833 | break; | |
2834 | ||
2835 | case TCP_FASTOPEN: | |
2836 | if (icsk->icsk_accept_queue.fastopenq) | |
2837 | val = icsk->icsk_accept_queue.fastopenq->max_qlen; | |
2838 | else | |
2839 | val = 0; | |
2840 | break; | |
2841 | ||
2842 | case TCP_TIMESTAMP: | |
2843 | val = tcp_time_stamp + tp->tsoffset; | |
2844 | break; | |
2845 | case TCP_NOTSENT_LOWAT: | |
2846 | val = tp->notsent_lowat; | |
2847 | break; | |
2848 | case TCP_SAVE_SYN: | |
2849 | val = tp->save_syn; | |
2850 | break; | |
2851 | case TCP_SAVED_SYN: { | |
2852 | if (get_user(len, optlen)) | |
2853 | return -EFAULT; | |
2854 | ||
2855 | lock_sock(sk); | |
2856 | if (tp->saved_syn) { | |
2857 | if (len < tp->saved_syn[0]) { | |
2858 | if (put_user(tp->saved_syn[0], optlen)) { | |
2859 | release_sock(sk); | |
2860 | return -EFAULT; | |
2861 | } | |
2862 | release_sock(sk); | |
2863 | return -EINVAL; | |
2864 | } | |
2865 | len = tp->saved_syn[0]; | |
2866 | if (put_user(len, optlen)) { | |
2867 | release_sock(sk); | |
2868 | return -EFAULT; | |
2869 | } | |
2870 | if (copy_to_user(optval, tp->saved_syn + 1, len)) { | |
2871 | release_sock(sk); | |
2872 | return -EFAULT; | |
2873 | } | |
2874 | tcp_saved_syn_free(tp); | |
2875 | release_sock(sk); | |
2876 | } else { | |
2877 | release_sock(sk); | |
2878 | len = 0; | |
2879 | if (put_user(len, optlen)) | |
2880 | return -EFAULT; | |
2881 | } | |
2882 | return 0; | |
2883 | } | |
2884 | default: | |
2885 | return -ENOPROTOOPT; | |
2886 | } | |
2887 | ||
2888 | if (put_user(len, optlen)) | |
2889 | return -EFAULT; | |
2890 | if (copy_to_user(optval, &val, len)) | |
2891 | return -EFAULT; | |
2892 | return 0; | |
2893 | } | |
2894 | ||
2895 | int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, | |
2896 | int __user *optlen) | |
2897 | { | |
2898 | struct inet_connection_sock *icsk = inet_csk(sk); | |
2899 | ||
2900 | if (level != SOL_TCP) | |
2901 | return icsk->icsk_af_ops->getsockopt(sk, level, optname, | |
2902 | optval, optlen); | |
2903 | return do_tcp_getsockopt(sk, level, optname, optval, optlen); | |
2904 | } | |
2905 | EXPORT_SYMBOL(tcp_getsockopt); | |
2906 | ||
2907 | #ifdef CONFIG_COMPAT | |
2908 | int compat_tcp_getsockopt(struct sock *sk, int level, int optname, | |
2909 | char __user *optval, int __user *optlen) | |
2910 | { | |
2911 | if (level != SOL_TCP) | |
2912 | return inet_csk_compat_getsockopt(sk, level, optname, | |
2913 | optval, optlen); | |
2914 | return do_tcp_getsockopt(sk, level, optname, optval, optlen); | |
2915 | } | |
2916 | EXPORT_SYMBOL(compat_tcp_getsockopt); | |
2917 | #endif | |
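/*
 * Userspace sketch for the TCP_CONGESTION option handled above
 * (hypothetical helper): switch the congestion control algorithm by name
 * (at most TCP_CA_NAME_MAX bytes, e.g. "cubic" or "reno"). Assumes the
 * requested algorithm is available in the running kernel.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>

static int set_cc(int fd, const char *name)
{
	return setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
			  name, strlen(name));
}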
2918 | ||
2919 | #ifdef CONFIG_TCP_MD5SIG | |
2920 | static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool); | |
2921 | static DEFINE_MUTEX(tcp_md5sig_mutex); | |
2922 | static bool tcp_md5sig_pool_populated = false; | |
2923 | ||
2924 | static void __tcp_alloc_md5sig_pool(void) | |
2925 | { | |
2926 | int cpu; | |
2927 | ||
2928 | for_each_possible_cpu(cpu) { | |
2929 | if (!per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm) { | |
2930 | struct crypto_hash *hash; | |
2931 | ||
2932 | hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); | |
2933 | if (IS_ERR_OR_NULL(hash)) | |
2934 | return; | |
2935 | per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash; | |
2936 | } | |
2937 | } | |
2938 | /* before setting tcp_md5sig_pool_populated, we must commit all writes | |
2939 | * to memory. See smp_rmb() in tcp_get_md5sig_pool() | |
2940 | */ | |
2941 | smp_wmb(); | |
2942 | tcp_md5sig_pool_populated = true; | |
2943 | } | |
2944 | ||
2945 | bool tcp_alloc_md5sig_pool(void) | |
2946 | { | |
2947 | if (unlikely(!tcp_md5sig_pool_populated)) { | |
2948 | mutex_lock(&tcp_md5sig_mutex); | |
2949 | ||
2950 | if (!tcp_md5sig_pool_populated) | |
2951 | __tcp_alloc_md5sig_pool(); | |
2952 | ||
2953 | mutex_unlock(&tcp_md5sig_mutex); | |
2954 | } | |
2955 | return tcp_md5sig_pool_populated; | |
2956 | } | |
2957 | EXPORT_SYMBOL(tcp_alloc_md5sig_pool); | |
2958 | ||
2959 | ||
2960 | /** | |
2961 | * tcp_get_md5sig_pool - get md5sig_pool for this user | |
2962 | * | |
2963 | * We use a percpu structure, so if we succeed, we return with preemption | |
2964 | * and BH disabled, to make sure another thread or softirq handler | |
2965 | * won't try to grab the same context. | |
2966 | */ | |
2967 | struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) | |
2968 | { | |
2969 | local_bh_disable(); | |
2970 | ||
2971 | if (tcp_md5sig_pool_populated) { | |
2972 | /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */ | |
2973 | smp_rmb(); | |
2974 | return this_cpu_ptr(&tcp_md5sig_pool); | |
2975 | } | |
2976 | local_bh_enable(); | |
2977 | return NULL; | |
2978 | } | |
2979 | EXPORT_SYMBOL(tcp_get_md5sig_pool); | |
2980 | ||
2981 | int tcp_md5_hash_header(struct tcp_md5sig_pool *hp, | |
2982 | const struct tcphdr *th) | |
2983 | { | |
2984 | struct scatterlist sg; | |
2985 | struct tcphdr hdr; | |
2986 | int err; | |
2987 | ||
2988 | /* We are not allowed to change tcphdr, make a local copy */ | |
2989 | memcpy(&hdr, th, sizeof(hdr)); | |
2990 | hdr.check = 0; | |
2991 | ||
2992 | /* options aren't included in the hash */ | |
2993 | sg_init_one(&sg, &hdr, sizeof(hdr)); | |
2994 | err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(hdr)); | |
2995 | return err; | |
2996 | } | |
2997 | EXPORT_SYMBOL(tcp_md5_hash_header); | |
2998 | ||
2999 | int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp, | |
3000 | const struct sk_buff *skb, unsigned int header_len) | |
3001 | { | |
3002 | struct scatterlist sg; | |
3003 | const struct tcphdr *tp = tcp_hdr(skb); | |
3004 | struct hash_desc *desc = &hp->md5_desc; | |
3005 | unsigned int i; | |
3006 | const unsigned int head_data_len = skb_headlen(skb) > header_len ? | |
3007 | skb_headlen(skb) - header_len : 0; | |
3008 | const struct skb_shared_info *shi = skb_shinfo(skb); | |
3009 | struct sk_buff *frag_iter; | |
3010 | ||
3011 | sg_init_table(&sg, 1); | |
3012 | ||
3013 | sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len); | |
3014 | if (crypto_hash_update(desc, &sg, head_data_len)) | |
3015 | return 1; | |
3016 | ||
3017 | for (i = 0; i < shi->nr_frags; ++i) { | |
3018 | const struct skb_frag_struct *f = &shi->frags[i]; | |
3019 | unsigned int offset = f->page_offset; | |
3020 | struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT); | |
3021 | ||
3022 | sg_set_page(&sg, page, skb_frag_size(f), | |
3023 | offset_in_page(offset)); | |
3024 | if (crypto_hash_update(desc, &sg, skb_frag_size(f))) | |
3025 | return 1; | |
3026 | } | |
3027 | ||
3028 | skb_walk_frags(skb, frag_iter) | |
3029 | if (tcp_md5_hash_skb_data(hp, frag_iter, 0)) | |
3030 | return 1; | |
3031 | ||
3032 | return 0; | |
3033 | } | |
3034 | EXPORT_SYMBOL(tcp_md5_hash_skb_data); | |
3035 | ||
3036 | int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key) | |
3037 | { | |
3038 | struct scatterlist sg; | |
3039 | ||
3040 | sg_init_one(&sg, key->key, key->keylen); | |
3041 | return crypto_hash_update(&hp->md5_desc, &sg, key->keylen); | |
3042 | } | |
3043 | EXPORT_SYMBOL(tcp_md5_hash_key); | |
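/*
 * Sketch of the intended calling pattern for the helpers above (compare
 * tcp_v4_md5_hash_hdr() for the real thing): grab the per-cpu pool, run
 * the hash, release it. BHs stay disabled in between, per the comment on
 * tcp_get_md5sig_pool(). Assumes the tcp_put_md5sig_pool() helper from
 * include/net/tcp.h; "example_md5_hash" is a hypothetical name.
 */
static int example_md5_hash(char *md5_hash, const struct tcp_md5sig_key *key,
			    const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();

	if (!hp)
		return 1;
	if (crypto_hash_init(&hp->md5_desc) ||
	    tcp_md5_hash_header(hp, th) ||
	    tcp_md5_hash_key(hp, key) ||
	    crypto_hash_final(&hp->md5_desc, md5_hash))
		goto clear_hash;
	tcp_put_md5sig_pool();
	return 0;
clear_hash:
	tcp_put_md5sig_pool();
	return 1;
}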
3044 | ||
3045 | #endif | |
3046 | ||
3047 | void tcp_done(struct sock *sk) | |
3048 | { | |
3049 | struct request_sock *req = tcp_sk(sk)->fastopen_rsk; | |
3050 | ||
3051 | if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) | |
3052 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS); | |
3053 | ||
3054 | tcp_set_state(sk, TCP_CLOSE); | |
3055 | tcp_clear_xmit_timers(sk); | |
3056 | if (req) | |
3057 | reqsk_fastopen_remove(sk, req, false); | |
3058 | ||
3059 | sk->sk_shutdown = SHUTDOWN_MASK; | |
3060 | ||
3061 | if (!sock_flag(sk, SOCK_DEAD)) | |
3062 | sk->sk_state_change(sk); | |
3063 | else | |
3064 | inet_csk_destroy_sock(sk); | |
3065 | } | |
3066 | EXPORT_SYMBOL_GPL(tcp_done); | |
3067 | ||
3068 | extern struct tcp_congestion_ops tcp_reno; | |
3069 | ||
3070 | static __initdata unsigned long thash_entries; | |
3071 | static int __init set_thash_entries(char *str) | |
3072 | { | |
3073 | ssize_t ret; | |
3074 | ||
3075 | if (!str) | |
3076 | return 0; | |
3077 | ||
3078 | ret = kstrtoul(str, 0, &thash_entries); | |
3079 | if (ret) | |
3080 | return 0; | |
3081 | ||
3082 | return 1; | |
3083 | } | |
3084 | __setup("thash_entries=", set_thash_entries); | |
3085 | ||
3086 | static void __init tcp_init_mem(void) | |
3087 | { | |
3088 | unsigned long limit = nr_free_buffer_pages() / 16; | |
3089 | ||
3090 | limit = max(limit, 128UL); | |
3091 | sysctl_tcp_mem[0] = limit / 4 * 3; /* 4.68 % */ | |
3092 | sysctl_tcp_mem[1] = limit; /* 6.25 % */ | |
3093 | sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2; /* 9.37 % */ | |
3094 | } | |
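/*
 * Worked example, assuming ~4 GiB worth of 4 KiB buffer pages:
 * nr_free_buffer_pages() ~= 1048576, so limit = 65536 pages and
 * sysctl_tcp_mem becomes { 49152, 65536, 98304 } pages, i.e. TCP starts
 * moderating at ~192 MiB and is hard-limited at ~384 MiB -- matching the
 * 4.68 / 6.25 / 9.37 % ratios noted above.
 */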
3095 | ||
3096 | void __init tcp_init(void) | |
3097 | { | |
3098 | unsigned long limit; | |
3099 | int max_rshare, max_wshare, cnt; | |
3100 | unsigned int i; | |
3101 | ||
3102 | sock_skb_cb_check_size(sizeof(struct tcp_skb_cb)); | |
3103 | ||
3104 | percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL); | |
3105 | percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL); | |
3106 | tcp_hashinfo.bind_bucket_cachep = | |
3107 | kmem_cache_create("tcp_bind_bucket", | |
3108 | sizeof(struct inet_bind_bucket), 0, | |
3109 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); | |
3110 | ||
3111 | /* Size and allocate the main established and bind bucket | |
3112 | * hash tables. | |
3113 | * | |
3114 | * The methodology is similar to that of the buffer cache. | |
3115 | */ | |
3116 | tcp_hashinfo.ehash = | |
3117 | alloc_large_system_hash("TCP established", | |
3118 | sizeof(struct inet_ehash_bucket), | |
3119 | thash_entries, | |
3120 | 17, /* one slot per 128 KB of memory */ | |
3121 | 0, | |
3122 | NULL, | |
3123 | &tcp_hashinfo.ehash_mask, | |
3124 | 0, | |
3125 | thash_entries ? 0 : 512 * 1024); | |
3126 | for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) | |
3127 | INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i); | |
3128 | ||
3129 | if (inet_ehash_locks_alloc(&tcp_hashinfo)) | |
3130 | panic("TCP: failed to alloc ehash_locks"); | |
3131 | tcp_hashinfo.bhash = | |
3132 | alloc_large_system_hash("TCP bind", | |
3133 | sizeof(struct inet_bind_hashbucket), | |
3134 | tcp_hashinfo.ehash_mask + 1, | |
3135 | 17, /* one slot per 128 KB of memory */ | |
3136 | 0, | |
3137 | &tcp_hashinfo.bhash_size, | |
3138 | NULL, | |
3139 | 0, | |
3140 | 64 * 1024); | |
3141 | tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size; | |
3142 | for (i = 0; i < tcp_hashinfo.bhash_size; i++) { | |
3143 | spin_lock_init(&tcp_hashinfo.bhash[i].lock); | |
3144 | INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain); | |
3145 | } | |
3146 | ||
3147 | ||
3148 | cnt = tcp_hashinfo.ehash_mask + 1; | |
3149 | ||
3150 | tcp_death_row.sysctl_max_tw_buckets = cnt / 2; | |
3151 | sysctl_tcp_max_orphans = cnt / 2; | |
3152 | sysctl_max_syn_backlog = max(128, cnt / 256); | |
3153 | ||
3154 | tcp_init_mem(); | |
3155 | /* Set per-socket limits to no more than 1/128 the pressure threshold */ | |
3156 | limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7); | |
3157 | max_wshare = min(4UL*1024*1024, limit); | |
3158 | max_rshare = min(6UL*1024*1024, limit); | |
3159 | ||
3160 | sysctl_tcp_wmem[0] = SK_MEM_QUANTUM; | |
3161 | sysctl_tcp_wmem[1] = 16*1024; | |
3162 | sysctl_tcp_wmem[2] = max(64*1024, max_wshare); | |
3163 | ||
3164 | sysctl_tcp_rmem[0] = SK_MEM_QUANTUM; | |
3165 | sysctl_tcp_rmem[1] = 87380; | |
3166 | sysctl_tcp_rmem[2] = max(87380, max_rshare); | |
3167 | ||
3168 | pr_info("Hash tables configured (established %u bind %u)\n", | |
3169 | tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size); | |
3170 | ||
3171 | tcp_metrics_init(); | |
3172 | BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0); | |
3173 | tcp_tasklet_init(); | |
3174 | } |