/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken
 *					pointers passed where wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive otherwise odd bits of prattle
 *					get lost.
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities.
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *	Michael O'Reilly	:	ack < copied bug fix.
 *	Johannes Stille		:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes.
 *		Charles Hedrick	:	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick	:	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					completely
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon	:	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties remove from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan	:	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING.  Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown.  There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */
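/*
 * Illustrative sketch (not from the original source): seen from user space,
 * an active close typically walks ESTABLISHED -> FIN_WAIT1 -> FIN_WAIT2 ->
 * TIME_WAIT.  Addresses and error handling are omitted; the state names are
 * what this file maintains internally.
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *				-> TCP_SYN_SENT, then TCP_ESTABLISHED
 *	shutdown(fd, SHUT_WR);	our FIN: -> TCP_FIN_WAIT1, then TCP_FIN_WAIT2
 *	close(fd);		after peer's FIN: -> TCP_TIME_WAIT, then closed
 */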
#define pr_fmt(fmt) "TCP: " fmt

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/inet_diag.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/errqueue.h>
#include <linux/static_key.h>

#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/sock.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <net/busy_poll.h>
struct percpu_counter tcp_orphan_count;
EXPORT_SYMBOL_GPL(tcp_orphan_count);

long sysctl_tcp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_mem);

atomic_long_t tcp_memory_allocated;	/* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);

#if IS_ENABLED(CONFIG_SMC)
DEFINE_STATIC_KEY_FALSE(tcp_have_smc);
EXPORT_SYMBOL(tcp_have_smc);
#endif

/*
 * Current number of TCP sockets.
 */
struct percpu_counter tcp_sockets_allocated;
EXPORT_SYMBOL(tcp_sockets_allocated);
/* TCP splice context, carried across tcp_read_sock()'s actor callback. */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};
/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All the __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
unsigned long tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL_GPL(tcp_memory_pressure);

void tcp_enter_memory_pressure(struct sock *sk)
{
	unsigned long val;

	if (tcp_memory_pressure)
		return;
	val = jiffies;

	if (!val)
		val--;
	if (!cmpxchg(&tcp_memory_pressure, 0, val))
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
}
EXPORT_SYMBOL_GPL(tcp_enter_memory_pressure);

void tcp_leave_memory_pressure(struct sock *sk)
{
	unsigned long val;

	if (!tcp_memory_pressure)
		return;
	val = xchg(&tcp_memory_pressure, 0);
	if (val)
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURESCHRONO,
			      jiffies_to_msecs(jiffies - val));
}
EXPORT_SYMBOL_GPL(tcp_leave_memory_pressure);
/* Convert seconds to retransmits based on initial and max timeout */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
	u8 res = 0;

	if (seconds > 0) {
		int period = timeout;

		res = 1;
		while (seconds > period && res < 255) {
			res++;
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return res;
}
/* Convert retransmits to seconds based on initial and max timeout */
static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
	int period = 0;

	if (retrans > 0) {
		period = timeout;
		while (--retrans) {
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return period;
}
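/*
 * Worked example (illustrative, assuming the usual TCP_RTO_INIT of 1s and
 * TCP_RTO_MAX of 120s): the backoff series is 1, 1+2, 1+2+4, ... seconds,
 * i.e. the cumulative periods are 1, 3, 7, 15, 31, 63, 127, 247, ...
 * secs_to_retrans(100, 1s, 120s) therefore returns 7, and
 * retrans_to_secs(7, 1s, 120s) returns 127: the pair round-trips only up
 * to the granularity of a whole doubling step, always rounding the
 * seconds value up to the next cumulative period.
 */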
static u64 tcp_compute_delivery_rate(const struct tcp_sock *tp)
{
	u32 rate = READ_ONCE(tp->rate_delivered);
	u32 intv = READ_ONCE(tp->rate_interval_us);
	u64 rate64 = 0;

	if (rate && intv) {
		rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC;
		do_div(rate64, intv);
	}
	return rate64;
}
/* Address-family independent initialization for a tcp_sock.
 *
 * NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
void tcp_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	tp->out_of_order_queue = RB_ROOT;
	sk->tcp_rtx_queue = RB_ROOT;
	tcp_init_xmit_timers(sk);
	INIT_LIST_HEAD(&tp->tsq_node);
	INIT_LIST_HEAD(&tp->tsorted_sent_queue);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
	minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U);

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = TCP_INIT_CWND;

	/* There's a bubble in the pipe until at least the first ACK. */
	tp->app_limited = ~0U;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;
	tcp_assign_congestion_control(sk);

	tp->rack.reo_wnd_steps = 1;

	sk->sk_state = TCP_CLOSE;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_sync_mss = tcp_sync_mss;

	sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];

	sk_sockets_allocated_inc(sk);
	sk->sk_route_forced_caps = NETIF_F_GSO;
}
EXPORT_SYMBOL(tcp_init_sock);
void tcp_init_transfer(struct sock *sk, int bpf_op)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_mtup_init(sk);
	icsk->icsk_af_ops->rebuild_header(sk);
	tcp_init_metrics(sk);
	tcp_call_bpf(sk, bpf_op, 0, NULL);
	tcp_init_congestion_control(sk);
	tcp_init_buffer_space(sk);
}
static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
{
	struct sk_buff *skb = tcp_write_queue_tail(sk);

	if (tsflags && skb) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);
		struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

		sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags);
		if (tsflags & SOF_TIMESTAMPING_TX_ACK)
			tcb->txstamp_ack = 1;
		if (tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
			shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
	}
}
static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
					  int target, struct sock *sk)
{
	return (tp->rcv_nxt - tp->copied_seq >= target) ||
		(sk->sk_prot->stream_memory_read ?
		sk->sk_prot->stream_memory_read(sk) : false);
}
/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	__poll_t mask;
	struct sock *sk = sock->sk;
	const struct tcp_sock *tp = tcp_sk(sk);
	int state;

	sock_poll_wait(file, sock, wait);

	state = inet_sk_state_load(sk);
	if (state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;

	/*
	 * EPOLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that EPOLLHUP is incompatible
	 * with the EPOLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check number 1. EPOLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making impossible poll() on write()
	 * in state CLOSE_WAIT. One solution is evident --- to set EPOLLHUP
	 * if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look how Solaris and DUX
	 * solve this dilemma. I would prefer, if EPOLLHUP were maskable,
	 * then we could set it on SND_SHUTDOWN. BTW examples given
	 * in Stevens' books assume exactly this behaviour, it explains
	 * why EPOLLHUP is incompatible with EPOLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
		mask |= EPOLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	/* Connected or passive Fast Open socket? */
	if (state != TCP_SYN_SENT &&
	    (state != TCP_SYN_RECV || tp->fastopen_rsk)) {
		int target = sock_rcvlowat(sk, 0, INT_MAX);

		if (tp->urg_seq == tp->copied_seq &&
		    !sock_flag(sk, SOCK_URGINLINE) &&
		    tp->urg_data)
			target++;

		if (tcp_stream_is_readable(tp, target, sk))
			mask |= EPOLLIN | EPOLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_is_writeable(sk)) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {  /* send SIGIO later */
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost. Memory barrier
				 * pairs with the input side.
				 */
				smp_mb__after_atomic();
				if (sk_stream_is_writeable(sk))
					mask |= EPOLLOUT | EPOLLWRNORM;
			}
		} else
			mask |= EPOLLOUT | EPOLLWRNORM;

		if (tp->urg_data & TCP_URG_VALID)
			mask |= EPOLLPRI;
	} else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) {
		/* Active TCP fastopen socket with defer_connect
		 * Return EPOLLOUT so application can call write()
		 * in order for kernel to generate SYN+data
		 */
		mask |= EPOLLOUT | EPOLLWRNORM;
	}
	/* This barrier is coupled with smp_wmb() in tcp_reset() */
	smp_rmb();
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= EPOLLERR;

	return mask;
}
EXPORT_SYMBOL(tcp_poll);
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;
	bool slow;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		slow = lock_sock_fast(sk);
		answ = tcp_inq(sk);
		unlock_sock_fast(sk, slow);
		break;
	case SIOCATMARK:
		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_una;
		break;
	case SIOCOUTQNSD:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_nxt;
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return put_user(answ, (int __user *)arg);
}
EXPORT_SYMBOL(tcp_ioctl);
static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
	tp->pushed_seq = tp->write_seq;
}

static inline bool forced_push(const struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}
static void skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	skb->csum = 0;
	tcb->seq = tcb->end_seq = tp->write_seq;
	tcb->tcp_flags = TCPHDR_ACK;
	tcb->sacked = 0;
	__skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;

	tcp_slow_start_after_idle_check(sk);
}
static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
{
	if (flags & MSG_OOB)
		tp->snd_up = tp->write_seq;
}

/* If a not yet filled skb is pushed, do not send it if
 * we have data packets in Qdisc or NIC queues :
 * Because TX completion will happen shortly, it gives a chance
 * to coalesce future sendmsg() payload into this skb, without
 * need for a timer, and with no latency trade off.
 * As packets containing data payload have a bigger truesize
 * than pure acks (dataless) packets, the last checks prevent
 * autocorking if we only have an ACK in Qdisc/NIC queues,
 * or if TX completion was delayed after we processed ACK packet.
 */
static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
				int size_goal)
{
	return skb->len < size_goal &&
	       sock_net(sk)->ipv4.sysctl_tcp_autocorking &&
	       !tcp_rtx_queue_empty(sk) &&
	       refcount_read(&sk->sk_wmem_alloc) > skb->truesize;
}
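/*
 * Reading of the condition above (informal note, not from the original
 * source): cork when the skb is not yet full, autocorking is enabled, at
 * least one packet has already been transmitted (rtx queue non-empty),
 * and sk_wmem_alloc exceeds this skb's truesize.  For example, with one
 * full-sized data packet still sitting in a Qdisc/NIC queue,
 * sk_wmem_alloc covers both that packet and this skb, so the comparison
 * holds and the imminent TX completion will push this skb for us.
 */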
static void tcp_push(struct sock *sk, int flags, int mss_now,
		     int nonagle, int size_goal)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	skb = tcp_write_queue_tail(sk);
	if (!skb)
		return;
	if (!(flags & MSG_MORE) || forced_push(tp))
		tcp_mark_push(tp, skb);

	tcp_mark_urg(tp, flags);

	if (tcp_should_autocork(sk, skb, size_goal)) {

		/* avoid atomic op if TSQ_THROTTLED bit is already set */
		if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
			set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
		}
		/* It is possible TX completion already happened
		 * before we set TSQ_THROTTLED.
		 */
		if (refcount_read(&sk->sk_wmem_alloc) > skb->truesize)
			return;
	}

	if (flags & MSG_MORE)
		nonagle = TCP_NAGLE_CORK;

	__tcp_push_pending_frames(sk, mss_now, nonagle);
}
static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
				unsigned int offset, size_t len)
{
	struct tcp_splice_state *tss = rd_desc->arg.data;
	int ret;

	ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe,
			      min(rd_desc->count, len), tss->flags);
	if (ret > 0)
		rd_desc->count -= ret;
	return ret;
}

static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
	/* Store TCP splice context information in read_descriptor_t. */
	read_descriptor_t rd_desc = {
		.arg.data = tss,
		.count	  = tss->len,
	};

	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}
/**
 *  tcp_splice_read - splice data from TCP socket to a pipe
 * @sock:	socket to splice from
 * @ppos:	position (not valid)
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
 *
 **/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct tcp_splice_state tss = {
		.pipe = pipe,
		.len = len,
		.flags = flags,
	};
	long timeo;
	ssize_t spliced;
	int ret;

	sock_rps_record_flow(sk);
	/*
	 * We can't seek on a socket input
	 */
	if (unlikely(*ppos))
		return -ESPIPE;

	ret = spliced = 0;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
	while (tss.len) {
		ret = __tcp_splice_read(sk, &tss);
		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				ret = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				/*
				 * This occurs when user tries to read
				 * from never connected socket.
				 */
				ret = -ENOTCONN;
				break;
			}
			if (!timeo) {
				ret = -EAGAIN;
				break;
			}
			/* if __tcp_splice_read() got nothing while we have
			 * an skb in receive queue, we do not want to loop.
			 * This might happen with URG data.
			 */
			if (!skb_queue_empty(&sk->sk_receive_queue))
				break;
			sk_wait_data(sk, &timeo, NULL);
			if (signal_pending(current)) {
				ret = sock_intr_errno(timeo);
				break;
			}
			continue;
		}
		tss.len -= ret;
		spliced += ret;

		if (!timeo)
			break;
		release_sock(sk);
		lock_sock(sk);

		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current))
			break;
	}

	release_sock(sk);

	if (spliced)
		return spliced;

	return ret;
}
EXPORT_SYMBOL(tcp_splice_read);
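/*
 * User-space view (illustrative sketch, error handling omitted): the
 * function above is the receive half of
 *
 *	int pfd[2];
 *	pipe(pfd);
 *	splice(sock_fd, NULL, pfd[1], NULL, 65536, SPLICE_F_MOVE);
 *
 * Pages are moved from the socket receive queue into the pipe without
 * copying the payload through a user buffer.
 */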
struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
				    bool force_schedule)
{
	struct sk_buff *skb;
	bool mem_scheduled;

	/* The TCP header must be at least 32-bit aligned.  */
	size = ALIGN(size, 4);

	if (unlikely(tcp_under_memory_pressure(sk)))
		sk_mem_reclaim_partial(sk);

	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
	if (likely(skb)) {
		if (force_schedule) {
			mem_scheduled = true;
			sk_forced_mem_schedule(sk, skb->truesize);
		} else {
			mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
		}
		if (likely(mem_scheduled)) {
			skb_reserve(skb, sk->sk_prot->max_header);
			/*
			 * Make sure that we have exactly size bytes
			 * available to the caller, no more, no less.
			 */
			skb->reserved_tailroom = skb->end - skb->tail - size;
			INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure(sk);
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}
static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
				       int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 new_size_goal, size_goal;

	if (!large_allowed)
		return mss_now;

	/* Note : tcp_tso_autosize() will eventually split this later */
	new_size_goal = sk->sk_gso_max_size - 1 - MAX_TCP_HEADER;
	new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal);

	/* We try hard to avoid divides here */
	size_goal = tp->gso_segs * mss_now;
	if (unlikely(new_size_goal < size_goal ||
		     new_size_goal >= size_goal + mss_now)) {
		tp->gso_segs = min_t(u16, new_size_goal / mss_now,
				     sk->sk_gso_max_segs);
		size_goal = tp->gso_segs * mss_now;
	}

	return max(size_goal, mss_now);
}
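/*
 * Example with illustrative numbers: mss_now = 1448 and a recomputed
 * new_size_goal of 64000 give tp->gso_segs = 64000 / 1448 = 44, so
 * size_goal = 44 * 1448 = 63712.  Subsequent calls reuse the cached
 * gso_segs (no divide) for as long as new_size_goal stays inside
 * [size_goal, size_goal + mss_now), i.e. until it drifts by at least
 * one full MSS.
 */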
static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
{
	int mss_now;

	mss_now = tcp_current_mss(sk);
	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));

	return mss_now;
}
ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
			 size_t size, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now, size_goal;
	int err;
	ssize_t copied;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. One exception is TCP Fast Open
	 * (passive side) where data is allowed to be sent before a connection
	 * is fully established.
	 */
	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
	    !tcp_passive_fastopen(sk)) {
		err = sk_stream_wait_connect(sk, &timeo);
		if (err != 0)
			goto out_err;
	}

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	mss_now = tcp_send_mss(sk, &size_goal, flags);
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto out_err;

	while (size > 0) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		int copy, i;
		bool can_coalesce;

		if (!skb || (copy = size_goal - skb->len) <= 0 ||
		    !tcp_skb_can_collapse_to(skb)) {
new_segment:
			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;

			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
					tcp_rtx_and_write_queues_empty(sk));
			if (!skb)
				goto wait_for_memory;

			skb_entail(sk, skb);
			copy = size_goal;
		}

		if (copy > size)
			copy = size;

		i = skb_shinfo(skb)->nr_frags;
		can_coalesce = skb_can_coalesce(skb, i, page, offset);
		if (!can_coalesce && i >= sysctl_max_skb_frags) {
			tcp_mark_push(tp, skb);
			goto new_segment;
		}
		if (!sk_wmem_schedule(sk, copy))
			goto wait_for_memory;

		if (can_coalesce) {
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
		} else {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, copy);
		}

		if (!(flags & MSG_NO_SHARED_FRAGS))
			skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;

		skb->len += copy;
		skb->data_len += copy;
		skb->truesize += copy;
		sk->sk_wmem_queued += copy;
		sk_mem_charge(sk, copy);
		skb->ip_summed = CHECKSUM_PARTIAL;
		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;
		tcp_skb_pcount_set(skb, 0);

		if (!copied)
			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;

		copied += copy;
		offset += copy;
		size -= copy;
		if (!size)
			goto out;

		if (skb->len < size_goal || (flags & MSG_OOB))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == tcp_send_head(sk))
			tcp_push_one(sk, mss_now);
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		tcp_push(sk, flags & ~MSG_MORE, mss_now,
			 TCP_NAGLE_PUSH, size_goal);

		err = sk_stream_wait_memory(sk, &timeo);
		if (err != 0)
			goto do_error;

		mss_now = tcp_send_mss(sk, &size_goal, flags);
	}

out:
	if (copied) {
		tcp_tx_timestamp(sk, sk->sk_tsflags);
		if (!(flags & MSG_SENDPAGE_NOTLAST))
			tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
	}
	return copied;

do_error:
	if (copied)
		goto out;
out_err:
	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 &&
		     err == -EAGAIN)) {
		sk->sk_write_space(sk);
		tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
	}
	return sk_stream_error(sk, flags, err);
}
EXPORT_SYMBOL_GPL(do_tcp_sendpages);
int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
			size_t size, int flags)
{
	if (!(sk->sk_route_caps & NETIF_F_SG))
		return sock_no_sendpage_locked(sk, page, offset, size, flags);

	tcp_rate_check_app_limited(sk);  /* is sending application-limited? */

	return do_tcp_sendpages(sk, page, offset, size, flags);
}
EXPORT_SYMBOL_GPL(tcp_sendpage_locked);

int tcp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
	int ret;

	lock_sock(sk);
	ret = tcp_sendpage_locked(sk, page, offset, size, flags);
	release_sock(sk);

	return ret;
}
EXPORT_SYMBOL(tcp_sendpage);
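/*
 * Usage sketch (user-space side, illustrative): tcp_sendpage() is what
 * ultimately backs zero-copy file transmission such as
 *
 *	int out = socket(AF_INET, SOCK_STREAM, 0);
 *	connect(out, (struct sockaddr *)&addr, sizeof(addr));
 *	off_t off = 0;
 *	sendfile(out, file_fd, &off, count);
 *
 * sendfile() walks the page cache and hands whole pages down this path,
 * which attaches them to skbs as fragments instead of copying the data.
 */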
/* Do not bother using a page frag for very small frames.
 * But use this heuristic only for the first skb in write queue.
 *
 * Having no payload in skb->head allows better SACK shifting
 * in tcp_shift_skb_data(), reducing sack/rack overhead, because
 * write queue has less skbs.
 * Each skb can hold up to MAX_SKB_FRAGS * 32Kbytes, or ~0.5 MB.
 * This also speeds up tso_fragment(), since it wont fallback
 * to tcp_fragment().
 */
static int linear_payload_sz(bool first_skb)
{
	if (first_skb)
		return SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
	return 0;
}

static int select_size(bool first_skb, bool zc)
{
	if (zc)
		return 0;
	return linear_payload_sz(first_skb);
}
void tcp_free_fastopen_req(struct tcp_sock *tp)
{
	if (tp->fastopen_req) {
		kfree(tp->fastopen_req);
		tp->fastopen_req = NULL;
	}
}
static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
				int *copied, size_t size,
				struct ubuf_info *uarg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct sockaddr *uaddr = msg->msg_name;
	int err, flags;

	if (!(sock_net(sk)->ipv4.sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) ||
	    (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) &&
	     uaddr->sa_family == AF_UNSPEC))
		return -EOPNOTSUPP;
	if (tp->fastopen_req)
		return -EALREADY; /* Another Fast Open is in progress */

	tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
				   sk->sk_allocation);
	if (unlikely(!tp->fastopen_req))
		return -ENOBUFS;
	tp->fastopen_req->data = msg;
	tp->fastopen_req->size = size;
	tp->fastopen_req->uarg = uarg;

	if (inet->defer_connect) {
		err = tcp_connect(sk);
		/* Same failure procedure as in tcp_v4/6_connect */
		if (err) {
			tcp_set_state(sk, TCP_CLOSE);
			inet->inet_dport = 0;
			sk->sk_route_caps = 0;
		}
	}
	flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
	err = __inet_stream_connect(sk->sk_socket, uaddr,
				    msg->msg_namelen, flags, 1);
	/* fastopen_req could already be freed in __inet_stream_connect
	 * if the connection times out or gets rst
	 */
	if (tp->fastopen_req) {
		*copied = tp->fastopen_req->copied;
		tcp_free_fastopen_req(tp);
		inet->defer_connect = 0;
	}
	return err;
}
int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct ubuf_info *uarg = NULL;
	struct sk_buff *skb;
	struct sockcm_cookie sockc;
	int flags, err, copied = 0;
	int mss_now = 0, size_goal, copied_syn = 0;
	bool process_backlog = false;
	bool zc = false;
	long timeo;

	flags = msg->msg_flags;

	if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) {
		skb = tcp_write_queue_tail(sk);
		uarg = sock_zerocopy_realloc(sk, size, skb_zcopy(skb));
		if (!uarg) {
			err = -ENOBUFS;
			goto out_err;
		}

		zc = sk->sk_route_caps & NETIF_F_SG;
		if (!zc)
			uarg->zerocopy = 0;
	}

	if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect) &&
	    !tp->repair) {
		err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size, uarg);
		if (err == -EINPROGRESS && copied_syn > 0)
			goto out;
		else if (err)
			goto out_err;
	}

	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	tcp_rate_check_app_limited(sk);  /* is sending application-limited? */

	/* Wait for a connection to finish. One exception is TCP Fast Open
	 * (passive side) where data is allowed to be sent before a connection
	 * is fully established.
	 */
	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
	    !tcp_passive_fastopen(sk)) {
		err = sk_stream_wait_connect(sk, &timeo);
		if (err != 0)
			goto do_error;
	}

	if (unlikely(tp->repair)) {
		if (tp->repair_queue == TCP_RECV_QUEUE) {
			copied = tcp_send_rcvq(sk, msg, size);
			goto out_nopush;
		}

		err = -EINVAL;
		if (tp->repair_queue == TCP_NO_QUEUE)
			goto out_err;

		/* 'common' sending to sendq */
	}

	sockcm_init(&sockc, sk);
	if (msg->msg_controllen) {
		err = sock_cmsg_send(sk, msg, &sockc);
		if (unlikely(err)) {
			err = -EINVAL;
			goto out_err;
		}
	}

	/* This should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	/* Ok commence sending. */
	copied = 0;

restart:
	mss_now = tcp_send_mss(sk, &size_goal, flags);

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (msg_data_left(msg)) {
		int copy = 0;

		skb = tcp_write_queue_tail(sk);
		if (skb)
			copy = size_goal - skb->len;

		if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) {
			bool first_skb;
			int linear;

new_segment:
			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;

			if (process_backlog && sk_flush_backlog(sk)) {
				process_backlog = false;
				goto restart;
			}
			first_skb = tcp_rtx_and_write_queues_empty(sk);
			linear = select_size(first_skb, zc);
			skb = sk_stream_alloc_skb(sk, linear, sk->sk_allocation,
						  first_skb);
			if (!skb)
				goto wait_for_memory;

			process_backlog = true;
			skb->ip_summed = CHECKSUM_PARTIAL;

			skb_entail(sk, skb);
			copy = size_goal;

			/* All packets are restored as if they have
			 * already been sent. skb_mstamp_ns isn't set to
			 * avoid wrong rtt estimation.
			 */
			if (tp->repair)
				TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED;
		}

		/* Try to append data to the end of skb. */
		if (copy > msg_data_left(msg))
			copy = msg_data_left(msg);

		/* Where to copy to? */
		if (skb_availroom(skb) > 0 && !zc) {
			/* We have some space in skb head. Superb! */
			copy = min_t(int, copy, skb_availroom(skb));
			err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy);
			if (err)
				goto do_fault;
		} else if (!zc) {
			bool merge = true;
			int i = skb_shinfo(skb)->nr_frags;
			struct page_frag *pfrag = sk_page_frag(sk);

			if (!sk_page_frag_refill(sk, pfrag))
				goto wait_for_memory;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				if (i >= sysctl_max_skb_frags) {
					tcp_mark_push(tp, skb);
					goto new_segment;
				}
				merge = false;
			}

			copy = min_t(int, copy, pfrag->size - pfrag->offset);

			if (!sk_wmem_schedule(sk, copy))
				goto wait_for_memory;

			err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
						       pfrag->page,
						       pfrag->offset,
						       copy);
			if (err)
				goto do_error;

			/* Update the skb. */
			if (merge) {
				skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			} else {
				skb_fill_page_desc(skb, i, pfrag->page,
						   pfrag->offset, copy);
				page_ref_inc(pfrag->page);
			}
			pfrag->offset += copy;
		} else {
			err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg);
			if (err == -EMSGSIZE || err == -EEXIST) {
				tcp_mark_push(tp, skb);
				goto new_segment;
			}
			if (err < 0)
				goto do_error;
			copy = err;
		}

		if (!copied)
			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;

		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;
		tcp_skb_pcount_set(skb, 0);

		copied += copy;
		if (!msg_data_left(msg)) {
			if (unlikely(flags & MSG_EOR))
				TCP_SKB_CB(skb)->eor = 1;
			goto out;
		}

		if (skb->len < size_goal || (flags & MSG_OOB) || unlikely(tp->repair))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == tcp_send_head(sk))
			tcp_push_one(sk, mss_now);
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		if (copied)
			tcp_push(sk, flags & ~MSG_MORE, mss_now,
				 TCP_NAGLE_PUSH, size_goal);

		err = sk_stream_wait_memory(sk, &timeo);
		if (err != 0)
			goto do_error;

		mss_now = tcp_send_mss(sk, &size_goal, flags);
	}

out:
	if (copied) {
		tcp_tx_timestamp(sk, sockc.tsflags);
		tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
	}
out_nopush:
	sock_zerocopy_put(uarg);
	return copied + copied_syn;

do_fault:
	if (!skb->len) {
		tcp_unlink_write_queue(skb, sk);
		/* It is the one place in all of TCP, except connection
		 * reset, where we can be unlinking the send_head.
		 */
		if (tcp_write_queue_empty(sk))
			tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
		sk_wmem_free_skb(sk, skb);
	}

do_error:
	if (copied + copied_syn)
		goto out;
out_err:
	sock_zerocopy_put_abort(uarg, true);
	err = sk_stream_error(sk, flags, err);
	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 &&
		     err == -EAGAIN)) {
		sk->sk_write_space(sk);
		tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
	}
	return err;
}
EXPORT_SYMBOL_GPL(tcp_sendmsg_locked);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	int ret;

	lock_sock(sk);
	ret = tcp_sendmsg_locked(sk, msg, size);
	release_sock(sk);

	return ret;
}
EXPORT_SYMBOL(tcp_sendmsg);
/*
 *	Handle reading urgent data. BSD has very simple semantics for
 *	this, no blocking and very strange errors 8)
 */

static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* No URG data to read. */
	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
	    tp->urg_data == TCP_URG_READ)
		return -EINVAL;	/* Yes this is right ! */

	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
		return -ENOTCONN;

	if (tp->urg_data & TCP_URG_VALID) {
		int err = 0;
		char c = tp->urg_data;

		if (!(flags & MSG_PEEK))
			tp->urg_data = TCP_URG_READ;

		/* Read urgent data. */
		msg->msg_flags |= MSG_OOB;

		if (len > 0) {
			if (!(flags & MSG_TRUNC))
				err = memcpy_to_msg(msg, &c, 1);
			len = 1;
		} else
			msg->msg_flags |= MSG_TRUNC;

		return err ? -EFAULT : len;
	}

	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
		return 0;

	/* Fixed the recv(..., MSG_OOB) behaviour.  BSD docs and
	 * the available implementations agree in this case:
	 * this call should never block, independent of the
	 * blocking state of the socket.
	 * Mike <pall@rz.uni-karlsruhe.de>
	 */
	return -EAGAIN;
}
static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
{
	struct sk_buff *skb;
	int copied = 0, err = 0;

	/* XXX -- need to support SO_PEEK_OFF */

	skb_rbtree_walk(skb, &sk->tcp_rtx_queue) {
		err = skb_copy_datagram_msg(skb, 0, msg, skb->len);
		if (err)
			return err;
		copied += skb->len;
	}

	skb_queue_walk(&sk->sk_write_queue, skb) {
		err = skb_copy_datagram_msg(skb, 0, msg, skb->len);
		if (err)
			break;

		copied += skb->len;
	}

	return err ?: copied;
}
/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary.  COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far, it speeds up the
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
static void tcp_cleanup_rbuf(struct sock *sk, int copied)
{
	struct tcp_sock *tp = tcp_sk(sk);
	bool time_to_ack = false;

	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

	WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
	     "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
	     tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);

	if (inet_csk_ack_scheduled(sk)) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		   /* Delayed ACKs frequently hit locked sockets during bulk
		    * receive. */
		if (icsk->icsk_ack.blocked ||
		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
		    /*
		     * If this read emptied read buffer, we send ACK, if
		     * connection is not bidirectional, user drained
		     * receive buffer and there was a small segment
		     * in queue.
		     */
		    (copied > 0 &&
		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
		       !inet_csk_in_pingpong_mode(sk))) &&
		      !atomic_read(&sk->sk_rmem_alloc)))
			time_to_ack = true;
	}

	/* We send an ACK if we can now advertise a non-zero window
	 * which has been raised "significantly".
	 *
	 * Even if window raised up to infinity, do not send window open ACK
	 * in states, where we will not receive more. It is useless.
	 */
	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		__u32 rcv_window_now = tcp_receive_window(tp);

		/* Optimize, __tcp_select_window() is not cheap. */
		if (2*rcv_window_now <= tp->window_clamp) {
			__u32 new_window = __tcp_select_window(sk);

			/* Send ACK now, if this read freed lots of space
			 * in our buffer. Certainly, new_window is new window.
			 * We can advertise it now, if it is not less than current one.
			 * "Lots" means "at least twice" here.
			 */
			if (new_window && new_window >= 2 * rcv_window_now)
				time_to_ack = true;
		}
	}
	if (time_to_ack)
		tcp_send_ack(sk);
}
static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
	struct sk_buff *skb;
	u32 offset;

	while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
		offset = seq - TCP_SKB_CB(skb)->seq;
		if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
			pr_err_once("%s: found a SYN, please report !\n", __func__);
			offset--;
		}
		if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) {
			*off = offset;
			return skb;
		}
		/* This looks weird, but this can happen if TCP collapsing
		 * splitted a fat GRO packet, while we released socket lock
		 * in skb_splice_bits()
		 */
		sk_eat_skb(sk, skb);
	}
	return NULL;
}
/*
 * This routine provides an alternative to tcp_recvmsg() for routines
 * that would like to handle copying from skbuffs directly in 'sendfile'
 * fashion.
 * Note:
 *	- It is assumed that the socket was locked by the caller.
 *	- The routine does not block.
 *	- At present, there is no support for reading OOB data
 *	  or for 'peeking' the socket using this routine
 *	  (although both would be easy to implement).
 */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 seq = tp->copied_seq;
	u32 offset;
	int copied = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -ENOTCONN;
	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
		if (offset < skb->len) {
			int used;
			size_t len;

			len = skb->len - offset;
			/* Stop reading if we hit a patch of urgent data */
			if (tp->urg_data) {
				u32 urg_offset = tp->urg_seq - seq;
				if (urg_offset < len)
					len = urg_offset;
				if (!len)
					break;
			}
			used = recv_actor(desc, skb, offset, len);
			if (used <= 0) {
				if (!copied)
					copied = used;
				break;
			} else if (used <= len) {
				seq += used;
				copied += used;
				offset += used;
			}
			/* If recv_actor drops the lock (e.g. TCP splice
			 * receive) the skb pointer might be invalid when
			 * getting here: tcp_collapse might have deleted it
			 * while aggregating skbs from the socket queue.
			 */
			skb = tcp_recv_skb(sk, seq - 1, &offset);
			if (!skb)
				break;
			/* TCP coalescing might have appended data to the skb.
			 * Try to splice more frags
			 */
			if (offset + 1 != skb->len)
				continue;
		}
		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
			sk_eat_skb(sk, skb);
			++seq;
			break;
		}
		sk_eat_skb(sk, skb);
		if (!desc->count)
			break;
		tp->copied_seq = seq;
	}
	tp->copied_seq = seq;

	tcp_rcv_space_adjust(sk);

	/* Clean up data we have read: This will do ACK frames. */
	if (copied > 0) {
		tcp_recv_skb(sk, seq, &offset);
		tcp_cleanup_rbuf(sk, copied);
	}
	return copied;
}
EXPORT_SYMBOL(tcp_read_sock);
int tcp_peek_len(struct socket *sock)
{
	return tcp_inq(sock->sk);
}
EXPORT_SYMBOL(tcp_peek_len);
/* Make sure sk_rcvbuf is big enough to satisfy SO_RCVLOWAT hint */
int tcp_set_rcvlowat(struct sock *sk, int val)
{
	int cap;

	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
		cap = sk->sk_rcvbuf >> 1;
	else
		cap = sock_net(sk)->ipv4.sysctl_tcp_rmem[2] >> 1;
	val = min(val, cap);
	sk->sk_rcvlowat = val ? : 1;

	/* Check if we need to signal EPOLLIN right now */
	tcp_data_ready(sk);

	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
		return 0;

	val <<= 1;
	if (val > sk->sk_rcvbuf) {
		sk->sk_rcvbuf = val;
		tcp_sk(sk)->window_clamp = tcp_win_from_space(sk, val);
	}
	return 0;
}
EXPORT_SYMBOL(tcp_set_rcvlowat);
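/*
 * User-space view (illustrative sketch): SO_RCVLOWAT makes poll() and
 * blocking reads wait until at least this many bytes are queued.
 *
 *	int lowat = 64 * 1024;
 *	setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &lowat, sizeof(lowat));
 *
 * The function above caps the request to half the receive buffer limit
 * and grows sk_rcvbuf (to twice the value) when the caller has not
 * pinned it with SO_RCVBUF, so the watermark is actually reachable.
 */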
static const struct vm_operations_struct tcp_vm_ops = {
};

int tcp_mmap(struct file *file, struct socket *sock,
	     struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;
	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);

	/* Instruct vm_insert_page() to not down_read(mmap_sem) */
	vma->vm_flags |= VM_MIXEDMAP;

	vma->vm_ops = &tcp_vm_ops;
	return 0;
}
EXPORT_SYMBOL(tcp_mmap);
static int tcp_zerocopy_receive(struct sock *sk,
				struct tcp_zerocopy_receive *zc)
{
	unsigned long address = (unsigned long)zc->address;
	const skb_frag_t *frags = NULL;
	u32 length = 0, seq, offset;
	struct vm_area_struct *vma;
	struct sk_buff *skb = NULL;
	struct tcp_sock *tp;
	int inq;
	int ret;

	if (address & (PAGE_SIZE - 1) || address != zc->address)
		return -EINVAL;

	if (sk->sk_state == TCP_LISTEN)
		return -ENOTCONN;

	sock_rps_record_flow(sk);

	down_read(&current->mm->mmap_sem);

	ret = -EINVAL;
	vma = find_vma(current->mm, address);
	if (!vma || vma->vm_start > address || vma->vm_ops != &tcp_vm_ops)
		goto out;
	zc->length = min_t(unsigned long, zc->length, vma->vm_end - address);

	tp = tcp_sk(sk);
	seq = tp->copied_seq;
	inq = tcp_inq(sk);
	zc->length = min_t(u32, zc->length, inq);
	zc->length &= ~(PAGE_SIZE - 1);
	if (zc->length) {
		zap_page_range(vma, address, zc->length);
		zc->recv_skip_hint = 0;
	} else {
		zc->recv_skip_hint = inq;
	}
	ret = 0;
	while (length + PAGE_SIZE <= zc->length) {
		if (zc->recv_skip_hint < PAGE_SIZE) {
			if (skb) {
				skb = skb->next;
				offset = seq - TCP_SKB_CB(skb)->seq;
			} else {
				skb = tcp_recv_skb(sk, seq, &offset);
			}

			zc->recv_skip_hint = skb->len - offset;
			offset -= skb_headlen(skb);
			if ((int)offset < 0 || skb_has_frag_list(skb))
				break;
			frags = skb_shinfo(skb)->frags;
			while (offset) {
				if (frags->size > offset)
					goto out;
				offset -= frags->size;
				frags++;
			}
		}
		if (frags->size != PAGE_SIZE || frags->page_offset) {
			int remaining = zc->recv_skip_hint;

			while (remaining && (frags->size != PAGE_SIZE ||
					     frags->page_offset)) {
				remaining -= frags->size;
				frags++;
			}
			zc->recv_skip_hint -= remaining;
			break;
		}
		ret = vm_insert_page(vma, address + length,
				     skb_frag_page(frags));
		if (ret)
			break;
		length += PAGE_SIZE;
		seq += PAGE_SIZE;
		zc->recv_skip_hint -= PAGE_SIZE;
		frags++;
	}
out:
	up_read(&current->mm->mmap_sem);
	if (length) {
		tp->copied_seq = seq;
		tcp_rcv_space_adjust(sk);

		/* Clean up data we have read: This will do ACK frames. */
		tcp_recv_skb(sk, seq, &offset);
		tcp_cleanup_rbuf(sk, length);
		ret = 0;
		if (length == zc->length)
			zc->recv_skip_hint = 0;
	} else {
		if (!zc->recv_skip_hint && sock_flag(sk, SOCK_DONE))
			ret = -EIO;
	}
	zc->length = length;
	return ret;
}
static void tcp_update_recv_tstamps(struct sk_buff *skb,
				    struct scm_timestamping_internal *tss)
{
	if (skb->tstamp)
		tss->ts[0] = ktime_to_timespec64(skb->tstamp);
	else
		tss->ts[0] = (struct timespec64) {0};

	if (skb_hwtstamps(skb)->hwtstamp)
		tss->ts[2] = ktime_to_timespec64(skb_hwtstamps(skb)->hwtstamp);
	else
		tss->ts[2] = (struct timespec64) {0};
}
/* Similar to __sock_recv_timestamp, but does not require an skb */
static void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
			       struct scm_timestamping_internal *tss)
{
	int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW);
	bool has_timestamping = false;

	if (tss->ts[0].tv_sec || tss->ts[0].tv_nsec) {
		if (sock_flag(sk, SOCK_RCVTSTAMP)) {
			if (sock_flag(sk, SOCK_RCVTSTAMPNS)) {
				if (new_tstamp) {
					struct __kernel_timespec kts = {tss->ts[0].tv_sec, tss->ts[0].tv_nsec};

					put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_NEW,
						 sizeof(kts), &kts);
				} else {
					struct timespec ts_old = timespec64_to_timespec(tss->ts[0]);

					put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_OLD,
						 sizeof(ts_old), &ts_old);
				}
			} else {
				if (new_tstamp) {
					struct __kernel_sock_timeval stv;

					stv.tv_sec = tss->ts[0].tv_sec;
					stv.tv_usec = tss->ts[0].tv_nsec / 1000;
					put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_NEW,
						 sizeof(stv), &stv);
				} else {
					struct __kernel_old_timeval tv;

					tv.tv_sec = tss->ts[0].tv_sec;
					tv.tv_usec = tss->ts[0].tv_nsec / 1000;
					put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD,
						 sizeof(tv), &tv);
				}
			}
		}

		if (sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE)
			has_timestamping = true;
		else
			tss->ts[0] = (struct timespec64) {0};
	}

	if (tss->ts[2].tv_sec || tss->ts[2].tv_nsec) {
		if (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)
			has_timestamping = true;
		else
			tss->ts[2] = (struct timespec64) {0};
	}

	if (has_timestamping) {
		tss->ts[1] = (struct timespec64) {0};
		if (sock_flag(sk, SOCK_TSTAMP_NEW))
			put_cmsg_scm_timestamping64(msg, tss);
		else
			put_cmsg_scm_timestamping(msg, tss);
	}
}
static int tcp_inq_hint(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 copied_seq = READ_ONCE(tp->copied_seq);
	u32 rcv_nxt = READ_ONCE(tp->rcv_nxt);
	int inq;

	inq = rcv_nxt - copied_seq;
	if (unlikely(inq < 0 || copied_seq != READ_ONCE(tp->copied_seq))) {
		lock_sock(sk);
		inq = tp->rcv_nxt - tp->copied_seq;
		release_sock(sk);
	}
	return inq;
}
/*
 *	This routine copies from a sock struct into the user buffer.
 *
 *	Technical note: in 2.3 we work on _locked_ socket, so that
 *	tricks with *seq access order and skb->users are not required.
 *	Probably, code can be easily improved even more.
 */

int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
		int flags, int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int copied = 0;
	u32 peek_seq;
	u32 *seq;
	unsigned long used;
	int err, inq;
	int target;		/* Read at least this many bytes */
	long timeo;
	struct sk_buff *skb, *last;
	u32 urg_hole = 0;
	struct scm_timestamping_internal tss;
	bool has_tss = false;
	bool has_cmsg;

	if (unlikely(flags & MSG_ERRQUEUE))
		return inet_recv_error(sk, msg, len, addr_len);

	if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
	    (sk->sk_state == TCP_ESTABLISHED))
		sk_busy_loop(sk, nonblock);

	lock_sock(sk);

	err = -ENOTCONN;
	if (sk->sk_state == TCP_LISTEN)
		goto out;

	has_cmsg = tp->recvmsg_inq;
	timeo = sock_rcvtimeo(sk, nonblock);

	/* Urgent data needs to be handled specially. */
	if (flags & MSG_OOB)
		goto recv_urg;

	if (unlikely(tp->repair)) {
		err = -EPERM;
		if (!(flags & MSG_PEEK))
			goto out;

		if (tp->repair_queue == TCP_SEND_QUEUE)
			goto recv_sndq;

		err = -EINVAL;
		if (tp->repair_queue == TCP_NO_QUEUE)
			goto out;

		/* 'common' recv queue MSG_PEEK-ing */
	}

	seq = &tp->copied_seq;
	if (flags & MSG_PEEK) {
		peek_seq = tp->copied_seq;
		seq = &peek_seq;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	do {
		u32 offset;

		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
		if (tp->urg_data && tp->urg_seq == *seq) {
			if (copied)
				break;
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
				break;
			}
		}

		/* Next get a buffer. */

		last = skb_peek_tail(&sk->sk_receive_queue);
		skb_queue_walk(&sk->sk_receive_queue, skb) {
			last = skb;
			/* Now that we have two receive queues this
			 * shouldn't happen.
			 */
			if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
				 "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n",
				 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
				 flags))
				break;

			offset = *seq - TCP_SKB_CB(skb)->seq;
			if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
				pr_err_once("%s: found a SYN, please report !\n", __func__);
				offset--;
			}
			if (offset < skb->len)
				goto found_ok_skb;
			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
				goto found_fin_ok;
			WARN(!(flags & MSG_PEEK),
			     "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n",
			     *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
		}

		/* Well, if we have backlog, try to process it now yet. */

		if (copied >= target && !sk->sk_backlog.tail)
			break;

		if (copied) {
			if (sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current))
				break;
		} else {
			if (sock_flag(sk, SOCK_DONE))
				break;

			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}

			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			if (sk->sk_state == TCP_CLOSE) {
				/* This occurs when user tries to read
				 * from never connected socket.
				 */
				copied = -ENOTCONN;
				break;
			}

			if (!timeo) {
				copied = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		tcp_cleanup_rbuf(sk, copied);

		if (copied >= target) {
			/* Do not sleep, just process backlog. */
			release_sock(sk);
			lock_sock(sk);
		} else {
			sk_wait_data(sk, &timeo, last);
		}

		if ((flags & MSG_PEEK) &&
		    (peek_seq - copied - urg_hole != tp->copied_seq)) {
			net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n",
					    current->comm,
					    task_pid_nr(current));
			peek_seq = tp->copied_seq;
		}
		continue;

found_ok_skb:
		/* Ok so how much can we use? */
		used = skb->len - offset;
		if (len < used)
			used = len;

		/* Do we have urgent data here? */
		if (tp->urg_data) {
			u32 urg_offset = tp->urg_seq - *seq;
			if (urg_offset < used) {
				if (!urg_offset) {
					if (!sock_flag(sk, SOCK_URGINLINE)) {
						++*seq;
						urg_hole++;
						offset++;
						used--;
						if (!used)
							goto skip_copy;
					}
				} else
					used = urg_offset;
			}
		}

		if (!(flags & MSG_TRUNC)) {
			err = skb_copy_datagram_msg(skb, offset, msg, used);
			if (err) {
				/* Exception. Bailout! */
				if (!copied)
					copied = -EFAULT;
				break;
			}
		}

		*seq += used;
		copied += used;
		len -= used;

		tcp_rcv_space_adjust(sk);

skip_copy:
		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
			tp->urg_data = 0;
			tcp_fast_path_check(sk);
		}
		if (used + offset < skb->len)
			continue;

		if (TCP_SKB_CB(skb)->has_rxtstamp) {
			tcp_update_recv_tstamps(skb, &tss);
			has_tss = true;
			has_cmsg = true;
		}
		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
			goto found_fin_ok;
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb);
		continue;

found_fin_ok:
		/* Process the FIN. */
		++*seq;
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb);
		break;
	} while (len > 0);

	/* According to UNIX98, msg_name/msg_namelen are ignored
	 * on connected socket. I was just happy when found this 8) --ANK
	 */

	/* Clean up data we have read: This will do ACK frames. */
	tcp_cleanup_rbuf(sk, copied);

	release_sock(sk);

	if (has_cmsg) {
		if (has_tss)
			tcp_recv_timestamp(msg, sk, &tss);
		if (tp->recvmsg_inq) {
			inq = tcp_inq_hint(sk);
			put_cmsg(msg, SOL_TCP, TCP_CM_INQ, sizeof(inq), &inq);
		}
	}

	return copied;

out:
	release_sock(sk);
	return err;

recv_urg:
	err = tcp_recv_urg(sk, msg, len, flags);
	goto out;

recv_sndq:
	err = tcp_peek_sndq(sk, msg, len);
	goto out;
}
EXPORT_SYMBOL(tcp_recvmsg);
void tcp_set_state(struct sock *sk, int state)
{
	int oldstate = sk->sk_state;

	/* We defined a new enum for TCP states that are exported in BPF
	 * so as not force the internal TCP states to be frozen. The
	 * following checks will detect if an internal state value ever
	 * differs from the BPF value. If this ever happens, then we will
	 * need to remap the internal value to the BPF value before calling
	 * tcp_call_bpf_2arg.
	 */
	BUILD_BUG_ON((int)BPF_TCP_ESTABLISHED != (int)TCP_ESTABLISHED);
	BUILD_BUG_ON((int)BPF_TCP_SYN_SENT != (int)TCP_SYN_SENT);
	BUILD_BUG_ON((int)BPF_TCP_SYN_RECV != (int)TCP_SYN_RECV);
	BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT1 != (int)TCP_FIN_WAIT1);
	BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT2 != (int)TCP_FIN_WAIT2);
	BUILD_BUG_ON((int)BPF_TCP_TIME_WAIT != (int)TCP_TIME_WAIT);
	BUILD_BUG_ON((int)BPF_TCP_CLOSE != (int)TCP_CLOSE);
	BUILD_BUG_ON((int)BPF_TCP_CLOSE_WAIT != (int)TCP_CLOSE_WAIT);
	BUILD_BUG_ON((int)BPF_TCP_LAST_ACK != (int)TCP_LAST_ACK);
	BUILD_BUG_ON((int)BPF_TCP_LISTEN != (int)TCP_LISTEN);
	BUILD_BUG_ON((int)BPF_TCP_CLOSING != (int)TCP_CLOSING);
	BUILD_BUG_ON((int)BPF_TCP_NEW_SYN_RECV != (int)TCP_NEW_SYN_RECV);
	BUILD_BUG_ON((int)BPF_TCP_MAX_STATES != (int)TCP_MAX_STATES);

	if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_STATE_CB_FLAG))
		tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_STATE_CB, oldstate, state);

	switch (state) {
	case TCP_ESTABLISHED:
		if (oldstate != TCP_ESTABLISHED)
			TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
		break;

	case TCP_CLOSE:
		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
			TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(sk);
		/* fall through */
	default:
		if (oldstate == TCP_ESTABLISHED)
			TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	inet_sk_state_store(sk, state);
}
EXPORT_SYMBOL_GPL(tcp_set_state);
2267 * State processing on a close. This implements the state shift for
2268 * sending our FIN frame. Note that we only send a FIN for some
2269 * states. A shutdown() may have already sent the FIN, or we may be
2273 static const unsigned char new_state
[16] = {
2274 /* current state: new state: action: */
2275 [0 /* (Invalid) */] = TCP_CLOSE
,
2276 [TCP_ESTABLISHED
] = TCP_FIN_WAIT1
| TCP_ACTION_FIN
,
2277 [TCP_SYN_SENT
] = TCP_CLOSE
,
2278 [TCP_SYN_RECV
] = TCP_FIN_WAIT1
| TCP_ACTION_FIN
,
2279 [TCP_FIN_WAIT1
] = TCP_FIN_WAIT1
,
2280 [TCP_FIN_WAIT2
] = TCP_FIN_WAIT2
,
2281 [TCP_TIME_WAIT
] = TCP_CLOSE
,
2282 [TCP_CLOSE
] = TCP_CLOSE
,
2283 [TCP_CLOSE_WAIT
] = TCP_LAST_ACK
| TCP_ACTION_FIN
,
2284 [TCP_LAST_ACK
] = TCP_LAST_ACK
,
2285 [TCP_LISTEN
] = TCP_CLOSE
,
2286 [TCP_CLOSING
] = TCP_CLOSING
,
2287 [TCP_NEW_SYN_RECV
] = TCP_CLOSE
, /* should not happen ! */
static int tcp_close_state(struct sock *sk)
{
	int next = (int)new_state[sk->sk_state];
	int ns = next & TCP_STATE_MASK;

	tcp_set_state(sk, ns);

	return next & TCP_ACTION_FIN;
}
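/* Worked example (illustrative): close() on an ESTABLISHED socket looks up
 * new_state[TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, so
 * tcp_close_state() moves the socket to FIN-WAIT-1 and returns non-zero,
 * telling the caller a FIN must be transmitted. From TCP_FIN_WAIT1 the table
 * maps back to TCP_FIN_WAIT1 with no action bit, so a second close-type
 * event neither changes state nor queues another FIN.
 */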
/*
 *	Shutdown the sending side of a connection. Much like close except
 *	that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD).
 */

void tcp_shutdown(struct sock *sk, int how)
{
	/*	We need to grab some memory, and put together a FIN,
	 *	and then put it into the queue to be sent.
	 *		Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
	 */
	if (!(how & SEND_SHUTDOWN))
		return;

	/* If we've already sent a FIN, or it's a closed state, skip this. */
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
		/* Clear out any half completed packets.  FIN if needed. */
		if (tcp_close_state(sk))
			tcp_send_fin(sk);
	}
}
EXPORT_SYMBOL(tcp_shutdown);
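/* Illustration (not kernel code): the userspace entry point for the path
 * above is shutdown(2) with SHUT_WR, which arrives here with SEND_SHUTDOWN
 * set and leaves the receive side usable (a half-close):
 *
 *	ssize_t n;
 *
 *	shutdown(fd, SHUT_WR);			// send our FIN, keep reading
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		consume(buf, n);		// peer data until its own FIN
 *
 * consume() is a placeholder for application logic.
 */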
bool tcp_check_oom(struct sock *sk, int shift)
{
	bool too_many_orphans, out_of_socket_memory;

	too_many_orphans = tcp_too_many_orphans(sk, shift);
	out_of_socket_memory = tcp_out_of_memory(sk);

	if (too_many_orphans)
		net_info_ratelimited("too many orphaned sockets\n");
	if (out_of_socket_memory)
		net_info_ratelimited("out of memory -- consider tuning tcp_mem\n");
	return too_many_orphans || out_of_socket_memory;
}
void tcp_close(struct sock *sk, long timeout)
{
	struct sk_buff *skb;
	int data_was_unread = 0;
	int state;

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == TCP_LISTEN) {
		tcp_set_state(sk, TCP_CLOSE);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	/*  We need to flush the recv. buffs.  We do this only on the
	 *  descriptor close, not protocol-sourced closes, because the
	 *  reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;

		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
			len--;
		data_was_unread += len;
		__kfree_skb(skb);
	}

	sk_mem_reclaim(sk);

	/* If socket has been already reset (e.g. in tcp_reset()) - kill it. */
	if (sk->sk_state == TCP_CLOSE)
		goto adjudge_to_death;

	/* As outlined in RFC 2525, section 2.17, we send a RST here because
	 * data was lost. To witness the awful effects of the old behavior of
	 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
	 * GET in an FTP client, suspend the process, wait for the client to
	 * advertise a zero window, then kill -9 the FTP client, wheee...
	 * Note: timeout is always zero in such a case.
	 */
	if (unlikely(tcp_sk(sk)->repair)) {
		sk->sk_prot->disconnect(sk, 0);
	} else if (data_was_unread) {
		/* Unread data was tossed, zap the connection. */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
		tcp_set_state(sk, TCP_CLOSE);
		tcp_send_active_reset(sk, sk->sk_allocation);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
	} else if (tcp_close_state(sk)) {
		/* We FIN if the application ate all the data before
		 * zapping the connection.
		 */

		/* RED-PEN. Formally speaking, we have broken the TCP state
		 * machine. State transitions:
		 *
		 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
		 * TCP_SYN_RECV	-> TCP_FIN_WAIT1 (forget it, it's impossible)
		 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
		 *
		 * are legal only when the FIN has been sent (i.e. in window),
		 * rather than queued out of window. Purists blame.
		 *
		 * F.e. "RFC state" is ESTABLISHED,
		 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
		 *
		 * The visible deviations are that sometimes we enter
		 * time-wait state when it is not really required (harmless),
		 * and do not send active resets when they are required by
		 * the specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when they look
		 * like CLOSING or LAST_ACK to Linux).
		 * Probably, I missed some more holelets.
		 * 						--ANK
		 * XXX (TFO) - To start off we don't support SYN+ACK+FIN
		 * in a single packet! (May consider it later but will
		 * probably need API support or TCP_CORK SYN-ACK until
		 * data is written and socket is closed.)
		 */
		tcp_send_fin(sk);
	}

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);

	local_bh_disable();
	bh_lock_sock(sk);
	/* remove backlog if any, without releasing ownership. */
	__release_sock(sk);

	percpu_counter_inc(sk->sk_prot->orphan_count);

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
		goto out;

	/*	This is a (useful) BSD violation of the RFC. There is a
	 *	problem with TCP as specified in that the other end could
	 *	keep a socket open forever with no application left this end.
	 *	We use a 1 minute timeout (about the same as BSD) then kill
	 *	our end. If they send after that then tough - BUT: long enough
	 *	that we won't make the old 4*rto = almost no time - whoops
	 *	reset mistake.
	 *
	 *	Nope, it was not a mistake. It is really desired behaviour
	 *	f.e. on http servers, when such sockets are useless, but
	 *	consume significant resources. Let's do it with special
	 *	linger2	option.					--ANK
	 */

	if (sk->sk_state == TCP_FIN_WAIT2) {
		struct tcp_sock *tp = tcp_sk(sk);
		if (tp->linger2 < 0) {
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPABORTONLINGER);
		} else {
			const int tmo = tcp_fin_time(sk);

			if (tmo > TCP_TIMEWAIT_LEN) {
				inet_csk_reset_keepalive_timer(sk,
						tmo - TCP_TIMEWAIT_LEN);
			} else {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
	}
	if (sk->sk_state != TCP_CLOSE) {
		sk_mem_reclaim(sk);
		if (tcp_check_oom(sk, 0)) {
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPABORTONMEMORY);
		} else if (!check_net(sock_net(sk))) {
			/* Not possible to send reset; just close */
			tcp_set_state(sk, TCP_CLOSE);
		}
	}

	if (sk->sk_state == TCP_CLOSE) {
		struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
		/* We could get here with a non-NULL req if the socket is
		 * aborted (e.g., closed with unread data) before 3WHS
		 * finishes.
		 */
		if (req)
			reqsk_fastopen_remove(sk, req, false);
		inet_csk_destroy_sock(sk);
	}
	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	release_sock(sk);
	sock_put(sk);
}
EXPORT_SYMBOL(tcp_close);
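/* Illustration (not kernel code): the zero-linger branch above can be
 * triggered deliberately from userspace to tear a connection down with a
 * RST instead of a FIN; a minimal sketch using only the standard socket API:
 *
 *	struct linger lg = { .l_onoff = 1, .l_linger = 0 };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
 *	close(fd);	// sk_lingertime == 0 -> disconnect, TCPABORTONDATA
 */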
/* These states need RST on ABORT according to RFC793 */

static inline bool tcp_need_reset(int state)
{
	return (1 << state) &
	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
}
static void tcp_rtx_queue_purge(struct sock *sk)
{
	struct rb_node *p = rb_first(&sk->tcp_rtx_queue);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		/* Since we are deleting whole queue, no need to
		 * list_del(&skb->tcp_tsorted_anchor)
		 */
		tcp_rtx_queue_unlink(skb, sk);
		sk_wmem_free_skb(sk, skb);
	}
}
void tcp_write_queue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		tcp_skb_tsorted_anchor_cleanup(skb);
		sk_wmem_free_skb(sk, skb);
	}
	tcp_rtx_queue_purge(sk);
	INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue);
	sk_mem_reclaim(sk);
	tcp_clear_all_retrans_hints(tcp_sk(sk));
	tcp_sk(sk)->packets_out = 0;
	inet_csk(sk)->icsk_backoff = 0;
}
int tcp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int old_state = sk->sk_state;

	if (old_state != TCP_CLOSE)
		tcp_set_state(sk, TCP_CLOSE);

	/* ABORT function of RFC793 */
	if (old_state == TCP_LISTEN) {
		inet_csk_listen_stop(sk);
	} else if (unlikely(tp->repair)) {
		sk->sk_err = ECONNABORTED;
	} else if (tcp_need_reset(old_state) ||
		   (tp->snd_nxt != tp->write_seq &&
		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
		/* The last check adjusts for discrepancy of Linux wrt. RFC
		 * states
		 */
		tcp_send_active_reset(sk, gfp_any());
		sk->sk_err = ECONNRESET;
	} else if (old_state == TCP_SYN_SENT)
		sk->sk_err = ECONNRESET;

	tcp_clear_xmit_timers(sk);
	__skb_queue_purge(&sk->sk_receive_queue);
	tp->copied_seq = tp->rcv_nxt;
	tp->urg_data = 0;
	tcp_write_queue_purge(sk);
	tcp_fastopen_active_disable_ofo_check(sk);
	skb_rbtree_purge(&tp->out_of_order_queue);

	inet->inet_dport = 0;

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);
	tp->srtt_us = 0;
	tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
	tp->rcv_rtt_last_tsecr = 0;
	tp->write_seq += tp->max_window + 2;
	if (tp->write_seq == 0)
		tp->write_seq = 1;
	icsk->icsk_backoff = 0;
	tp->snd_cwnd = 2;
	icsk->icsk_probes_out = 0;
	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd = TCP_INIT_CWND;
	tp->snd_cwnd_cnt = 0;
	tp->window_clamp = 0;
	tp->delivered_ce = 0;
	tcp_set_ca_state(sk, TCP_CA_Open);
	tp->is_sack_reneg = 0;
	tcp_clear_retrans(tp);
	inet_csk_delack_init(sk);
	/* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
	 * issue in __tcp_select_window()
	 */
	icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
	__sk_dst_reset(sk);
	dst_release(sk->sk_rx_dst);
	sk->sk_rx_dst = NULL;
	tcp_saved_syn_free(tp);
	tp->compressed_ack = 0;
	tp->bytes_sent = 0;
	tp->bytes_retrans = 0;
	tp->duplicate_sack[0].start_seq = 0;
	tp->duplicate_sack[0].end_seq = 0;
	tp->dsack_dups = 0;
	tp->reord_seen = 0;
	tp->retrans_out = 0;
	tp->sacked_out = 0;
	tp->tlp_high_seq = 0;
	tp->last_oow_ack_time = 0;
	/* There's a bubble in the pipe until at least the first ACK. */
	tp->app_limited = ~0U;
	tp->rack.mstamp = 0;
	tp->rack.advanced = 0;
	tp->rack.reo_wnd_steps = 1;
	tp->rack.last_delivered = 0;
	tp->rack.reo_wnd_persist = 0;
	tp->rack.dsack_seen = 0;
	tp->syn_data_acked = 0;
	tp->rx_opt.saw_tstamp = 0;
	tp->rx_opt.dsack = 0;
	tp->rx_opt.num_sacks = 0;

	/* Clean up fastopen related fields */
	tcp_free_fastopen_req(tp);
	inet->defer_connect = 0;

	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);

	if (sk->sk_frag.page) {
		put_page(sk->sk_frag.page);
		sk->sk_frag.page = NULL;
		sk->sk_frag.offset = 0;
	}

	sk->sk_error_report(sk);
	return 0;
}
EXPORT_SYMBOL(tcp_disconnect);
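/* Illustration (not kernel code): userspace reaches tcp_disconnect() by
 * calling connect(2) with an AF_UNSPEC address, which dissolves the
 * association and returns the socket to an unconnected state:
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *
 *	connect(fd, &sa, sizeof(sa));	// -> sk->sk_prot->disconnect()
 */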
static inline bool tcp_can_repair_sock(const struct sock *sk)
{
	return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) &&
		(sk->sk_state != TCP_LISTEN);
}
static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int len)
{
	struct tcp_repair_window opt;

	if (!tp->repair)
		return -EPERM;

	if (len != sizeof(opt))
		return -EINVAL;

	if (copy_from_user(&opt, optbuf, sizeof(opt)))
		return -EFAULT;

	if (opt.max_window < opt.snd_wnd)
		return -EINVAL;

	if (after(opt.snd_wl1, tp->rcv_nxt + opt.rcv_wnd))
		return -EINVAL;

	if (after(opt.rcv_wup, tp->rcv_nxt))
		return -EINVAL;

	tp->snd_wl1	= opt.snd_wl1;
	tp->snd_wnd	= opt.snd_wnd;
	tp->max_window	= opt.max_window;

	tp->rcv_wnd	= opt.rcv_wnd;
	tp->rcv_wup	= opt.rcv_wup;

	return 0;
}
static int tcp_repair_options_est(struct sock *sk,
		struct tcp_repair_opt __user *optbuf, unsigned int len)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_repair_opt opt;

	while (len >= sizeof(opt)) {
		if (copy_from_user(&opt, optbuf, sizeof(opt)))
			return -EFAULT;

		optbuf++;
		len -= sizeof(opt);

		switch (opt.opt_code) {
		case TCPOPT_MSS:
			tp->rx_opt.mss_clamp = opt.opt_val;
			tcp_mtup_init(sk);
			break;
		case TCPOPT_WINDOW:
			{
				u16 snd_wscale = opt.opt_val & 0xFFFF;
				u16 rcv_wscale = opt.opt_val >> 16;

				if (snd_wscale > TCP_MAX_WSCALE || rcv_wscale > TCP_MAX_WSCALE)
					return -EFBIG;

				tp->rx_opt.snd_wscale = snd_wscale;
				tp->rx_opt.rcv_wscale = rcv_wscale;
				tp->rx_opt.wscale_ok = 1;
			}
			break;
		case TCPOPT_SACK_PERM:
			if (opt.opt_val != 0)
				return -EINVAL;

			tp->rx_opt.sack_ok |= TCP_SACK_SEEN;
			break;
		case TCPOPT_TIMESTAMP:
			if (opt.opt_val != 0)
				return -EINVAL;

			tp->rx_opt.tstamp_ok = 1;
			break;
		}
	}

	return 0;
}
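/* Illustration (not kernel code): a checkpoint/restore tool (e.g. CRIU)
 * drives the repair helpers above roughly as follows; a hedged sketch with
 * error handling and the real saved values (snd_wscale, rcv_wscale) elided:
 *
 *	int on = TCP_REPAIR_ON, off = TCP_REPAIR_OFF;
 *	struct tcp_repair_opt opts[] = {
 *		{ TCPOPT_MSS, 1460 },
 *		{ TCPOPT_WINDOW, snd_wscale | (rcv_wscale << 16) },
 *	};
 *
 *	setsockopt(fd, SOL_TCP, TCP_REPAIR, &on, sizeof(on));
 *	setsockopt(fd, SOL_TCP, TCP_REPAIR_OPTIONS, opts, sizeof(opts));
 *	setsockopt(fd, SOL_TCP, TCP_REPAIR, &off, sizeof(off));
 */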
/*
 *	Socket option code for TCP.
 */
static int do_tcp_setsockopt(struct sock *sk, int level,
		int optname, char __user *optval, unsigned int optlen)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct net *net = sock_net(sk);
	int val;
	int err = 0;

	/* These are data/string values, all the others are ints */
	switch (optname) {
	case TCP_CONGESTION: {
		char name[TCP_CA_NAME_MAX];

		if (optlen < 1)
			return -EINVAL;

		val = strncpy_from_user(name, optval,
					min_t(long, TCP_CA_NAME_MAX-1, optlen));
		if (val < 0)
			return -EFAULT;
		name[val] = 0;

		lock_sock(sk);
		err = tcp_set_congestion_control(sk, name, true, true);
		release_sock(sk);
		return err;
	}
	case TCP_ULP: {
		char name[TCP_ULP_NAME_MAX];

		if (optlen < 1)
			return -EINVAL;

		val = strncpy_from_user(name, optval,
					min_t(long, TCP_ULP_NAME_MAX - 1,
					      optlen));
		if (val < 0)
			return -EFAULT;
		name[val] = 0;

		lock_sock(sk);
		err = tcp_set_ulp(sk, name);
		release_sock(sk);
		return err;
	}
	case TCP_FASTOPEN_KEY: {
		__u8 key[TCP_FASTOPEN_KEY_LENGTH];

		if (optlen != sizeof(key))
			return -EINVAL;

		if (copy_from_user(key, optval, optlen))
			return -EFAULT;

		return tcp_fastopen_reset_cipher(net, sk, key, sizeof(key));
	}
	default:
		/* fallthru */
		break;
	}

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case TCP_MAXSEG:
		/* Values greater than interface MTU won't take effect. However
		 * at the point when this call is done we typically don't yet
		 * know which interface is going to be used.
		 */
		if (val && (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW)) {
			err = -EINVAL;
			break;
		}
		tp->rx_opt.user_mss = val;
		break;

	case TCP_NODELAY:
		if (val) {
			/* TCP_NODELAY is weaker than TCP_CORK, so that
			 * this option on a corked socket is remembered, but
			 * it is not activated until the cork is cleared.
			 *
			 * However, when TCP_NODELAY is set we make
			 * an explicit push, which overrides even TCP_CORK
			 * for currently queued segments.
			 */
			tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
			tcp_push_pending_frames(sk);
		} else {
			tp->nonagle &= ~TCP_NAGLE_OFF;
		}
		break;

	case TCP_THIN_LINEAR_TIMEOUTS:
		if (val < 0 || val > 1)
			err = -EINVAL;
		else
			tp->thin_lto = val;
		break;

	case TCP_THIN_DUPACK:
		if (val < 0 || val > 1)
			err = -EINVAL;
		break;

	case TCP_REPAIR:
		if (!tcp_can_repair_sock(sk))
			err = -EPERM;
		else if (val == TCP_REPAIR_ON) {
			tp->repair = 1;
			sk->sk_reuse = SK_FORCE_REUSE;
			tp->repair_queue = TCP_NO_QUEUE;
		} else if (val == TCP_REPAIR_OFF) {
			tp->repair = 0;
			sk->sk_reuse = SK_NO_REUSE;
			tcp_send_window_probe(sk);
		} else if (val == TCP_REPAIR_OFF_NO_WP) {
			tp->repair = 0;
			sk->sk_reuse = SK_NO_REUSE;
		} else
			err = -EINVAL;
		break;

	case TCP_REPAIR_QUEUE:
		if (!tp->repair)
			err = -EPERM;
		else if ((unsigned int)val < TCP_QUEUES_NR)
			tp->repair_queue = val;
		else
			err = -EINVAL;
		break;

	case TCP_QUEUE_SEQ:
		if (sk->sk_state != TCP_CLOSE)
			err = -EPERM;
		else if (tp->repair_queue == TCP_SEND_QUEUE)
			tp->write_seq = val;
		else if (tp->repair_queue == TCP_RECV_QUEUE)
			tp->rcv_nxt = val;
		else
			err = -EINVAL;
		break;

	case TCP_REPAIR_OPTIONS:
		if (!tp->repair)
			err = -EINVAL;
		else if (sk->sk_state == TCP_ESTABLISHED)
			err = tcp_repair_options_est(sk,
					(struct tcp_repair_opt __user *)optval,
					optlen);
		else
			err = -EPERM;
		break;

	case TCP_CORK:
		/* When set indicates to always queue non-full frames.
		 * Later the user clears this option and we transmit
		 * any pending partial frames in the queue. This is
		 * meant to be used alongside sendfile() to get properly
		 * filled frames when the user (for example) must write
		 * out headers with a write() call first and then use
		 * sendfile to send out the data parts.
		 *
		 * TCP_CORK can be set together with TCP_NODELAY and it is
		 * stronger than TCP_NODELAY.
		 */
		if (val) {
			tp->nonagle |= TCP_NAGLE_CORK;
		} else {
			tp->nonagle &= ~TCP_NAGLE_CORK;
			if (tp->nonagle & TCP_NAGLE_OFF)
				tp->nonagle |= TCP_NAGLE_PUSH;
			tcp_push_pending_frames(sk);
		}
		break;

	case TCP_KEEPIDLE:
		if (val < 1 || val > MAX_TCP_KEEPIDLE)
			err = -EINVAL;
		else {
			tp->keepalive_time = val * HZ;
			if (sock_flag(sk, SOCK_KEEPOPEN) &&
			    !((1 << sk->sk_state) &
			      (TCPF_CLOSE | TCPF_LISTEN))) {
				u32 elapsed = keepalive_time_elapsed(tp);
				if (tp->keepalive_time > elapsed)
					elapsed = tp->keepalive_time - elapsed;
				else
					elapsed = 0;
				inet_csk_reset_keepalive_timer(sk, elapsed);
			}
		}
		break;
	case TCP_KEEPINTVL:
		if (val < 1 || val > MAX_TCP_KEEPINTVL)
			err = -EINVAL;
		else
			tp->keepalive_intvl = val * HZ;
		break;
	case TCP_KEEPCNT:
		if (val < 1 || val > MAX_TCP_KEEPCNT)
			err = -EINVAL;
		else
			tp->keepalive_probes = val;
		break;
	case TCP_SYNCNT:
		if (val < 1 || val > MAX_TCP_SYNCNT)
			err = -EINVAL;
		else
			icsk->icsk_syn_retries = val;
		break;

	case TCP_SAVE_SYN:
		if (val < 0 || val > 1)
			err = -EINVAL;
		else
			tp->save_syn = val;
		break;

	case TCP_LINGER2:
		if (val < 0)
			tp->linger2 = -1;
		else if (val > net->ipv4.sysctl_tcp_fin_timeout / HZ)
			tp->linger2 = 0;
		else
			tp->linger2 = val * HZ;
		break;

	case TCP_DEFER_ACCEPT:
		/* Translate value in seconds to number of retransmits */
		icsk->icsk_accept_queue.rskq_defer_accept =
			secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
					TCP_RTO_MAX / HZ);
		break;

	case TCP_WINDOW_CLAMP:
		if (!val) {
			if (sk->sk_state != TCP_CLOSE) {
				err = -EINVAL;
				break;
			}
			tp->window_clamp = 0;
		} else
			tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
						SOCK_MIN_RCVBUF / 2 : val;
		break;

	case TCP_QUICKACK:
		if (!val) {
			inet_csk_enter_pingpong_mode(sk);
		} else {
			inet_csk_exit_pingpong_mode(sk);
			if ((1 << sk->sk_state) &
			    (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
			    inet_csk_ack_scheduled(sk)) {
				icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
				tcp_cleanup_rbuf(sk, 1);
				if (!(val & 1))
					inet_csk_enter_pingpong_mode(sk);
			}
		}
		break;

#ifdef CONFIG_TCP_MD5SIG
	case TCP_MD5SIG:
	case TCP_MD5SIG_EXT:
		if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
			err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
		else
			err = -EINVAL;
		break;
#endif
	case TCP_USER_TIMEOUT:
		/* Cap the max time in ms TCP will retry or probe the window
		 * before giving up and aborting (ETIMEDOUT) a connection.
		 */
		if (val < 0)
			err = -EINVAL;
		else
			icsk->icsk_user_timeout = val;
		break;

	case TCP_FASTOPEN:
		if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
		    TCPF_LISTEN))) {
			tcp_fastopen_init_key_once(net);

			fastopen_queue_tune(sk, val);
		} else {
			err = -EINVAL;
		}
		break;
	case TCP_FASTOPEN_CONNECT:
		if (val > 1 || val < 0) {
			err = -EINVAL;
		} else if (net->ipv4.sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) {
			if (sk->sk_state == TCP_CLOSE)
				tp->fastopen_connect = val;
			else
				err = -EINVAL;
		} else {
			err = -EOPNOTSUPP;
		}
		break;
	case TCP_FASTOPEN_NO_COOKIE:
		if (val > 1 || val < 0)
			err = -EINVAL;
		else if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
			err = -EINVAL;
		else
			tp->fastopen_no_cookie = val;
		break;
	case TCP_TIMESTAMP:
		if (!tp->repair)
			err = -EPERM;
		else
			tp->tsoffset = val - tcp_time_stamp_raw();
		break;
	case TCP_REPAIR_WINDOW:
		err = tcp_repair_set_window(tp, optval, optlen);
		break;
	case TCP_NOTSENT_LOWAT:
		tp->notsent_lowat = val;
		sk->sk_write_space(sk);
		break;
	case TCP_INQ:
		if (val > 1 || val < 0)
			err = -EINVAL;
		else
			tp->recvmsg_inq = val;
		break;
	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
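/* Illustration (not kernel code): the TCP_CORK/TCP_NODELAY semantics
 * described above, as seen from userspace; headers are corked together with
 * the payload, then the cork is cleared to push the assembled frame:
 *
 *	int one = 1, zero = 0;
 *
 *	setsockopt(fd, SOL_TCP, TCP_CORK, &one, sizeof(one));
 *	write(fd, hdr, hdr_len);		// queued, not yet pushed
 *	sendfile(fd, filefd, NULL, file_len);	// still corked
 *	setsockopt(fd, SOL_TCP, TCP_CORK, &zero, sizeof(zero));  // push
 *
 * hdr, hdr_len, filefd and file_len are placeholders.
 */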
int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   unsigned int optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (level != SOL_TCP)
		return icsk->icsk_af_ops->setsockopt(sk, level, optname,
						     optval, optlen);
	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(tcp_setsockopt);
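/* Illustration (not kernel code): configuring the keepalive knobs handled by
 * do_tcp_setsockopt() above; with these values a dead peer is detected after
 * roughly 60 + 5 * 10 seconds:
 *
 *	int on = 1, idle = 60, intvl = 10, cnt = 5;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
 *	setsockopt(fd, SOL_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
 *	setsockopt(fd, SOL_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
 *	setsockopt(fd, SOL_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
 */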
#ifdef CONFIG_COMPAT
int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	if (level != SOL_TCP)
		return inet_csk_compat_setsockopt(sk, level, optname,
						  optval, optlen);
	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_tcp_setsockopt);
#endif
static void tcp_get_info_chrono_stats(const struct tcp_sock *tp,
				      struct tcp_info *info)
{
	u64 stats[__TCP_CHRONO_MAX], total = 0;
	int i;

	for (i = TCP_CHRONO_BUSY; i < __TCP_CHRONO_MAX; ++i) {
		stats[i] = tp->chrono_stat[i - 1];
		if (i == tp->chrono_type)
			stats[i] += tcp_jiffies32 - tp->chrono_start;
		stats[i] *= USEC_PER_SEC / HZ;
		total += stats[i];
	}

	info->tcpi_busy_time = total;
	info->tcpi_rwnd_limited = stats[TCP_CHRONO_RWND_LIMITED];
	info->tcpi_sndbuf_limited = stats[TCP_CHRONO_SNDBUF_LIMITED];
}
/* Return information about state of tcp endpoint in API format. */
void tcp_get_info(struct sock *sk, struct tcp_info *info)
{
	const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */
	const struct inet_connection_sock *icsk = inet_csk(sk);
	unsigned long rate;
	u32 now;
	u64 rate64;
	bool slow;

	memset(info, 0, sizeof(*info));
	if (sk->sk_type != SOCK_STREAM)
		return;

	info->tcpi_state = inet_sk_state_load(sk);

	/* Report meaningful fields for all TCP states, including listeners */
	rate = READ_ONCE(sk->sk_pacing_rate);
	rate64 = (rate != ~0UL) ? rate : ~0ULL;
	info->tcpi_pacing_rate = rate64;

	rate = READ_ONCE(sk->sk_max_pacing_rate);
	rate64 = (rate != ~0UL) ? rate : ~0ULL;
	info->tcpi_max_pacing_rate = rate64;

	info->tcpi_reordering = tp->reordering;
	info->tcpi_snd_cwnd = tp->snd_cwnd;

	if (info->tcpi_state == TCP_LISTEN) {
		/* listeners aliased fields:
		 * tcpi_unacked -> Number of children ready for accept()
		 * tcpi_sacked  -> max backlog
		 */
		info->tcpi_unacked = sk->sk_ack_backlog;
		info->tcpi_sacked = sk->sk_max_ack_backlog;
		return;
	}

	slow = lock_sock_fast(sk);

	info->tcpi_ca_state = icsk->icsk_ca_state;
	info->tcpi_retransmits = icsk->icsk_retransmits;
	info->tcpi_probes = icsk->icsk_probes_out;
	info->tcpi_backoff = icsk->icsk_backoff;

	if (tp->rx_opt.tstamp_ok)
		info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
	if (tcp_is_sack(tp))
		info->tcpi_options |= TCPI_OPT_SACK;
	if (tp->rx_opt.wscale_ok) {
		info->tcpi_options |= TCPI_OPT_WSCALE;
		info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
		info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
	}

	if (tp->ecn_flags & TCP_ECN_OK)
		info->tcpi_options |= TCPI_OPT_ECN;
	if (tp->ecn_flags & TCP_ECN_SEEN)
		info->tcpi_options |= TCPI_OPT_ECN_SEEN;
	if (tp->syn_data_acked)
		info->tcpi_options |= TCPI_OPT_SYN_DATA;

	info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
	info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
	info->tcpi_snd_mss = tp->mss_cache;
	info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;

	info->tcpi_unacked = tp->packets_out;
	info->tcpi_sacked = tp->sacked_out;

	info->tcpi_lost = tp->lost_out;
	info->tcpi_retrans = tp->retrans_out;

	now = tcp_jiffies32;
	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);

	info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
	info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
	info->tcpi_rtt = tp->srtt_us >> 3;
	info->tcpi_rttvar = tp->mdev_us >> 2;
	info->tcpi_snd_ssthresh = tp->snd_ssthresh;
	info->tcpi_advmss = tp->advmss;

	info->tcpi_rcv_rtt = tp->rcv_rtt_est.rtt_us >> 3;
	info->tcpi_rcv_space = tp->rcvq_space.space;

	info->tcpi_total_retrans = tp->total_retrans;

	info->tcpi_bytes_acked = tp->bytes_acked;
	info->tcpi_bytes_received = tp->bytes_received;
	info->tcpi_notsent_bytes = max_t(int, 0, tp->write_seq - tp->snd_nxt);
	tcp_get_info_chrono_stats(tp, info);

	info->tcpi_segs_out = tp->segs_out;
	info->tcpi_segs_in = tp->segs_in;

	info->tcpi_min_rtt = tcp_min_rtt(tp);
	info->tcpi_data_segs_in = tp->data_segs_in;
	info->tcpi_data_segs_out = tp->data_segs_out;

	info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 1 : 0;
	rate64 = tcp_compute_delivery_rate(tp);
	if (rate64)
		info->tcpi_delivery_rate = rate64;
	info->tcpi_delivered = tp->delivered;
	info->tcpi_delivered_ce = tp->delivered_ce;
	info->tcpi_bytes_sent = tp->bytes_sent;
	info->tcpi_bytes_retrans = tp->bytes_retrans;
	info->tcpi_dsack_dups = tp->dsack_dups;
	info->tcpi_reord_seen = tp->reord_seen;
	unlock_sock_fast(sk, slow);
}
EXPORT_SYMBOL_GPL(tcp_get_info);
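/* Illustration (not kernel code): retrieving the structure filled in by
 * tcp_get_info() from userspace; the kernel truncates to the length the
 * caller offers, so older binaries keep working as tcp_info grows:
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *
 *	if (getsockopt(fd, SOL_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("rtt %u us, cwnd %u\n", ti.tcpi_rtt, ti.tcpi_snd_cwnd);
 */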
static size_t tcp_opt_stats_get_size(void)
{
	return
		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BUSY */
		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_RWND_LIMITED */
		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_SNDBUF_LIMITED */
		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DATA_SEGS_OUT */
		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_TOTAL_RETRANS */
		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_PACING_RATE */
		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DELIVERY_RATE */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_CWND */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_REORDERING */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_MIN_RTT */
		nla_total_size(sizeof(u8)) + /* TCP_NLA_RECUR_RETRANS */
		nla_total_size(sizeof(u8)) + /* TCP_NLA_DELIVERY_RATE_APP_LMT */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_SNDQ_SIZE */
		nla_total_size(sizeof(u8)) + /* TCP_NLA_CA_STATE */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_SSTHRESH */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED_CE */
		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_SENT */
		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_RETRANS */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_DSACK_DUPS */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_REORD_SEEN */
		nla_total_size(sizeof(u32)) + /* TCP_NLA_SRTT */
		0;
}
struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *stats;
	struct tcp_info info;
	unsigned long rate;
	u64 rate64;

	stats = alloc_skb(tcp_opt_stats_get_size(), GFP_ATOMIC);
	if (!stats)
		return NULL;

	tcp_get_info_chrono_stats(tp, &info);
	nla_put_u64_64bit(stats, TCP_NLA_BUSY,
			  info.tcpi_busy_time, TCP_NLA_PAD);
	nla_put_u64_64bit(stats, TCP_NLA_RWND_LIMITED,
			  info.tcpi_rwnd_limited, TCP_NLA_PAD);
	nla_put_u64_64bit(stats, TCP_NLA_SNDBUF_LIMITED,
			  info.tcpi_sndbuf_limited, TCP_NLA_PAD);
	nla_put_u64_64bit(stats, TCP_NLA_DATA_SEGS_OUT,
			  tp->data_segs_out, TCP_NLA_PAD);
	nla_put_u64_64bit(stats, TCP_NLA_TOTAL_RETRANS,
			  tp->total_retrans, TCP_NLA_PAD);

	rate = READ_ONCE(sk->sk_pacing_rate);
	rate64 = (rate != ~0UL) ? rate : ~0ULL;
	nla_put_u64_64bit(stats, TCP_NLA_PACING_RATE, rate64, TCP_NLA_PAD);

	rate64 = tcp_compute_delivery_rate(tp);
	nla_put_u64_64bit(stats, TCP_NLA_DELIVERY_RATE, rate64, TCP_NLA_PAD);

	nla_put_u32(stats, TCP_NLA_SND_CWND, tp->snd_cwnd);
	nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering);
	nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp));

	nla_put_u8(stats, TCP_NLA_RECUR_RETRANS, inet_csk(sk)->icsk_retransmits);
	nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited);
	nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, tp->snd_ssthresh);
	nla_put_u32(stats, TCP_NLA_DELIVERED, tp->delivered);
	nla_put_u32(stats, TCP_NLA_DELIVERED_CE, tp->delivered_ce);

	nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una);
	nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state);

	nla_put_u64_64bit(stats, TCP_NLA_BYTES_SENT, tp->bytes_sent,
			  TCP_NLA_PAD);
	nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, tp->bytes_retrans,
			  TCP_NLA_PAD);
	nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups);
	nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen);
	nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3);

	return stats;
}
static int do_tcp_getsockopt(struct sock *sk, int level,
		int optname, char __user *optval, int __user *optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case TCP_MAXSEG:
		val = tp->mss_cache;
		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
			val = tp->rx_opt.user_mss;
		if (tp->repair)
			val = tp->rx_opt.mss_clamp;
		break;
	case TCP_NODELAY:
		val = !!(tp->nonagle&TCP_NAGLE_OFF);
		break;
	case TCP_CORK:
		val = !!(tp->nonagle&TCP_NAGLE_CORK);
		break;
	case TCP_KEEPIDLE:
		val = keepalive_time_when(tp) / HZ;
		break;
	case TCP_KEEPINTVL:
		val = keepalive_intvl_when(tp) / HZ;
		break;
	case TCP_KEEPCNT:
		val = keepalive_probes(tp);
		break;
	case TCP_SYNCNT:
		val = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
		break;
	case TCP_LINGER2:
		val = tp->linger2;
		if (val >= 0)
			val = (val ? : net->ipv4.sysctl_tcp_fin_timeout) / HZ;
		break;
	case TCP_DEFER_ACCEPT:
		val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
				      TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
		break;
	case TCP_WINDOW_CLAMP:
		val = tp->window_clamp;
		break;
	case TCP_INFO: {
		struct tcp_info info;

		if (get_user(len, optlen))
			return -EFAULT;

		tcp_get_info(sk, &info);

		len = min_t(unsigned int, len, sizeof(info));
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &info, len))
			return -EFAULT;
		return 0;
	}
	case TCP_CC_INFO: {
		const struct tcp_congestion_ops *ca_ops;
		union tcp_cc_info info;
		size_t sz = 0;
		int attr;

		if (get_user(len, optlen))
			return -EFAULT;

		ca_ops = icsk->icsk_ca_ops;
		if (ca_ops && ca_ops->get_info)
			sz = ca_ops->get_info(sk, ~0U, &attr, &info);

		len = min_t(unsigned int, len, sz);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &info, len))
			return -EFAULT;
		return 0;
	}
	case TCP_QUICKACK:
		val = !inet_csk_in_pingpong_mode(sk);
		break;

	case TCP_CONGESTION:
		if (get_user(len, optlen))
			return -EFAULT;
		len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
			return -EFAULT;
		return 0;

	case TCP_ULP:
		if (get_user(len, optlen))
			return -EFAULT;
		len = min_t(unsigned int, len, TCP_ULP_NAME_MAX);
		if (!icsk->icsk_ulp_ops) {
			if (put_user(0, optlen))
				return -EFAULT;
			return 0;
		}
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, icsk->icsk_ulp_ops->name, len))
			return -EFAULT;
		return 0;

	case TCP_FASTOPEN_KEY: {
		__u8 key[TCP_FASTOPEN_KEY_LENGTH];
		struct tcp_fastopen_context *ctx;

		if (get_user(len, optlen))
			return -EFAULT;

		rcu_read_lock();
		ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
		if (ctx)
			memcpy(key, ctx->key, sizeof(key));
		else
			len = 0;
		rcu_read_unlock();

		len = min_t(unsigned int, len, sizeof(key));
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, key, len))
			return -EFAULT;
		return 0;
	}
	case TCP_THIN_LINEAR_TIMEOUTS:
		val = tp->thin_lto;
		break;

	case TCP_THIN_DUPACK:
		val = 0;
		break;

	case TCP_REPAIR:
		val = tp->repair;
		break;

	case TCP_REPAIR_QUEUE:
		if (tp->repair)
			val = tp->repair_queue;
		else
			return -EINVAL;
		break;

	case TCP_REPAIR_WINDOW: {
		struct tcp_repair_window opt;

		if (get_user(len, optlen))
			return -EFAULT;

		if (len != sizeof(opt))
			return -EINVAL;

		if (!tp->repair)
			return -EPERM;

		opt.snd_wl1	= tp->snd_wl1;
		opt.snd_wnd	= tp->snd_wnd;
		opt.max_window	= tp->max_window;
		opt.rcv_wnd	= tp->rcv_wnd;
		opt.rcv_wup	= tp->rcv_wup;

		if (copy_to_user(optval, &opt, len))
			return -EFAULT;
		return 0;
	}
	case TCP_QUEUE_SEQ:
		if (tp->repair_queue == TCP_SEND_QUEUE)
			val = tp->write_seq;
		else if (tp->repair_queue == TCP_RECV_QUEUE)
			val = tp->rcv_nxt;
		else
			return -EINVAL;
		break;

	case TCP_USER_TIMEOUT:
		val = icsk->icsk_user_timeout;
		break;

	case TCP_FASTOPEN:
		val = icsk->icsk_accept_queue.fastopenq.max_qlen;
		break;

	case TCP_FASTOPEN_CONNECT:
		val = tp->fastopen_connect;
		break;

	case TCP_FASTOPEN_NO_COOKIE:
		val = tp->fastopen_no_cookie;
		break;

	case TCP_TIMESTAMP:
		val = tcp_time_stamp_raw() + tp->tsoffset;
		break;
	case TCP_NOTSENT_LOWAT:
		val = tp->notsent_lowat;
		break;
	case TCP_INQ:
		val = tp->recvmsg_inq;
		break;
	case TCP_SAVE_SYN:
		val = tp->save_syn;
		break;
	case TCP_SAVED_SYN: {
		if (get_user(len, optlen))
			return -EFAULT;

		lock_sock(sk);
		if (tp->saved_syn) {
			if (len < tp->saved_syn[0]) {
				if (put_user(tp->saved_syn[0], optlen)) {
					release_sock(sk);
					return -EFAULT;
				}
				release_sock(sk);
				return -EINVAL;
			}
			len = tp->saved_syn[0];
			if (put_user(len, optlen)) {
				release_sock(sk);
				return -EFAULT;
			}
			if (copy_to_user(optval, tp->saved_syn + 1, len)) {
				release_sock(sk);
				return -EFAULT;
			}
			tcp_saved_syn_free(tp);
			release_sock(sk);
		} else {
			release_sock(sk);
			len = 0;
			if (put_user(len, optlen))
				return -EFAULT;
		}
		return 0;
	}
#ifdef CONFIG_MMU
	case TCP_ZEROCOPY_RECEIVE: {
		struct tcp_zerocopy_receive zc;
		int err;

		if (get_user(len, optlen))
			return -EFAULT;
		if (len != sizeof(zc))
			return -EINVAL;
		if (copy_from_user(&zc, optval, len))
			return -EFAULT;
		lock_sock(sk);
		err = tcp_zerocopy_receive(sk, &zc);
		release_sock(sk);
		if (!err && copy_to_user(optval, &zc, len))
			err = -EFAULT;
		return err;
	}
#endif
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
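/* Illustration (not kernel code): reading and switching the congestion
 * control module via the TCP_CONGESTION handlers above:
 *
 *	char name[TCP_CA_NAME_MAX];
 *	socklen_t len = sizeof(name);
 *
 *	getsockopt(fd, SOL_TCP, TCP_CONGESTION, name, &len);	// e.g. "cubic"
 *	setsockopt(fd, SOL_TCP, TCP_CONGESTION, "reno", 4);	// switch CC
 */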
int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   int __user *optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (level != SOL_TCP)
		return icsk->icsk_af_ops->getsockopt(sk, level, optname,
						     optval, optlen);
	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(tcp_getsockopt);
#ifdef CONFIG_COMPAT
int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	if (level != SOL_TCP)
		return inet_csk_compat_getsockopt(sk, level, optname,
						  optval, optlen);
	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_tcp_getsockopt);
#endif
#ifdef CONFIG_TCP_MD5SIG
static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool);
static DEFINE_MUTEX(tcp_md5sig_mutex);
static bool tcp_md5sig_pool_populated = false;

static void __tcp_alloc_md5sig_pool(void)
{
	struct crypto_ahash *hash;
	int cpu;

	hash = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash))
		return;

	for_each_possible_cpu(cpu) {
		void *scratch = per_cpu(tcp_md5sig_pool, cpu).scratch;
		struct ahash_request *req;

		if (!scratch) {
			scratch = kmalloc_node(sizeof(union tcp_md5sum_block) +
					       sizeof(struct tcphdr),
					       GFP_KERNEL,
					       cpu_to_node(cpu));
			if (!scratch)
				return;
			per_cpu(tcp_md5sig_pool, cpu).scratch = scratch;
		}
		if (per_cpu(tcp_md5sig_pool, cpu).md5_req)
			continue;

		req = ahash_request_alloc(hash, GFP_KERNEL);
		if (!req)
			return;

		ahash_request_set_callback(req, 0, NULL, NULL);

		per_cpu(tcp_md5sig_pool, cpu).md5_req = req;
	}
	/* before setting tcp_md5sig_pool_populated, we must commit all writes
	 * to memory. See smp_rmb() in tcp_get_md5sig_pool()
	 */
	smp_wmb();
	tcp_md5sig_pool_populated = true;
}

bool tcp_alloc_md5sig_pool(void)
{
	if (unlikely(!tcp_md5sig_pool_populated)) {
		mutex_lock(&tcp_md5sig_mutex);

		if (!tcp_md5sig_pool_populated) {
			__tcp_alloc_md5sig_pool();
			if (tcp_md5sig_pool_populated)
				static_branch_inc(&tcp_md5_needed);
		}

		mutex_unlock(&tcp_md5sig_mutex);
	}
	return tcp_md5sig_pool_populated;
}
EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
/**
 *	tcp_get_md5sig_pool - get md5sig_pool for this user
 *
 *	We use percpu structure, so if we succeed, we exit with preemption
 *	and BH disabled, to make sure another thread or softirq handling
 *	won't try to get the same context.
 */
struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
{
	local_bh_disable();

	if (tcp_md5sig_pool_populated) {
		/* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */
		smp_rmb();
		return this_cpu_ptr(&tcp_md5sig_pool);
	}
	local_bh_enable();
	return NULL;
}
EXPORT_SYMBOL(tcp_get_md5sig_pool);
int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
			  const struct sk_buff *skb, unsigned int header_len)
{
	struct scatterlist sg;
	const struct tcphdr *tp = tcp_hdr(skb);
	struct ahash_request *req = hp->md5_req;
	unsigned int i;
	const unsigned int head_data_len = skb_headlen(skb) > header_len ?
					   skb_headlen(skb) - header_len : 0;
	const struct skb_shared_info *shi = skb_shinfo(skb);
	struct sk_buff *frag_iter;

	sg_init_table(&sg, 1);

	sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
	ahash_request_set_crypt(req, &sg, NULL, head_data_len);
	if (crypto_ahash_update(req))
		return 1;

	for (i = 0; i < shi->nr_frags; ++i) {
		const struct skb_frag_struct *f = &shi->frags[i];
		unsigned int offset = f->page_offset;
		struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);

		sg_set_page(&sg, page, skb_frag_size(f),
			    offset_in_page(offset));
		ahash_request_set_crypt(req, &sg, NULL, skb_frag_size(f));
		if (crypto_ahash_update(req))
			return 1;
	}

	skb_walk_frags(skb, frag_iter)
		if (tcp_md5_hash_skb_data(hp, frag_iter, 0))
			return 1;

	return 0;
}
EXPORT_SYMBOL(tcp_md5_hash_skb_data);
int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
{
	struct scatterlist sg;

	sg_init_one(&sg, key->key, key->keylen);
	ahash_request_set_crypt(hp->md5_req, &sg, NULL, key->keylen);
	return crypto_ahash_update(hp->md5_req);
}
EXPORT_SYMBOL(tcp_md5_hash_key);

#endif
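/* Illustration (not kernel code): the hashing helpers above back the
 * RFC 2385 TCP-MD5 option; userspace installs a per-peer key with the
 * TCP_MD5SIG socket option:
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *
 *	memcpy(&md5.tcpm_addr, &peer_addr, sizeof(peer_addr));
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, SOL_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * peer_addr is a placeholder struct sockaddr_in for the remote endpoint.
 */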
void tcp_done(struct sock *sk)
{
	struct request_sock *req = tcp_sk(sk)->fastopen_rsk;

	if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);

	tcp_set_state(sk, TCP_CLOSE);
	tcp_clear_xmit_timers(sk);
	if (req)
		reqsk_fastopen_remove(sk, req, false);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}
EXPORT_SYMBOL_GPL(tcp_done);
int tcp_abort(struct sock *sk, int err)
{
	if (!sk_fullsock(sk)) {
		if (sk->sk_state == TCP_NEW_SYN_RECV) {
			struct request_sock *req = inet_reqsk(sk);

			local_bh_disable();
			inet_csk_reqsk_queue_drop(req->rsk_listener, req);
			local_bh_enable();
			return 0;
		}
		return -EOPNOTSUPP;
	}

	/* Don't race with userspace socket closes such as tcp_close. */
	lock_sock(sk);

	if (sk->sk_state == TCP_LISTEN) {
		tcp_set_state(sk, TCP_CLOSE);
		inet_csk_listen_stop(sk);
	}

	/* Don't race with BH socket closes such as inet_csk_listen_stop. */
	local_bh_disable();
	bh_lock_sock(sk);

	if (!sock_flag(sk, SOCK_DEAD)) {
		sk->sk_err = err;
		/* This barrier is coupled with smp_rmb() in tcp_poll() */
		smp_wmb();
		sk->sk_error_report(sk);
		if (tcp_need_reset(sk->sk_state))
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
	}

	bh_unlock_sock(sk);
	local_bh_enable();
	tcp_write_queue_purge(sk);
	release_sock(sk);
	return 0;
}
EXPORT_SYMBOL_GPL(tcp_abort);
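/* Illustration (not kernel code): tcp_abort() is reached from the netlink
 * SOCK_DESTROY operation (CONFIG_INET_DIAG_DESTROY); from a shell it can be
 * exercised with iproute2, e.g.:
 *
 *	ss -K dst 192.0.2.1 dport = :443
 *
 * which aborts matching connections with ECONNABORTED and, per
 * tcp_need_reset(), an active RST to the peer.
 */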
extern struct tcp_congestion_ops tcp_reno;

static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtoul(str, 0, &thash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("thash_entries=", set_thash_entries);

static void __init tcp_init_mem(void)
{
	unsigned long limit = nr_free_buffer_pages() / 16;

	limit = max(limit, 128UL);
	sysctl_tcp_mem[0] = limit / 4 * 3;		/* 4.68 % */
	sysctl_tcp_mem[1] = limit;			/* 6.25 % */
	sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;	/* 9.37 % */
}
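/* Worked example (illustrative): on a machine where nr_free_buffer_pages()
 * is 1M pages (~4 GiB with 4 KiB pages), limit = 65536 pages, giving
 * tcp_mem[] = { 49152, 65536, 98304 } pages: the minimum threshold is
 * 192 MiB, memory pressure starts at 256 MiB, and the hard limit is
 * 384 MiB -- matching the 4.68 % / 6.25 % / 9.37 % ratios noted above.
 */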
void __init tcp_init(void)
{
	int max_rshare, max_wshare, cnt;
	unsigned long limit;
	unsigned int i;

	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) >
		     FIELD_SIZEOF(struct sk_buff, cb));

	percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
	percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL);
	inet_hashinfo_init(&tcp_hashinfo);
	inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash",
			    thash_entries, 21,  /* one slot per 2 MB */
			    0, 64 * 1024);
	tcp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("tcp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	tcp_hashinfo.ehash =
		alloc_large_system_hash("TCP established",
					sizeof(struct inet_ehash_bucket),
					thash_entries,
					17, /* one slot per 128 KB of memory */
					0,
					NULL,
					&tcp_hashinfo.ehash_mask,
					0,
					thash_entries ? 0 : 512 * 1024);
	for (i = 0; i <= tcp_hashinfo.ehash_mask; i++)
		INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);

	if (inet_ehash_locks_alloc(&tcp_hashinfo))
		panic("TCP: failed to alloc ehash_locks");
	tcp_hashinfo.bhash =
		alloc_large_system_hash("TCP bind",
					sizeof(struct inet_bind_hashbucket),
					tcp_hashinfo.ehash_mask + 1,
					17, /* one slot per 128 KB of memory */
					0,
					&tcp_hashinfo.bhash_size,
					NULL,
					0,
					64 * 1024);
	tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
	}


	cnt = tcp_hashinfo.ehash_mask + 1;
	sysctl_tcp_max_orphans = cnt / 2;

	tcp_init_mem();
	/* Set per-socket limits to no more than 1/128 the pressure threshold */
	limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
	max_wshare = min(4UL*1024*1024, limit);
	max_rshare = min(6UL*1024*1024, limit);

	init_net.ipv4.sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
	init_net.ipv4.sysctl_tcp_wmem[1] = 16*1024;
	init_net.ipv4.sysctl_tcp_wmem[2] = max(64*1024, max_wshare);

	init_net.ipv4.sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
	init_net.ipv4.sysctl_tcp_rmem[1] = 131072;
	init_net.ipv4.sysctl_tcp_rmem[2] = max(131072, max_rshare);

	pr_info("Hash tables configured (established %u bind %u)\n",
		tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);

	tcp_v4_init();
	tcp_metrics_init();
	BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0);
	tcp_tasklet_init();
}