/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/kref.h>
#include <linux/ktime.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>

#include <linux/seq_file.h>
#include <linux/memcontrol.h>

extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER		(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE	40

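/* For illustration: the TCP data offset field allows at most 15 32-bit
 * words, i.e. a 60 byte header; subtracting the 20 byte fixed header
 * leaves 60 - 20 = 40 bytes for options, which is where
 * MAX_TCP_OPTION_SPACE comes from.
 */
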
/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* The least MTU to use for probing */
#define TCP_BASE_MSS		1024

/* probing interval, default to 10 minutes as per RFC4821 */
#define TCP_PROBE_INTERVAL	600

/* Specify interval when tcp mtu probing will stop */
#define TCP_PROBE_THRESHOLD	8

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH	3

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	6	/* This is how many retries are done
				 * when active opening a connection.
				 * RFC1122 says the minimum retry MUST
				 * be at least 180secs. Nevertheless
				 * this value corresponds to
				 * 63secs of retransmission with the
				 * current initial RTO.
				 */

#define TCP_SYNACK_RETRIES 5	/* This is how many retries are done
				 * when passive opening a connection.
				 * This corresponds to 31secs of
				 * retransmission with the current
				 * initial RTO.
				 */

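/* For illustration, with TCP_TIMEOUT_INIT = 1s (defined below) and
 * exponential backoff, the retransmission timeouts sum to:
 *
 *	TCP_SYN_RETRIES = 6:	1 + 2 + 4 + 8 + 16 + 32 = 63 secs
 *	TCP_SYNACK_RETRIES = 5:	1 + 2 + 4 + 8 + 16      = 31 secs
 *
 * which is where the 63secs/31secs figures in the comments above
 * come from.
 */
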
#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value */
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
						 * used as a fallback RTO for the
						 * initial data transmission if no
						 * valid RTT sample has been acquired,
						 * most likely due to retrans in 3WHS.
						 */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */

#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes */
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */

#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to one
					 * provided by timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * minimal timewait lifetime.
					 */
/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
#define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
#define TCPOPT_EXP		254	/* Experimental */
/* Magic number to be after the option value for sharing TCP
 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
 */
#define TCPOPT_FASTOPEN_MAGIC	0xF989

/*
 *	TCP option lengths
 */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10
#define TCPOLEN_MD5SIG		18
#define TCPOLEN_FASTOPEN_BASE	2
#define TCPOLEN_EXP_FASTOPEN_BASE	4

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4

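/* For illustration: options are padded to 32-bit boundaries on the wire,
 * e.g. the 10 byte timestamp option is normally sent as NOP + NOP +
 * timestamp, i.e. 1 + 1 + 10 = 12 bytes = TCPOLEN_TSTAMP_ALIGNED.
 */
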
/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6	/* After 6 linear retries, do exp. backoff */

/* TCP initial congestion window as per rfc6928 */
#define TCP_INIT_CWND		10

/* Bit Flags for sysctl_tcp_fastopen */
#define	TFO_CLIENT_ENABLE	1
#define	TFO_SERVER_ENABLE	2
#define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */

/* Accept SYN data w/o any cookie option */
#define	TFO_SERVER_COOKIE_NOT_REQD	0x200

/* Force enable TFO on all listeners, i.e., not requiring the
 * TCP_FASTOPEN socket option. SOCKOPT1/2 determine how to set max_qlen.
 */
#define	TFO_SERVER_WO_SOCKOPT1	0x400
#define	TFO_SERVER_WO_SOCKOPT2	0x800

extern struct inet_timewait_death_row tcp_death_row;

/* sysctl variables for tcp */
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fastopen;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_max_reordering;
extern int sysctl_tcp_dsack;
extern long sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_thin_linear_timeouts;
extern int sysctl_tcp_thin_dupack;
extern int sysctl_tcp_early_retrans;
extern int sysctl_tcp_limit_output_bytes;
extern int sysctl_tcp_challenge_ack_limit;
extern unsigned int sysctl_tcp_notsent_lowat;
extern int sysctl_tcp_min_tso_segs;
extern int sysctl_tcp_min_rtt_wlen;
extern int sysctl_tcp_autocorking;
extern int sysctl_tcp_invalid_ratelimit;
extern int sysctl_tcp_pacing_ss_ratio;
extern int sysctl_tcp_pacing_ca_ratio;

extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;

/* optimized version of sk_under_memory_pressure() for TCP sockets */
static inline bool tcp_under_memory_pressure(const struct sock *sk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
		return true;

	return tcp_memory_pressure;
}

/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline bool before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1)	before(seq1, seq2)

/* is s2<=s1<=s3 ? */
static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}

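/* For illustration: the signed difference handles sequence wraparound,
 * e.g. for seq1 = 0xfffffff0 and seq2 = 0x10,
 * (__s32)(seq1 - seq2) = (__s32)0xffffffe0 = -32 < 0, so before() still
 * reports that seq1 precedes seq2 even though seq1 is numerically larger.
 */
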
static inline bool tcp_out_of_memory(struct sock *sk)
{
	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
		return true;
	return false;
}

void sk_forced_mem_schedule(struct sock *sk, int size);

static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
	int orphans = percpu_counter_read_positive(ocp);

	if (orphans << shift > sysctl_tcp_max_orphans) {
		orphans = percpu_counter_sum_positive(ocp);
		if (orphans << shift > sysctl_tcp_max_orphans)
			return true;
	}
	return false;
}

bool tcp_check_oom(struct sock *sk, int shift);


extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

void tcp_tasklet_init(void);

void tcp_v4_err(struct sk_buff *skb, u32);

void tcp_shutdown(struct sock *sk, int how);

void tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);

int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
		 int flags);
void tcp_release_cb(struct sock *sk);
void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
			 const struct tcphdr *th, unsigned int len);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags);

static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

#define	TCP_ECN_OK		1
#define	TCP_ECN_QUEUE_CWR	2
#define	TCP_ECN_DEMAND_CWR	4
#define	TCP_ECN_SEEN		8

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};


enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
					      struct sk_buff *skb,
					      const struct tcphdr *th);
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req, bool fastopen);
int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
void tcp_clear_retrans(struct tcp_sock *tp);
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_metrics_init(void);
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
			bool paws_check, bool timestamps);
bool tcp_remember_stamp(struct sock *sk);
bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
void tcp_disable_fack(struct tcp_sock *tp);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
unsigned int tcp_poll(struct file *file, struct socket *sock,
		      struct poll_table_struct *wait);
int tcp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen);
int tcp_setsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, unsigned int optlen);
int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen);
int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen);
void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(const struct request_sock *req);
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
		int flags, int *addr_len);
void tcp_parse_options(const struct sk_buff *skb,
		       struct tcp_options_received *opt_rx,
		       int estab, struct tcp_fastopen_cookie *foc);
const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);

/*
 *	TCP v4 functions exported for the inet6 API
 */

void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
void tcp_v4_mtu_reduced(struct sock *sk);
void tcp_req_err(struct sock *sk, u32 seq);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb);
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req);
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int tcp_connect(struct sock *sk);
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
				struct request_sock *req,
				struct tcp_fastopen_cookie *foc,
				bool attach_req);
int tcp_disconnect(struct sock *sk, int flags);

void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);

/* From syncookies.c */
struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
				 struct request_sock *req,
				 struct dst_entry *dst);
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
		      u32 cookie);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES

/* Syncookies use a monotonic timer which increments every 60 seconds.
 * This counter is used both as a hash input and partially encoded into
 * the cookie value. A cookie is only validated further if the delta
 * between the current counter value and the encoded one is less than this,
 * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
 * the counter advances immediately after a cookie is generated).
 */
#define MAX_SYNCOOKIE_AGE	2
#define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
#define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)

/* syncookies: remember time of last synqueue overflow
 * But do not dirty this field too often (once per second is enough)
 * It is racy as we do not hold a lock, but race is very minor.
 */
static inline void tcp_synq_overflow(const struct sock *sk)
{
	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
	unsigned long now = jiffies;

	if (time_after(now, last_overflow + HZ))
		tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;

	return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID);
}

static inline u32 tcp_cookie_time(void)
{
	u64 val = get_jiffies_64();

	do_div(val, TCP_SYNCOOKIE_PERIOD);
	return val;
}

u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
			      u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
__u32 cookie_init_timestamp(struct request_sock *req);
bool cookie_timestamp_decode(struct tcp_options_received *opt);
bool cookie_ecn_ok(const struct tcp_options_received *opt,
		   const struct net *net, const struct dst_entry *dst);

/* From net/ipv6/syncookies.c */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
		      u32 cookie);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);

u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
			      const struct tcphdr *th, u16 *mssp);
__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
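
/* For illustration: tcp_cookie_time() above is simply "minutes since
 * boot" (jiffies / (60 * HZ)). A cookie encoding counter value N is
 * accepted while the counter reads N or N+1 (MAX_SYNCOOKIE_AGE = 2),
 * i.e. for at most two minutes.
 */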
/* tcp_output.c */

void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
			       int nonagle);
bool tcp_may_send_now(struct sock *sk);
int __tcp_retransmit_skb(struct sock *, struct sk_buff *);
int tcp_retransmit_skb(struct sock *, struct sk_buff *);
void tcp_retransmit_timer(struct sock *sk);
void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
int tcp_trim_head(struct sock *, struct sk_buff *, u32);
int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);

void tcp_send_probe0(struct sock *);
void tcp_send_partial(struct sock *);
int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
bool tcp_schedule_loss_probe(struct sock *sk);

/* tcp_input.c */
void tcp_resume_early_retransmit(struct sock *sk);
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_reset(struct sock *sk);
void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
void tcp_fin(struct sock *sk);

/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	inet_csk_clear_xmit_timers(sk);
}

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
unsigned int tcp_current_mss(struct sock *sk);

/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	int cutoff;

	/* When peer uses tiny windows, there is no use in packetizing
	 * to sub-MSS pieces for the sake of SWS or making sure there
	 * are enough packets in the pipe for fast recovery.
	 *
	 * On the other hand, for extremely large MSS devices, handling
	 * smaller than MSS windows in this way does make sense.
	 */
	if (tp->max_window >= 512)
		cutoff = (tp->max_window >> 1);
	else
		cutoff = tp->max_window;

	if (cutoff && pktsize > cutoff)
		return max_t(int, cutoff, 68U - tp->tcp_header_len);
	else
		return pktsize;
}

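/* For illustration: with a peer window of tp->max_window = 65535,
 * cutoff = 65535 >> 1 = 32767, so a 48000 byte TSO packet would be
 * bounded to 32767 bytes; peer windows below 512 bytes are used in full.
 */
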
/* tcp.c */
void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
			       unsigned int, size_t);
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor);

void tcp_initialize_rcv_mss(struct sock *sk);

int tcp_mtu_to_mss(struct sock *sk, int pmtu);
int tcp_mss_to_mtu(struct sock *sk, int mss);
void tcp_mtup_init(struct sock *sk);
void tcp_init_buffer_space(struct sock *sk);

static inline void tcp_bound_rto(const struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
}

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

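/* For illustration: tp->srtt_us stores the smoothed RTT left-shifted by
 * 3 (i.e. 8x the real value), hence the >> 3 in __tcp_set_rto() before
 * the variance term is added, in the spirit of RFC 6298. pred_flags
 * mirrors the 4th 32-bit word of a TCP header (doff, flags, window):
 * tcp_header_len is in bytes and (len << 26) == ((len / 4) << 28), which
 * places the header length in 32-bit words into the data-offset bits, so
 * an incoming segment whose 4th word equals pred_flags can take the
 * header-prediction fast path.
 */
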
static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (skb_queue_empty(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = TCP_RTO_MIN;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

static inline u32 tcp_rto_min_us(struct sock *sk)
{
	return jiffies_to_usecs(tcp_rto_min(sk));
}

static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
{
	return dst_metric_locked(dst, RTAX_CC_ALGO);
}

/* Minimum RTT in usec. ~0 means not available. */
static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
{
	return tp->rtt_min[0].rtt;
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result. The caller does these things
 * if necessary. This is a "raw" window selection.
 */
u32 __tcp_select_window(struct sock *sk);

void tcp_send_window_probe(struct sock *sk);

/* TCP timestamps are only 32-bits, this causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below. We decided
 * to use only the low 32-bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp		((__u32)(jiffies))

static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
{
	return skb->skb_mstamp.stamp_jiffies;
}


#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

#define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers in here too.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	union {
		/* Note : tcp_tw_isn is used in input path only
		 *	  (isn chosen by tcp_timewait_state_process())
		 *
		 *	  tcp_gso_segs/size are used in write queue only,
		 *	  cf tcp_skb_pcount()/tcp_skb_mss()
		 */
		__u32		tcp_tw_isn;
		struct {
			u16	tcp_gso_segs;
			u16	tcp_gso_size;
		};
	};
	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/

	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/
#define TCPCB_REPAIRED		0x10	/* SKB repaired (no skb_mstamp)	*/
#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
				TCPCB_REPAIRED)

	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
			/* 1 byte hole */
	__u32		ack_seq;	/* Sequence number ACK'd	*/
	union {
		struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm	h6;
#endif
	} header;	/* For incoming frames		*/
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))


#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
 * as TCP moves IP6CB into a different location in skb->cb[]
 */
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->header.h6.iif;
}
#endif

/* Due to TSO, an SKB can be composed of multiple actual
 * packets. To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_segs;
}

static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
}

static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
}

/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_size;
}

/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
	CA_EVENT_DELAYED_ACK,	/* Delayed ack is sent */
	CA_EVENT_NON_DELAYED_ACK,
};

/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
enum tcp_ca_ack_event_flags {
	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
};

824
825/*
826 * Interface for adding new TCP congestion control handlers
827 */
828#define TCP_CA_NAME_MAX 16
3ff825b2
SH
829#define TCP_CA_MAX 128
830#define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX)
831
c5c6a8ab
DB
832#define TCP_CA_UNSPEC 0
833
30e502a3 834/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
164891aa 835#define TCP_CONG_NON_RESTRICTED 0x1
30e502a3
DB
836/* Requires ECN/ECT set on all packets */
837#define TCP_CONG_NEEDS_ECN 0x2
164891aa 838
64f40ff5
ED
839union tcp_cc_info;
840
317a76f9
SH
841struct tcp_congestion_ops {
842 struct list_head list;
c5c6a8ab
DB
843 u32 key;
844 u32 flags;
317a76f9
SH
845
846 /* initialize private data (optional) */
6687e988 847 void (*init)(struct sock *sk);
317a76f9 848 /* cleanup private data (optional) */
6687e988 849 void (*release)(struct sock *sk);
317a76f9
SH
850
851 /* return slow start threshold (required) */
6687e988 852 u32 (*ssthresh)(struct sock *sk);
317a76f9 853 /* do new cwnd calculation (required) */
24901551 854 void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
317a76f9 855 /* call before changing ca_state (optional) */
6687e988 856 void (*set_state)(struct sock *sk, u8 new_state);
317a76f9 857 /* call when cwnd event occurs (optional) */
6687e988 858 void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
7354c8c3
FW
859 /* call when ack arrives (optional) */
860 void (*in_ack_event)(struct sock *sk, u32 flags);
317a76f9 861 /* new value of cwnd after loss (optional) */
6687e988 862 u32 (*undo_cwnd)(struct sock *sk);
317a76f9 863 /* hook for packet ack accounting (optional) */
30cfd0ba 864 void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
73c1f4a0 865 /* get info for inet_diag (optional) */
64f40ff5
ED
866 size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
867 union tcp_cc_info *info);
317a76f9
SH
868
869 char name[TCP_CA_NAME_MAX];
870 struct module *owner;
871};
872
5c9f3023
JP
873int tcp_register_congestion_control(struct tcp_congestion_ops *type);
874void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
317a76f9 875
55d8694f 876void tcp_assign_congestion_control(struct sock *sk);
5c9f3023
JP
877void tcp_init_congestion_control(struct sock *sk);
878void tcp_cleanup_congestion_control(struct sock *sk);
879int tcp_set_default_congestion_control(const char *name);
880void tcp_get_default_congestion_control(char *name);
881void tcp_get_available_congestion_control(char *buf, size_t len);
882void tcp_get_allowed_congestion_control(char *buf, size_t len);
883int tcp_set_allowed_congestion_control(char *allowed);
884int tcp_set_congestion_control(struct sock *sk, const char *name);
e73ebb08
NC
885u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
886void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
317a76f9 887
5c9f3023 888u32 tcp_reno_ssthresh(struct sock *sk);
24901551 889void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
a8acfbac 890extern struct tcp_congestion_ops tcp_reno;
317a76f9 891
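/* A minimal sketch (not part of this header) of a congestion control
 * module wired to the hooks above, simply reusing the Reno helpers
 * declared in this header. The "tcp_example" name is made up for the
 * example; a real module would also wire module_init()/module_exit():
 *
 *	static struct tcp_congestion_ops tcp_example __read_mostly = {
 *		.ssthresh	= tcp_reno_ssthresh,	(required)
 *		.cong_avoid	= tcp_reno_cong_avoid,	(required)
 *		.flags		= TCP_CONG_NON_RESTRICTED,
 *		.name		= "example",
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init tcp_example_register(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_example);
 *	}
 *
 *	static void __exit tcp_example_unregister(void)
 *	{
 *		tcp_unregister_congestion_control(&tcp_example);
 *	}
 */
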
struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
#ifdef CONFIG_INET
char *tcp_ca_get_name_by_key(u32 key, char *buffer);
#else
static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	return NULL;
}
#endif

static inline bool tcp_ca_needs_ecn(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
}

static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* These functions determine how the current flow behaves in respect of SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 * tcp_is_fack - FACK enabled, implies SACK enabled
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok;
}

static inline bool tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline bool tcp_is_fack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
}

static inline void tcp_enable_fack(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
}

/* TCP early-retransmit (ER) is similar to but more conservative than
 * the thin-dupack feature. Enable ER only if thin-dupack is disabled.
 */
static inline void tcp_enable_early_retrans(struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	tp->do_early_retrans = sysctl_tcp_early_retrans &&
		sysctl_tcp_early_retrans < 4 && !sysctl_tcp_thin_dupack &&
		net->ipv4.sysctl_tcp_reordering == 3;
}

static inline void tcp_disable_early_retrans(struct tcp_sock *tp)
{
	tp->do_early_retrans = 0;
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge. In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}

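/* For illustration: with packets_out = 10, sacked_out = 3, lost_out = 2
 * and retrans_out = 1, the estimate is 10 - (3 + 2) + 1 = 6 packets
 * still in the network.
 */
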
#define TCP_INFINITE_SSTHRESH	0x7fffffff

static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
{
	return tp->snd_cwnd < tp->snd_ssthresh;
}

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
{
	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
	       (1 << inet_csk(sk)->icsk_ca_state);
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is cwnd reduction phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_in_cwnd_reduction(sk))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}

/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)

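/* For illustration: (cwnd >> 1) + (cwnd >> 2) is cwnd/2 + cwnd/4, i.e.
 * outside of cwnd reduction tcp_current_ssthresh() never reports less
 * than 3/4 of the current congestion window.
 */
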
void tcp_enter_cwr(struct sock *sk);
__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);

/* The maximum number of MSS of available cwnd for which TSO defers
 * sending if not using sysctl_tcp_tso_win_divisor.
 */
static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
{
	return 3;
}

/* Slow start with delack produces 3 packets of burst, so that
 * it is safe "de facto". This will be the default - same as
 * the default reordering threshold - but if reordering increases,
 * we must be able to allow cwnd to burst at least this much in order
 * to not pull it back when holes are filled.
 */
static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
{
	return tp->reordering;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}

/* We follow the spirit of RFC2861 to validate cwnd but implement a more
 * flexible approach. The RFC suggests cwnd should not be raised unless
 * it was fully used previously. And that's exactly what we do in
 * congestion avoidance mode. But in slow start we allow cwnd to grow
 * as long as the application has used half the cwnd.
 * Example :
 *    cwnd is 10 (IW10), but application sends 9 frames.
 *    We allow cwnd to reach 18 when all frames are ACKed.
 * This check is safe because it's as aggressive as slow start, which already
 * risks 100% overshoot. The advantage is that we discourage applications
 * from sending more filler packets or data just to artificially blow up
 * the cwnd usage, and we allow application-limited processes to probe bw
 * more aggressively.
 */
static inline bool tcp_is_cwnd_limited(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
	if (tcp_in_slow_start(tp))
		return tp->snd_cwnd < 2 * tp->max_packets_out;

	return tp->is_cwnd_limited;
}

/* Something is really bad, we could not queue an additional packet,
 * because qdisc is full or receiver sent a 0 window.
 * We do not want to add fuel to the fire, or abort too early,
 * so make sure the timer we arm now is at least 200ms in the future,
 * regardless of current icsk_rto value (as it could be ~2ms)
 */
static inline unsigned long tcp_probe0_base(const struct sock *sk)
{
	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
}

/* Variant of inet_csk_rto_backoff() used for zero window probes */
static inline unsigned long tcp_probe0_when(const struct sock *sk,
					    unsigned long max_when)
{
	u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;

	return (unsigned long)min_t(u64, when, max_when);
}

static inline void tcp_check_probe_timer(struct sock *sk)
{
	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  tcp_probe0_base(sk), TCP_RTO_MAX);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete(skb);
}

static inline bool tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__tcp_checksum_complete(skb);
}

/* Prequeue for VJ style copy to user, combined with checksumming. */

static inline void tcp_prequeue_init(struct tcp_sock *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
}

bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);

#undef STATE_TRACE

#ifdef STATE_TRACE
static const char *statename[] = {
	"Unused", "Established", "Syn Sent", "Syn Recv",
	"Fin Wait 1", "Fin Wait 2", "Time Wait", "Close",
	"Close Wait", "Last ACK", "Listen", "Closing"
};
#endif
void tcp_set_state(struct sock *sk, int state);

void tcp_done(struct sock *sk);

int tcp_abort(struct sock *sk, int err);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

u32 tcp_default_init_rwnd(u32 mss);
void tcp_cwnd_restart(struct sock *sk, s32 delta);

static inline void tcp_slow_start_after_idle_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta;

	if (!sysctl_tcp_slow_start_after_idle || tp->packets_out)
		return;
	delta = tcp_time_stamp - tp->lsndtime;
	if (delta > inet_csk(sk)->icsk_rto)
		tcp_cwnd_restart(sk, delta);
}

/* Determine a window scaling and initial window to offer. */
void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
			       __u32 *window_clamp, int wscale_ok,
			       __u8 *rcv_wscale, __u32 init_rcv_wnd);

static inline int tcp_win_from_space(int space)
{
	return sysctl_tcp_adv_win_scale <= 0 ?
		(space >> (-sysctl_tcp_adv_win_scale)) :
		space - (space >> sysctl_tcp_adv_win_scale);
}

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf);
}

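/* For illustration: with sysctl_tcp_adv_win_scale = 1,
 * tcp_win_from_space() yields space - space/2 = space/2; with 2 it
 * yields 3/4 of space; a non-positive value scales the other way,
 * e.g. -2 yields space/4. The part not offered as window accounts
 * for buffering overhead.
 */
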
extern void tcp_openreq_init_rwin(struct request_sock *req,
				  const struct sock *sk_listener,
				  const struct dst_entry *dst);

void tcp_enter_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time;
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes;
}

static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
{
	const struct inet_connection_sock *icsk = &tp->inet_conn;

	return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
			  tcp_time_stamp - tp->rcv_tstamp);
}

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}

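/* For illustration: (rto << 2) - (rto >> 1) = 4*RTO - RTO/2 = 3.5*RTO,
 * so tcp_fin_time() never returns less than 3.5 times the current RTO,
 * whatever linger2 or the tcp_fin_timeout sysctl ask for.
 */
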
static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
				  int paws_win)
{
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return true;
	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
		return true;
	/*
	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
	 * then following tcp messages have valid values. Ignore 0 value,
	 * or else 'negative' tsval might forbid us to accept their packets.
	 */
	if (!rx_opt->ts_recent)
		return true;
	return false;
}

static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
				   int rst)
{
	if (tcp_paws_check(rx_opt, 0))
		return false;

	/* RST segments are not recommended to carry timestamp,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   Certainly, it is a mistake. It is necessary to understand the
	   reasons of this constraint to relax it: if peer reboots, clock
	   may go out-of-sync and half-open connections will not be reset.
	   Actually, the problem would not exist if all the implementations
	   followed the draft about maintaining clock via reboots.
	   Linux-2.2 DOES NOT!

	   However, we can relax time bounds for RST segments to MSL.
	 */
	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
		return false;
	return true;
}

bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
			  int mib_idx, u32 *last_oow_ack_time);

static inline void tcp_mib_init(struct net *net)
{
	/* See RFC 2012 */
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
}

/* from STCP */
static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
{
	tp->lost_skb_hint = NULL;
}

static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
	tcp_clear_retrans_hints_partial(tp);
	tp->retransmit_skb_hint = NULL;
}

/* MD5 Signature */
struct crypto_hash;

union tcp_md5_addr {
	struct in_addr		a4;
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr		a6;
#endif
};

/* - key database */
struct tcp_md5sig_key {
	struct hlist_node	node;
	u8			keylen;
	u8			family; /* AF_INET or AF_INET6 */
	union tcp_md5_addr	addr;
	u8			key[TCP_MD5SIG_MAXKEYLEN];
	struct rcu_head		rcu;
};

/* - sock block */
struct tcp_md5sig_info {
	struct hlist_head	head;
	struct rcu_head		rcu;
};

/* - pseudo header */
struct tcp4_pseudohdr {
	__be32		saddr;
	__be32		daddr;
	__u8		pad;
	__u8		protocol;
	__be16		len;
};

struct tcp6_pseudohdr {
	struct in6_addr	saddr;
	struct in6_addr	daddr;
	__be32		len;
	__be32		protocol;	/* including padding */
};

union tcp_md5sum_block {
	struct tcp4_pseudohdr ip4;
#if IS_ENABLED(CONFIG_IPV6)
	struct tcp6_pseudohdr ip6;
#endif
};

/* - pool: digest algorithm, hash description and scratch buffer */
struct tcp_md5sig_pool {
	struct hash_desc	md5_desc;
	union tcp_md5sum_block	md5_blk;
};

/* - functions */
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk, const struct sk_buff *skb);
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
		   int family);
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk);

#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family);
#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
#else
static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	return NULL;
}
#define tcp_twsk_md5_key(twsk)	NULL
#endif

bool tcp_alloc_md5sig_pool(void);

struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
static inline void tcp_put_md5sig_pool(void)
{
	local_bh_enable();
}

int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
			  unsigned int header_len);
int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
		     const struct tcp_md5sig_key *key);

/* From tcp_fastopen.c */
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie, int *syn_loss,
			    unsigned long *last_syn_loss);
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
			    u16 try_exp);
struct tcp_fastopen_request {
	/* Fast Open cookie. Size 0 means a cookie request */
	struct tcp_fastopen_cookie	cookie;
	struct msghdr			*data;	/* data in MSG_FASTOPEN */
	size_t				size;
	int				copied;	/* queued in tcp_connect() */
};
void tcp_free_fastopen_req(struct tcp_sock *tp);

extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
int tcp_fastopen_reset_cipher(void *key, unsigned int len);
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      struct dst_entry *dst);
void tcp_fastopen_init_key_once(bool publish);
#define TCP_FASTOPEN_KEY_LENGTH 16

/* Fastopen key context */
struct tcp_fastopen_context {
	struct crypto_cipher	*tfm;
	__u8			key[TCP_FASTOPEN_KEY_LENGTH];
	struct rcu_head		rcu;
};

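/* A minimal sketch (not part of this header) of how a client uses Fast
 * Open from userspace once TFO_CLIENT_ENABLE is set in the tcp_fastopen
 * sysctl: data is handed to sendto() with MSG_FASTOPEN instead of
 * calling connect() first, so it can ride on the SYN:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	sendto(fd, buf, len, MSG_FASTOPEN,
 *	       (struct sockaddr *)&daddr, sizeof(daddr));
 *
 * The first such call requests a cookie and falls back to a regular
 * handshake; subsequent calls can carry data in the SYN itself.
 */
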
/* write queue abstraction */
static inline void tcp_write_queue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
		sk_wmem_free_skb(sk, skb);
	sk_mem_reclaim(sk);
	tcp_clear_all_retrans_hints(tcp_sk(sk));
}

static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
{
	return skb_peek(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
{
	return skb_peek_tail(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk,
						   const struct sk_buff *skb)
{
	return skb_queue_next(&sk->sk_write_queue, skb);
}

static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk,
						   const struct sk_buff *skb)
{
	return skb_queue_prev(&sk->sk_write_queue, skb);
}

#define tcp_for_write_queue(skb, sk)					\
	skb_queue_walk(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from(skb, sk)				\
	skb_queue_walk_from(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)

static inline struct sk_buff *tcp_send_head(const struct sock *sk)
{
	return sk->sk_send_head;
}

static inline bool tcp_skb_is_last(const struct sock *sk,
				   const struct sk_buff *skb)
{
	return skb_queue_is_last(&sk->sk_write_queue, skb);
}

static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
{
	if (tcp_skb_is_last(sk, skb))
		sk->sk_send_head = NULL;
	else
		sk->sk_send_head = tcp_write_queue_next(sk, skb);
}

static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
{
	if (sk->sk_send_head == skb_unlinked)
		sk->sk_send_head = NULL;
}

static inline void tcp_init_send_head(struct sock *sk)
{
	sk->sk_send_head = NULL;
}

static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);
}

static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__tcp_add_write_queue_tail(sk, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_send_head == NULL) {
		sk->sk_send_head = skb;

		if (tcp_sk(sk)->highest_sack == NULL)
			tcp_sk(sk)->highest_sack = skb;
	}
}

static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_head(&sk->sk_write_queue, skb);
}

/* Insert buff after skb on the write queue of sk. */
static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
						struct sk_buff *buff,
						struct sock *sk)
{
	__skb_queue_after(&sk->sk_write_queue, skb, buff);
}

/* Insert new before skb on the write queue of sk. */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						 struct sk_buff *skb,
						 struct sock *sk)
{
	__skb_queue_before(&sk->sk_write_queue, skb, new);
}