/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#define TCP_DEBUG 1
#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/dmaengine.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/kref.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>

#include <linux/seq_file.h>

extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Offer an initial receive window of 10 mss. */
#define TCP_DEFAULT_INIT_RCVWND	10

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* The least MTU to use for probing */
#define TCP_BASE_MSS		512

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal reordering. */
#define TCP_MAX_REORDERING	127

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 5	/* number of times to retry active opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_SYNACK_RETRIES 5	/* number of times to retry passive opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
						         * for local resources.
						         */

#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */

#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to one
					 * provided by timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * minimal timewait lifetime.
					 */
/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
#define TCPOPT_COOKIE		253	/* Cookie extension (experimental) */

/*
 *	TCP option lengths
 */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10
#define TCPOLEN_MD5SIG		18
#define TCPOLEN_COOKIE_BASE	2	/* Cookie-less header extension */
#define TCPOLEN_COOKIE_PAIR	3	/* Cookie pair header extension */
#define TCPOLEN_COOKIE_MIN	(TCPOLEN_COOKIE_BASE+TCP_COOKIE_MIN)
#define TCPOLEN_COOKIE_MAX	(TCPOLEN_COOKIE_BASE+TCP_COOKIE_MAX)

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4
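/* Note on the "aligned" sizes above: the timestamp option proper is 10
 * bytes (kind, length and two 4-byte stamps); stacks usually prepend two
 * NOPs so it occupies 12 bytes on the wire and the stamps stay 32-bit
 * aligned.  The other *_ALIGNED values follow the same NOP-padding idea.
 */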

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6	/* After 6 linear retries, do exp. backoff */

/* TCP initial congestion window as per draft-hkchu-tcpm-initcwnd-01 */
#define TCP_INIT_CWND		10

extern struct inet_timewait_death_row tcp_death_row;

/* sysctl variables for tcp */
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fin_timeout;
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_syn_retries;
extern int sysctl_tcp_synack_retries;
extern int sysctl_tcp_retries1;
extern int sysctl_tcp_retries2;
extern int sysctl_tcp_orphan_retries;
extern int sysctl_tcp_syncookies;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_ecn;
extern int sysctl_tcp_dsack;
extern long sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_frto_response;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_dma_copybreak;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_abc;
extern int sysctl_tcp_mtu_probing;
extern int sysctl_tcp_base_mss;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_max_ssthresh;
extern int sysctl_tcp_cookie_size;
extern int sysctl_tcp_thin_linear_timeouts;
extern int sysctl_tcp_thin_dupack;

extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;

/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline int before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1)	before(seq1, seq2)

/* is s2<=s1<=s3 ? */
static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
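/* Worked example with hypothetical sequence numbers: take seq1 = 0xfffffff0
 * (just below wrap) and seq2 = 0x00000010 (just past it).  seq1 - seq2 is
 * 0xffffffe0, which is negative as an __s32, so before(seq1, seq2) holds
 * even though seq1 > seq2 as plain unsigned values.  The same unsigned
 * subtraction makes between() wrap-safe.
 */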

static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
	int orphans = percpu_counter_read_positive(ocp);

	if (orphans << shift > sysctl_tcp_max_orphans) {
		orphans = percpu_counter_sum_positive(ocp);
		if (orphans << shift > sysctl_tcp_max_orphans)
			return true;
	}

	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
	    atomic_long_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
		return true;
	return false;
}
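/* Note on the double check above: percpu_counter_read_positive() is a fast
 * but approximate read of the counter; only when that estimate crosses the
 * limit do we pay for percpu_counter_sum_positive(), the exact (and
 * expensive) sum across all cpus.
 */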

/* syncookies: remember time of last synqueue overflow */
static inline void tcp_synq_overflow(struct sock *sk)
{
	tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline int tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
	return time_after(jiffies, last_overflow + TCP_TIMEOUT_INIT);
}

extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

extern void tcp_v4_err(struct sk_buff *skb, u32);

extern void tcp_shutdown (struct sock *sk, int how);

extern int tcp_v4_rcv(struct sk_buff *skb);

extern struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it);
extern void *tcp_v4_tw_get_peer(struct sock *sk);
extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		       size_t size);
extern int tcp_sendpage(struct sock *sk, struct page *page, int offset,
			size_t size, int flags);
extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
				 struct tcphdr *th, unsigned len);
extern int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
			       struct tcphdr *th, unsigned len);
extern void tcp_rcv_space_adjust(struct sock *sk);
extern void tcp_cleanup_rbuf(struct sock *sk, int copied);
extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
extern void tcp_twsk_destructor(struct sock *sk);
extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags);

static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

#define	TCP_ECN_OK		1
#define	TCP_ECN_QUEUE_CWR	2
#define	TCP_ECN_DEMAND_CWR	4

static __inline__ void
TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th)
{
	if (sysctl_tcp_ecn && th->ece && th->cwr)
		inet_rsk(req)->ecn_ok = 1;
}

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};


extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
						     struct sk_buff *skb,
						     const struct tcphdr *th);
extern struct sock * tcp_check_req(struct sock *sk, struct sk_buff *skb,
				   struct request_sock *req,
				   struct request_sock **prev);
extern int tcp_child_process(struct sock *parent, struct sock *child,
			     struct sk_buff *skb);
extern int tcp_use_frto(struct sock *sk);
extern void tcp_enter_frto(struct sock *sk);
extern void tcp_enter_loss(struct sock *sk, int how);
extern void tcp_clear_retrans(struct tcp_sock *tp);
extern void tcp_update_metrics(struct sock *sk);
extern void tcp_close(struct sock *sk, long timeout);
extern unsigned int tcp_poll(struct file * file, struct socket *sock,
			     struct poll_table_struct *wait);
extern int tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen);
extern int tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen);
extern int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, int __user *optlen);
extern int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, unsigned int optlen);
extern void tcp_set_keepalive(struct sock *sk, int val);
extern void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		       size_t len, int nonblock, int flags, int *addr_len);
extern void tcp_parse_options(struct sk_buff *skb,
			      struct tcp_options_received *opt_rx, u8 **hvpp,
			      int estab);
extern u8 *tcp_parse_md5sig_option(struct tcphdr *th);

/*
 *	TCP v4 functions exported for the inet6 API
 */

extern void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
extern struct sock * tcp_create_openreq_child(struct sock *sk,
					      struct request_sock *req,
					      struct sk_buff *skb);
extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst);
extern int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len);
extern int tcp_connect(struct sock *sk);
extern struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
					struct request_sock *req,
					struct request_values *rvp);
extern int tcp_disconnect(struct sock *sk, int flags);


/* From syncookies.c */
extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
				    struct ip_options *opt);
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);

extern __u32 cookie_init_timestamp(struct request_sock *req);
extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *);

/* From net/ipv6/syncookies.c */
extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);

/* tcp_output.c */

extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
				      int nonagle);
extern int tcp_may_send_now(struct sock *sk);
extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_retransmit_timer(struct sock *sk);
extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *);
extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);

extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern int tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
extern int tcp_send_synack(struct sock *);
extern void tcp_push_one(struct sock *, unsigned int mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);

/* tcp_input.c */
extern void tcp_cwnd_application_limited(struct sock *sk);

/* tcp_timer.c */
extern void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	inet_csk_clear_xmit_timers(sk);
}

extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
extern unsigned int tcp_current_mss(struct sock *sk);

/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	int cutoff;

	/* When peer uses tiny windows, there is no use in packetizing
	 * to sub-MSS pieces for the sake of SWS or making sure there
	 * are enough packets in the pipe for fast recovery.
	 *
	 * On the other hand, for extremely large MSS devices, handling
	 * smaller than MSS windows in this way does make sense.
	 */
	if (tp->max_window >= 512)
		cutoff = (tp->max_window >> 1);
	else
		cutoff = tp->max_window;

	if (cutoff && pktsize > cutoff)
		return max_t(int, cutoff, 68U - tp->tcp_header_len);
	else
		return pktsize;
}
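/* Worked example with hypothetical values: with max_window = 65535 the
 * cutoff is 32767, so a 64KB TSO packet is clipped to 32767 bytes.  With a
 * tiny max_window of 256 the cutoff is the window itself, and the max_t()
 * keeps the result from shrinking below what a minimal 68-byte MTU could
 * carry after the TCP header.
 */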

/* tcp.c */
extern void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
			       unsigned int, size_t);
extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
			 sk_read_actor_t recv_actor);

extern void tcp_initialize_rcv_mss(struct sock *sk);

extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
extern int tcp_mss_to_mtu(struct sock *sk, int mss);
extern void tcp_mtup_init(struct sock *sk);

static inline void tcp_bound_rto(const struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return (tp->srtt >> 3) + tp->rttvar;
}
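/* Note: tp->srtt is stored scaled by 8 (three extra fixed-point bits for
 * the smoothing arithmetic), so srtt >> 3 is the smoothed RTT in jiffies;
 * adding the variance term tp->rttvar gives the classic Jacobson RTO.
 */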

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}
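/* pred_flags is the value we expect in the fourth 32-bit word of an
 * incoming header (data offset, ACK flag, unscaled window), precomputed
 * here so the receive fast path can validate a segment with a single
 * compare.  It is only valid while the connection state is quiescent,
 * hence the recheck in tcp_fast_path_check() below.
 */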

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (skb_queue_empty(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = TCP_RTO_MIN;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}
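/* Worked example with hypothetical values: if we last advertised
 * rcv_wnd = 4096 when rcv_wup = 1000 and have since received up to
 * rcv_nxt = 1500, then 1000 + 4096 - 1500 = 3596 bytes of the offer
 * remain.  A peer overrunning the offer would drive win negative, which
 * is clamped to 0.
 */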

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result. The caller does these things
 * if necessary. This is a "raw" window selection.
 */
extern u32 __tcp_select_window(struct sock *sk);

/* TCP timestamps are only 32 bits, which causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below. We decided
 * to use only the low 32-bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp		((__u32)(jiffies))

#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80
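/* Example: byte 13 of the TCP header carries the eight flag bits, so for
 * a SYN-ACK segment tcp_flag_byte(th) == (TCPHDR_SYN | TCPHDR_ACK) == 0x12.
 */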

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers in here too.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
		struct inet6_skb_parm	h6;
#endif
	} header;	/* For incoming frames		*/
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	__u32		when;		/* used to compute rtt's	*/
	__u8		flags;		/* TCP header flags.		*/
	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/

#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)

	__u32		ack_seq;	/* Sequence number ACK'd	*/
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))

/* Due to TSO, an SKB can be composed of multiple actual
 * packets. To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_segs;
}

/* This is valid iff tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_FRTO,		/* fast recovery timeout */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_FAST_ACK,	/* in sequence ack */
	CA_EVENT_SLOW_ACK,	/* other ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CONG_NON_RESTRICTED 0x1
#define TCP_CONG_RTT_STAMP	0x2

struct tcp_congestion_ops {
	struct list_head	list;
	unsigned long		flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data (optional) */
	void (*release)(struct sock *sk);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);
	/* lower bound for congestion window (optional) */
	u32 (*min_cwnd)(const struct sock *sk);
	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight);
	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);
	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
	/* new value of cwnd after loss (optional) */
	u32  (*undo_cwnd)(struct sock *sk);
	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
	/* get info for inet_diag (optional) */
	void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);

	char		name[TCP_CA_NAME_MAX];
	struct module	*owner;
};

extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

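/* A minimal congestion control module (hypothetical sketch) only needs the
 * two required hooks filled in before registering, e.g. reusing the Reno
 * helpers declared further below:
 *
 *	static struct tcp_congestion_ops tcp_example = {
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *		.min_cwnd	= tcp_reno_min_cwnd,
 *		.owner		= THIS_MODULE,
 *		.name		= "example",
 *	};
 *
 *	tcp_register_congestion_control(&tcp_example);
 */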
extern void tcp_init_congestion_control(struct sock *sk);
extern void tcp_cleanup_congestion_control(struct sock *sk);
extern int tcp_set_default_congestion_control(const char *name);
extern void tcp_get_default_congestion_control(char *name);
extern void tcp_get_available_congestion_control(char *buf, size_t len);
extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
extern int tcp_set_allowed_congestion_control(char *allowed);
extern int tcp_set_congestion_control(struct sock *sk, const char *name);
extern void tcp_slow_start(struct tcp_sock *tp);
extern void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);

extern struct tcp_congestion_ops tcp_init_congestion_ops;
extern u32 tcp_reno_ssthresh(struct sock *sk);
extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
extern u32 tcp_reno_min_cwnd(const struct sock *sk);
extern struct tcp_congestion_ops tcp_reno;

static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* These functions determine how the current flow behaves in respect of SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 * tcp_is_fack - FACK enabled, implies SACK enabled
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok;
}

static inline int tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline int tcp_is_fack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok & 2;
}

static inline void tcp_enable_fack(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= 2;
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge.  In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}

#define TCP_INFINITE_SSTHRESH	0x7fffffff

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is rate halving phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}
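/* Note: (snd_cwnd >> 1) + (snd_cwnd >> 2) is 3/4 of snd_cwnd; e.g. a cwnd
 * of 40 yields 20 + 10 = 30, so outside CWR/Recovery the reported ssthresh
 * never drops below three quarters of the current window.
 */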

/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)

extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);

/* Slow start with delack produces 3 packets of burst, so that
 * it is safe "de facto". This will be the default - same as
 * the default reordering threshold - but if reordering increases,
 * we must be able to allow cwnd to burst at least this much in order
 * to not pull it back when holes are filled.
 */
static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
{
	return tp->reordering;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}
extern int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);

static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
				       const struct sk_buff *skb)
{
	if (skb->len < mss)
		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}

static inline void tcp_check_probe_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (!tp->packets_out && !icsk->icsk_pending)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  icsk->icsk_rto, TCP_RTO_MAX);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete(skb);
}

static inline int tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__tcp_checksum_complete(skb);
}

/* Prequeue for VJ style copy to user, combined with checksumming. */

static inline void tcp_prequeue_init(struct tcp_sock *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
#ifdef CONFIG_NET_DMA
	tp->ucopy.dma_chan = NULL;
	tp->ucopy.wakeup = 0;
	tp->ucopy.pinned_list = NULL;
	tp->ucopy.dma_cookie = 0;
#endif
}

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				  --ANK
 *
 * NOTE: is this not too big to inline?
 */
static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return 0;

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return 1;
}


#undef STATE_TRACE

#ifdef STATE_TRACE
static const char *statename[] = {
	"Unused", "Established", "Syn Sent", "Syn Recv",
	"Fin Wait 1", "Fin Wait 2", "Time Wait", "Close",
	"Close Wait", "Last ACK", "Listen", "Closing"
};
#endif
extern void tcp_set_state(struct sock *sk, int state);

extern void tcp_done(struct sock *sk);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

/* Determine a window scaling and initial window to offer. */
extern void tcp_select_initial_window(int __space, __u32 mss,
				      __u32 *rcv_wnd, __u32 *window_clamp,
				      int wscale_ok, __u8 *rcv_wscale,
				      __u32 init_rcv_wnd);

static inline int tcp_win_from_space(int space)
{
	return sysctl_tcp_adv_win_scale <= 0 ?
		(space >> (-sysctl_tcp_adv_win_scale)) :
		space - (space >> sysctl_tcp_adv_win_scale);
}
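/* Worked example with a hypothetical setting of tcp_adv_win_scale = 2: a
 * 64KB buffer advertises 65536 - (65536 >> 2) = 49152 bytes of window,
 * reserving a quarter of the space for skb overhead; a scale <= 0 instead
 * advertises space >> |scale|.
 */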

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf);
}

static inline void tcp_openreq_init(struct request_sock *req,
				    struct tcp_options_received *rx_opt,
				    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	req->cookie_ts = 0;
	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
	req->mss = rx_opt->mss_clamp;
	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
	ireq->tstamp_ok = rx_opt->tstamp_ok;
	ireq->sack_ok = rx_opt->sack_ok;
	ireq->snd_wscale = rx_opt->snd_wscale;
	ireq->wscale_ok = rx_opt->wscale_ok;
	ireq->acked = 0;
	ireq->ecn_ok = 0;
	ireq->rmt_port = tcp_hdr(skb)->source;
	ireq->loc_port = tcp_hdr(skb)->dest;
}

extern void tcp_enter_memory_pressure(struct sock *sk);

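/* The keepalive helpers below rely on the GNU "?:" shorthand: a non-zero
 * per-socket value (set via setsockopt()) wins, and zero falls back to the
 * system-wide sysctl default.
 */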
static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
	return tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
}

static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
{
	const struct inet_connection_sock *icsk = &tp->inet_conn;

	return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
			  tcp_time_stamp - tp->rcv_tstamp);
}

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}

static inline int tcp_paws_check(const struct tcp_options_received *rx_opt,
				 int paws_win)
{
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return 1;
	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
		return 1;
	/*
	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
	 * then following tcp messages have valid values. Ignore 0 value,
	 * or else 'negative' tsval might forbid us to accept their packets.
	 */
	if (!rx_opt->ts_recent)
		return 1;
	return 0;
}

static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
				  int rst)
{
	if (tcp_paws_check(rx_opt, 0))
		return 0;

	/* RST segments are not recommended to carry timestamp,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   Certainly, this is a mistake. It is necessary to understand the
	   reasons of this constraint to relax it: if peer reboots, clock
	   may go out-of-sync and half-open connections will not be reset.
	   Actually, the problem would not exist if all the implementations
	   followed the draft about maintaining clock via reboots.
	   Linux-2.2 DOES NOT!

	   However, we can relax time bounds for RST segments to MSL.
	 */
	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
		return 0;
	return 1;
}

static inline void tcp_mib_init(struct net *net)
{
	/* See RFC 2012 */
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
}

/* from STCP */
static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
{
	tp->lost_skb_hint = NULL;
	tp->scoreboard_skb_hint = NULL;
}

static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
	tcp_clear_retrans_hints_partial(tp);
	tp->retransmit_skb_hint = NULL;
}

/* MD5 Signature */
struct crypto_hash;

/* - key database */
struct tcp_md5sig_key {
	u8			*key;
	u8			keylen;
};

struct tcp4_md5sig_key {
	struct tcp_md5sig_key	base;
	__be32			addr;
};

struct tcp6_md5sig_key {
	struct tcp_md5sig_key	base;
#if 0
	u32			scope_id;	/* XXX */
#endif
	struct in6_addr		addr;
};

/* - sock block */
struct tcp_md5sig_info {
	struct tcp4_md5sig_key	*keys4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct tcp6_md5sig_key	*keys6;
	u32			entries6;
	u32			alloced6;
#endif
	u32			entries4;
	u32			alloced4;
};

/* - pseudo header */
struct tcp4_pseudohdr {
	__be32		saddr;
	__be32		daddr;
	__u8		pad;
	__u8		protocol;
	__be16		len;
};

struct tcp6_pseudohdr {
	struct in6_addr	saddr;
	struct in6_addr	daddr;
	__be32		len;
	__be32		protocol;	/* including padding */
};

union tcp_md5sum_block {
	struct tcp4_pseudohdr ip4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct tcp6_pseudohdr ip6;
#endif
};

/* - pool: digest algorithm, hash description and scratch buffer */
struct tcp_md5sig_pool {
	struct hash_desc	md5_desc;
	union tcp_md5sum_block	md5_blk;
};

/* - functions */
extern int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       struct sock *sk, struct request_sock *req,
			       struct sk_buff *skb);
extern struct tcp_md5sig_key * tcp_v4_md5_lookup(struct sock *sk,
						 struct sock *addr_sk);
extern int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, u8 *newkey,
			     u8 newkeylen);
extern int tcp_v4_md5_do_del(struct sock *sk, __be32 addr);

#ifdef CONFIG_TCP_MD5SIG
#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_keylen ?		 \
				 &(struct tcp_md5sig_key) {		 \
					.key = (twsk)->tw_md5_key,	 \
					.keylen = (twsk)->tw_md5_keylen, \
				} : NULL)
#else
#define tcp_twsk_md5_key(twsk)	NULL
#endif

extern struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *);
extern void tcp_free_md5sig_pool(void);

extern struct tcp_md5sig_pool	*tcp_get_md5sig_pool(void);
extern void tcp_put_md5sig_pool(void);

extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *);
extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *,
				 unsigned header_len);
extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
			    struct tcp_md5sig_key *key);

/* write queue abstraction */
static inline void tcp_write_queue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
		sk_wmem_free_skb(sk, skb);
	sk_mem_reclaim(sk);
	tcp_clear_all_retrans_hints(tcp_sk(sk));
}

static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
{
	return skb_peek(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
{
	return skb_peek_tail(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb)
{
	return skb_queue_next(&sk->sk_write_queue, skb);
}

static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_buff *skb)
{
	return skb_queue_prev(&sk->sk_write_queue, skb);
}

#define tcp_for_write_queue(skb, sk)					\
	skb_queue_walk(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from(skb, sk)				\
	skb_queue_walk_from(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)

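/* Typical iteration pattern (illustrative sketch): walkers stop before
 * tcp_send_head(), since skbs from there onwards have not been sent yet:
 *
 *	struct sk_buff *skb;
 *
 *	tcp_for_write_queue(skb, sk) {
 *		if (skb == tcp_send_head(sk))
 *			break;
 *		... inspect an already-sent skb ...
 *	}
 */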
static inline struct sk_buff *tcp_send_head(struct sock *sk)
{
	return sk->sk_send_head;
}

static inline bool tcp_skb_is_last(const struct sock *sk,
				   const struct sk_buff *skb)
{
	return skb_queue_is_last(&sk->sk_write_queue, skb);
}

static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
{
	if (tcp_skb_is_last(sk, skb))
		sk->sk_send_head = NULL;
	else
		sk->sk_send_head = tcp_write_queue_next(sk, skb);
}

static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
{
	if (sk->sk_send_head == skb_unlinked)
		sk->sk_send_head = NULL;
}

static inline void tcp_init_send_head(struct sock *sk)
{
	sk->sk_send_head = NULL;
}

static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);
}

static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__tcp_add_write_queue_tail(sk, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_send_head == NULL) {
		sk->sk_send_head = skb;

		if (tcp_sk(sk)->highest_sack == NULL)
			tcp_sk(sk)->highest_sack = skb;
	}
}

static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_head(&sk->sk_write_queue, skb);
}

/* Insert buff after skb on the write queue of sk. */
static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
						struct sk_buff *buff,
						struct sock *sk)
{
	__skb_queue_after(&sk->sk_write_queue, skb, buff);
}

/* Insert new before skb on the write queue of sk. */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						 struct sk_buff *skb,
						 struct sock *sk)
{
	__skb_queue_before(&sk->sk_write_queue, skb, new);