/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#define TCP_DEBUG 1
#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/dmaengine.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/kref.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>

#include <linux/seq_file.h>

extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16-bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* The least MTU to use for probing */
#define TCP_BASE_MSS		512

/* After receiving this number of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal reordering. */
#define TCP_MAX_REORDERING	127

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC 1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 5	/* number of times to retry active opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_SYNACK_RETRIES 5	/* number of times to retry passive opening a
				 * connection: ~180sec is RFC minimum	*/


#define TCP_ORPHAN_RETRIES 7	/* number of times to retry on an orphaned
				 * socket. 7 is ~50sec-16min.
				 */


#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */

#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */

#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal to
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to the
					 * one provided by the timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than the
					 * minimal timewait lifetime.
					 */
/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC 2385) */
#define TCPOPT_COOKIE		253	/* Cookie extension (experimental) */

/*
 *	TCP option lengths
 */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10
#define TCPOLEN_MD5SIG		18
#define TCPOLEN_COOKIE_BASE	2	/* Cookie-less header extension */
#define TCPOLEN_COOKIE_PAIR	3	/* Cookie pair header extension */
#define TCPOLEN_COOKIE_MIN	(TCPOLEN_COOKIE_BASE+TCP_COOKIE_MIN)
#define TCPOLEN_COOKIE_MAX	(TCPOLEN_COOKIE_BASE+TCP_COOKIE_MAX)

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6	/* After 6 linear retries, do exp. backoff */

extern struct inet_timewait_death_row tcp_death_row;

/* sysctl variables for tcp */
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fin_timeout;
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_syn_retries;
extern int sysctl_tcp_synack_retries;
extern int sysctl_tcp_retries1;
extern int sysctl_tcp_retries2;
extern int sysctl_tcp_orphan_retries;
extern int sysctl_tcp_syncookies;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_ecn;
extern int sysctl_tcp_dsack;
extern int sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_frto_response;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_dma_copybreak;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_abc;
extern int sysctl_tcp_mtu_probing;
extern int sysctl_tcp_base_mss;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_max_ssthresh;
extern int sysctl_tcp_cookie_size;
extern int sysctl_tcp_thin_linear_timeouts;
extern int sysctl_tcp_thin_dupack;

extern atomic_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;

/*
 * The next routines deal with comparing 32-bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline int before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1)	before(seq1, seq2)

/* is s2<=s1<=s3 ? */
static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
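
/* Worked example (illustrative values): the __s32 cast is what makes
 * these comparisons wrap-safe. With seq1 = 0xfffffff0 and
 * seq2 = 0x00000010, seq1 - seq2 = 0xffffffe0, which is negative as an
 * __s32, so before(0xfffffff0, 0x10) correctly reports that seq1
 * precedes seq2 across the 32-bit wrap, where a plain "seq1 < seq2"
 * comparison would get it backwards.
 */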

static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
	int orphans = percpu_counter_read_positive(ocp);

	if (orphans << shift > sysctl_tcp_max_orphans) {
		orphans = percpu_counter_sum_positive(ocp);
		if (orphans << shift > sysctl_tcp_max_orphans)
			return true;
	}

	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
	    atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
		return true;
	return false;
}

/* syncookies: remember time of last synqueue overflow */
static inline void tcp_synq_overflow(struct sock *sk)
{
	tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline int tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
	return time_after(jiffies, last_overflow + TCP_TIMEOUT_INIT);
}
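
/* Worked example (illustrative values): with HZ = 1000 and an overflow
 * stamped at jiffies = 5000, this returns 0 until jiffies exceeds
 * 5000 + TCP_TIMEOUT_INIT = 8000, i.e. a synqueue overflow is treated
 * as "recent" for the first ~3 seconds after it happens.
 */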

extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

extern void tcp_v4_err(struct sk_buff *skb, u32);

extern void tcp_shutdown(struct sock *sk, int how);

extern int tcp_v4_rcv(struct sk_buff *skb);

extern int tcp_v4_remember_stamp(struct sock *sk);
extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		       size_t size);
extern int tcp_sendpage(struct sock *sk, struct page *page, int offset,
			size_t size, int flags);
extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
				 struct tcphdr *th, unsigned len);
extern int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
			       struct tcphdr *th, unsigned len);
extern void tcp_rcv_space_adjust(struct sock *sk);
extern void tcp_cleanup_rbuf(struct sock *sk, int copied);
extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
extern void tcp_twsk_destructor(struct sock *sk);
extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags);

static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

extern void tcp_enter_quickack_mode(struct sock *sk);

#define TCP_ECN_OK		1
#define TCP_ECN_QUEUE_CWR	2
#define TCP_ECN_DEMAND_CWR	4

static __inline__ void
TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th)
{
	if (sysctl_tcp_ecn && th->ece && th->cwr)
		inet_rsk(req)->ecn_ok = 1;
}

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};


extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
						     struct sk_buff *skb,
						     const struct tcphdr *th);
extern struct sock * tcp_check_req(struct sock *sk, struct sk_buff *skb,
				   struct request_sock *req,
				   struct request_sock **prev);
extern int tcp_child_process(struct sock *parent, struct sock *child,
			     struct sk_buff *skb);
extern int tcp_use_frto(struct sock *sk);
extern void tcp_enter_frto(struct sock *sk);
extern void tcp_enter_loss(struct sock *sk, int how);
extern void tcp_clear_retrans(struct tcp_sock *tp);
extern void tcp_update_metrics(struct sock *sk);
extern void tcp_close(struct sock *sk, long timeout);
extern unsigned int tcp_poll(struct file *file, struct socket *sock,
			     struct poll_table_struct *wait);
extern int tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen);
extern int tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen);
extern int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, int __user *optlen);
extern int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, unsigned int optlen);
extern void tcp_set_keepalive(struct sock *sk, int val);
extern void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		       size_t len, int nonblock, int flags, int *addr_len);
extern void tcp_parse_options(struct sk_buff *skb,
			      struct tcp_options_received *opt_rx, u8 **hvpp,
			      int estab);
extern u8 *tcp_parse_md5sig_option(struct tcphdr *th);

/*
 *	TCP v4 functions exported for the inet6 API
 */

extern void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
extern struct sock * tcp_create_openreq_child(struct sock *sk,
					      struct request_sock *req,
					      struct sk_buff *skb);
extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst);
extern int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len);
extern int tcp_connect(struct sock *sk);
extern struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
					struct request_sock *req,
					struct request_values *rvp);
extern int tcp_disconnect(struct sock *sk, int flags);


/* From syncookies.c */
extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
				    struct ip_options *opt);
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);

extern __u32 cookie_init_timestamp(struct request_sock *req);
extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *);

/* From net/ipv6/syncookies.c */
extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);

/* tcp_output.c */

extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
				      int nonagle);
extern int tcp_may_send_now(struct sock *sk);
extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_retransmit_timer(struct sock *sk);
extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *);
extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);

extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern int tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
extern int tcp_send_synack(struct sock *);
extern void tcp_push_one(struct sock *, unsigned int mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);

/* tcp_input.c */
extern void tcp_cwnd_application_limited(struct sock *sk);

/* tcp_timer.c */
extern void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	inet_csk_clear_xmit_timers(sk);
}

extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
extern unsigned int tcp_current_mss(struct sock *sk);

/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	if (tp->max_window && pktsize > (tp->max_window >> 1))
		return max(tp->max_window >> 1, 68U - tp->tcp_header_len);
	else
		return pktsize;
}
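
/* Worked example (illustrative values): with max_window = 1000, a
 * pktsize of 800 exceeds the half-window of 500 and is clamped to
 * max(500, 68 - tcp_header_len) = 500, while a pktsize of 400 is
 * returned unchanged. The 68-byte constant corresponds to the minimum
 * IPv4 MTU and keeps the result sane even for tiny windows.
 */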

/* tcp.c */
extern void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
			       unsigned int, size_t);
extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
			 sk_read_actor_t recv_actor);

extern void tcp_initialize_rcv_mss(struct sock *sk);

extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
extern int tcp_mss_to_mtu(struct sock *sk, int mss);
extern void tcp_mtup_init(struct sock *sk);

static inline void tcp_bound_rto(const struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return (tp->srtt >> 3) + tp->rttvar;
}
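
/* Worked example (illustrative values): tp->srtt holds the smoothed
 * RTT scaled by 8, so srtt = 800 (a 100-tick smoothed RTT) with
 * rttvar = 50 yields an RTO of 100 + 50 = 150 ticks. This is in the
 * spirit of RFC 2988's RTO = SRTT + 4*RTTVAR; the factor of 4 is
 * already folded into rttvar by the estimator in tcp_input.c.
 */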

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}
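
/* Worked example (illustrative values): with a 32-byte header
 * (timestamps in use) and a scaled send window of 0x1234, pred_flags
 * becomes htonl((32 << 26) | 0x00100000 | 0x1234) = htonl(0x80101234):
 * a data offset of 8 words in the top nibble, the ACK bit, and the
 * window. The receive fast path can then compare this single word
 * against the fourth 32-bit word of each incoming TCP header.
 */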

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (skb_queue_empty(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = TCP_RTO_MIN;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}
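
/* Worked example (illustrative values): with rcv_wup = 1000,
 * rcv_wnd = 500 and rcv_nxt = 1200, 200 bytes of the offered window
 * are already consumed, so 1000 + 500 - 1200 = 300 bytes remain
 * advertised. If the peer pushed past the window (rcv_nxt > 1500),
 * the negative result would be clamped to 0.
 */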

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result. The caller does these things
 * if necessary. This is a "raw" window selection.
 */
extern u32 __tcp_select_window(struct sock *sk);

/* TCP timestamps are only 32 bits, which causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below. We decided
 * to use only the low 32 bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp		((__u32)(jiffies))

#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers in here too.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		struct inet6_skb_parm	h6;
#endif
	} header;	/* For incoming frames		*/
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	__u32		when;		/* used to compute rtt's	*/
	__u8		flags;		/* TCP header flags.		*/
	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/

#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)

	__u32		ack_seq;	/* Sequence number ACK'd	*/
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))

/* Due to TSO, an SKB can be composed of multiple actual
 * packets. To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_segs;
}

/* This is valid iff tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_FRTO,		/* fast recovery timeout */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_FAST_ACK,	/* in sequence ack */
	CA_EVENT_SLOW_ACK,	/* other ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CONG_NON_RESTRICTED 0x1
#define TCP_CONG_RTT_STAMP	0x2

struct tcp_congestion_ops {
	struct list_head	list;
	unsigned long		flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data (optional) */
	void (*release)(struct sock *sk);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);
	/* lower bound for congestion window (optional) */
	u32 (*min_cwnd)(const struct sock *sk);
	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight);
	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);
	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
	/* new value of cwnd after loss (optional) */
	u32  (*undo_cwnd)(struct sock *sk);
	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
	/* get info for inet_diag (optional) */
	void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);

	char		name[TCP_CA_NAME_MAX];
	struct module	*owner;
};

extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

extern void tcp_init_congestion_control(struct sock *sk);
extern void tcp_cleanup_congestion_control(struct sock *sk);
extern int tcp_set_default_congestion_control(const char *name);
extern void tcp_get_default_congestion_control(char *name);
extern void tcp_get_available_congestion_control(char *buf, size_t len);
extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
extern int tcp_set_allowed_congestion_control(char *allowed);
extern int tcp_set_congestion_control(struct sock *sk, const char *name);
extern void tcp_slow_start(struct tcp_sock *tp);
extern void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);

extern struct tcp_congestion_ops tcp_init_congestion_ops;
extern u32 tcp_reno_ssthresh(struct sock *sk);
extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
extern u32 tcp_reno_min_cwnd(const struct sock *sk);
extern struct tcp_congestion_ops tcp_reno;

static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* These functions determine how the current flow behaves in respect of SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 * tcp_is_fack - FACK enabled, implies SACK enabled
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok;
}

static inline int tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline int tcp_is_fack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok & 2;
}

static inline void tcp_enable_fack(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= 2;
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge. In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
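
/* Worked example (illustrative values): with packets_out = 10,
 * sacked_out = 2, lost_out = 1 and retrans_out = 1, in-flight is
 * 10 - (2 + 1) + 1 = 8 segments: two were SACKed out of the network,
 * one is presumed lost, and one retransmission re-entered it.
 */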

#define TCP_INFINITE_SSTHRESH	0x7fffffff

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is the rate-halving phase, when cwnd is decreasing
 * towards ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}
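
/* Worked example (illustrative values): (cwnd >> 1) + (cwnd >> 2) is
 * 3/4 of cwnd, so with snd_cwnd = 40 and snd_ssthresh = 20 this
 * returns max(20, 30) = 30 outside of CWR/Recovery, raising ssthresh
 * half-way from its post-loss value towards the current cwnd.
 */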

/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)

extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);

/* Slow start with delack produces 3 packets of burst, so that
 * it is safe "de facto". This will be the default - same as
 * the default reordering threshold - but if reordering increases,
 * we must be able to allow cwnd to burst at least this much in order
 * to not pull it back when holes are filled.
 */
static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
{
	return tp->reordering;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}
extern int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);

static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
				       const struct sk_buff *skb)
{
	if (skb->len < mss)
		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}

static inline void tcp_check_probe_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (!tp->packets_out && !icsk->icsk_pending)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  icsk->icsk_rto, TCP_RTO_MAX);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete(skb);
}

static inline int tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__tcp_checksum_complete(skb);
}

/* Prequeue for VJ style copy to user, combined with checksumming. */

static inline void tcp_prequeue_init(struct tcp_sock *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
#ifdef CONFIG_NET_DMA
	tp->ucopy.dma_chan = NULL;
	tp->ucopy.wakeup = 0;
	tp->ucopy.pinned_list = NULL;
	tp->ucopy.dma_cookie = 0;
#endif
}

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see why it failed. 8)8)				--ANK
 *
 * NOTE: is this not too big to inline?
 */
static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return 0;

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return 1;
}


#undef STATE_TRACE

#ifdef STATE_TRACE
static const char *statename[] = {
	"Unused", "Established", "Syn Sent", "Syn Recv",
	"Fin Wait 1", "Fin Wait 2", "Time Wait", "Close",
	"Close Wait", "Last ACK", "Listen", "Closing"
};
#endif
extern void tcp_set_state(struct sock *sk, int state);

extern void tcp_done(struct sock *sk);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

/* Determine a window scaling and initial window to offer. */
extern void tcp_select_initial_window(int __space, __u32 mss,
				      __u32 *rcv_wnd, __u32 *window_clamp,
				      int wscale_ok, __u8 *rcv_wscale,
				      __u32 init_rcv_wnd);

static inline int tcp_win_from_space(int space)
{
	return sysctl_tcp_adv_win_scale <= 0 ?
		(space >> (-sysctl_tcp_adv_win_scale)) :
		space - (space >> sysctl_tcp_adv_win_scale);
}

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf);
}
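
/* Worked example (illustrative values): with sysctl_tcp_adv_win_scale
 * set to 2 (the default in this tree), tcp_win_from_space(65536) =
 * 65536 - (65536 >> 2) = 49152, i.e. 3/4 of the free receive buffer
 * is advertised as window and the rest is reserved for overhead.
 * A negative setting flips the split: -2 would advertise space >> 2.
 */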

static inline void tcp_openreq_init(struct request_sock *req,
				    struct tcp_options_received *rx_opt,
				    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	req->cookie_ts = 0;
	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
	req->mss = rx_opt->mss_clamp;
	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
	ireq->tstamp_ok = rx_opt->tstamp_ok;
	ireq->sack_ok = rx_opt->sack_ok;
	ireq->snd_wscale = rx_opt->snd_wscale;
	ireq->wscale_ok = rx_opt->wscale_ok;
	ireq->acked = 0;
	ireq->ecn_ok = 0;
	ireq->rmt_port = tcp_hdr(skb)->source;
	ireq->loc_port = tcp_hdr(skb)->dest;
}

extern void tcp_enter_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
	return tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
}

static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
{
	const struct inet_connection_sock *icsk = &tp->inet_conn;

	return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
			  tcp_time_stamp - tp->rcv_tstamp);
}

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}
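
/* Worked example (illustrative values): (rto << 2) - (rto >> 1) is
 * 3.5 * rto, so with rto = 200 ticks the FIN_WAIT2 timeout is floored
 * at 800 - 100 = 700 ticks; a configured tcp_fin_timeout shorter than
 * that is ignored, keeping the deadlock breaker safely above the
 * retransmission timeout.
 */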

static inline int tcp_paws_check(const struct tcp_options_received *rx_opt,
				 int paws_win)
{
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return 1;
	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
		return 1;

	return 0;
}

static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
				  int rst)
{
	if (tcp_paws_check(rx_opt, 0))
		return 0;

	/* RST segments are not recommended to carry timestamps,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   Certainly, this is a mistake. It is necessary to understand the
	   reasons for this constraint in order to relax it: if the peer
	   reboots, its clock may go out of sync, and half-open connections
	   would not be reset. Actually, the problem would not exist if all
	   the implementations followed the draft about maintaining clocks
	   across reboots. Linux-2.2 DOES NOT!

	   However, we can relax time bounds for RST segments to MSL.
	 */
	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
		return 0;
	return 1;
}
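
/* Worked example (illustrative values): with ts_recent = 1000, a
 * segment carrying rcv_tsval = 999 gives (s32)(1000 - 999) = 1 > 0,
 * so tcp_paws_check() fails the first test and, unless
 * ts_recent_stamp is more than TCP_PAWS_24DAYS old, the segment is
 * rejected as a PAWS replay; rcv_tsval = 1000 or newer passes.
 */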

#define TCP_CHECK_TIMER(sk) do { } while (0)

static inline void tcp_mib_init(struct net *net)
{
	/* See RFC 2012 */
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
}

/* from STCP */
static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
{
	tp->lost_skb_hint = NULL;
	tp->scoreboard_skb_hint = NULL;
}

static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
	tcp_clear_retrans_hints_partial(tp);
	tp->retransmit_skb_hint = NULL;
}

/* MD5 Signature */
struct crypto_hash;

/* - key database */
struct tcp_md5sig_key {
	u8			*key;
	u8			keylen;
};

struct tcp4_md5sig_key {
	struct tcp_md5sig_key	base;
	__be32			addr;
};

struct tcp6_md5sig_key {
	struct tcp_md5sig_key	base;
#if 0
	u32			scope_id;	/* XXX */
#endif
	struct in6_addr		addr;
};

/* - sock block */
struct tcp_md5sig_info {
	struct tcp4_md5sig_key	*keys4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct tcp6_md5sig_key	*keys6;
	u32			entries6;
	u32			alloced6;
#endif
	u32			entries4;
	u32			alloced4;
};

/* - pseudo header */
struct tcp4_pseudohdr {
	__be32		saddr;
	__be32		daddr;
	__u8		pad;
	__u8		protocol;
	__be16		len;
};

struct tcp6_pseudohdr {
	struct in6_addr	saddr;
	struct in6_addr	daddr;
	__be32		len;
	__be32		protocol;	/* including padding */
};

union tcp_md5sum_block {
	struct tcp4_pseudohdr ip4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct tcp6_pseudohdr ip6;
#endif
};

/* - pool: digest algorithm, hash description and scratch buffer */
struct tcp_md5sig_pool {
	struct hash_desc	md5_desc;
	union tcp_md5sum_block	md5_blk;
};

#define TCP_MD5SIG_MAXKEYS	(~(u32)0)	/* really?! */

/* - functions */
extern int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       struct sock *sk, struct request_sock *req,
			       struct sk_buff *skb);
extern struct tcp_md5sig_key * tcp_v4_md5_lookup(struct sock *sk,
						 struct sock *addr_sk);
extern int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, u8 *newkey,
			     u8 newkeylen);
extern int tcp_v4_md5_do_del(struct sock *sk, __be32 addr);

#ifdef CONFIG_TCP_MD5SIG
#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_keylen ?		 \
				 &(struct tcp_md5sig_key) {		 \
					.key = (twsk)->tw_md5_key,	 \
					.keylen = (twsk)->tw_md5_keylen, \
				} : NULL)
#else
#define tcp_twsk_md5_key(twsk)	NULL
#endif

extern struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *);
extern void tcp_free_md5sig_pool(void);

extern struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
extern void tcp_put_md5sig_pool(void);

extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *);
extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *,
				 unsigned header_len);
extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
			    struct tcp_md5sig_key *key);

/* write queue abstraction */
static inline void tcp_write_queue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
		sk_wmem_free_skb(sk, skb);
	sk_mem_reclaim(sk);
	tcp_clear_all_retrans_hints(tcp_sk(sk));
}

static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
{
	return skb_peek(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
{
	return skb_peek_tail(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb)
{
	return skb_queue_next(&sk->sk_write_queue, skb);
}

static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_buff *skb)
{
	return skb_queue_prev(&sk->sk_write_queue, skb);
}

#define tcp_for_write_queue(skb, sk)					\
	skb_queue_walk(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from(skb, sk)				\
	skb_queue_walk_from(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)

static inline struct sk_buff *tcp_send_head(struct sock *sk)
{
	return sk->sk_send_head;
}

static inline bool tcp_skb_is_last(const struct sock *sk,
				   const struct sk_buff *skb)
{
	return skb_queue_is_last(&sk->sk_write_queue, skb);
}

static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
{
	if (tcp_skb_is_last(sk, skb))
		sk->sk_send_head = NULL;
	else
		sk->sk_send_head = tcp_write_queue_next(sk, skb);
}

static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
{
	if (sk->sk_send_head == skb_unlinked)
		sk->sk_send_head = NULL;
}

static inline void tcp_init_send_head(struct sock *sk)
{
	sk->sk_send_head = NULL;
}

static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);
}

static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__tcp_add_write_queue_tail(sk, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_send_head == NULL) {
		sk->sk_send_head = skb;

		if (tcp_sk(sk)->highest_sack == NULL)
			tcp_sk(sk)->highest_sack = skb;
	}
}

static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_head(&sk->sk_write_queue, skb);
}

/* Insert buff after skb on the write queue of sk. */
static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
						struct sk_buff *buff,
						struct sock *sk)
{
	__skb_queue_after(&sk->sk_write_queue, skb, buff);
}

/* Insert new before skb on the write queue of sk. */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						 struct sk_buff *skb,
						 struct sock *sk)
{
	__skb_queue_before(&sk->sk_write_queue, skb, new);