/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#define TCP_DEBUG 1
#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/dmaengine.h>
#include <linux/crypto.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>

#include <linux/seq_file.h>

extern struct inet_hashinfo tcp_hashinfo;

extern atomic_t tcp_orphan_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	(128 + MAX_HEADER)

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* Minimal RCV_MSS. */
#define TCP_MIN_RCVMSS		536U

/* The least MTU to use for probing */
#define TCP_BASE_MSS		512

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH	3

/* Maximal reordering. */
#define TCP_MAX_REORDERING	127

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 5	/* number of times to retry active opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_SYNACK_RETRIES 5	/* number of times to retry passive opening a
				 * connection: ~180sec is RFC minimum	*/


#define TCP_ORPHAN_RETRIES 7	/* number of times to retry on an orphaned
				 * socket. 7 is ~50sec-16min.
				 */


#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value	*/

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */

#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */

#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to one
					 * provided by timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * minimal timewait lifetime.
					 */
/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */

/*
 *	TCP option lengths
 */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10
#define TCPOLEN_MD5SIG		18

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

extern struct inet_timewait_death_row tcp_death_row;

/* sysctl variables for tcp */
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fin_timeout;
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_syn_retries;
extern int sysctl_tcp_synack_retries;
extern int sysctl_tcp_retries1;
extern int sysctl_tcp_retries2;
extern int sysctl_tcp_orphan_retries;
extern int sysctl_tcp_syncookies;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_ecn;
extern int sysctl_tcp_dsack;
extern int sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_frto_response;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_dma_copybreak;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_abc;
extern int sysctl_tcp_mtu_probing;
extern int sysctl_tcp_base_mss;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_max_ssthresh;

extern atomic_t tcp_memory_allocated;
extern atomic_t tcp_sockets_allocated;
extern int tcp_memory_pressure;

/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline int before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1)	before(seq1, seq2)

/* is s2<=s1<=s3 ? */
static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}

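/* Worked example (illustrative, not from the original source): the cast to
 * a signed difference is what keeps before()/after() correct across
 * sequence-number wraparound.  With seq1 = 0xfffffff0 and seq2 = 0x00000010,
 * (__s32)(seq1 - seq2) == (__s32)0xffffffe0 == -32 < 0, so
 * before(seq1, seq2) is true even though seq1 > seq2 as plain unsigned
 * values.
 */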

extern struct proto tcp_prot;

DECLARE_SNMP_STAT(struct tcp_mib, tcp_statistics);
#define TCP_INC_STATS(field)		SNMP_INC_STATS(tcp_statistics, field)
#define TCP_INC_STATS_BH(field)		SNMP_INC_STATS_BH(tcp_statistics, field)
#define TCP_INC_STATS_USER(field)	SNMP_INC_STATS_USER(tcp_statistics, field)
#define TCP_DEC_STATS(field)		SNMP_DEC_STATS(tcp_statistics, field)
#define TCP_ADD_STATS_BH(field, val)	SNMP_ADD_STATS_BH(tcp_statistics, field, val)
#define TCP_ADD_STATS_USER(field, val)	SNMP_ADD_STATS_USER(tcp_statistics, field, val)

extern void			tcp_v4_err(struct sk_buff *skb, u32);

extern void			tcp_shutdown (struct sock *sk, int how);

extern int			tcp_v4_rcv(struct sk_buff *skb);

extern int			tcp_v4_remember_stamp(struct sock *sk);

extern int			tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);

extern int			tcp_sendmsg(struct kiocb *iocb, struct sock *sk,
					    struct msghdr *msg, size_t size);
extern ssize_t			tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags);

extern int			tcp_ioctl(struct sock *sk,
					  int cmd,
					  unsigned long arg);

extern int			tcp_rcv_state_process(struct sock *sk,
						      struct sk_buff *skb,
						      struct tcphdr *th,
						      unsigned len);

extern int			tcp_rcv_established(struct sock *sk,
						    struct sk_buff *skb,
						    struct tcphdr *th,
						    unsigned len);

extern void			tcp_rcv_space_adjust(struct sock *sk);

extern void			tcp_cleanup_rbuf(struct sock *sk, int copied);

extern int			tcp_twsk_unique(struct sock *sk,
						struct sock *sktw, void *twp);

extern void			tcp_twsk_destructor(struct sock *sk);

static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

extern void tcp_enter_quickack_mode(struct sock *sk);

static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
{
	rx_opt->tstamp_ok = rx_opt->sack_ok = rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
}

enum tcp_tw_status
{
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};


extern enum tcp_tw_status	tcp_timewait_state_process(struct inet_timewait_sock *tw,
							   struct sk_buff *skb,
							   const struct tcphdr *th);

extern struct sock *		tcp_check_req(struct sock *sk, struct sk_buff *skb,
					      struct request_sock *req,
					      struct request_sock **prev);
extern int			tcp_child_process(struct sock *parent,
						  struct sock *child,
						  struct sk_buff *skb);
extern int			tcp_use_frto(struct sock *sk);
extern void			tcp_enter_frto(struct sock *sk);
extern void			tcp_enter_loss(struct sock *sk, int how);
extern void			tcp_clear_retrans(struct tcp_sock *tp);
extern void			tcp_update_metrics(struct sock *sk);

extern void			tcp_close(struct sock *sk,
					  long timeout);
extern unsigned int		tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait);

extern int			tcp_getsockopt(struct sock *sk, int level,
					       int optname,
					       char __user *optval,
					       int __user *optlen);
extern int			tcp_setsockopt(struct sock *sk, int level,
					       int optname, char __user *optval,
					       int optlen);
extern int			compat_tcp_getsockopt(struct sock *sk,
					int level, int optname,
					char __user *optval, int __user *optlen);
extern int			compat_tcp_setsockopt(struct sock *sk,
					int level, int optname,
					char __user *optval, int optlen);
extern void			tcp_set_keepalive(struct sock *sk, int val);
extern int			tcp_recvmsg(struct kiocb *iocb, struct sock *sk,
					    struct msghdr *msg,
					    size_t len, int nonblock,
					    int flags, int *addr_len);

extern void			tcp_parse_options(struct sk_buff *skb,
						  struct tcp_options_received *opt_rx,
						  int estab);

/*
 *	TCP v4 functions exported for the inet6 API
 */

extern void			tcp_v4_send_check(struct sock *sk, int len,
						  struct sk_buff *skb);

extern int			tcp_v4_conn_request(struct sock *sk,
						    struct sk_buff *skb);

extern struct sock *		tcp_create_openreq_child(struct sock *sk,
							 struct request_sock *req,
							 struct sk_buff *skb);

extern struct sock *		tcp_v4_syn_recv_sock(struct sock *sk,
						     struct sk_buff *skb,
						     struct request_sock *req,
						     struct dst_entry *dst);

extern int			tcp_v4_do_rcv(struct sock *sk,
					      struct sk_buff *skb);

extern int			tcp_v4_connect(struct sock *sk,
					       struct sockaddr *uaddr,
					       int addr_len);

extern int			tcp_connect(struct sock *sk);

extern struct sk_buff *		tcp_make_synack(struct sock *sk,
						struct dst_entry *dst,
						struct request_sock *req);

extern int			tcp_disconnect(struct sock *sk, int flags);

extern void			tcp_unhash(struct sock *sk);

/* From syncookies.c */
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
				    struct ip_options *opt);
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);

/* tcp_output.c */

extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
				      int nonagle);
extern int tcp_may_send_now(struct sock *sk);
extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *);
extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);

extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern int tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
extern int tcp_send_synack(struct sock *);
extern void tcp_push_one(struct sock *, unsigned int mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);

/* tcp_input.c */
extern void tcp_cwnd_application_limited(struct sock *sk);

/* tcp_timer.c */
extern void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	inet_csk_clear_xmit_timers(sk);
}

extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
extern unsigned int tcp_current_mss(struct sock *sk, int large);

/* tcp.c */
extern void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
			       unsigned int, size_t);
extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
			 sk_read_actor_t recv_actor);

extern void tcp_initialize_rcv_mss(struct sock *sk);

extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
extern int tcp_mss_to_mtu(struct sock *sk, int mss);
extern void tcp_mtup_init(struct sock *sk);

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (skb_queue_empty(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}

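/* Example (illustrative values, not from the original source): with
 * rcv_wup = 1000, rcv_wnd = 5000 and rcv_nxt = 3000, 2000 bytes of the
 * offered window have already been consumed, so tcp_receive_window()
 * returns 1000 + 5000 - 3000 = 3000.  If the peer had pushed past the
 * advertised edge (rcv_nxt > rcv_wup + rcv_wnd), the negative result is
 * clamped to 0.
 */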
/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result.  The caller does these things
 * if necessary.  This is a "raw" window selection.
 */
extern u32 __tcp_select_window(struct sock *sk);

/* TCP timestamps are only 32-bits, this causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below.  We decided
 * to use only the low 32-bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp		((__u32)(jiffies))

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission
 * code.  We also store the host-order sequence numbers in
 * here too.  This is 36 bytes on 32-bit architectures,
 * 40 bytes on 64-bit machines; if this grows please adjust
 * skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
		struct inet6_skb_parm	h6;
#endif
	} header;	/* For incoming frames		*/
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	__u32		when;		/* used to compute rtt's	*/
	__u8		flags;		/* TCP header flags.		*/

	/* NOTE: These must match up to the flags byte in a
	 *       real TCP header.
	 */
#define TCPCB_FLAG_FIN		0x01
#define TCPCB_FLAG_SYN		0x02
#define TCPCB_FLAG_RST		0x04
#define TCPCB_FLAG_PSH		0x08
#define TCPCB_FLAG_ACK		0x10
#define TCPCB_FLAG_URG		0x20
#define TCPCB_FLAG_ECE		0x40
#define TCPCB_FLAG_CWR		0x80

	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/

#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)

#define TCPCB_URG		0x20	/* Urgent pointer advanced here	*/

#define TCPCB_AT_TAIL		(TCPCB_URG)

	__u16		urg_ptr;	/* Valid when the URG flag is set */
	__u32		ack_seq;	/* Sequence number ACK'd	*/
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))

#include <net/tcp_ecn.h>

/* Due to TSO, an SKB can be composed of multiple actual
 * packets.  To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_segs;
}

/* This is valid iff tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

static inline void tcp_dec_pcount_approx(__u32 *count,
					 const struct sk_buff *skb)
{
	if (*count) {
		*count -= tcp_skb_pcount(skb);
		if ((int)*count < 0)
			*count = 0;
	}
}

static inline void tcp_packets_out_inc(struct sock *sk,
				       const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int orig = tp->packets_out;

	tp->packets_out += tcp_skb_pcount(skb);
	if (!orig)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
}

static inline void tcp_packets_out_dec(struct tcp_sock *tp,
				       const struct sk_buff *skb)
{
	tp->packets_out -= tcp_skb_pcount(skb);
}

/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_FRTO,		/* fast recovery timeout */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_FAST_ACK,	/* in sequence ack */
	CA_EVENT_SLOW_ACK,	/* other ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CONG_NON_RESTRICTED 0x1
#define TCP_CONG_RTT_STAMP	0x2

struct tcp_congestion_ops {
	struct list_head	list;
	unsigned long		flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data (optional) */
	void (*release)(struct sock *sk);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);
	/* lower bound for congestion window (optional) */
	u32 (*min_cwnd)(const struct sock *sk);
	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack,
			   u32 rtt, u32 in_flight, int good_ack);
	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);
	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
	/* new value of cwnd after loss (optional) */
	u32  (*undo_cwnd)(struct sock *sk);
	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, u32 num_acked, ktime_t last);
	/* get info for inet_diag (optional) */
	void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);

	char		name[TCP_CA_NAME_MAX];
	struct module	*owner;
};

extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

extern void tcp_init_congestion_control(struct sock *sk);
extern void tcp_cleanup_congestion_control(struct sock *sk);
extern int tcp_set_default_congestion_control(const char *name);
extern void tcp_get_default_congestion_control(char *name);
extern void tcp_get_available_congestion_control(char *buf, size_t len);
extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
extern int tcp_set_allowed_congestion_control(char *allowed);
extern int tcp_set_congestion_control(struct sock *sk, const char *name);
extern void tcp_slow_start(struct tcp_sock *tp);

extern struct tcp_congestion_ops tcp_init_congestion_ops;
extern u32 tcp_reno_ssthresh(struct sock *sk);
extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack,
				u32 rtt, u32 in_flight, int flag);
extern u32 tcp_reno_min_cwnd(const struct sock *sk);
extern struct tcp_congestion_ops tcp_reno;

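/* Sketch (illustrative only; the module and the name "tcp_minimal" are
 * hypothetical and not part of this header): a minimal congestion control
 * algorithm only needs the required hooks plus a registration call, and
 * can reuse the Reno helpers exported above for the standard behaviour:
 *
 *	static struct tcp_congestion_ops tcp_minimal = {
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *		.min_cwnd	= tcp_reno_min_cwnd,
 *		.owner		= THIS_MODULE,
 *		.name		= "minimal",
 *	};
 *
 *	static int __init tcp_minimal_register(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_minimal);
 *	}
 *	module_init(tcp_minimal_register);
 */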
static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge.  In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return (tp->packets_out - tp->left_out + tp->retrans_out);
}

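/* Worked example (illustrative values, not from the original source):
 * with packets_out = 10, sacked_out = 2 and lost_out = 1 (so
 * left_out = 3) and retrans_out = 1, tcp_packets_in_flight() returns
 * 10 - 3 + 1 = 8 segments still presumed to be in the network.
 */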
/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is rate halving phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}

static inline void tcp_sync_left_out(struct tcp_sock *tp)
{
	BUG_ON(tp->sacked_out + tp->lost_out > tp->packets_out);
	tp->left_out = tp->sacked_out + tp->lost_out;
}

extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);

/* Slow start with delack produces 3 packets of burst, so that
 * it is safe "de facto".
 */
static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
{
	return 3;
}

/* RFC2861 Check whether we are limited by application or congestion window
 * This is the inverse of cwnd check in tcp_tso_should_defer
 */
static inline int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 left;

	if (in_flight >= tp->snd_cwnd)
		return 1;

	if (!sk_can_gso(sk))
		return 0;

	left = tp->snd_cwnd - in_flight;
	if (sysctl_tcp_tso_win_divisor)
		return left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd;
	else
		return left <= tcp_max_burst(tp);
}

static inline void tcp_minshall_update(struct tcp_sock *tp, int mss,
				       const struct sk_buff *skb)
{
	if (skb->len < mss)
		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}

static inline void tcp_check_probe_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (!tp->packets_out && !icsk->icsk_pending)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  icsk->icsk_rto, TCP_RTO_MAX);
}

static inline void tcp_push_pending_frames(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	__tcp_push_pending_frames(sk, tcp_current_mss(sk, 1), tp->nonagle);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 ack, u32 seq)
{
	tp->snd_wl1 = seq;
}

/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete(skb);
}

static inline int tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__tcp_checksum_complete(skb);
}

/* Prequeue for VJ style copy to user, combined with checksumming. */

static inline void tcp_prequeue_init(struct tcp_sock *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
#ifdef CONFIG_NET_DMA
	tp->ucopy.dma_chan = NULL;
	tp->ucopy.wakeup = 0;
	tp->ucopy.pinned_list = NULL;
	tp->ucopy.dma_cookie = 0;
#endif
}

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see why it failed. 8)8)				--ANK
 *
 * NOTE: is this not too big to inline?
 */
static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!sysctl_tcp_low_latency && tp->ucopy.task) {
		__skb_queue_tail(&tp->ucopy.prequeue, skb);
		tp->ucopy.memory += skb->truesize;
		if (tp->ucopy.memory > sk->sk_rcvbuf) {
			struct sk_buff *skb1;

			BUG_ON(sock_owned_by_user(sk));

			while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
				sk->sk_backlog_rcv(sk, skb1);
				NET_INC_STATS_BH(LINUX_MIB_TCPPREQUEUEDROPPED);
			}

			tp->ucopy.memory = 0;
		} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
			wake_up_interruptible(sk->sk_sleep);
			if (!inet_csk_ack_scheduled(sk))
				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
							  (3 * TCP_RTO_MIN) / 4,
							  TCP_RTO_MAX);
		}
		return 1;
	}
	return 0;
}


#undef STATE_TRACE

#ifdef STATE_TRACE
static const char *statename[] = {
	"Unused", "Established", "Syn Sent", "Syn Recv",
	"Fin Wait 1", "Fin Wait 2", "Time Wait", "Close",
	"Close Wait", "Last ACK", "Listen", "Closing"
};
#endif

static inline void tcp_set_state(struct sock *sk, int state)
{
	int oldstate = sk->sk_state;

	switch (state) {
	case TCP_ESTABLISHED:
		if (oldstate != TCP_ESTABLISHED)
			TCP_INC_STATS(TCP_MIB_CURRESTAB);
		break;

	case TCP_CLOSE:
		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
			TCP_INC_STATS(TCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(&tcp_hashinfo, sk);
		/* fall through */
	default:
		if (oldstate == TCP_ESTABLISHED)
			TCP_DEC_STATS(TCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	sk->sk_state = state;

#ifdef STATE_TRACE
	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
#endif
}

extern void tcp_done(struct sock *sk);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->eff_sacks = 0;
	rx_opt->num_sacks = 0;
}

/* Determine a window scaling and initial window to offer. */
extern void tcp_select_initial_window(int __space, __u32 mss,
				      __u32 *rcv_wnd, __u32 *window_clamp,
				      int wscale_ok, __u8 *rcv_wscale);

static inline int tcp_win_from_space(int space)
{
	return sysctl_tcp_adv_win_scale <= 0 ?
		(space >> (-sysctl_tcp_adv_win_scale)) :
		space - (space >> sysctl_tcp_adv_win_scale);
}

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf);
}

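/* Example (illustrative values, not from the original source): with the
 * default sysctl_tcp_adv_win_scale = 2 and space = 65536 bytes,
 * tcp_win_from_space() returns 65536 - (65536 >> 2) = 49152, reserving a
 * quarter of the buffer as application/metadata overhead.  A negative
 * scale would instead advertise only space >> (-scale).
 */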
static inline void tcp_openreq_init(struct request_sock *req,
				    struct tcp_options_received *rx_opt,
				    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
	req->mss = rx_opt->mss_clamp;
	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
	ireq->tstamp_ok = rx_opt->tstamp_ok;
	ireq->sack_ok = rx_opt->sack_ok;
	ireq->snd_wscale = rx_opt->snd_wscale;
	ireq->wscale_ok = rx_opt->wscale_ok;
	ireq->acked = 0;
	ireq->ecn_ok = 0;
	ireq->rmt_port = tcp_hdr(skb)->source;
}

extern void tcp_enter_memory_pressure(void);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}

static inline int tcp_paws_check(const struct tcp_options_received *rx_opt, int rst)
{
	if ((s32)(rx_opt->rcv_tsval - rx_opt->ts_recent) >= 0)
		return 0;
	if (get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)
		return 0;

	/* RST segments are not recommended to carry timestamp,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   Certainly, it is a mistake. It is necessary to understand the reasons
	   of this constraint to relax it: if peer reboots, clock may go
	   out-of-sync and half-open connections will not be reset.
	   Actually, the problem would not exist if all
	   the implementations followed the draft about maintaining clock
	   via reboots. Linux-2.2 DOES NOT!

	   However, we can relax time bounds for RST segments to MSL.
	 */
	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
		return 0;
	return 1;
}

#define TCP_CHECK_TIMER(sk) do { } while (0)

static inline void tcp_mib_init(void)
{
	/* See RFC 2012 */
	TCP_ADD_STATS_USER(TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS_USER(TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS_USER(TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS_USER(TCP_MIB_MAXCONN, -1);
}

/* from STCP */
static inline void clear_all_retrans_hints(struct tcp_sock *tp)
{
	tp->lost_skb_hint = NULL;
	tp->scoreboard_skb_hint = NULL;
	tp->retransmit_skb_hint = NULL;
	tp->forward_skb_hint = NULL;
	tp->fastpath_skb_hint = NULL;
}

/* MD5 Signature */
struct crypto_hash;

/* - key database */
struct tcp_md5sig_key {
	u8			*key;
	u8			keylen;
};

struct tcp4_md5sig_key {
	u8			*key;
	u16			keylen;
	__be32			addr;
};

struct tcp6_md5sig_key {
	u8			*key;
	u16			keylen;
#if 0
	u32			scope_id;	/* XXX */
#endif
	struct in6_addr		addr;
};

/* - sock block */
struct tcp_md5sig_info {
	struct tcp4_md5sig_key	*keys4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct tcp6_md5sig_key	*keys6;
	u32			entries6;
	u32			alloced6;
#endif
	u32			entries4;
	u32			alloced4;
};

/* - pseudo header */
struct tcp4_pseudohdr {
	__be32		saddr;
	__be32		daddr;
	__u8		pad;
	__u8		protocol;
	__be16		len;
};

struct tcp6_pseudohdr {
	struct in6_addr	saddr;
	struct in6_addr	daddr;
	__be32		len;
	__be32		protocol;	/* including padding */
};

union tcp_md5sum_block {
	struct tcp4_pseudohdr ip4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct tcp6_pseudohdr ip6;
#endif
};

/* - pool: digest algorithm, hash description and scratch buffer */
struct tcp_md5sig_pool {
	struct hash_desc	md5_desc;
	union tcp_md5sum_block	md5_blk;
};

#define TCP_MD5SIG_MAXKEYS	(~(u32)0)	/* really?! */

/* - functions */
extern int			tcp_v4_calc_md5_hash(char *md5_hash,
						     struct tcp_md5sig_key *key,
						     struct sock *sk,
						     struct dst_entry *dst,
						     struct request_sock *req,
						     struct tcphdr *th,
						     int protocol, int tcplen);
extern struct tcp_md5sig_key	*tcp_v4_md5_lookup(struct sock *sk,
						   struct sock *addr_sk);

extern int			tcp_v4_md5_do_add(struct sock *sk,
						  __be32 addr,
						  u8 *newkey,
						  u8 newkeylen);

extern int			tcp_v4_md5_do_del(struct sock *sk,
						  __be32 addr);

extern struct tcp_md5sig_pool	**tcp_alloc_md5sig_pool(void);
extern void			tcp_free_md5sig_pool(void);

extern struct tcp_md5sig_pool	*__tcp_get_md5sig_pool(int cpu);
extern void			__tcp_put_md5sig_pool(void);

static inline
struct tcp_md5sig_pool		*tcp_get_md5sig_pool(void)
{
	int cpu = get_cpu();
	struct tcp_md5sig_pool *ret = __tcp_get_md5sig_pool(cpu);
	if (!ret)
		put_cpu();
	return ret;
}

static inline void tcp_put_md5sig_pool(void)
{
	__tcp_put_md5sig_pool();
	put_cpu();
}

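/* Usage pattern (illustrative, not from the original source):
 * tcp_get_md5sig_pool() disables preemption via get_cpu() and returns
 * this CPU's pool, so every successful call must be paired with
 * tcp_put_md5sig_pool(); on failure the put_cpu() has already been done:
 *
 *	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();
 *	if (!hp)
 *		return;
 *	... compute the signature using hp->md5_desc and hp->md5_blk ...
 *	tcp_put_md5sig_pool();
 */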
/* write queue abstraction */
static inline void tcp_write_queue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
		sk_stream_free_skb(sk, skb);
	sk_stream_mem_reclaim(sk);
}

static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
{
	struct sk_buff *skb = sk->sk_write_queue.next;
	if (skb == (struct sk_buff *) &sk->sk_write_queue)
		return NULL;
	return skb;
}

static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
{
	struct sk_buff *skb = sk->sk_write_queue.prev;
	if (skb == (struct sk_buff *) &sk->sk_write_queue)
		return NULL;
	return skb;
}

static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb)
{
	return skb->next;
}

#define tcp_for_write_queue(skb, sk)					\
		for (skb = (sk)->sk_write_queue.next;			\
		     (skb != (struct sk_buff *)&(sk)->sk_write_queue);	\
		     skb = skb->next)

#define tcp_for_write_queue_from(skb, sk)				\
		for (; (skb != (struct sk_buff *)&(sk)->sk_write_queue);\
		     skb = skb->next)

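/* Example (illustrative, not from the original source): walking the write
 * queue with the helpers above.  Loops that only care about data already
 * handed to the network typically stop at tcp_send_head(), since skbs
 * from there onwards have not been transmitted yet:
 *
 *	struct sk_buff *skb;
 *
 *	tcp_for_write_queue(skb, sk) {
 *		if (skb == tcp_send_head(sk))
 *			break;
 *		... inspect an already-sent skb ...
 *	}
 */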
static inline struct sk_buff *tcp_send_head(struct sock *sk)
{
	return sk->sk_send_head;
}

static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	sk->sk_send_head = skb->next;
	if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
		sk->sk_send_head = NULL;
	/* Don't override Nagle indefinitely with F-RTO */
	if (tp->frto_counter == 2)
		tp->frto_counter = 3;
}

static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
{
	if (sk->sk_send_head == skb_unlinked)
		sk->sk_send_head = NULL;
}

static inline void tcp_init_send_head(struct sock *sk)
{
	sk->sk_send_head = NULL;
}

static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);
}

static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__tcp_add_write_queue_tail(sk, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = skb;
}

static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_head(&sk->sk_write_queue, skb);
}

/* Insert buff after skb on the write queue of sk. */
static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
						struct sk_buff *buff,
						struct sock *sk)
{
	__skb_append(skb, buff, &sk->sk_write_queue);
}

/* Insert skb between prev and next on the write queue of sk. */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						 struct sk_buff *skb,
						 struct sock *sk)
{
	__skb_insert(new, skb->prev, skb, &sk->sk_write_queue);
}

static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
	__skb_unlink(skb, &sk->sk_write_queue);
}

static inline int tcp_skb_is_last(const struct sock *sk,
				  const struct sk_buff *skb)
{
	return skb->next == (struct sk_buff *)&sk->sk_write_queue;
}

static inline int tcp_write_queue_empty(struct sock *sk)
{
	return skb_queue_empty(&sk->sk_write_queue);
}

/* /proc */
enum tcp_seq_states {
	TCP_SEQ_STATE_LISTENING,
	TCP_SEQ_STATE_OPENREQ,
	TCP_SEQ_STATE_ESTABLISHED,
	TCP_SEQ_STATE_TIME_WAIT,
};

struct tcp_seq_afinfo {
	struct module		*owner;
	char			*name;
	sa_family_t		family;
	int			(*seq_show) (struct seq_file *m, void *v);
	struct file_operations	*seq_fops;
};

struct tcp_iter_state {
	sa_family_t		family;
	enum tcp_seq_states	state;
	struct sock		*syn_wait_sk;
	int			bucket, sbucket, num, uid;
	struct seq_operations	seq_ops;
};

extern int tcp_proc_register(struct tcp_seq_afinfo *afinfo);
extern void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo);

extern struct request_sock_ops tcp_request_sock_ops;

extern int tcp_v4_destroy_sock(struct sock *sk);

extern int tcp_v4_gso_send_check(struct sk_buff *skb);
extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);

#ifdef CONFIG_PROC_FS
extern int  tcp4_proc_init(void);
extern void tcp4_proc_exit(void);
#endif

/* TCP af-specific functions */
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
						struct sock *addr_sk);
	int			(*calc_md5_hash) (char *location,
						  struct tcp_md5sig_key *md5,
						  struct sock *sk,
						  struct dst_entry *dst,
						  struct request_sock *req,
						  struct tcphdr *th,
						  int protocol, int len);
	int			(*md5_add) (struct sock *sk,
					    struct sock *addr_sk,
					    u8 *newkey,
					    u8 len);
	int			(*md5_parse) (struct sock *sk,
					      char __user *optval,
					      int optlen);
#endif
};

struct tcp_request_sock_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
						struct request_sock *req);
#endif
};

extern void tcp_v4_init(struct net_proto_family *ops);
extern void tcp_init(void);

#endif	/* _TCP_H */