/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#define TCP_DEBUG 1
#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/dmaengine.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>

#include <linux/seq_file.h>

extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* The least MTU to use for probing */
#define TCP_BASE_MSS		512

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal reordering. */
#define TCP_MAX_REORDERING	127

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 5	/* number of times to retry active opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_SYNACK_RETRIES 5	/* number of times to retry passive opening a
				 * connection: ~180sec is RFC minimum	*/


#define TCP_ORPHAN_RETRIES 7	/* number of times to retry on an orphaned
				 * socket. 7 is ~50sec-16min.
				 */


#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */

#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes */
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */

#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal to
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to that
					 * provided by the timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than the
					 * minimal timewait lifetime.
					 */
/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */

/*
 *	TCP option lengths
 */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10
#define TCPOLEN_MD5SIG		18

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4
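
/* Worked example (illustration only, not from the original source): a
 * typical SYN carries MSS (4 bytes) + aligned timestamps (12) + aligned
 * window scale (4) + aligned SACK-permitted (4) = 24 bytes, well inside
 * the MAX_TCP_OPTION_SPACE budget of 40; adding the aligned MD5 signature
 * option (20) would reach 44 bytes and exceed the budget, so some options
 * have to be sacrificed in that case.
 */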

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

extern struct inet_timewait_death_row tcp_death_row;

/* sysctl variables for tcp */
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fin_timeout;
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_syn_retries;
extern int sysctl_tcp_synack_retries;
extern int sysctl_tcp_retries1;
extern int sysctl_tcp_retries2;
extern int sysctl_tcp_orphan_retries;
extern int sysctl_tcp_syncookies;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_ecn;
extern int sysctl_tcp_dsack;
extern int sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_frto_response;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_dma_copybreak;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_abc;
extern int sysctl_tcp_mtu_probing;
extern int sysctl_tcp_base_mss;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_max_ssthresh;

extern atomic_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;

/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline int before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1)	before(seq1, seq2)

/* is s2<=s1<=s3 ? */
static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
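
/* Worked example (illustration only): with 32-bit wraparound,
 * before(0xfffffffe, 0x00000001) is true since
 * (__s32)(0xfffffffe - 0x00000001) == -3 < 0, and
 * between(0x00000000, 0xfffffff0, 0x00000010) is true since
 * 0x10 - 0xfffffff0 == 0x20 >= 0x0 - 0xfffffff0 == 0x10 (mod 2^32).
 */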

static inline int tcp_too_many_orphans(struct sock *sk, int num)
{
	return (num > sysctl_tcp_max_orphans) ||
		(sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
		 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]);
}

/* syncookies: remember time of last synqueue overflow */
static inline void tcp_synq_overflow(struct sock *sk)
{
	tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline int tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
	return time_after(jiffies, last_overflow + TCP_TIMEOUT_INIT);
}

extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)

extern void tcp_v4_err(struct sk_buff *skb, u32);

extern void tcp_shutdown(struct sock *sk, int how);

extern int tcp_v4_rcv(struct sk_buff *skb);

extern int tcp_v4_remember_stamp(struct sock *sk);

extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);

extern int tcp_sendmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t size);
extern ssize_t tcp_sendpage(struct socket *sock, struct page *page,
			    int offset, size_t size, int flags);

extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);

extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
				 struct tcphdr *th, unsigned len);

extern int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
			       struct tcphdr *th, unsigned len);

extern void tcp_rcv_space_adjust(struct sock *sk);

extern void tcp_cleanup_rbuf(struct sock *sk, int copied);

extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);

extern void tcp_twsk_destructor(struct sock *sk);

extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags);

static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

extern void tcp_enter_quickack_mode(struct sock *sk);

static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
{
	rx_opt->tstamp_ok = rx_opt->sack_ok = rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
}

#define TCP_ECN_OK		1
#define TCP_ECN_QUEUE_CWR	2
#define TCP_ECN_DEMAND_CWR	4

static __inline__ void
TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th)
{
	if (sysctl_tcp_ecn && th->ece && th->cwr)
		inet_rsk(req)->ecn_ok = 1;
}

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};

extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
						     struct sk_buff *skb,
						     const struct tcphdr *th);

extern struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct request_sock **prev);
extern int tcp_child_process(struct sock *parent, struct sock *child,
			     struct sk_buff *skb);
extern int tcp_use_frto(struct sock *sk);
extern void tcp_enter_frto(struct sock *sk);
extern void tcp_enter_loss(struct sock *sk, int how);
extern void tcp_clear_retrans(struct tcp_sock *tp);
extern void tcp_update_metrics(struct sock *sk);

extern void tcp_close(struct sock *sk, long timeout);
extern unsigned int tcp_poll(struct file *file, struct socket *sock,
			     struct poll_table_struct *wait);

extern int tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen);
extern int tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen);
extern int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, int __user *optlen);
extern int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, unsigned int optlen);
extern void tcp_set_keepalive(struct sock *sk, int val);
extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk,
		       struct msghdr *msg, size_t len, int nonblock,
		       int flags, int *addr_len);

extern void tcp_parse_options(struct sk_buff *skb,
			      struct tcp_options_received *opt_rx,
			      int estab, struct dst_entry *dst);

extern u8 *tcp_parse_md5sig_option(struct tcphdr *th);

/*
 *	TCP v4 functions exported for the inet6 API
 */

extern void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);

extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);

extern struct sock *tcp_create_openreq_child(struct sock *sk,
					     struct request_sock *req,
					     struct sk_buff *skb);

extern struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst);

extern int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);

extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len);

extern int tcp_connect(struct sock *sk);

extern struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
				       struct request_sock *req,
				       struct request_values *rvp);

extern int tcp_disconnect(struct sock *sk, int flags);

/* From syncookies.c */
extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
				    struct ip_options *opt);
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);

extern __u32 cookie_init_timestamp(struct request_sock *req);
extern void cookie_check_timestamp(struct tcp_options_received *tcp_opt);

/* From net/ipv6/syncookies.c */
extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);

/* tcp_output.c */

extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
				      int nonagle);
extern int tcp_may_send_now(struct sock *sk);
extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_retransmit_timer(struct sock *sk);
extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *);
extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);

extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern int tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
extern int tcp_send_synack(struct sock *);
extern void tcp_push_one(struct sock *, unsigned int mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);

/* tcp_input.c */
extern void tcp_cwnd_application_limited(struct sock *sk);

/* tcp_timer.c */
extern void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	inet_csk_clear_xmit_timers(sk);
}

extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
extern unsigned int tcp_current_mss(struct sock *sk);

/* Bound MSS / TSO packet size to half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	if (tp->max_window && pktsize > (tp->max_window >> 1))
		return max(tp->max_window >> 1, 68U - tp->tcp_header_len);
	else
		return pktsize;
}

/* tcp.c */
extern void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
			       unsigned int, size_t);
extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
			 sk_read_actor_t recv_actor);

extern void tcp_initialize_rcv_mss(struct sock *sk);

extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
extern int tcp_mss_to_mtu(struct sock *sk, int mss);
extern void tcp_mtup_init(struct sock *sk);

static inline void tcp_bound_rto(const struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return (tp->srtt >> 3) + tp->rttvar;
}
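
/* Illustration (not in the original header): srtt is kept left-shifted
 * by 3, i.e. in units of jiffies/8, so with tp->srtt == 800 (a smoothed
 * RTT of 100 jiffies) and tp->rttvar == 25, __tcp_set_rto() returns
 * 800/8 + 25 == 125 jiffies; tcp_bound_rto() would clamp the result only
 * if it exceeded TCP_RTO_MAX.
 */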

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (skb_queue_empty(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = TCP_RTO_MIN;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result. The caller does these things
 * if necessary. This is a "raw" window selection.
 */
extern u32 __tcp_select_window(struct sock *sk);

/* TCP timestamps are only 32 bits; this causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below. We decided
 * to use only the low 32 bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp		((__u32)(jiffies))

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission
 * code. We also store the host-order sequence numbers in
 * here too. This is 36 bytes on 32-bit architectures,
 * 40 bytes on 64-bit machines; if this grows please adjust
 * skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		struct inet6_skb_parm	h6;
#endif
	} header;	/* For incoming frames		*/
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	__u32		when;		/* used to compute rtt's	*/
	__u8		flags;		/* TCP header flags.		*/

	/* NOTE: These must match up to the flags byte in a
	 *       real TCP header.
	 */
#define TCPCB_FLAG_FIN		0x01
#define TCPCB_FLAG_SYN		0x02
#define TCPCB_FLAG_RST		0x04
#define TCPCB_FLAG_PSH		0x08
#define TCPCB_FLAG_ACK		0x10
#define TCPCB_FLAG_URG		0x20
#define TCPCB_FLAG_ECE		0x40
#define TCPCB_FLAG_CWR		0x80

	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/

#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)

	__u32		ack_seq;	/* Sequence number ACK'd	*/
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
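
/* Usage sketch (illustration only): the control block carries the
 * host-order sequence range covered by a queued skb:
 *
 *	u32 start = TCP_SKB_CB(skb)->seq;
 *	u32 end   = TCP_SKB_CB(skb)->end_seq;	// seq + SYN + FIN + datalen
 */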

/* Due to TSO, an SKB can be composed of multiple actual
 * packets. To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_segs;
}

/* This is valid iff tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_FRTO,		/* fast recovery timeout */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_FAST_ACK,	/* in sequence ack */
	CA_EVENT_SLOW_ACK,	/* other ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CONG_NON_RESTRICTED 0x1
#define TCP_CONG_RTT_STAMP	0x2

struct tcp_congestion_ops {
	struct list_head	list;
	unsigned long		flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data (optional) */
	void (*release)(struct sock *sk);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);
	/* lower bound for congestion window (optional) */
	u32 (*min_cwnd)(const struct sock *sk);
	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight);
	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);
	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
	/* new value of cwnd after loss (optional) */
	u32  (*undo_cwnd)(struct sock *sk);
	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
	/* get info for inet_diag (optional) */
	void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);

	char		name[TCP_CA_NAME_MAX];
	struct module	*owner;
};

extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

extern void tcp_init_congestion_control(struct sock *sk);
extern void tcp_cleanup_congestion_control(struct sock *sk);
extern int tcp_set_default_congestion_control(const char *name);
extern void tcp_get_default_congestion_control(char *name);
extern void tcp_get_available_congestion_control(char *buf, size_t len);
extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
extern int tcp_set_allowed_congestion_control(char *allowed);
extern int tcp_set_congestion_control(struct sock *sk, const char *name);
extern void tcp_slow_start(struct tcp_sock *tp);
extern void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);

extern struct tcp_congestion_ops tcp_init_congestion_ops;
extern u32 tcp_reno_ssthresh(struct sock *sk);
extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
extern u32 tcp_reno_min_cwnd(const struct sock *sk);
extern struct tcp_congestion_ops tcp_reno;
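
/* A minimal congestion-control module sketch (illustration only, not part
 * of the original header); it reuses the Reno helpers exported above and
 * shows the two hooks the comments in tcp_congestion_ops mark as required
 * (ssthresh and cong_avoid). The name "example" is made up:
 *
 *	static struct tcp_congestion_ops tcp_example = {
 *		.flags		= TCP_CONG_NON_RESTRICTED,
 *		.name		= "example",
 *		.owner		= THIS_MODULE,
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *		.min_cwnd	= tcp_reno_min_cwnd,
 *	};
 *
 *	static int __init tcp_example_register(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_example);
 *	}
 *
 *	static void __exit tcp_example_unregister(void)
 *	{
 *		tcp_unregister_congestion_control(&tcp_example);
 *	}
 *
 *	module_init(tcp_example_register);
 *	module_exit(tcp_example_unregister);
 */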

static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* These functions determine how the current flow behaves with respect to SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 * tcp_is_fack - FACK enabled, implies SACK enabled
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok;
}

static inline int tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline int tcp_is_fack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok & 2;
}

static inline void tcp_enable_fack(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= 2;
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge. In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control; use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
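
/* Worked example (illustration only): with packets_out == 10,
 * sacked_out == 3, lost_out == 2 and retrans_out == 1,
 * tcp_left_out() == 5 and tcp_packets_in_flight() == 10 - 5 + 1 == 6.
 */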

#define TCP_INFINITE_SSTHRESH	0x7fffffff

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is the rate halving phase, when cwnd is decreasing
 * towards ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}
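
/* Illustration: outside CWR/Recovery, snd_cwnd == 40 and snd_ssthresh == 20
 * give tcp_current_ssthresh() == max(20, 40/2 + 40/4) == 30, i.e. three
 * quarters of cwnd.
 */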

/* Use a define here intentionally to get the WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)

extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);

/* Slow start with delack produces 3 packets of burst, so that
 * it is safe "de facto". This will be the default - same as
 * the default reordering threshold - but if reordering increases,
 * we must be able to allow cwnd to burst at least this much in order
 * to not pull it back when holes are filled.
 */
static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
{
	return tp->reordering;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}

extern int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);

static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
				       const struct sk_buff *skb)
{
	if (skb->len < mss)
		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}

static inline void tcp_check_probe_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (!tp->packets_out && !icsk->icsk_pending)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  icsk->icsk_rto, TCP_RTO_MAX);
}

static inline void tcp_push_pending_frames(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete(skb);
}

static inline int tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__tcp_checksum_complete(skb);
}

/* Prequeue for VJ style copy to user, combined with checksumming. */

static inline void tcp_prequeue_init(struct tcp_sock *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
#ifdef CONFIG_NET_DMA
	tp->ucopy.dma_chan = NULL;
	tp->ucopy.wakeup = 0;
	tp->ucopy.pinned_list = NULL;
	tp->ucopy.dma_cookie = 0;
#endif
}

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see why it failed. 8)8) --ANK
 *
 * NOTE: is this not too big to inline?
 */
static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return 0;

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_poll(sk->sk_sleep,
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return 1;
}


#undef STATE_TRACE

#ifdef STATE_TRACE
static const char *statename[] = {
	"Unused", "Established", "Syn Sent", "Syn Recv",
	"Fin Wait 1", "Fin Wait 2", "Time Wait", "Close",
	"Close Wait", "Last ACK", "Listen", "Closing"
};
#endif
extern void tcp_set_state(struct sock *sk, int state);

extern void tcp_done(struct sock *sk);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

/* Determine a window scaling and initial window to offer. */
extern void tcp_select_initial_window(int __space, __u32 mss,
				      __u32 *rcv_wnd, __u32 *window_clamp,
				      int wscale_ok, __u8 *rcv_wscale);

static inline int tcp_win_from_space(int space)
{
	return sysctl_tcp_adv_win_scale <= 0 ?
		(space >> (-sysctl_tcp_adv_win_scale)) :
		space - (space >> sysctl_tcp_adv_win_scale);
}
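
/* Illustration (assuming the usual default sysctl_tcp_adv_win_scale == 2):
 * tcp_win_from_space(65536) == 65536 - 65536/4 == 49152, reserving a
 * quarter of the buffer for overhead; a scale <= 0 instead returns
 * space >> (-scale).
 */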

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf);
}

static inline void tcp_openreq_init(struct request_sock *req,
				    struct tcp_options_received *rx_opt,
				    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	req->cookie_ts = 0;
	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
	req->mss = rx_opt->mss_clamp;
	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
	ireq->tstamp_ok = rx_opt->tstamp_ok;
	ireq->sack_ok = rx_opt->sack_ok;
	ireq->snd_wscale = rx_opt->snd_wscale;
	ireq->wscale_ok = rx_opt->wscale_ok;
	ireq->acked = 0;
	ireq->ecn_ok = 0;
	ireq->rmt_port = tcp_hdr(skb)->source;
	ireq->loc_port = tcp_hdr(skb)->dest;
}

extern void tcp_enter_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
	return tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
}

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}

static inline int tcp_paws_check(const struct tcp_options_received *rx_opt,
				 int paws_win)
{
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return 1;
	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
		return 1;

	return 0;
}
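
/* Worked example (illustration only): tcp_paws_check(rx_opt, TCP_PAWS_WINDOW)
 * accepts a segment whose rcv_tsval lags ts_recent by at most 1, e.g.
 * ts_recent == 1000 with rcv_tsval == 999 passes ((s32)(1000 - 999) <= 1),
 * while rcv_tsval == 998 is rejected unless ts_recent is more than
 * TCP_PAWS_24DAYS seconds stale.
 */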

static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
				  int rst)
{
	if (tcp_paws_check(rx_opt, 0))
		return 0;

	/* RST segments are not recommended to carry timestamp,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   Certainly, it is a mistake. It is necessary to understand the reasons
	   of this constraint to relax it: if peer reboots, clock may go
	   out-of-sync and half-open connections will not be reset.
	   Actually, the problem would not exist if all
	   the implementations followed the draft about maintaining clock
	   via reboots. Linux-2.2 DOES NOT!

	   However, we can relax time bounds for RST segments to MSL.
	 */
	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
		return 0;
	return 1;
}

#define TCP_CHECK_TIMER(sk) do { } while (0)

static inline void tcp_mib_init(struct net *net)
{
	/* See RFC 2012 */
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
}

/* from STCP */
static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
{
	tp->lost_skb_hint = NULL;
	tp->scoreboard_skb_hint = NULL;
}

static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
	tcp_clear_retrans_hints_partial(tp);
	tp->retransmit_skb_hint = NULL;
}

/* MD5 Signature */
struct crypto_hash;

/* - key database */
struct tcp_md5sig_key {
	u8			*key;
	u8			keylen;
};

struct tcp4_md5sig_key {
	struct tcp_md5sig_key	base;
	__be32			addr;
};

struct tcp6_md5sig_key {
	struct tcp_md5sig_key	base;
#if 0
	u32			scope_id;	/* XXX */
#endif
	struct in6_addr		addr;
};

/* - sock block */
struct tcp_md5sig_info {
	struct tcp4_md5sig_key	*keys4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct tcp6_md5sig_key	*keys6;
	u32			entries6;
	u32			alloced6;
#endif
	u32			entries4;
	u32			alloced4;
};

/* - pseudo header */
struct tcp4_pseudohdr {
	__be32		saddr;
	__be32		daddr;
	__u8		pad;
	__u8		protocol;
	__be16		len;
};

struct tcp6_pseudohdr {
	struct in6_addr	saddr;
	struct in6_addr	daddr;
	__be32		len;
	__be32		protocol;	/* including padding */
};

union tcp_md5sum_block {
	struct tcp4_pseudohdr ip4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct tcp6_pseudohdr ip6;
#endif
};

/* - pool: digest algorithm, hash description and scratch buffer */
struct tcp_md5sig_pool {
	struct hash_desc	md5_desc;
	union tcp_md5sum_block	md5_blk;
};

#define TCP_MD5SIG_MAXKEYS	(~(u32)0)	/* really?! */

/* - functions */
extern int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       struct sock *sk, struct request_sock *req,
			       struct sk_buff *skb);

extern struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
						struct sock *addr_sk);

extern int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, u8 *newkey,
			     u8 newkeylen);

extern int tcp_v4_md5_do_del(struct sock *sk, __be32 addr);

#ifdef CONFIG_TCP_MD5SIG
#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_keylen ?		 \
				 &(struct tcp_md5sig_key) {		 \
					.key = (twsk)->tw_md5_key,	 \
					.keylen = (twsk)->tw_md5_keylen, \
				} : NULL)
#else
#define tcp_twsk_md5_key(twsk)	NULL
#endif

extern struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(struct sock *);
extern void tcp_free_md5sig_pool(void);

extern struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu);
extern void __tcp_put_md5sig_pool(void);
extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *);
extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *,
				 unsigned header_len);
extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
			    struct tcp_md5sig_key *key);

static inline struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
{
	int cpu = get_cpu();
	struct tcp_md5sig_pool *ret = __tcp_get_md5sig_pool(cpu);

	if (!ret)
		put_cpu();
	return ret;
}

static inline void tcp_put_md5sig_pool(void)
{
	__tcp_put_md5sig_pool();
	put_cpu();
}
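
/* Usage sketch (illustration only): a signer grabs a per-CPU pool, hashes,
 * and releases it; get_cpu()/put_cpu() pair up inside the helpers, so the
 * caller must not sleep in between:
 *
 *	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();
 *
 *	if (hp) {
 *		tcp_md5_hash_header(hp, th);
 *		tcp_md5_hash_key(hp, key);
 *		tcp_put_md5sig_pool();
 *	}
 */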

/* write queue abstraction */
static inline void tcp_write_queue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
		sk_wmem_free_skb(sk, skb);
	sk_mem_reclaim(sk);
}

static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
{
	return skb_peek(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
{
	return skb_peek_tail(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_next(struct sock *sk,
						   struct sk_buff *skb)
{
	return skb_queue_next(&sk->sk_write_queue, skb);
}

static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk,
						   struct sk_buff *skb)
{
	return skb_queue_prev(&sk->sk_write_queue, skb);
}

#define tcp_for_write_queue(skb, sk)					\
	skb_queue_walk(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from(skb, sk)				\
	skb_queue_walk_from(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
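
/* Usage sketch (illustration only): totalling TSO segments on the write
 * queue with the plain walk macro (the _safe variant is needed if skbs
 * may be unlinked during the walk):
 *
 *	struct sk_buff *skb;
 *	unsigned int segs = 0;
 *
 *	tcp_for_write_queue(skb, sk)
 *		segs += tcp_skb_pcount(skb);
 */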

/* This function calculates a "timeout" which is equivalent to the timeout of a
 * TCP connection after "boundary" unsuccessful, exponentially backed-off
 * retransmissions with an initial RTO of TCP_RTO_MIN.
 */
static inline bool retransmits_timed_out(const struct sock *sk,
					 unsigned int boundary)
{
	unsigned int timeout, linear_backoff_thresh;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN);

	if (boundary <= linear_backoff_thresh)
		timeout = ((2 << boundary) - 1) * TCP_RTO_MIN;
	else
		timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN +
			  (boundary - linear_backoff_thresh) * TCP_RTO_MAX;

	return (tcp_time_stamp - tcp_sk(sk)->retrans_stamp) >= timeout;
}
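
/* Worked example (illustration only, HZ == 1000): TCP_RTO_MIN is HZ/5 and
 * TCP_RTO_MAX is 120*HZ, so linear_backoff_thresh == ilog2(600) == 9.
 * For boundary == 3, still in the doubling range, the timeout is
 * ((2 << 3) - 1) * TCP_RTO_MIN == 15 * 200ms == 3s, i.e.
 * TCP_RTO_MIN * (1 + 2 + 4 + 8): the initial RTO plus three doublings.
 */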

static inline struct sk_buff *tcp_send_head(struct sock *sk)
{
	return sk->sk_send_head;
}

static inline bool tcp_skb_is_last(const struct sock *sk,
				   const struct sk_buff *skb)
{
	return skb_queue_is_last(&sk->sk_write_queue, skb);
}

static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
{
	if (tcp_skb_is_last(sk, skb))
		sk->sk_send_head = NULL;
	else
		sk->sk_send_head = tcp_write_queue_next(sk, skb);
}

static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
{
	if (sk->sk_send_head == skb_unlinked)
		sk->sk_send_head = NULL;
}

static inline void tcp_init_send_head(struct sock *sk)
{
	sk->sk_send_head = NULL;
}

static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);
}

static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__tcp_add_write_queue_tail(sk, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_send_head == NULL) {
		sk->sk_send_head = skb;

		if (tcp_sk(sk)->highest_sack == NULL)
			tcp_sk(sk)->highest_sack = skb;
	}
}

static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_head(&sk->sk_write_queue, skb);
}

/* Insert buff after skb on the write queue of sk. */
static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
						struct sk_buff *buff,
						struct sock *sk)
{
	__skb_queue_after(&sk->sk_write_queue, skb, buff);
}

/* Insert new before skb on the write queue of sk. */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						 struct sk_buff *skb,
						 struct sock *sk)
{
	__skb_queue_before(&sk->sk_write_queue, skb, new);

	if (sk->sk_send_head == skb)
		sk->sk_send_head = new;
}

static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
	__skb_unlink(skb, &sk->sk_write_queue);
}

static inline int tcp_write_queue_empty(struct sock *sk)
{
	return skb_queue_empty(&sk->sk_write_queue);
}

/* Start sequence of the highest skb with SACKed bit, valid only if
 * sacked_out > 0 or when the caller has ensured validity itself.
 */
static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
{
	if (!tp->sacked_out)
		return tp->snd_una;

	if (tp->highest_sack == NULL)
		return tp->snd_nxt;

	return TCP_SKB_CB(tp->highest_sack)->seq;
}

static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
{
	tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
						tcp_write_queue_next(sk, skb);
}

static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
{
	return tcp_sk(sk)->highest_sack;
}

static inline void tcp_highest_sack_reset(struct sock *sk)
{
	tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
}

/* Called when old skb is about to be deleted (to be combined with new skb) */
static inline void tcp_highest_sack_combine(struct sock *sk,
					    struct sk_buff *old,
					    struct sk_buff *new)
{
	if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
		tcp_sk(sk)->highest_sack = new;
}

/* /proc */
enum tcp_seq_states {
	TCP_SEQ_STATE_LISTENING,
	TCP_SEQ_STATE_OPENREQ,
	TCP_SEQ_STATE_ESTABLISHED,
	TCP_SEQ_STATE_TIME_WAIT,
};

struct tcp_seq_afinfo {
	char			*name;
	sa_family_t		family;
	struct file_operations	seq_fops;
	struct seq_operations	seq_ops;
};

struct tcp_iter_state {
	struct seq_net_private	p;
	sa_family_t		family;
	enum tcp_seq_states	state;
	struct sock		*syn_wait_sk;
	int			bucket, sbucket, num, uid;
};

extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
extern void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);

extern struct request_sock_ops tcp_request_sock_ops;
extern struct request_sock_ops tcp6_request_sock_ops;

extern void tcp_v4_destroy_sock(struct sock *sk);

extern int tcp_v4_gso_send_check(struct sk_buff *skb);
extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
					struct sk_buff *skb);
extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb);
extern int tcp_gro_complete(struct sk_buff *skb);
extern int tcp4_gro_complete(struct sk_buff *skb);

#ifdef CONFIG_PROC_FS
extern int tcp4_proc_init(void);
extern void tcp4_proc_exit(void);
#endif

/* TCP af-specific functions */
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
						struct sock *addr_sk);
	int			(*calc_md5_hash) (char *location,
						  struct tcp_md5sig_key *md5,
						  struct sock *sk,
						  struct request_sock *req,
						  struct sk_buff *skb);
	int			(*md5_add) (struct sock *sk,
					    struct sock *addr_sk,
					    u8 *newkey,
					    u8 len);
	int			(*md5_parse) (struct sock *sk,
					      char __user *optval,
					      int optlen);
#endif
};

struct tcp_request_sock_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
						struct request_sock *req);
	int			(*calc_md5_hash) (char *location,
						  struct tcp_md5sig_key *md5,
						  struct sock *sk,
						  struct request_sock *req,
						  struct sk_buff *skb);
#endif
};

/* Using SHA1 for now, define some constants. */
#define COOKIE_DIGEST_WORDS	(SHA_DIGEST_WORDS)
#define COOKIE_MESSAGE_WORDS	(SHA_MESSAGE_BYTES / 4)
#define COOKIE_WORKSPACE_WORDS	(COOKIE_DIGEST_WORDS + COOKIE_MESSAGE_WORDS)

extern int tcp_cookie_generator(u32 *bakery);

extern void tcp_v4_init(void);
extern void tcp_init(void);

#endif	/* _TCP_H */