/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp_minisocks.c,v 1.15 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>

#ifdef CONFIG_SYSCTL
#define SYNC_INIT 0 /* let the user enable it */
#else
#define SYNC_INIT 1
#endif

int sysctl_tcp_tw_recycle;
int sysctl_tcp_max_tw_buckets = NR_FILE*2;

int sysctl_tcp_syncookies = SYNC_INIT;
int sysctl_tcp_abort_on_overflow;

static void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo);

static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return 1;
	if (after(end_seq, s_win) && before(seq, e_win))
		return 1;
	return (seq == e_win && seq == end_seq);
}

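/* A worked example of tcp_in_window(), purely illustrative: with
 * s_win = 100 and e_win = 200, a bare ACK with seq == end_seq == 100
 * sits on the left edge and is accepted; seq = 90, end_seq = 150
 * overlaps the window and is accepted; seq = 90, end_seq = 95 is
 * entirely old and rejected. The final test also accepts a
 * zero-length segment sitting exactly at e_win.
 */
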
/* New-style handling of TIME_WAIT sockets. */

int tcp_tw_count;


/* Must be called with locally disabled BHs. */
static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
{
	struct tcp_ehash_bucket *ehead;
	struct tcp_bind_hashbucket *bhead;
	struct tcp_bind_bucket *tb;

	/* Unlink from established hashes. */
	ehead = &tcp_ehash[tw->tw_hashent];
	write_lock(&ehead->lock);
	if (hlist_unhashed(&tw->tw_node)) {
		write_unlock(&ehead->lock);
		return;
	}
	__hlist_del(&tw->tw_node);
	sk_node_init(&tw->tw_node);
	write_unlock(&ehead->lock);

	/* Disassociate with bind bucket. */
	bhead = &tcp_bhash[tcp_bhashfn(tw->tw_num)];
	spin_lock(&bhead->lock);
	tb = tw->tw_tb;
	__hlist_del(&tw->tw_bind_node);
	tw->tw_tb = NULL;
	tcp_bucket_destroy(tb);
	spin_unlock(&bhead->lock);

#ifdef SOCK_REFCNT_DEBUG
	if (atomic_read(&tw->tw_refcnt) != 1) {
		printk(KERN_DEBUG "tw_bucket %p refcnt=%d\n", tw,
		       atomic_read(&tw->tw_refcnt));
	}
#endif
	tcp_tw_put(tw);
}

/*
 * * Main purpose of TIME-WAIT state is to close the connection gracefully,
 *   when one of the ends sits in LAST-ACK or CLOSING retransmitting FIN
 *   (and, probably, a tail of data) and one or more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   It is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to allow losing one (or more)
 *   segments sent by the peer and our ACKs. This time may be calculated from RTO.
 * * When a TIME-WAIT socket receives RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of RFCs.
 * When you compare it to RFCs, please, read section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) TW bucket
 * is _not_ stateless. It means, that strictly speaking we must
 * spinlock it. I do not want! Well, probability of misbehaviour
 * is ridiculously low and, seems, we could use some mb() tricks
 * to avoid misread sequence numbers, states etc.  --ANK
 */
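
/* The PAWS check referred to above boils down to a wrap-safe,
 * serial-number comparison of timestamps (a minimal sketch of the idea,
 * not the exact tcp_paws_check() logic, which also tolerates some skew):
 *
 *	static inline int ts_went_backwards(u32 ts_recent, u32 rcv_tsval)
 *	{
 *		return (s32)(ts_recent - rcv_tsval) > 0;
 *	}
 *
 * The same (s32) cast trick appears below when judging whether a new SYN
 * carries a fresher timestamp than this bucket's tw_ts_recent.
 */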
enum tcp_tw_status
tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
			   struct tcphdr *th, unsigned len)
{
	struct tcp_options_received tmp_opt;
	int paws_reject = 0;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr) >> 2) && tw->tw_ts_recent_stamp) {
		tcp_parse_options(skb, &tmp_opt, 0);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent	= tw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_check(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tw->tw_rcv_nxt,
				   tw->tw_rcv_nxt + tw->tw_rcv_wnd))
			return TCP_TW_ACK;

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt))
			goto kill_with_rst;

		/* Dup ACK? */
		if (!after(TCP_SKB_CB(skb)->end_seq, tw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			tcp_tw_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tw->tw_rcv_nxt + 1) {
kill_with_rst:
			tcp_tw_deschedule(tw);
			tcp_tw_put(tw);
			return TCP_TW_RST;
		}

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	= TCP_TIME_WAIT;
		tw->tw_rcv_nxt	= TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tw->tw_ts_recent_stamp	= xtime.tv_sec;
			tw->tw_ts_recent	= tmp_opt.rcv_tsval;
		}

		/* I am shamed, but failed to make it more elegant.
		 * Yes, it is a direct reference to IP, which is impossible
		 * to generalize to IPv6. Taking into account that IPv6
		 * does not understand recycling in any case, it is not
		 * a big problem in practice. --ANK */
		if (tw->tw_family == AF_INET &&
		    sysctl_tcp_tw_recycle && tw->tw_ts_recent_stamp &&
		    tcp_v4_tw_remember_stamp(tw))
			tcp_tw_schedule(tw, tw->tw_timeout);
		else
			tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 * Now real TIME-WAIT state.
	 *
	 * RFC 1122:
	 * "When a connection is [...] on TIME-WAIT state [...]
	 * [a TCP] MAY accept a new SYN from the remote TCP to
	 * reopen the connection directly, if it:
	 *
	 * (1)  assigns its initial sequence number for the new
	 * connection to be larger than the largest sequence
	 * number it used on the previous connection incarnation,
	 * and
	 *
	 * (2)  returns to TIME-WAIT state if the SYN turns out
	 * to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only reset or bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				tcp_tw_deschedule(tw);
				tcp_tw_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tw->tw_ts_recent	= tmp_opt.rcv_tsval;
			tw->tw_ts_recent_stamp	= xtime.tv_sec;
		}

		tcp_tw_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.

	   All the segments are ACKed immediately.

	   The only exception is a new SYN. We accept it, if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check (that it carries a
	   newer sequence number) works at rates <40Mbit/sec.
	   However, if PAWS works, it is reliable and, even more,
	   we may relax the silly seq space cutoff.

	   RED-PEN: we violate the main RFC requirement: if this SYN turns
	   out to be an old duplicate (i.e. we receive RST in reply to
	   SYN-ACK), we must return the socket to time-wait state. It is
	   not good, but not fatal yet.
	 */

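	/* The ISN chosen below is tw_snd_nxt + 65535 + 2: one maximal
	 * unscaled window past the last sequence number we sent (the +2 is
	 * a little slack), so the new incarnation's numbers are "larger
	 * than the largest sequence number used on the previous connection
	 * incarnation", per RFC 1122 rule (1) quoted above.
	 */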
	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp && (s32)(tw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->when = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is ACKless SYN it may be both old duplicate
		 * and new good SYN with random sequence number <rcv_nxt.
		 * Do not reschedule in the last case.
		 */
		if (paws_reject || th->ack)
			tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);

		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by caller.
		 */
		return TCP_TW_ACK;
	}
	tcp_tw_put(tw);
	return TCP_TW_SUCCESS;
}

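/* Callers dispatch on the returned tcp_tw_status; a sketch of the shape
 * of the do_time_wait handling in tcp_v4_rcv(), not verbatim:
 *
 *	switch (tcp_timewait_state_process(tw, skb, th, skb->len)) {
 *	case TCP_TW_SYN:	// retry lookup against listening sockets
 *	case TCP_TW_ACK:	// answer from the timewait bucket
 *	case TCP_TW_RST:	// send a reset
 *	case TCP_TW_SUCCESS:	// segment fully handled, just drop it
 *	}
 */
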
/* Enter the time wait state. This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the
 * relevant info into it from the SK, and mess with hash chains
 * and list linkage.
 */
static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
{
	struct tcp_ehash_bucket *ehead = &tcp_ehash[sk->sk_hashent];
	struct tcp_bind_hashbucket *bhead;

	/* Step 1: Put TW into bind hash. Original socket stays there too.
	   Note, that any socket with inet_sk(sk)->num != 0 MUST be bound in
	   binding cache, even if it is closed.
	 */
	bhead = &tcp_bhash[tcp_bhashfn(inet_sk(sk)->num)];
	spin_lock(&bhead->lock);
	tw->tw_tb = tcp_sk(sk)->bind_hash;
	BUG_TRAP(tcp_sk(sk)->bind_hash);
	tw_add_bind_node(tw, &tw->tw_tb->owners);
	spin_unlock(&bhead->lock);

	write_lock(&ehead->lock);

	/* Step 2: Remove SK from established hash. */
	if (__sk_del_node_init(sk))
		sock_prot_dec_use(sk->sk_prot);

	/* Step 3: Hash TW into TIMEWAIT half of established hash table. */
	tw_add_node(tw, &(ehead + tcp_ehash_size)->chain);
	atomic_inc(&tw->tw_refcnt);

	write_unlock(&ehead->lock);
}

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	struct tcp_tw_bucket *tw = NULL;
	struct tcp_sock *tp = tcp_sk(sk);
	int recycle_ok = 0;

	if (sysctl_tcp_tw_recycle && tp->rx_opt.ts_recent_stamp)
		recycle_ok = tp->af_specific->remember_stamp(sk);

	if (tcp_tw_count < sysctl_tcp_max_tw_buckets)
		tw = kmem_cache_alloc(tcp_timewait_cachep, SLAB_ATOMIC);

	if (tw != NULL) {
		struct inet_sock *inet = inet_sk(sk);
		int rto = (tp->rto << 2) - (tp->rto >> 1);
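		/* (rto << 2) - (rto >> 1) is 4*RTO - RTO/2 = 3.5*RTO; see
		 * the rationale in the comment above the slot computation
		 * in tcp_tw_schedule() below.
		 */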

		/* Give us an identity. */
		tw->tw_daddr		= inet->daddr;
		tw->tw_rcv_saddr	= inet->rcv_saddr;
		tw->tw_bound_dev_if	= sk->sk_bound_dev_if;
		tw->tw_num		= inet->num;
		tw->tw_state		= TCP_TIME_WAIT;
		tw->tw_substate		= state;
		tw->tw_sport		= inet->sport;
		tw->tw_dport		= inet->dport;
		tw->tw_family		= sk->sk_family;
		tw->tw_reuse		= sk->sk_reuse;
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		atomic_set(&tw->tw_refcnt, 1);

		tw->tw_hashent		= sk->sk_hashent;
		tw->tw_rcv_nxt		= tp->rcv_nxt;
		tw->tw_snd_nxt		= tp->snd_nxt;
		tw->tw_rcv_wnd		= tcp_receive_window(tp);
		tw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tw->tw_ts_recent_stamp	= tp->rx_opt.ts_recent_stamp;
		tw_dead_node_init(tw);

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			ipv6_addr_copy(&tw->tw_v6_daddr, &np->daddr);
			ipv6_addr_copy(&tw->tw_v6_rcv_saddr, &np->rcv_saddr);
			tw->tw_v6_ipv6only = np->ipv6only;
		} else {
			memset(&tw->tw_v6_daddr, 0, sizeof(tw->tw_v6_daddr));
			memset(&tw->tw_v6_rcv_saddr, 0, sizeof(tw->tw_v6_rcv_saddr));
			tw->tw_v6_ipv6only = 0;
		}
#endif
		/* Linkage updates. */
		__tcp_tw_hashdance(sk, tw);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (recycle_ok) {
			tw->tw_timeout = rto;
		} else {
			tw->tw_timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;
		}

		tcp_tw_schedule(tw, timeo);
		tcp_tw_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		if (net_ratelimit())
			printk(KERN_INFO "TCP: time wait bucket table overflow\n");
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}

/* Kill off TIME_WAIT sockets once their lifetime has expired. */
static int tcp_tw_death_row_slot;

static void tcp_twkill(unsigned long);

/* TIME_WAIT reaping mechanism. */
#define TCP_TWKILL_SLOTS	8	/* Please keep this a power of 2. */
#define TCP_TWKILL_PERIOD	(TCP_TIMEWAIT_LEN/TCP_TWKILL_SLOTS)

#define TCP_TWKILL_QUOTA	100
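
/* For a sense of scale, assuming the usual TCP_TIMEWAIT_LEN of 60*HZ:
 * the slow wheel has 8 slots of 60s/8 = 7.5s each, and a single timer
 * run reaps at most TCP_TWKILL_QUOTA = 100 buckets before punting the
 * remainder of the slot to the twkill_work workqueue below.
 */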

static struct hlist_head tcp_tw_death_row[TCP_TWKILL_SLOTS];
static DEFINE_SPINLOCK(tw_death_lock);
static struct timer_list tcp_tw_timer = TIMER_INITIALIZER(tcp_twkill, 0, 0);
static void twkill_work(void *);
static DECLARE_WORK(tcp_twkill_work, twkill_work, NULL);
static u32 twkill_thread_slots;

/* Returns non-zero if quota exceeded. */
static int tcp_do_twkill_work(int slot, unsigned int quota)
{
	struct tcp_tw_bucket *tw;
	struct hlist_node *node;
	unsigned int killed;
	int ret;

	/* NOTE: compare this to the previous version where the lock
	 * was released after detaching the chain. It was racy,
	 * because tw buckets were scheduled in a non-serialized context
	 * in 2.3 (with netfilter), and with softnet it is common, because
	 * soft irqs are not sequenced.
	 */
	killed = 0;
	ret = 0;
rescan:
	tw_for_each_inmate(tw, node, &tcp_tw_death_row[slot]) {
		__tw_del_dead_node(tw);
		spin_unlock(&tw_death_lock);
		tcp_timewait_kill(tw);
		tcp_tw_put(tw);
		killed++;
		spin_lock(&tw_death_lock);
		if (killed > quota) {
			ret = 1;
			break;
		}

		/* While we dropped tw_death_lock, another cpu may have
		 * killed off the next TW bucket in the list, therefore
		 * do a fresh re-read of the hlist head node with the
		 * lock reacquired.  We still use the hlist traversal
		 * macro in order to get the prefetches.
		 */
		goto rescan;
	}

	tcp_tw_count -= killed;
	NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITED, killed);

	return ret;
}

static void tcp_twkill(unsigned long dummy)
{
	int need_timer, ret;

	spin_lock(&tw_death_lock);

	if (tcp_tw_count == 0)
		goto out;

	need_timer = 0;
	ret = tcp_do_twkill_work(tcp_tw_death_row_slot, TCP_TWKILL_QUOTA);
	if (ret) {
		twkill_thread_slots |= (1 << tcp_tw_death_row_slot);
		mb();
		schedule_work(&tcp_twkill_work);
		need_timer = 1;
	} else {
		/* We purged the entire slot, anything left? */
		if (tcp_tw_count)
			need_timer = 1;
	}
	tcp_tw_death_row_slot =
		((tcp_tw_death_row_slot + 1) & (TCP_TWKILL_SLOTS - 1));
	if (need_timer)
		mod_timer(&tcp_tw_timer, jiffies + TCP_TWKILL_PERIOD);
out:
	spin_unlock(&tw_death_lock);
}

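/* twkill_slots_invalid() is deliberately left undefined: if the constant
 * condition guarding it below were ever true, the call would survive
 * compilation and the kernel would fail to link. A poor man's
 * compile-time assertion, in the spirit of BUILD_BUG_ON().
 */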
extern void twkill_slots_invalid(void);

static void twkill_work(void *dummy)
{
	int i;

	if ((TCP_TWKILL_SLOTS - 1) > (sizeof(twkill_thread_slots) * 8))
		twkill_slots_invalid();

	while (twkill_thread_slots) {
		spin_lock_bh(&tw_death_lock);
		for (i = 0; i < TCP_TWKILL_SLOTS; i++) {
			if (!(twkill_thread_slots & (1 << i)))
				continue;

			while (tcp_do_twkill_work(i, TCP_TWKILL_QUOTA) != 0) {
				if (need_resched()) {
					spin_unlock_bh(&tw_death_lock);
					schedule();
					spin_lock_bh(&tw_death_lock);
				}
			}

			twkill_thread_slots &= ~(1 << i);
		}
		spin_unlock_bh(&tw_death_lock);
	}
}

/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void tcp_tw_deschedule(struct tcp_tw_bucket *tw)
{
	spin_lock(&tw_death_lock);
	if (tw_del_dead_node(tw)) {
		tcp_tw_put(tw);
		if (--tcp_tw_count == 0)
			del_timer(&tcp_tw_timer);
	}
	spin_unlock(&tw_death_lock);
	tcp_timewait_kill(tw);
}

/* Short-time timewait calendar */

static int tcp_twcal_hand = -1;
static int tcp_twcal_jiffie;
static void tcp_twcal_tick(unsigned long);
static struct timer_list tcp_twcal_timer =
		TIMER_INITIALIZER(tcp_twcal_tick, 0, 0);
static struct hlist_head tcp_twcal_row[TCP_TW_RECYCLE_SLOTS];

static void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo)
{
	struct hlist_head *list;
	int slot;

	/* timeout := RTO * 3.5
	 *
	 * 3.5 = 1+2+0.5 to wait for two retransmits.
	 *
	 * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
	 * our ACK acking that FIN can be lost. If N subsequent
	 * retransmitted FINs (or previous segments) are lost, the
	 * probability of such an event is p^(N+1), where p is the
	 * probability of losing a single packet, and the time to
	 * detect the loss is about RTO*(2^N - 1) with exponential
	 * backoff. The normal timewait length is calculated so that
	 * we wait at least for one retransmitted FIN (maximal RTO
	 * is 120sec).
	 * [ BTW Linux, following BSD, violates this requirement by
	 *   waiting only for 60sec; we should wait at least 240 secs.
	 *   Well, 240 consumes too much of resources 8)
	 * ]
	 * This interval is not reduced to catch old duplicates and
	 * responses to our wandering segments living for two MSLs.
	 * However, if we use PAWS to detect
	 * old duplicates, we can reduce the interval to bounds required
	 * by RTO, rather than MSL. So, if the peer understands PAWS, we
	 * kill the tw bucket after 3.5*RTO (it is important that this
	 * number is greater than the TS tick!) and detect old duplicates
	 * with help of PAWS.
	 */
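
	/* Illustration, assuming (say) RTO = 200ms: 3.5*RTO = 700ms covers
	 * the first retransmitted FIN (after RTO), a second one (after
	 * 2*RTO more), plus half an RTO of slack -- the 1+2+0.5 above.
	 * The slot computation below then rounds the timeout up to whole
	 * calendar ticks of 2^TCP_TW_RECYCLE_TICK jiffies.
	 */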
	slot = (timeo + (1 << TCP_TW_RECYCLE_TICK) - 1) >> TCP_TW_RECYCLE_TICK;

	spin_lock(&tw_death_lock);

	/* Unlink it, if it was scheduled */
	if (tw_del_dead_node(tw))
		tcp_tw_count--;
	else
		atomic_inc(&tw->tw_refcnt);

	if (slot >= TCP_TW_RECYCLE_SLOTS) {
		/* Schedule to slow timer */
		if (timeo >= TCP_TIMEWAIT_LEN) {
			slot = TCP_TWKILL_SLOTS - 1;
		} else {
			slot = (timeo + TCP_TWKILL_PERIOD - 1) / TCP_TWKILL_PERIOD;
			if (slot >= TCP_TWKILL_SLOTS)
				slot = TCP_TWKILL_SLOTS - 1;
		}
		tw->tw_ttd = jiffies + timeo;
		slot = (tcp_tw_death_row_slot + slot) & (TCP_TWKILL_SLOTS - 1);
		list = &tcp_tw_death_row[slot];
	} else {
		tw->tw_ttd = jiffies + (slot << TCP_TW_RECYCLE_TICK);

		if (tcp_twcal_hand < 0) {
			tcp_twcal_hand = 0;
			tcp_twcal_jiffie = jiffies;
			tcp_twcal_timer.expires = tcp_twcal_jiffie + (slot << TCP_TW_RECYCLE_TICK);
			add_timer(&tcp_twcal_timer);
		} else {
			if (time_after(tcp_twcal_timer.expires, jiffies + (slot << TCP_TW_RECYCLE_TICK)))
				mod_timer(&tcp_twcal_timer, jiffies + (slot << TCP_TW_RECYCLE_TICK));
			slot = (tcp_twcal_hand + slot) & (TCP_TW_RECYCLE_SLOTS - 1);
		}
		list = &tcp_twcal_row[slot];
	}

	hlist_add_head(&tw->tw_death_node, list);

	if (tcp_tw_count++ == 0)
		mod_timer(&tcp_tw_timer, jiffies + TCP_TWKILL_PERIOD);
	spin_unlock(&tw_death_lock);
}

void tcp_twcal_tick(unsigned long dummy)
{
	int n, slot;
	unsigned long j;
	unsigned long now = jiffies;
	int killed = 0;
	int adv = 0;

	spin_lock(&tw_death_lock);
	if (tcp_twcal_hand < 0)
		goto out;

	slot = tcp_twcal_hand;
	j = tcp_twcal_jiffie;

	for (n = 0; n < TCP_TW_RECYCLE_SLOTS; n++) {
		if (time_before_eq(j, now)) {
			struct hlist_node *node, *safe;
			struct tcp_tw_bucket *tw;

			tw_for_each_inmate_safe(tw, node, safe,
						&tcp_twcal_row[slot]) {
				__tw_del_dead_node(tw);
				tcp_timewait_kill(tw);
				tcp_tw_put(tw);
				killed++;
			}
		} else {
			if (!adv) {
				adv = 1;
				tcp_twcal_jiffie = j;
				tcp_twcal_hand = slot;
			}

			if (!hlist_empty(&tcp_twcal_row[slot])) {
				mod_timer(&tcp_twcal_timer, j);
				goto out;
			}
		}
		j += (1 << TCP_TW_RECYCLE_TICK);
		slot = (slot + 1) & (TCP_TW_RECYCLE_SLOTS - 1);
	}
	tcp_twcal_hand = -1;

out:
	if ((tcp_tw_count -= killed) == 0)
		del_timer(&tcp_tw_timer);
	NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITKILLED, killed);
	spin_unlock(&tw_death_lock);
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could save lots of memory writes here. tp of the listening
 * socket contains all necessary default parameters.
 */
struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
{
	/* allocate the newsk from the same slab of the master sock,
	 * if not, at sk_free time we'll try to free it from the wrong
	 * slabcache (i.e. is it TCPv4 or v6?), this is handled thru sk->sk_prot -acme */
	struct sock *newsk = sk_alloc(PF_INET, GFP_ATOMIC, sk->sk_prot, 0);

	if (newsk != NULL) {
		struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct tcp_sock *newtp;
		struct sk_filter *filter;

		memcpy(newsk, sk, sizeof(struct tcp_sock));
		newsk->sk_state = TCP_SYN_RECV;

		/* SANITY */
		sk_node_init(&newsk->sk_node);
		tcp_sk(newsk)->bind_hash = NULL;

		/* Clone the TCP header template */
		inet_sk(newsk)->dport = ireq->rmt_port;

		sock_lock_init(newsk);
		bh_lock_sock(newsk);

		rwlock_init(&newsk->sk_dst_lock);
		newsk->sk_dst_cache = NULL;
		atomic_set(&newsk->sk_rmem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		atomic_set(&newsk->sk_wmem_alloc, 0);
		skb_queue_head_init(&newsk->sk_write_queue);
		atomic_set(&newsk->sk_omem_alloc, 0);
		newsk->sk_wmem_queued = 0;
		newsk->sk_forward_alloc = 0;

		sock_reset_flag(newsk, SOCK_DONE);
		newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
		newsk->sk_send_head = NULL;
		rwlock_init(&newsk->sk_callback_lock);
		skb_queue_head_init(&newsk->sk_error_queue);
		newsk->sk_write_space = sk_stream_write_space;

		if ((filter = newsk->sk_filter) != NULL)
			sk_filter_charge(newsk, filter);

		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still raw copy of parent, so invalidate
			 * destructor and make plain sk_free() */
			newsk->sk_destruct = NULL;
			sk_free(newsk);
			return NULL;
		}

		/* Now setup tcp_sock */
		newtp = tcp_sk(newsk);
		newtp->pred_flags = 0;
		newtp->rcv_nxt = treq->rcv_isn + 1;
		newtp->snd_nxt = treq->snt_isn + 1;
		newtp->snd_una = treq->snt_isn + 1;
		newtp->snd_sml = treq->snt_isn + 1;

		tcp_prequeue_init(newtp);

		tcp_init_wl(newtp, treq->snt_isn, treq->rcv_isn);

		newtp->retransmits = 0;
		newtp->backoff = 0;
		newtp->srtt = 0;
		newtp->mdev = TCP_TIMEOUT_INIT;
		newtp->rto = TCP_TIMEOUT_INIT;

		newtp->packets_out = 0;
		newtp->left_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = 0x7fffffff;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = 2;
		newtp->snd_cwnd_cnt = 0;

		newtp->frto_counter = 0;
		newtp->frto_highmark = 0;

		newtp->ca_ops = &tcp_reno;

		tcp_set_ca_state(newtp, TCP_CA_Open);
		tcp_init_xmit_timers(newsk);
		skb_queue_head_init(&newtp->out_of_order_queue);
		newtp->rcv_wup = treq->rcv_isn + 1;
		newtp->write_seq = treq->snt_isn + 1;
		newtp->pushed_seq = newtp->write_seq;
		newtp->copied_seq = treq->rcv_isn + 1;

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.eff_sacks = 0;

		newtp->probes_out = 0;
		newtp->rx_opt.num_sacks = 0;
		newtp->urg_data = 0;
		/* Deinitialize accept_queue to trap illegal accesses. */
		memset(&newtp->accept_queue, 0, sizeof(newtp->accept_queue));

		/* Back to base struct sock members. */
		newsk->sk_err = 0;
		newsk->sk_priority = 0;
		atomic_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy), same rationale as the first comment in this
		 * function.
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);

		atomic_inc(&tcp_sockets_allocated);

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			tcp_reset_keepalive_timer(newsk,
						  keepalive_time_when(newtp));
		newsk->sk_socket = NULL;
		newsk->sk_sleep = NULL;

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				newtp->rx_opt.sack_ok |= 2;
		}
		newtp->window_clamp = req->window_clamp;
		newtp->rcv_ssthresh = req->rcv_wnd;
		newtp->rcv_wnd = req->rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = ntohs(skb->h.th->window) << newtp->rx_opt.snd_wscale;
		newtp->max_window = newtp->snd_wnd;

		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = xtime.tv_sec;
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
		if (skb->len >= TCP_MIN_RCVMSS + newtp->tcp_header_len)
			newtp->ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		TCP_ECN_openreq_child(newtp, req);
		if (newtp->ecn_flags & TCP_ECN_OK)
			sock_set_flag(newsk, SOCK_NO_LARGESEND);

		TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}

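/* The child returned here is still half-initialized: the af-specific
 * syn_recv_sock handlers (e.g. tcp_v4_syn_recv_sock()) wrap this call,
 * fill in the route and address details, and hash the child into the
 * established table before it is handed to tcp_child_process() below.
 */
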
/*
 *	Process an incoming packet for SYN_RECV sockets represented
 *	as a request_sock.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   struct request_sock **prev)
{
	struct tcphdr *th = skb->h.th;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	int paws_reject = 0;
	struct tcp_options_received tmp_opt;
	struct sock *child;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr) >> 2)) {
		tcp_parse_options(skb, &tmp_opt, 0);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			/* We do not store the true stamp, but it is not
			 * required, it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = xtime.tv_sec - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
			paws_reject = tcp_paws_check(&tmp_opt, th->rst);
		}
	}

	/* Check for a pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 * describe SYN-RECV state. All the description
		 * is wrong, we cannot believe it and should
		 * rely only on common sense and implementation
		 * experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 */
		req->rsk_ops->rtx_syn_ack(sk, req, NULL);
		return NULL;
	}

	/* Further reproduces section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however; it fails to work only
	   when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party.  We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid.  Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

	   A: gets SYN, seq=7
	   B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

	   A: sends SYN|ACK, seq=7, ack_seq=8
	   B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, ACK test passes.  So
	   does sequence test, SYN is truncated, and thus we consider
	   it a bare ACK.

	   If tp->defer_accept, we silently drop this bare ACK.  Otherwise,
	   we create an established connection.  Both ends (listening sockets)
	   accept the new incoming connection and try to talk to each other. 8-)

	   Note: This case is both harmless, and rare.  Possibility is about the
	   same as us discovering intelligent life on another planet tomorrow.

	   But generally, we should (RFC lies!) accept an ACK
	   from a SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not too.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating protocol. All the checks must be made
	   before an attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by listening socket
	 */
	if ((flg & TCP_FLAG_ACK) &&
	    (TCP_SKB_CB(skb)->ack_seq != tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would be not so bad an idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too early or too late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST))
			req->rsk_ops->send_ack(skb, req);
		if (paws_reject)
			NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN))
		goto embryonic_reset;

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
	if (tp->defer_accept && TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

	/* OK, ACK is valid, create big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
	 * ESTABLISHED STATE. If it will be dropped after
	 * socket is created, wait for troubles.
	 */
	child = tp->af_specific->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL)
		goto listen_overflow;

	tcp_synq_unlink(tp, req, prev);
	tcp_synq_removed(sk, req);

	tcp_acceptq_queue(sk, req, child);
	return child;

listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
	if (!(flg & TCP_FLAG_RST))
		req->rsk_ops->send_reset(skb);

	tcp_synq_drop(sk, req, prev);
	return NULL;
}

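/* To summarize, tcp_check_req() has three outcomes: a new child socket
 * (handshake complete), NULL (segment consumed or silently dropped, e.g.
 * a retransmitted SYN or a deferred bare ACK), or the listening sk
 * itself, which tells the caller to run the segment past the listener,
 * e.g. so a reset is sent for an invalid ACK.
 */
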
/*
 * Queue segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb, skb->h.th, skb->len);

		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent, 0);
	} else {
		/* Alas, it is possible again, because we do lookup
		 * in main socket hash table and lock on listening
		 * socket does not protect us more.
		 */
		sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}

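/* A typical caller pairs tcp_check_req() with tcp_child_process(); a
 * sketch of the pattern (close to tcp_v4_do_rcv(), but not verbatim):
 *
 *	nsk = tcp_check_req(sk, skb, req, prev);
 *	if (nsk && nsk != sk) {
 *		if (tcp_child_process(sk, nsk, skb))
 *			goto reset;	// child wants a RST sent
 *		return 0;
 *	}
 */
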
EXPORT_SYMBOL(tcp_check_req);
EXPORT_SYMBOL(tcp_child_process);
EXPORT_SYMBOL(tcp_create_openreq_child);
EXPORT_SYMBOL(tcp_timewait_state_process);
EXPORT_SYMBOL(tcp_tw_deschedule);