/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:
 *		Pedro Roque	:	Fast Retransmit/Recovery.
 *					Two receive queues.
 *					Retransmit queue handled by TCP.
 *					Better retransmit timer handling.
 *					New congestion avoidance.
 *					Header prediction.
 *					Variable renaming.
 *
 *		Eric		:	Fast Retransmit.
 *		Randy Scott	:	MSS option defines.
 *		Eric Schenk	:	Fixes to slow start algorithm.
 *		Eric Schenk	:	Yet another double ACK bug.
 *		Eric Schenk	:	Delayed ACK bug fixes.
 *		Eric Schenk	:	Floyd style fast retrans war avoidance.
 *		David S. Miller	:	Don't allow zero congestion window.
 *		Eric Schenk	:	Fix retransmitter so that it sends
 *					next packet on ack of previous packet.
 *		Andi Kleen	:	Moved open_request checking here
 *					and process RSTs for open_requests.
 *		Andi Kleen	:	Better prune_queue, and other fixes.
 *		Andrey Savochkin:	Fix RTT measurements in the presence of
 *					timestamps.
 *		Andrey Savochkin:	Check sequence numbers correctly when
 *					removing SACKs due to in sequence incoming
 *					data segments.
 *		Andi Kleen:		Make sure we never ack data there is not
 *					enough room for. Also make this condition
 *					a fatal error if it might still happen.
 *		Andi Kleen:		Add tcp_measure_rcv_mss to make
 *					connections with MSS<min(MTU,ann. MSS)
 *					work without delayed acks.
 *		Andi Kleen:		Process packets with PSH set in the
 *					fast path.
 *		J Hadi Salim:		ECN support
 *		Andrei Gurtov,
 *		Pasi Sarolahti,
 *		Panu Kuhlberg:		Experimental audit of TCP (re)transmission
 *					engine. Lots of bugs are found.
 *		Pasi Sarolahti:		F-RTO for dealing with spurious RTOs
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/ipsec.h>
#include <asm/unaligned.h>
#include <net/netdma.h>

int sysctl_tcp_timestamps __read_mostly = 1;
int sysctl_tcp_window_scaling __read_mostly = 1;
int sysctl_tcp_sack __read_mostly = 1;
int sysctl_tcp_fack __read_mostly = 1;
int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH;
int sysctl_tcp_ecn __read_mostly;
int sysctl_tcp_dsack __read_mostly = 1;
int sysctl_tcp_app_win __read_mostly = 31;
int sysctl_tcp_adv_win_scale __read_mostly = 2;

int sysctl_tcp_stdurg __read_mostly;
int sysctl_tcp_rfc1337 __read_mostly;
int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
int sysctl_tcp_frto __read_mostly = 2;
int sysctl_tcp_frto_response __read_mostly;
int sysctl_tcp_nometrics_save __read_mostly;

int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
int sysctl_tcp_abc __read_mostly;

#define FLAG_DATA		0x01 /* Incoming frame contained data.		*/
#define FLAG_WIN_UPDATE		0x02 /* Incoming ACK was a window update.	*/
#define FLAG_DATA_ACKED		0x04 /* This ACK acknowledged new data.		*/
#define FLAG_RETRANS_DATA_ACKED	0x08 /* "" "" some of which was retransmitted.	*/
#define FLAG_SYN_ACKED		0x10 /* This ACK acknowledged SYN.		*/
#define FLAG_DATA_SACKED	0x20 /* New SACK.				*/
#define FLAG_ECE		0x40 /* ECE in this ACK				*/
#define FLAG_DATA_LOST		0x80 /* SACK detected data lossage.		*/
#define FLAG_SLOWPATH		0x100 /* Do not skip RFC checks for window update.*/
#define FLAG_ONLY_ORIG_SACKED	0x200 /* SACKs only non-rexmit sent before RTO */
#define FLAG_SND_UNA_ADVANCED	0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
#define FLAG_DSACKING_ACK	0x800 /* SACK blocks contained D-SACK info */
#define FLAG_NONHEAD_RETRANS_ACKED	0x1000 /* Non-head rexmitted data was ACKed */
#define FLAG_SACK_RENEGING	0x2000 /* snd_una advanced to a sacked seq */

#define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)
#define FLAG_NOT_DUP		(FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
#define FLAG_CA_ALERT		(FLAG_DATA_SACKED|FLAG_ECE)
#define FLAG_FORWARD_PROGRESS	(FLAG_ACKED|FLAG_DATA_SACKED)
#define FLAG_ANY_PROGRESS	(FLAG_FORWARD_PROGRESS|FLAG_SND_UNA_ADVANCED)

#define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
#define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))

/* Adapt the MSS value used to make delayed ack decision to the
 * real world.
 */
static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const unsigned int lss = icsk->icsk_ack.last_seg_size;
	unsigned int len;

	icsk->icsk_ack.last_seg_size = 0;

	/* skb->len may jitter because of SACKs, even if peer
	 * sends good full-sized frames.
	 */
	len = skb_shinfo(skb)->gso_size ? : skb->len;
	if (len >= icsk->icsk_ack.rcv_mss) {
		icsk->icsk_ack.rcv_mss = len;
	} else {
		/* Otherwise, we make more careful check taking into account,
		 * that SACKs block is variable.
		 *
		 * "len" is invariant segment length, including TCP header.
		 */
		len += skb->data - skb_transport_header(skb);
		if (len >= TCP_MIN_RCVMSS + sizeof(struct tcphdr) ||
		    /* If PSH is not set, packet should be
		     * full sized, provided peer TCP is not badly broken.
		     * This observation (if it is correct 8)) allows
		     * to handle super-low mtu links fairly.
		     */
		    (len >= TCP_MIN_MSS + sizeof(struct tcphdr) &&
		     !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) {
			/* Subtract also invariant (if peer is RFC compliant),
			 * tcp header plus fixed timestamp option length.
			 * Resulting "len" is MSS free of SACK jitter.
			 */
			len -= tcp_sk(sk)->tcp_header_len;
			icsk->icsk_ack.last_seg_size = len;
			if (len == lss) {
				icsk->icsk_ack.rcv_mss = len;
				return;
			}
		}
		if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)
			icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2;
		icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
	}
}
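
/* E.g. a peer advertising a 1460-byte MSS with timestamps in use sends
 * full-sized segments of 1448 payload bytes: the careful branch above sees
 * len = 1448 + 32 (TCP header plus aligned timestamp option), subtracts
 * tcp_header_len (32) and records last_seg_size = 1448, and once two
 * consecutive segments agree rcv_mss settles at 1448 despite SACK-induced
 * jitter in skb->len.
 */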

static void tcp_incr_quickack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	unsigned quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);

	if (quickacks == 0)
		quickacks = 2;
	if (quickacks > icsk->icsk_ack.quick)
		icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
}

void tcp_enter_quickack_mode(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	tcp_incr_quickack(sk);
	icsk->icsk_ack.pingpong = 0;
	icsk->icsk_ack.ato = TCP_ATO_MIN;
}

/* Send ACKs quickly, if "quick" count is not exhausted
 * and the session is not interactive.
 */

static inline int tcp_in_quickack_mode(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
}

static inline void TCP_ECN_queue_cwr(struct tcp_sock *tp)
{
	if (tp->ecn_flags & TCP_ECN_OK)
		tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
}

static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, struct sk_buff *skb)
{
	if (tcp_hdr(skb)->cwr)
		tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}

static inline void TCP_ECN_withdraw_cwr(struct tcp_sock *tp)
{
	tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}

static inline void TCP_ECN_check_ce(struct tcp_sock *tp, struct sk_buff *skb)
{
	if (tp->ecn_flags & TCP_ECN_OK) {
		if (INET_ECN_is_ce(TCP_SKB_CB(skb)->flags))
			tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
		/* Funny extension: if ECT is not set on a segment,
		 * it is surely retransmit. It is not in ECN RFC,
		 * but Linux follows this rule. */
		else if (INET_ECN_is_not_ect((TCP_SKB_CB(skb)->flags)))
			tcp_enter_quickack_mode((struct sock *)tp);
	}
}

static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, struct tcphdr *th)
{
	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr))
		tp->ecn_flags &= ~TCP_ECN_OK;
}

static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, struct tcphdr *th)
{
	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
		tp->ecn_flags &= ~TCP_ECN_OK;
}

static inline int TCP_ECN_rcv_ecn_echo(struct tcp_sock *tp, struct tcphdr *th)
{
	if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
		return 1;
	return 0;
}

/* Buffer size and advertised window tuning.
 *
 * 1. Tuning sk->sk_sndbuf, when connection enters established state.
 */

static void tcp_fixup_sndbuf(struct sock *sk)
{
	int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 +
		     sizeof(struct sk_buff);

	if (sk->sk_sndbuf < 3 * sndmem)
		sk->sk_sndbuf = min(3 * sndmem, sysctl_tcp_wmem[2]);
}

/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
 *
 * All tcp_full_space() is split to two parts: "network" buffer, allocated
 * forward and advertised in receiver window (tp->rcv_wnd) and
 * "application buffer", required to isolate scheduling/application
 * latencies from network.
 * window_clamp is maximal advertised window. It can be less than
 * tcp_full_space(), in this case tcp_full_space() - window_clamp
 * is reserved for "application" buffer. The less window_clamp is
 * the smoother our behaviour from viewpoint of network, but the lower
 * throughput and the higher sensitivity of the connection to losses. 8)
 *
 * rcv_ssthresh is more strict window_clamp used at "slow start"
 * phase to predict further behaviour of this connection.
 * It is used for two goals:
 * - to enforce header prediction at sender, even when application
 *   requires some significant "application buffer". It is check #1.
 * - to prevent pruning of receive queue because of misprediction
 *   of receiver window. Check #2.
 *
 * The scheme does not work when sender sends good segments opening
 * window and then starts to feed us spaghetti. But it should work
 * in common situations. Otherwise, we have to rely on queue collapsing.
 */

/* Slow part of check#2. */
static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	/* Optimize this! */
	int truesize = tcp_win_from_space(skb->truesize) >> 1;
	int window = tcp_win_from_space(sysctl_tcp_rmem[2]) >> 1;

	while (tp->rcv_ssthresh <= window) {
		if (truesize <= skb->len)
			return 2 * inet_csk(sk)->icsk_ack.rcv_mss;

		truesize >>= 1;
		window >>= 1;
	}
	return 0;
}

static void tcp_grow_window(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Check #1 */
	if (tp->rcv_ssthresh < tp->window_clamp &&
	    (int)tp->rcv_ssthresh < tcp_space(sk) &&
	    !tcp_memory_pressure) {
		int incr;

		/* Check #2. Increase window, if skb with such overhead
		 * will fit to rcvbuf in future.
		 */
		if (tcp_win_from_space(skb->truesize) <= skb->len)
			incr = 2 * tp->advmss;
		else
			incr = __tcp_grow_window(sk, skb);

		if (incr) {
			tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
					       tp->window_clamp);
			inet_csk(sk)->icsk_ack.quick |= 1;
		}
	}
}

/* 3. Tuning rcvbuf, when connection enters established state. */

static void tcp_fixup_rcvbuf(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int rcvmem = tp->advmss + MAX_TCP_HEADER + 16 + sizeof(struct sk_buff);

	/* Try to select rcvbuf so that 4 mss-sized segments
	 * will fit to window and corresponding skbs will fit to our rcvbuf.
	 * (was 3; 4 is minimum to allow fast retransmit to work.)
	 */
	while (tcp_win_from_space(rcvmem) < tp->advmss)
		rcvmem += 128;
	if (sk->sk_rcvbuf < 4 * rcvmem)
		sk->sk_rcvbuf = min(4 * rcvmem, sysctl_tcp_rmem[2]);
}

/* 4. Try to fixup all. It is made immediately after connection enters
 *    established state.
 */
static void tcp_init_buffer_space(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int maxwin;

	if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
		tcp_fixup_rcvbuf(sk);
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
		tcp_fixup_sndbuf(sk);

	tp->rcvq_space.space = tp->rcv_wnd;

	maxwin = tcp_full_space(sk);

	if (tp->window_clamp >= maxwin) {
		tp->window_clamp = maxwin;

		if (sysctl_tcp_app_win && maxwin > 4 * tp->advmss)
			tp->window_clamp = max(maxwin -
					       (maxwin >> sysctl_tcp_app_win),
					       4 * tp->advmss);
	}

	/* Force reservation of one segment. */
	if (sysctl_tcp_app_win &&
	    tp->window_clamp > 2 * tp->advmss &&
	    tp->window_clamp + tp->advmss > maxwin)
		tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss);

	tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

/* 5. Recalculate window clamp after socket hit its memory bounds. */
static void tcp_clamp_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_ack.quick = 0;

	if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
	    !tcp_memory_pressure &&
	    atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
		sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
				    sysctl_tcp_rmem[2]);
	}
	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
}

/* Initialize RCV_MSS value.
 * RCV_MSS is our guess about the MSS used by the peer.
 * We have no direct information about the MSS.
 * It's better to underestimate the RCV_MSS rather than overestimate.
 * Overestimations make us ACK less frequently than needed.
 * Underestimations are easier to detect and fix by tcp_measure_rcv_mss().
 */
void tcp_initialize_rcv_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);

	hint = min(hint, tp->rcv_wnd / 2);
	hint = min(hint, TCP_MIN_RCVMSS);
	hint = max(hint, TCP_MIN_MSS);

	inet_csk(sk)->icsk_ack.rcv_mss = hint;
}
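
/* E.g. with advmss = mss_cache = 1460 and an initial rcv_wnd of 5840 the
 * hint starts at 1460, survives the rcv_wnd/2 clamp, and is then capped at
 * TCP_MIN_RCVMSS (536 in this tree) and floored at TCP_MIN_MSS (88), so the
 * initial guess is a deliberately conservative 536 bytes until
 * tcp_measure_rcv_mss() sees real segments.
 */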

/* Receiver "autotuning" code.
 *
 * The algorithm for RTT estimation w/o timestamps is based on
 * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL.
 * <http://www.lanl.gov/radiant/website/pubs/drs/lacsi2001.ps>
 *
 * More detail on this code can be found at
 * <http://www.psc.edu/~jheffner/senior_thesis.ps>,
 * though this reference is out of date.  A new paper
 * is pending.
 */
static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
{
	u32 new_sample = tp->rcv_rtt_est.rtt;
	long m = sample;

	if (m == 0)
		m = 1;

	if (new_sample != 0) {
		/* If we sample in larger samples in the non-timestamp
		 * case, we could grossly overestimate the RTT especially
		 * with chatty applications or bulk transfer apps which
		 * are stalled on filesystem I/O.
		 *
		 * Also, since we are only going for a minimum in the
		 * non-timestamp case, we do not smooth things out
		 * else with timestamps disabled convergence takes too
		 * long.
		 */
		if (!win_dep) {
			m -= (new_sample >> 3);
			new_sample += m;
		} else if (m < new_sample)
			new_sample = m << 3;
	} else {
		/* No previous measure. */
		new_sample = m << 3;
	}

	if (tp->rcv_rtt_est.rtt != new_sample)
		tp->rcv_rtt_est.rtt = new_sample;
}

static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
{
	if (tp->rcv_rtt_est.time == 0)
		goto new_measure;
	if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
		return;
	tcp_rcv_rtt_update(tp, jiffies - tp->rcv_rtt_est.time, 1);

new_measure:
	tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
	tp->rcv_rtt_est.time = tcp_time_stamp;
}

static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
					  const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	if (tp->rx_opt.rcv_tsecr &&
	    (TCP_SKB_CB(skb)->end_seq -
	     TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss))
		tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0);
}

/*
 * This function should be called every time data is copied to user space.
 * It calculates the appropriate TCP receive buffer space.
 */
void tcp_rcv_space_adjust(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int time;
	int space;

	if (tp->rcvq_space.time == 0)
		goto new_measure;

	time = tcp_time_stamp - tp->rcvq_space.time;
	if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0)
		return;

	space = 2 * (tp->copied_seq - tp->rcvq_space.seq);

	space = max(tp->rcvq_space.space, space);

	if (tp->rcvq_space.space != space) {
		int rcvmem;

		tp->rcvq_space.space = space;

		if (sysctl_tcp_moderate_rcvbuf &&
		    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
			int new_clamp = space;

			/* Receive space grows, normalize in order to
			 * take into account packet headers and sk_buff
			 * structure overhead.
			 */
			space /= tp->advmss;
			if (!space)
				space = 1;
			rcvmem = (tp->advmss + MAX_TCP_HEADER +
				  16 + sizeof(struct sk_buff));
			while (tcp_win_from_space(rcvmem) < tp->advmss)
				rcvmem += 128;
			space *= rcvmem;
			space = min(space, sysctl_tcp_rmem[2]);
			if (space > sk->sk_rcvbuf) {
				sk->sk_rcvbuf = space;

				/* Make the window clamp follow along. */
				tp->window_clamp = new_clamp;
			}
		}
	}

new_measure:
	tp->rcvq_space.seq = tp->copied_seq;
	tp->rcvq_space.time = tcp_time_stamp;
}
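
/* The sizing rule above is Dynamic Right-Sizing in a nutshell: if the
 * application copied N bytes during roughly one receiver-estimated RTT,
 * the sender's window is at least N, so provisioning for 2*N (scaled up
 * for header and sk_buff overhead) keeps the advertised window from
 * becoming the bottleneck while the sender doubles its cwnd in slow
 * start.  E.g. copying 64KB in one RTT grows rcvq_space.space to 128KB,
 * subject to the sysctl_tcp_rmem[2] cap.
 */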

/* There is something which you must keep in mind when you analyze the
 * behavior of the tp->ato delayed ack timeout interval.  When a
 * connection starts up, we want to ack as quickly as possible.  The
 * problem is that "good" TCP's do slow start at the beginning of data
 * transmission.  This means that until we send the first few ACK's the
 * sender will sit on his end and only queue most of his data, because
 * he can only send snd_cwnd unacked packets at any given time.  For
 * each ACK we send, he increments snd_cwnd and transmits more of his
 * queue.  -DaveM
 */
static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 now;

	inet_csk_schedule_ack(sk);

	tcp_measure_rcv_mss(sk, skb);

	tcp_rcv_rtt_measure(tp);

	now = tcp_time_stamp;

	if (!icsk->icsk_ack.ato) {
		/* The _first_ data packet received, initialize
		 * delayed ACK engine.
		 */
		tcp_incr_quickack(sk);
		icsk->icsk_ack.ato = TCP_ATO_MIN;
	} else {
		int m = now - icsk->icsk_ack.lrcvtime;

		if (m <= TCP_ATO_MIN / 2) {
			/* The fastest case is the first. */
			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
		} else if (m < icsk->icsk_ack.ato) {
			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;
			if (icsk->icsk_ack.ato > icsk->icsk_rto)
				icsk->icsk_ack.ato = icsk->icsk_rto;
		} else if (m > icsk->icsk_rto) {
			/* Too long gap. Apparently sender failed to
			 * restart window, so that we send ACKs quickly.
			 */
			tcp_incr_quickack(sk);
			sk_mem_reclaim(sk);
		}
	}
	icsk->icsk_ack.lrcvtime = now;

	TCP_ECN_check_ce(tp, skb);

	if (skb->len >= 128)
		tcp_grow_window(sk, skb);
}

static u32 tcp_rto_min(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = TCP_RTO_MIN;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

/* Called to compute a smoothed rtt estimate. The data fed to this
 * routine either comes from timestamps, or from segments that were
 * known _not_ to have been retransmitted [see Karn/Partridge
 * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
 * piece by Van Jacobson.
 * NOTE: the next three routines used to be one big routine.
 * To save cycles in the RFC 1323 implementation it was better to break
 * it up into three procedures. -- erics
 */
static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
{
	struct tcp_sock *tp = tcp_sk(sk);
	long m = mrtt; /* RTT */

	/*	The following amusing code comes from Jacobson's
	 *	article in SIGCOMM '88.  Note that rtt and mdev
	 *	are scaled versions of rtt and mean deviation.
	 *	This is designed to be as fast as possible
	 *	m stands for "measurement".
	 *
	 *	On a 1990 paper the rto value is changed to:
	 *	RTO = rtt + 4 * mdev
	 *
	 * Funny. This algorithm seems to be very broken.
	 * These formulae increase RTO, when it should be decreased, increase
	 * too slowly, when it should be increased quickly, decrease too quickly
	 * etc. I guess in BSD RTO takes ONE value, so that it is absolutely
	 * does not matter how to _calculate_ it. Seems, it was trap
	 * that VJ failed to avoid. 8)
	 */
	if (m == 0)
		m = 1;
	if (tp->srtt != 0) {
		m -= (tp->srtt >> 3);	/* m is now error in rtt est */
		tp->srtt += m;		/* rtt = 7/8 rtt + 1/8 new */
		if (m < 0) {
			m = -m;		/* m is now abs(error) */
			m -= (tp->mdev >> 2);   /* similar update on mdev */
			/* This is similar to one of Eifel findings.
			 * Eifel blocks mdev updates when rtt decreases.
			 * This solution is a bit different: we use finer gain
			 * for mdev in this case (alpha*beta).
			 * Like Eifel it also prevents growth of rto,
			 * but also it limits too fast rto decreases,
			 * happening in pure Eifel.
			 */
			if (m > 0)
				m >>= 3;
		} else {
			m -= (tp->mdev >> 2);   /* similar update on mdev */
		}
		tp->mdev += m;		/* mdev = 3/4 mdev + 1/4 new */
		if (tp->mdev > tp->mdev_max) {
			tp->mdev_max = tp->mdev;
			if (tp->mdev_max > tp->rttvar)
				tp->rttvar = tp->mdev_max;
		}
		if (after(tp->snd_una, tp->rtt_seq)) {
			if (tp->mdev_max < tp->rttvar)
				tp->rttvar -= (tp->rttvar - tp->mdev_max) >> 2;
			tp->rtt_seq = tp->snd_nxt;
			tp->mdev_max = tcp_rto_min(sk);
		}
	} else {
		/* no previous measure. */
		tp->srtt = m << 3;	/* take the measured time to be rtt */
		tp->mdev = m << 1;	/* make sure rto = 3*rtt */
		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
		tp->rtt_seq = tp->snd_nxt;
	}
}
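
/* Concretely: tp->srtt holds 8 times the smoothed RTT and tp->mdev holds
 * 4 times the smoothed deviation, so the updates above implement
 * srtt = 7/8*srtt + 1/8*m and mdev = 3/4*mdev + 1/4*|err| in unscaled
 * terms.  E.g. with a 100ms smoothed RTT (srtt == 800 at HZ=1000) and a
 * 120ms sample, the error is +20ms and srtt becomes 820 (102.5ms); the
 * RTO later computed in tcp_set_rto() as srtt/8 + rttvar then
 * approximates the RFC 2988 RTT + 4*RTTVAR.
 */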

/* Calculate rto without backoff.  This is the second half of Van Jacobson's
 * routine referred to above.
 */
static inline void tcp_set_rto(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	/* Old crap is replaced with new one. 8)
	 *
	 * More seriously:
	 * 1. If rtt variance happened to be less 50msec, it is hallucination.
	 *    It cannot be less due to utterly erratic ACK generation made
	 *    at least by solaris and freebsd. "Erratic ACKs" has _nothing_
	 *    to do with delayed acks, because at cwnd>2 true delack timeout
	 *    is invisible. Actually, Linux-2.4 also generates erratic
	 *    ACKs in some circumstances.
	 */
	inet_csk(sk)->icsk_rto = (tp->srtt >> 3) + tp->rttvar;

	/* 2. Fixups made earlier cannot be right.
	 *    If we do not estimate RTO correctly without them,
	 *    all the algo is pure shit and should be replaced
	 *    with correct one. It is exactly, which we pretend to do.
	 */
}

/* NOTE: clamping at TCP_RTO_MIN is not required, current algo
 * guarantees that rto is higher.
 */
static inline void tcp_bound_rto(struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

/* Save metrics learned by this TCP session.
   This function is called only, when TCP finishes successfully
   i.e. when it enters TIME-WAIT or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);

	if (sysctl_tcp_nometrics_save)
		return;

	dst_confirm(dst);

	if (dst && (dst->flags & DST_HOST)) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		int m;
		unsigned long rtt;

		if (icsk->icsk_backoff || !tp->srtt) {
			/* This session failed to estimate rtt. Why?
			 * Probably, no packets returned in time.
			 * Reset our results.
			 */
			if (!(dst_metric_locked(dst, RTAX_RTT)))
				dst->metrics[RTAX_RTT - 1] = 0;
			return;
		}

		rtt = dst_metric_rtt(dst, RTAX_RTT);
		m = rtt - tp->srtt;

		/* If newly calculated rtt larger than stored one,
		 * store new one. Otherwise, use EWMA. Remember,
		 * rtt overestimation is always better than underestimation.
		 */
		if (!(dst_metric_locked(dst, RTAX_RTT))) {
			if (m <= 0)
				set_dst_metric_rtt(dst, RTAX_RTT, tp->srtt);
			else
				set_dst_metric_rtt(dst, RTAX_RTT, rtt - (m >> 3));
		}

		if (!(dst_metric_locked(dst, RTAX_RTTVAR))) {
			unsigned long var;
			if (m < 0)
				m = -m;

			/* Scale deviation to rttvar fixed point */
			m >>= 1;
			if (m < tp->mdev)
				m = tp->mdev;

			var = dst_metric_rtt(dst, RTAX_RTTVAR);
			if (m >= var)
				var = m;
			else
				var -= (var - m) >> 2;

			set_dst_metric_rtt(dst, RTAX_RTTVAR, var);
		}

		if (tp->snd_ssthresh >= 0xFFFF) {
			/* Slow start still did not finish. */
			if (dst_metric(dst, RTAX_SSTHRESH) &&
			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
			    (tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH))
				dst->metrics[RTAX_SSTHRESH-1] = tp->snd_cwnd >> 1;
			if (!dst_metric_locked(dst, RTAX_CWND) &&
			    tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
				dst->metrics[RTAX_CWND - 1] = tp->snd_cwnd;
		} else if (tp->snd_cwnd > tp->snd_ssthresh &&
			   icsk->icsk_ca_state == TCP_CA_Open) {
			/* Cong. avoidance phase, cwnd is reliable. */
			if (!dst_metric_locked(dst, RTAX_SSTHRESH))
				dst->metrics[RTAX_SSTHRESH-1] =
					max(tp->snd_cwnd >> 1, tp->snd_ssthresh);
			if (!dst_metric_locked(dst, RTAX_CWND))
				dst->metrics[RTAX_CWND-1] = (dst_metric(dst, RTAX_CWND) + tp->snd_cwnd) >> 1;
		} else {
			/* Else slow start did not finish, cwnd is non-sense,
			   ssthresh may be also invalid.
			 */
			if (!dst_metric_locked(dst, RTAX_CWND))
				dst->metrics[RTAX_CWND-1] = (dst_metric(dst, RTAX_CWND) + tp->snd_ssthresh) >> 1;
			if (dst_metric(dst, RTAX_SSTHRESH) &&
			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
			    tp->snd_ssthresh > dst_metric(dst, RTAX_SSTHRESH))
				dst->metrics[RTAX_SSTHRESH-1] = tp->snd_ssthresh;
		}

		if (!dst_metric_locked(dst, RTAX_REORDERING)) {
			if (dst_metric(dst, RTAX_REORDERING) < tp->reordering &&
			    tp->reordering != sysctl_tcp_reordering)
				dst->metrics[RTAX_REORDERING-1] = tp->reordering;
		}
	}
}

/* Numbers are taken from RFC3390.
 *
 * John Heffner states:
 *
 *	The RFC specifies a window of no more than 4380 bytes
 *	unless 2*MSS > 4380.  Reading the pseudocode in the RFC
 *	is a bit misleading because they use a clamp at 4380 bytes
 *	rather than use a multiplier in the relevant range.
 */
__u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
{
	__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);

	if (!cwnd) {
		if (tp->mss_cache > 1460)
			cwnd = 2;
		else
			cwnd = (tp->mss_cache > 1095) ? 3 : 4;
	}
	return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
}
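
/* Per the table above, an Ethernet-sized mss_cache of 1460 yields an
 * initial cwnd of 3 segments (3 * 1460 = 4380 bytes, exactly the RFC 3390
 * cap), anything larger than 1460 drops it to 2, and an MSS of at most
 * 1095 allows 4, unless a cached RTAX_INITCWND or snd_cwnd_clamp
 * overrides the result.
 */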

/* Set slow start threshold and cwnd not falling to slow start */
void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);

	tp->prior_ssthresh = 0;
	tp->bytes_acked = 0;
	if (icsk->icsk_ca_state < TCP_CA_CWR) {
		tp->undo_marker = 0;
		if (set_ssthresh)
			tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
		tp->snd_cwnd = min(tp->snd_cwnd,
				   tcp_packets_in_flight(tp) + 1U);
		tp->snd_cwnd_cnt = 0;
		tp->high_seq = tp->snd_nxt;
		tp->snd_cwnd_stamp = tcp_time_stamp;
		TCP_ECN_queue_cwr(tp);

		tcp_set_ca_state(sk, TCP_CA_CWR);
	}
}

/*
 * Packet counting of FACK is based on in-order assumptions, therefore TCP
 * disables it when reordering is detected
 */
static void tcp_disable_fack(struct tcp_sock *tp)
{
	/* RFC3517 uses different metric in lost marker => reset on change */
	if (tcp_is_fack(tp))
		tp->lost_skb_hint = NULL;
	tp->rx_opt.sack_ok &= ~2;
}

/* Take a notice that peer is sending D-SACKs */
static void tcp_dsack_seen(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= 4;
}

/* Initialize metrics on socket. */

static void tcp_init_metrics(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst == NULL)
		goto reset;

	dst_confirm(dst);

	if (dst_metric_locked(dst, RTAX_CWND))
		tp->snd_cwnd_clamp = dst_metric(dst, RTAX_CWND);
	if (dst_metric(dst, RTAX_SSTHRESH)) {
		tp->snd_ssthresh = dst_metric(dst, RTAX_SSTHRESH);
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	}
	if (dst_metric(dst, RTAX_REORDERING) &&
	    tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
		tcp_disable_fack(tp);
		tp->reordering = dst_metric(dst, RTAX_REORDERING);
	}

	if (dst_metric(dst, RTAX_RTT) == 0)
		goto reset;

	if (!tp->srtt && dst_metric_rtt(dst, RTAX_RTT) < (TCP_TIMEOUT_INIT << 3))
		goto reset;

	/* Initial rtt is determined from SYN,SYN-ACK.
	 * The segment is small and rtt may appear much
	 * less than real one. Use per-dst memory
	 * to make it more realistic.
	 *
	 * A bit of theory. RTT is time passed after "normal" sized packet
	 * is sent until it is ACKed. In normal circumstances sending small
	 * packets force peer to delay ACKs and calculation is correct too.
	 * The algorithm is adaptive and, provided we follow specs, it
	 * NEVER underestimate RTT. BUT! If peer tries to make some clever
	 * tricks sort of "quick acks" for time long enough to decrease RTT
	 * to low value, and then abruptly stops to do it and starts to delay
	 * ACKs, wait for troubles.
	 */
	if (dst_metric_rtt(dst, RTAX_RTT) > tp->srtt) {
		tp->srtt = dst_metric_rtt(dst, RTAX_RTT);
		tp->rtt_seq = tp->snd_nxt;
	}
	if (dst_metric_rtt(dst, RTAX_RTTVAR) > tp->mdev) {
		tp->mdev = dst_metric_rtt(dst, RTAX_RTTVAR);
		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
	}
	tcp_set_rto(sk);
	tcp_bound_rto(sk);
	if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
		goto reset;
	tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	return;

reset:
	/* Play conservative. If timestamps are not
	 * supported, TCP will fail to recalculate correct
	 * rtt, if initial rto is too small. FORGET ALL AND RESET!
	 */
	if (!tp->rx_opt.saw_tstamp && tp->srtt) {
		tp->srtt = 0;
		tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
	}
}

static void tcp_update_reordering(struct sock *sk, const int metric,
				  const int ts)
{
	struct tcp_sock *tp = tcp_sk(sk);
	if (metric > tp->reordering) {
		int mib_idx;

		tp->reordering = min(TCP_MAX_REORDERING, metric);

		/* This exciting event is worth to be remembered. 8) */
		if (ts)
			mib_idx = LINUX_MIB_TCPTSREORDER;
		else if (tcp_is_reno(tp))
			mib_idx = LINUX_MIB_TCPRENOREORDER;
		else if (tcp_is_fack(tp))
			mib_idx = LINUX_MIB_TCPFACKREORDER;
		else
			mib_idx = LINUX_MIB_TCPSACKREORDER;

		NET_INC_STATS_BH(sock_net(sk), mib_idx);
#if FASTRETRANS_DEBUG > 1
		printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
		       tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
		       tp->reordering,
		       tp->fackets_out,
		       tp->sacked_out,
		       tp->undo_marker ? tp->undo_retrans : 0);
#endif
		tcp_disable_fack(tp);
	}
}

/* This must be called before lost_out is incremented */
static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
{
	if ((tp->retransmit_skb_hint == NULL) ||
	    before(TCP_SKB_CB(skb)->seq,
		   TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
		tp->retransmit_skb_hint = skb;

	if (!tp->lost_out ||
	    after(TCP_SKB_CB(skb)->end_seq, tp->retransmit_high))
		tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
}

static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb)
{
	if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
		tcp_verify_retransmit_hint(tp, skb);

		tp->lost_out += tcp_skb_pcount(skb);
		TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
	}
}

static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp,
					    struct sk_buff *skb)
{
	tcp_verify_retransmit_hint(tp, skb);

	if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
		tp->lost_out += tcp_skb_pcount(skb);
		TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
	}
}

/* This procedure tags the retransmission queue when SACKs arrive.
 *
 * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
 * Packets in queue with these bits set are counted in variables
 * sacked_out, retrans_out and lost_out, correspondingly.
 *
 * Valid combinations are:
 * Tag  InFlight	Description
 * 0	1		- orig segment is in flight.
 * S	0		- nothing flies, orig reached receiver.
 * L	0		- nothing flies, orig lost by net.
 * R	2		- both orig and retransmit are in flight.
 * L|R	1		- orig is lost, retransmit is in flight.
 * S|R  1		- orig reached receiver, retrans is still in flight.
 * (L|S|R is logically valid, it could occur when L|R is sacked,
 *  but it is equivalent to plain S and code short-circuits it to S.
 *  L|S is logically invalid, it would mean -1 packet in flight 8))
 *
 * These 6 states form finite state machine, controlled by the following events:
 * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
 * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
 * 3. Loss detection event of one of three flavors:
 *	A. Scoreboard estimator decided the packet is lost.
 *	   A'. Reno "three dupacks" marks head of queue lost.
 *	   A''. Its FACK modification, head until snd.fack is lost.
 *	B. SACK arrives sacking data transmitted after never retransmitted
 *	   hole was sent out.
 *	C. SACK arrives sacking SND.NXT at the moment, when the
 *	   segment was retransmitted.
 * 4. D-SACK added new rule: D-SACK changes any tag to S.
 *
 * It is pleasant to note, that state diagram turns out to be commutative,
 * so that we are allowed not to be bothered by order of our actions,
 * when multiple events arrive simultaneously. (see the function below).
 *
 * Reordering detection.
 * --------------------
 * Reordering metric is maximal distance, which a packet can be displaced
 * in packet stream. With SACKs we can estimate it:
 *
 * 1. SACK fills old hole and the corresponding segment was not
 *    ever retransmitted -> reordering. Alas, we cannot use it
 *    when segment was retransmitted.
 * 2. The last flaw is solved with D-SACK. D-SACK arrives
 *    for retransmitted and already SACKed segment -> reordering..
 * Both of these heuristics are not used in Loss state, when we cannot
 * account for retransmits accurately.
 *
 * SACK block validation.
 * ----------------------
 *
 * SACK block range validation checks that the received SACK block fits to
 * the expected sequence limits, i.e., it is between SND.UNA and SND.NXT.
 * Note that SND.UNA is not included to the range though being valid because
 * it means that the receiver is rather inconsistent with itself reporting
 * SACK reneging when it should advance SND.UNA. Such a SACK block is
 * perfectly valid, however, in light of RFC2018 which explicitly states
 * that "SACK block MUST reflect the newest segment. Even if the newest
 * segment is going to be discarded ...", not that it looks very clever
 * in case of head skb. Due to potential receiver driven attacks, we
 * choose to avoid immediate execution of a walk in write queue due to
 * reneging and defer head skb's loss recovery to standard loss recovery
 * procedure that will eventually trigger (nothing forbids us doing this).
 *
 * Implements also blockage to start_seq wrap-around. Problem lies in the
 * fact that though start_seq (s) is before end_seq (i.e., not reversed),
 * there's no guarantee that it will be before snd_nxt (n). The problem
 * happens when start_seq resides between end_seq wrap (e_w) and snd_nxt
 * wrap (s_w):
 *
 *        <- outs wnd ->                          <- wrapzone ->
 *        u     e      n                         u_w   e_w  s n_w
 *        |     |      |                          |     |   | |
 * |<------------+------+----- TCP seqno space --------------+---------->|
 * ...-- <2^31 ->|                                           |<--------...
 * ...---- >2^31 ------>|                                    |<--------...
 *
 * Current code wouldn't be vulnerable but it's better still to discard such
 * crazy SACK blocks. Doing this check for start_seq alone closes somewhat
 * similar case (end_seq after snd_nxt wrap) as earlier reversed check in
 * snd_nxt wrap -> snd_una region will then become "well defined", i.e.,
 * equal to the ideal case (infinite seqno space without wrap caused issues).
 *
 * With D-SACK the lower bound is extended to cover sequence space below
 * SND.UNA down to undo_marker, which is the last point of interest. Yet
 * again, D-SACK block must not go across snd_una (for the same reason as
 * for the normal SACK blocks, explained above). But there all simplicity
 * ends, TCP might receive valid D-SACKs below that. As long as they reside
 * fully below undo_marker they do not affect behavior in any way and can
 * therefore be safely ignored. In rare cases (which are more or less
 * theoretical ones), the D-SACK will nicely cross that boundary due to skb
 * fragmentation and packet reordering past skb's retransmission. To consider
 * them correctly, the acceptable range must be extended even more though
 * the exact amount is rather hard to quantify. However, tp->max_window can
 * be used as an exaggerated estimate.
 */
static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
				  u32 start_seq, u32 end_seq)
{
	/* Too far in future, or reversed (interpretation is ambiguous) */
	if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq))
		return 0;

	/* Nasty start_seq wrap-around check (see comments above) */
	if (!before(start_seq, tp->snd_nxt))
		return 0;

	/* In outstanding window? ...This is valid exit for D-SACKs too.
	 * start_seq == snd_una is non-sensical (see comments above)
	 */
	if (after(start_seq, tp->snd_una))
		return 1;

	if (!is_dsack || !tp->undo_marker)
		return 0;

	/* ...Then it's D-SACK, and must reside below snd_una completely */
	if (!after(end_seq, tp->snd_una))
		return 0;

	if (!before(start_seq, tp->undo_marker))
		return 1;

	/* Too old */
	if (!after(end_seq, tp->undo_marker))
		return 0;

	/* Undo_marker boundary crossing (overestimates a lot). Known already:
	 * start_seq < undo_marker and end_seq >= undo_marker.
	 */
	return !before(start_seq, end_seq - tp->max_window);
}

/* Check for lost retransmit. This superb idea is borrowed from "ratehalving".
 * Event "C". Later note: FACK people cheated me again 8), we have to account
 * for reordering! Ugly, but should help.
 *
 * Search retransmitted skbs from write_queue that were sent when snd_nxt was
 * less than what is now known to be received by the other end (derived from
 * highest SACK block). Also calculate the lowest snd_nxt among the remaining
 * retransmitted skbs to avoid some costly processing per ACKs.
 */
static void tcp_mark_lost_retrans(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int cnt = 0;
	u32 new_low_seq = tp->snd_nxt;
	u32 received_upto = tcp_highest_sack_seq(tp);

	if (!tcp_is_fack(tp) || !tp->retrans_out ||
	    !after(received_upto, tp->lost_retrans_low) ||
	    icsk->icsk_ca_state != TCP_CA_Recovery)
		return;

	tcp_for_write_queue(skb, sk) {
		u32 ack_seq = TCP_SKB_CB(skb)->ack_seq;

		if (skb == tcp_send_head(sk))
			break;
		if (cnt == tp->retrans_out)
			break;
		if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
			continue;

		if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS))
			continue;

		if (after(received_upto, ack_seq) &&
		    (tcp_is_fack(tp) ||
		     !before(received_upto,
			     ack_seq + tp->reordering * tp->mss_cache))) {
			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
			tp->retrans_out -= tcp_skb_pcount(skb);

			tcp_skb_mark_lost_uncond_verify(tp, skb);
			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
		} else {
			if (before(ack_seq, new_low_seq))
				new_low_seq = ack_seq;
			cnt += tcp_skb_pcount(skb);
		}
	}

	if (tp->retrans_out)
		tp->lost_retrans_low = new_low_seq;
}

static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb,
			   struct tcp_sack_block_wire *sp, int num_sacks,
			   u32 prior_snd_una)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
	u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
	int dup_sack = 0;

	if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
		dup_sack = 1;
		tcp_dsack_seen(tp);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
	} else if (num_sacks > 1) {
		u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
		u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);

		if (!after(end_seq_0, end_seq_1) &&
		    !before(start_seq_0, start_seq_1)) {
			dup_sack = 1;
			tcp_dsack_seen(tp);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPDSACKOFORECV);
		}
	}

	/* D-SACK for already forgotten data... Do dumb counting. */
	if (dup_sack &&
	    !after(end_seq_0, prior_snd_una) &&
	    after(end_seq_0, tp->undo_marker))
		tp->undo_retrans--;

	return dup_sack;
}

struct tcp_sacktag_state {
	int reord;
	int fack_count;
	int flag;
};

/* Check if skb is fully within the SACK block. In presence of GSO skbs,
 * the incoming SACK may not exactly match but we can find smaller MSS
 * aligned portion of it that matches. Therefore we might need to fragment
 * which may fail and creates some hassle (caller must handle error case
 * returns).
 *
 * FIXME: this could be merged to shift decision code
 */
static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
				 u32 start_seq, u32 end_seq)
{
	int in_sack, err;
	unsigned int pkt_len;
	unsigned int mss;

	in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
		  !before(end_seq, TCP_SKB_CB(skb)->end_seq);

	if (tcp_skb_pcount(skb) > 1 && !in_sack &&
	    after(TCP_SKB_CB(skb)->end_seq, start_seq)) {
		mss = tcp_skb_mss(skb);
		in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);

		if (!in_sack) {
			pkt_len = start_seq - TCP_SKB_CB(skb)->seq;
			if (pkt_len < mss)
				pkt_len = mss;
		} else {
			pkt_len = end_seq - TCP_SKB_CB(skb)->seq;
			if (pkt_len < mss)
				return -EINVAL;
		}

		/* Round if necessary so that SACKs cover only full MSSes
		 * and/or the remaining small portion (if present)
		 */
		if (pkt_len > mss) {
			unsigned int new_len = (pkt_len / mss) * mss;
			if (!in_sack && new_len < pkt_len) {
				new_len += mss;
				if (new_len > skb->len)
					return 0;
			}
			pkt_len = new_len;
		}
		err = tcp_fragment(sk, skb, pkt_len, mss);
		if (err < 0)
			return err;
	}

	return in_sack;
}

static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
			  struct tcp_sacktag_state *state,
			  int dup_sack, int pcount)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u8 sacked = TCP_SKB_CB(skb)->sacked;
	int fack_count = state->fack_count;

	/* Account D-SACK for retransmitted packet. */
	if (dup_sack && (sacked & TCPCB_RETRANS)) {
		if (after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
			tp->undo_retrans--;
		if (sacked & TCPCB_SACKED_ACKED)
			state->reord = min(fack_count, state->reord);
	}

	/* Nothing to do; acked frame is about to be dropped (was ACKed). */
	if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
		return sacked;

	if (!(sacked & TCPCB_SACKED_ACKED)) {
		if (sacked & TCPCB_SACKED_RETRANS) {
			/* If the segment is not tagged as lost,
			 * we do not clear RETRANS, believing
			 * that retransmission is still in flight.
			 */
			if (sacked & TCPCB_LOST) {
				sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
				tp->lost_out -= pcount;
				tp->retrans_out -= pcount;
			}
		} else {
			if (!(sacked & TCPCB_RETRANS)) {
				/* New sack for not retransmitted frame,
				 * which was in hole. It is reordering.
				 */
				if (before(TCP_SKB_CB(skb)->seq,
					   tcp_highest_sack_seq(tp)))
					state->reord = min(fack_count,
							   state->reord);

				/* SACK enhanced F-RTO (RFC4138; Appendix B) */
				if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark))
					state->flag |= FLAG_ONLY_ORIG_SACKED;
			}

			if (sacked & TCPCB_LOST) {
				sacked &= ~TCPCB_LOST;
				tp->lost_out -= pcount;
			}
		}

		sacked |= TCPCB_SACKED_ACKED;
		state->flag |= FLAG_DATA_SACKED;
		tp->sacked_out += pcount;

		fack_count += pcount;

		/* Lost marker hint past SACKed? Tweak RFC3517 cnt */
		if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
		    before(TCP_SKB_CB(skb)->seq,
			   TCP_SKB_CB(tp->lost_skb_hint)->seq))
			tp->lost_cnt_hint += pcount;

		if (fack_count > tp->fackets_out)
			tp->fackets_out = fack_count;
	}

	/* D-SACK. We can detect redundant retransmission in S|R and plain R
	 * frames and clear it. undo_retrans is decreased above, L|R frames
	 * are accounted above as well.
	 */
	if (dup_sack && (sacked & TCPCB_SACKED_RETRANS)) {
		sacked &= ~TCPCB_SACKED_RETRANS;
		tp->retrans_out -= pcount;
	}

	return sacked;
}

static int tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
			   struct sk_buff *skb,
			   struct tcp_sacktag_state *state,
			   unsigned int pcount, int shifted, int mss)
{
	struct tcp_sock *tp = tcp_sk(sk);

	BUG_ON(!pcount);

	/* Tweak before seqno plays */
	if (!tcp_is_fack(tp) && tcp_is_sack(tp) && tp->lost_skb_hint &&
	    !before(TCP_SKB_CB(tp->lost_skb_hint)->seq, TCP_SKB_CB(skb)->seq))
		tp->lost_cnt_hint += pcount;

	TCP_SKB_CB(prev)->end_seq += shifted;
	TCP_SKB_CB(skb)->seq += shifted;

	skb_shinfo(prev)->gso_segs += pcount;
	BUG_ON(skb_shinfo(skb)->gso_segs < pcount);
	skb_shinfo(skb)->gso_segs -= pcount;

	/* When we're adding to gso_segs == 1, gso_size will be zero,
	 * in theory this shouldn't be necessary but as long as DSACK
	 * code can come after this skb later on it's better to keep
	 * setting gso_size to something.
	 */
	if (!skb_shinfo(prev)->gso_size) {
		skb_shinfo(prev)->gso_size = mss;
		skb_shinfo(prev)->gso_type = sk->sk_gso_type;
	}

	/* CHECKME: To clear or not to clear? Mimics normal skb currently */
	if (skb_shinfo(skb)->gso_segs <= 1) {
		skb_shinfo(skb)->gso_size = 0;
		skb_shinfo(skb)->gso_type = 0;
	}

	/* We discard results */
	tcp_sacktag_one(skb, sk, state, 0, pcount);

	/* Difference in this won't matter, both ACKed by the same cumul. ACK */
	TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);

	if (skb->len > 0) {
		BUG_ON(!tcp_skb_pcount(skb));
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED);
		return 0;
	}

	/* Whole SKB was eaten :-) */

	if (skb == tp->retransmit_skb_hint)
		tp->retransmit_skb_hint = prev;
	if (skb == tp->scoreboard_skb_hint)
		tp->scoreboard_skb_hint = prev;
	if (skb == tp->lost_skb_hint) {
		tp->lost_skb_hint = prev;
		tp->lost_cnt_hint -= tcp_skb_pcount(prev);
	}

	TCP_SKB_CB(skb)->flags |= TCP_SKB_CB(prev)->flags;
	if (skb == tcp_highest_sack(sk))
		tcp_advance_highest_sack(sk, skb);

	tcp_unlink_write_queue(skb, sk);
	sk_wmem_free_skb(sk, skb);

	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED);

	return 1;
}
1450
1451/* I wish gso_size would have a bit more sane initialization than
1452 * something-or-zero which complicates things
1453 */
775ffabf 1454static int tcp_skb_seglen(struct sk_buff *skb)
832d11c5 1455{
775ffabf 1456 return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb);
832d11c5
IJ
1457}
1458
1459/* Shifting pages past head area doesn't work */
1460static int skb_can_shift(struct sk_buff *skb)
1461{
1462 return !skb_headlen(skb) && skb_is_nonlinear(skb);
1463}
1464
1465/* Try collapsing SACK blocks spanning across multiple skbs to a single
1466 * skb.
1467 */
1468static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
a1197f5a 1469 struct tcp_sacktag_state *state,
832d11c5 1470 u32 start_seq, u32 end_seq,
a1197f5a 1471 int dup_sack)
832d11c5
IJ
1472{
1473 struct tcp_sock *tp = tcp_sk(sk);
1474 struct sk_buff *prev;
1475 int mss;
1476 int pcount = 0;
1477 int len;
1478 int in_sack;
1479
1480 if (!sk_can_gso(sk))
1481 goto fallback;
1482
1483 /* Normally R but no L won't result in plain S */
1484 if (!dup_sack &&
9969ca5f 1485 (TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS)
832d11c5
IJ
1486 goto fallback;
1487 if (!skb_can_shift(skb))
1488 goto fallback;
1489 /* This frame is about to be dropped (was ACKed). */
1490 if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
1491 goto fallback;
1492
1493 /* Can only happen with delayed DSACK + discard craziness */
1494 if (unlikely(skb == tcp_write_queue_head(sk)))
1495 goto fallback;
1496 prev = tcp_write_queue_prev(sk, skb);
1497
1498 if ((TCP_SKB_CB(prev)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED)
1499 goto fallback;
1500
1501 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
1502 !before(end_seq, TCP_SKB_CB(skb)->end_seq);
1503
1504 if (in_sack) {
1505 len = skb->len;
1506 pcount = tcp_skb_pcount(skb);
775ffabf 1507 mss = tcp_skb_seglen(skb);
832d11c5
IJ
1508
1509 /* TODO: Fix DSACKs to not fragment already SACKed and we can
1510 * drop this restriction as unnecessary
1511 */
775ffabf 1512 if (mss != tcp_skb_seglen(prev))
832d11c5
IJ
1513 goto fallback;
1514 } else {
1515 if (!after(TCP_SKB_CB(skb)->end_seq, start_seq))
1516 goto noop;
1517 /* CHECKME: Is this the non-MSS split case only? Btw, this will
1518 * cause skipped skbs due to the advancing loop; the original
1519 * has that feature too
1520 */
1521 if (tcp_skb_pcount(skb) <= 1)
1522 goto noop;
1523
1524 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);
1525 if (!in_sack) {
1526 /* TODO: a head merge into the next skb could be attempted here
1527 * if (!after(TCP_SKB_CB(skb)->end_seq, end_seq)),
1528 * though it might not be worth the additional hassle
1529 *
1530 * ...we can probably just fall back to what was done
1531 * previously. We could try merging non-SACKed ones
1532 * as well, but it probably isn't going to pay off
1533 * because later SACKs might split them again, and
1534 * it would make skb timestamp tracking a considerably
1535 * harder problem.
1536 */
1537 goto fallback;
1538 }
1539
1540 len = end_seq - TCP_SKB_CB(skb)->seq;
1541 BUG_ON(len < 0);
1542 BUG_ON(len > skb->len);
1543
1544 /* MSS boundaries should be honoured or else pcount will
1545 * break severely, even though it makes things a bit trickier.
1546 * Optimize the common case to avoid most of the divides
1547 */
1548 mss = tcp_skb_mss(skb);
1549
1550 /* TODO: Fix DSACKs to not fragment already SACKed and we can
1551 * drop this restriction as unnecessary
1552 */
775ffabf 1553 if (mss != tcp_skb_seglen(prev))
832d11c5
IJ
1554 goto fallback;
1555
1556 if (len == mss) {
1557 pcount = 1;
1558 } else if (len < mss) {
1559 goto noop;
1560 } else {
1561 pcount = len / mss;
1562 len = pcount * mss;
1563 }
1564 }
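/* Illustrative note (not part of the original file): the rounding above only
 * shifts whole segments. With a hypothetical mss of 1448 and len of 3000,
 * pcount becomes 2 and len is trimmed to 2896; the 104-byte remainder stays
 * in skb so the pcount accounting remains consistent.
 */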
1565
1566 if (!skb_shift(prev, skb, len))
1567 goto fallback;
a1197f5a 1568 if (!tcp_shifted_skb(sk, prev, skb, state, pcount, len, mss))
832d11c5
IJ
1569 goto out;
1570
1571 /* Filling the hole allows collapsing with the next skb as well; this is
1572 * very useful when a hole-on-every-nth-skb pattern happens
1573 */
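/* Illustrative note (not part of the original file): e.g. with a hypothetical
 * queue pattern S . S . S (S = already SACKed, . = hole), once the newly
 * SACKed skb that filled a hole has been merged into prev, prev becomes
 * adjacent to the following already-SACKed skb, so the extra shift attempt
 * below can fold that one in as well.
 */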
1574 if (prev == tcp_write_queue_tail(sk))
1575 goto out;
1576 skb = tcp_write_queue_next(sk, prev);
1577
f0bc52f3
IJ
1578 if (!skb_can_shift(skb) ||
1579 (skb == tcp_send_head(sk)) ||
1580 ((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) ||
775ffabf 1581 (mss != tcp_skb_seglen(skb)))
832d11c5
IJ
1582 goto out;
1583
1584 len = skb->len;
1585 if (skb_shift(prev, skb, len)) {
1586 pcount += tcp_skb_pcount(skb);
a1197f5a
IJ
1587 tcp_shifted_skb(sk, prev, skb, state, tcp_skb_pcount(skb), len,
1588 mss);
832d11c5
IJ
1589 }
1590
1591out:
a1197f5a 1592 state->fack_count += pcount;
832d11c5
IJ
1593 return prev;
1594
1595noop:
1596 return skb;
1597
1598fallback:
111cc8b9 1599 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
832d11c5
IJ
1600 return NULL;
1601}
1602
68f8353b
IJ
1603static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
1604 struct tcp_sack_block *next_dup,
a1197f5a 1605 struct tcp_sacktag_state *state,
68f8353b 1606 u32 start_seq, u32 end_seq,
a1197f5a 1607 int dup_sack_in)
68f8353b 1608{
832d11c5
IJ
1609 struct tcp_sock *tp = tcp_sk(sk);
1610 struct sk_buff *tmp;
1611
68f8353b
IJ
1612 tcp_for_write_queue_from(skb, sk) {
1613 int in_sack = 0;
1614 int dup_sack = dup_sack_in;
1615
1616 if (skb == tcp_send_head(sk))
1617 break;
1618
1619 /* queue is in-order => we can short-circuit the walk early */
1620 if (!before(TCP_SKB_CB(skb)->seq, end_seq))
1621 break;
1622
1623 if ((next_dup != NULL) &&
1624 before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) {
1625 in_sack = tcp_match_skb_to_sack(sk, skb,
1626 next_dup->start_seq,
1627 next_dup->end_seq);
1628 if (in_sack > 0)
1629 dup_sack = 1;
1630 }
1631
832d11c5
IJ
1632 /* The skb reference here is a bit tricky to get right, since
1633 * shifting can eat and free both this skb and the next,
1634 * so not even the _safe variant of the loop is enough.
1635 */
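/* Illustrative note (not part of the original file): on a successful shift
 * tcp_shift_skb_data() returns prev, the skb the data was merged into; the
 * walk then continues from that skb instead of touching the possibly freed
 * skb (or its possibly freed successor), which is why a _safe list walk
 * alone would not be sufficient here.
 */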
1636 if (in_sack <= 0) {
a1197f5a
IJ
1637 tmp = tcp_shift_skb_data(sk, skb, state,
1638 start_seq, end_seq, dup_sack);
832d11c5
IJ
1639 if (tmp != NULL) {
1640 if (tmp != skb) {
1641 skb = tmp;
1642 continue;
1643 }
1644
1645 in_sack = 0;
1646 } else {
1647 in_sack = tcp_match_skb_to_sack(sk, skb,
1648 start_seq,
1649 end_seq);
1650 }
1651 }
1652
68f8353b
IJ
1653 if (unlikely(in_sack < 0))
1654 break;
1655
832d11c5 1656 if (in_sack) {
a1197f5a
IJ
1657 TCP_SKB_CB(skb)->sacked = tcp_sacktag_one(skb, sk,
1658 state,
1659 dup_sack,
1660 tcp_skb_pcount(skb));
68f8353b 1661
832d11c5
IJ
1662 if (!before(TCP_SKB_CB(skb)->seq,
1663 tcp_highest_sack_seq(tp)))
1664 tcp_advance_highest_sack(sk, skb);
1665 }
1666
a1197f5a 1667 state->fack_count += tcp_skb_pcount(skb);
68f8353b
IJ
1668 }
1669 return skb;
1670}
1671
1672/* Avoid all the extra work sacktag does when walking the queue in
1673 * the normal way
1674 */
1675static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
a1197f5a
IJ
1676 struct tcp_sacktag_state *state,
1677 u32 skip_to_seq)
68f8353b
IJ
1678{
1679 tcp_for_write_queue_from(skb, sk) {
1680 if (skb == tcp_send_head(sk))
1681 break;
1682
e8bae275 1683 if (after(TCP_SKB_CB(skb)->end_seq, skip_to_seq))
68f8353b 1684 break;
d152a7d8 1685
a1197f5a 1686 state->fack_count += tcp_skb_pcount(skb);
68f8353b
IJ
1687 }
1688 return skb;
1689}
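/* Illustrative note (not part of the original file): unlike tcp_sacktag_walk(),
 * this helper does no tagging at all; it only advances to the first skb that
 * ends after skip_to_seq, counting the skipped segments into
 * state->fack_count so the fackets accounting stays correct.
 */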
1690
1691static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
1692 struct sock *sk,
1693 struct tcp_sack_block *next_dup,
a1197f5a
IJ
1694 struct tcp_sacktag_state *state,
1695 u32 skip_to_seq)
68f8353b
IJ
1696{
1697 if (next_dup == NULL)
1698 return skb;
1699
1700 if (before(next_dup->start_seq, skip_to_seq)) {
a1197f5a
IJ
1701 skb = tcp_sacktag_skip(skb, sk, state, next_dup->start_seq);
1702 skb = tcp_sacktag_walk(skb, sk, NULL, state,
1703 next_dup->start_seq, next_dup->end_seq,
1704 1);
68f8353b
IJ
1705 }
1706
1707 return skb;
1708}
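/* Illustrative note (not part of the original file): when a pending DSACK
 * block starts below skip_to_seq, the queue is first advanced to the DSACK's
 * start and that range is walked with dup_sack set, so the DSACKed segments
 * are tagged before the normal skip continues.
 */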
1709
1710static int tcp_sack_cache_ok(struct tcp_sock *tp, struct tcp_sack_block *cache)
1711{
1712 return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
1713}
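/* Illustrative note (not part of the original file): tcp_sack_cache_ok() is a
 * plain bounds check; the sacktag code walks 'cache' through the
 * tp->recv_sack_cache[] array, and this returns true while the pointer is
 * still inside that array.
 */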
1714
1da177e4 1715static int
056834d9
IJ
1716tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
1717 u32 prior_snd_una)
1da177e4 1718{
6687e988 1719 const struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4 1720 struct tcp_sock *tp = tcp_sk(sk);
9c70220b
ACM
1721 unsigned char *ptr = (skb_transport_header(ack_skb) +
1722 TCP_SKB_CB(ack_skb)->sacked);
fd6dad61 1723 struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
4389dded 1724 struct tcp_sack_block sp[TCP_NUM_SACKS];
68f8353b 1725 struct tcp_sack_block *cache;
a1197f5a 1726 struct tcp_sacktag_state state;
68f8353b 1727 struct sk_buff *skb;
4389dded 1728 int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3);
fd6dad61 1729 int used_sacks;
7769f406 1730 int found_dup_sack = 0;
68f8353b 1731 int i, j;
fda03fbb 1732 int first_sack_index;
1da177e4 1733
a1197f5a
IJ
1734 state.flag = 0;
1735 state.reord = tp->packets_out;
1736
d738cd8f 1737 if (!tp->sacked_out) {
de83c058
IJ
1738 if (WARN_ON(tp->fackets_out))
1739 tp->fackets_out = 0;
6859d494 1740 tcp_highest_sack_reset(sk);
d738cd8f 1741 }
1da177e4 1742
1ed83465 1743 found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
d06e021d
DM
1744 num_sacks, prior_snd_una);
1745 if (found_dup_sack)
a1197f5a 1746 state.flag |= FLAG_DSACKING_ACK;
6f74651a
BE
1747
1748 /* Eliminate too-old ACKs, but take more or
1749 * less fresh ones into account, as they can
1750 * contain valid SACK info.
1751 */
1752 if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
1753 return 0;
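/* Illustrative note (not part of the original file): the check above uses
 * wrap-safe sequence arithmetic (before(a, b) is (s32)(a - b) < 0). For
 * example, with a hypothetical prior_snd_una of 1000000 and max_window of
 * 65535, an ACK whose ack_seq is before 934465 is considered too old for its
 * SACK blocks to be trusted, and the function bails out here.
 */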
1754