/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:
 *		Pedro Roque	:	Fast Retransmit/Recovery.
 *					Two receive queues.
 *					Retransmit queue handled by TCP.
 *					Better retransmit timer handling.
 *					New congestion avoidance.
 *					Header prediction.
 *					Variable renaming.
 *
 *		Eric		:	Fast Retransmit.
 *		Randy Scott	:	MSS option defines.
 *		Eric Schenk	:	Fixes to slow start algorithm.
 *		Eric Schenk	:	Yet another double ACK bug.
 *		Eric Schenk	:	Delayed ACK bug fixes.
 *		Eric Schenk	:	Floyd style fast retrans war avoidance.
 *		David S. Miller	:	Don't allow zero congestion window.
 *		Eric Schenk	:	Fix retransmitter so that it sends
 *					next packet on ack of previous packet.
 *		Andi Kleen	:	Moved open_request checking here
 *					and process RSTs for open_requests.
 *		Andi Kleen	:	Better prune_queue, and other fixes.
 *		Andrey Savochkin:	Fix RTT measurements in the presence of
 *					timestamps.
 *		Andrey Savochkin:	Check sequence numbers correctly when
 *					removing SACKs due to in sequence incoming
 *					data segments.
 *		Andi Kleen:		Make sure we never ack data there is not
 *					enough room for. Also make this condition
 *					a fatal error if it might still happen.
 *		Andi Kleen:		Add tcp_measure_rcv_mss to make
 *					connections with MSS<min(MTU,ann. MSS)
 *					work without delayed acks.
 *		Andi Kleen:		Process packets with PSH set in the
 *					fast path.
 *		J Hadi Salim:		ECN support
 *		Andrei Gurtov,
 *		Pasi Sarolahti,
 *		Panu Kuhlberg:		Experimental audit of TCP (re)transmission
 *					engine. Lots of bugs are found.
 *		Pasi Sarolahti:		F-RTO for dealing with spurious RTOs
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/kernel.h>
#include <linux/prefetch.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/ipsec.h>
#include <asm/unaligned.h>
#include <linux/errqueue.h>

int sysctl_tcp_timestamps __read_mostly = 1;
int sysctl_tcp_window_scaling __read_mostly = 1;
int sysctl_tcp_sack __read_mostly = 1;
int sysctl_tcp_fack __read_mostly;
int sysctl_tcp_max_reordering __read_mostly = 300;
int sysctl_tcp_dsack __read_mostly = 1;
int sysctl_tcp_app_win __read_mostly = 31;
int sysctl_tcp_adv_win_scale __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
EXPORT_SYMBOL(sysctl_tcp_timestamps);

/* rfc5961 challenge ack rate limiting */
int sysctl_tcp_challenge_ack_limit = 1000;

int sysctl_tcp_stdurg __read_mostly;
int sysctl_tcp_rfc1337 __read_mostly;
int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
int sysctl_tcp_frto __read_mostly = 2;
int sysctl_tcp_min_rtt_wlen __read_mostly = 300;
int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
int sysctl_tcp_early_retrans __read_mostly = 3;
int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;

#define FLAG_DATA		0x01 /* Incoming frame contained data.		*/
#define FLAG_WIN_UPDATE		0x02 /* Incoming ACK was a window update.	*/
#define FLAG_DATA_ACKED		0x04 /* This ACK acknowledged new data.		*/
#define FLAG_RETRANS_DATA_ACKED	0x08 /* "" "" some of which was retransmitted.	*/
#define FLAG_SYN_ACKED		0x10 /* This ACK acknowledged SYN.		*/
#define FLAG_DATA_SACKED	0x20 /* New SACK.				*/
#define FLAG_ECE		0x40 /* ECE in this ACK				*/
#define FLAG_LOST_RETRANS	0x80 /* This ACK marks some retransmission lost */
#define FLAG_SLOWPATH		0x100 /* Do not skip RFC checks for window update.*/
#define FLAG_ORIG_SACK_ACKED	0x200 /* Never retransmitted data are (s)acked	*/
#define FLAG_SND_UNA_ADVANCED	0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
#define FLAG_DSACKING_ACK	0x800 /* SACK blocks contained D-SACK info */
#define FLAG_SACK_RENEGING	0x2000 /* snd_una advanced to a sacked seq */
#define FLAG_UPDATE_TS_RECENT	0x4000 /* tcp_replace_ts_recent() */

#define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)
#define FLAG_NOT_DUP		(FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
#define FLAG_CA_ALERT		(FLAG_DATA_SACKED|FLAG_ECE)
#define FLAG_FORWARD_PROGRESS	(FLAG_ACKED|FLAG_DATA_SACKED)

#define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
#define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))

#define REXMIT_NONE	0 /* no loss recovery to do */
#define REXMIT_LOST	1 /* retransmit packets marked lost */
#define REXMIT_NEW	2 /* FRTO-style transmit of unsent/new packets */

static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb,
			     unsigned int len)
{
	static bool __once __read_mostly;

	if (!__once) {
		struct net_device *dev;

		__once = true;

		rcu_read_lock();
		dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif);
		if (!dev || len >= dev->mtu)
			pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n",
				dev ? dev->name : "Unknown driver");
		rcu_read_unlock();
	}
}

/* Adapt the MSS value used to make delayed ack decision to the
 * real world.
 */
static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const unsigned int lss = icsk->icsk_ack.last_seg_size;
	unsigned int len;

	icsk->icsk_ack.last_seg_size = 0;

	/* skb->len may jitter because of SACKs, even if peer
	 * sends good full-sized frames.
	 */
	len = skb_shinfo(skb)->gso_size ? : skb->len;
	if (len >= icsk->icsk_ack.rcv_mss) {
		icsk->icsk_ack.rcv_mss = min_t(unsigned int, len,
					       tcp_sk(sk)->advmss);
		/* Account for possibly-removed options */
		if (unlikely(len > icsk->icsk_ack.rcv_mss +
				   MAX_TCP_OPTION_SPACE))
			tcp_gro_dev_warn(sk, skb, len);
	} else {
		/* Otherwise, we make a more careful check taking into
		 * account that SACK blocks are variable.
		 *
		 * "len" is invariant segment length, including TCP header.
		 */
		len += skb->data - skb_transport_header(skb);
		if (len >= TCP_MSS_DEFAULT + sizeof(struct tcphdr) ||
		    /* If PSH is not set, packet should be
		     * full sized, provided peer TCP is not badly broken.
		     * This observation (if it is correct 8)) allows
		     * to handle super-low mtu links fairly.
		     */
		    (len >= TCP_MIN_MSS + sizeof(struct tcphdr) &&
		     !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) {
			/* Subtract also invariant (if peer is RFC compliant),
			 * tcp header plus fixed timestamp option length.
			 * Resulting "len" is MSS free of SACK jitter.
			 */
			len -= tcp_sk(sk)->tcp_header_len;
			icsk->icsk_ack.last_seg_size = len;
			if (len == lss) {
				icsk->icsk_ack.rcv_mss = len;
				return;
			}
		}
		if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)
			icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2;
		icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
	}
}

static void tcp_incr_quickack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);

	if (quickacks == 0)
		quickacks = 2;
	if (quickacks > icsk->icsk_ack.quick)
		icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
}

static void tcp_enter_quickack_mode(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	tcp_incr_quickack(sk);
	icsk->icsk_ack.pingpong = 0;
	icsk->icsk_ack.ato = TCP_ATO_MIN;
}

/* Send ACKs quickly, if "quick" count is not exhausted
 * and the session is not interactive.
 */

static bool tcp_in_quickack_mode(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct dst_entry *dst = __sk_dst_get(sk);

	return (dst && dst_metric(dst, RTAX_QUICKACK)) ||
		(icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong);
}

static void tcp_ecn_queue_cwr(struct tcp_sock *tp)
{
	if (tp->ecn_flags & TCP_ECN_OK)
		tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
}

static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb)
{
	if (tcp_hdr(skb)->cwr)
		tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}

static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
{
	tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}

static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
{
	switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
	case INET_ECN_NOT_ECT:
		/* Funny extension: if ECT is not set on a segment,
		 * and we have already seen ECT on a previous segment,
		 * it is probably a retransmit.
		 */
		if (tp->ecn_flags & TCP_ECN_SEEN)
			tcp_enter_quickack_mode((struct sock *)tp);
		break;
	case INET_ECN_CE:
		if (tcp_ca_needs_ecn((struct sock *)tp))
			tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_IS_CE);

		if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
			/* Better not delay acks, sender can have a very low cwnd */
			tcp_enter_quickack_mode((struct sock *)tp);
			tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
		}
		tp->ecn_flags |= TCP_ECN_SEEN;
		break;
	default:
		if (tcp_ca_needs_ecn((struct sock *)tp))
			tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_NO_CE);
		tp->ecn_flags |= TCP_ECN_SEEN;
		break;
	}
}

static void tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
{
	if (tp->ecn_flags & TCP_ECN_OK)
		__tcp_ecn_check_ce(tp, skb);
}

static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
{
	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr))
		tp->ecn_flags &= ~TCP_ECN_OK;
}

static void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
{
	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
		tp->ecn_flags &= ~TCP_ECN_OK;
}

static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
{
	if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
		return true;
	return false;
}

/* Buffer size and advertised window tuning.
 *
 * 1. Tuning sk->sk_sndbuf, when connection enters established state.
 */

static void tcp_sndbuf_expand(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
	int sndmem, per_mss;
	u32 nr_segs;

	/* Worst case is non GSO/TSO : each frame consumes one skb
	 * and skb->head is kmalloced using power of two area of memory
	 */
	per_mss = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
		  MAX_TCP_HEADER +
		  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	per_mss = roundup_pow_of_two(per_mss) +
		  SKB_DATA_ALIGN(sizeof(struct sk_buff));

	nr_segs = max_t(u32, TCP_INIT_CWND, tp->snd_cwnd);
	nr_segs = max_t(u32, nr_segs, tp->reordering + 1);

	/* Fast Recovery (RFC 5681 3.2) :
	 * Cubic needs 1.7 factor, rounded to 2 to include
	 * extra cushion (application might react slowly to POLLOUT)
	 */
	sndmem = ca_ops->sndbuf_expand ? ca_ops->sndbuf_expand(sk) : 2;
	sndmem *= nr_segs * per_mss;

	if (sk->sk_sndbuf < sndmem)
		sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
}

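/* Rough illustration of the sizing above (assumed, config-dependent
 * numbers): with mss_cache = 1448, per_mss is payload plus
 * MAX_TCP_HEADER plus skb_shared_info, a bit over 2K, rounded up to a
 * 4K power-of-two allocation plus the sk_buff itself. With the default
 * TCP_INIT_CWND of 10 and the 2x Fast Recovery cushion, this requests
 * roughly 2 * 10 * 4.5K ~ 90KB of send buffer, capped by
 * sysctl_tcp_wmem[2].
 */
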
/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
 *
 * All tcp_full_space() is split to two parts: "network" buffer, allocated
 * forward and advertised in receiver window (tp->rcv_wnd) and
 * "application buffer", required to isolate scheduling/application
 * latencies from network.
 * window_clamp is maximal advertised window. It can be less than
 * tcp_full_space(), in this case tcp_full_space() - window_clamp
 * is reserved for "application" buffer. The less window_clamp is
 * the smoother our behaviour from viewpoint of network, but the lower
 * throughput and the higher sensitivity of the connection to losses. 8)
 *
 * rcv_ssthresh is more strict window_clamp used at "slow start"
 * phase to predict further behaviour of this connection.
 * It is used for two goals:
 *   - to enforce header prediction at sender, even when application
 *     requires some significant "application buffer". It is check #1.
 *   - to prevent pruning of receive queue because of misprediction
 *     of receiver window. Check #2.
 *
 * The scheme does not work when sender sends good segments opening
 * window and then starts to feed us spaghetti. But it should work
 * in common situations. Otherwise, we have to rely on queue collapsing.
 */

/* Slow part of check#2. */
static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	/* Optimize this! */
	int truesize = tcp_win_from_space(skb->truesize) >> 1;
	int window = tcp_win_from_space(sysctl_tcp_rmem[2]) >> 1;

	while (tp->rcv_ssthresh <= window) {
		if (truesize <= skb->len)
			return 2 * inet_csk(sk)->icsk_ack.rcv_mss;

		truesize >>= 1;
		window >>= 1;
	}
	return 0;
}

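/* The loop above scales truesize and the maximal window down in
 * lock-step: growth is granted only if, at the scale where
 * rcv_ssthresh still fits under the budget, the scaled truesize has
 * already dropped below the payload length, i.e. skbs with this
 * overhead ratio can afford a larger window. E.g. (illustrative)
 * len = 1448 in a truesize = 2304 skb qualifies far sooner than a tiny
 * len = 100 segment carried in the same 2304-byte truesize.
 */
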
static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Check #1 */
	if (tp->rcv_ssthresh < tp->window_clamp &&
	    (int)tp->rcv_ssthresh < tcp_space(sk) &&
	    !tcp_under_memory_pressure(sk)) {
		int incr;

		/* Check #2. Increase window, if skb with such overhead
		 * will fit to rcvbuf in future.
		 */
		if (tcp_win_from_space(skb->truesize) <= skb->len)
			incr = 2 * tp->advmss;
		else
			incr = __tcp_grow_window(sk, skb);

		if (incr) {
			incr = max_t(int, incr, 2 * skb->len);
			tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
					       tp->window_clamp);
			inet_csk(sk)->icsk_ack.quick |= 1;
		}
	}
}

/* 3. Tuning rcvbuf, when connection enters established state. */
static void tcp_fixup_rcvbuf(struct sock *sk)
{
	u32 mss = tcp_sk(sk)->advmss;
	int rcvmem;

	rcvmem = 2 * SKB_TRUESIZE(mss + MAX_TCP_HEADER) *
		 tcp_default_init_rwnd(mss);

	/* Dynamic Right Sizing (DRS) has 2 to 3 RTT latency
	 * Allow enough cushion so that sender is not limited by our window
	 */
	if (sysctl_tcp_moderate_rcvbuf)
		rcvmem <<= 2;

	if (sk->sk_rcvbuf < rcvmem)
		sk->sk_rcvbuf = min(rcvmem, sysctl_tcp_rmem[2]);
}

/* 4. Try to fixup all. It is made immediately after connection enters
 *    established state.
 */
void tcp_init_buffer_space(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int maxwin;

	if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
		tcp_fixup_rcvbuf(sk);
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
		tcp_sndbuf_expand(sk);

	tp->rcvq_space.space = tp->rcv_wnd;
	tp->rcvq_space.time = tcp_time_stamp;
	tp->rcvq_space.seq = tp->copied_seq;

	maxwin = tcp_full_space(sk);

	if (tp->window_clamp >= maxwin) {
		tp->window_clamp = maxwin;

		if (sysctl_tcp_app_win && maxwin > 4 * tp->advmss)
			tp->window_clamp = max(maxwin -
					       (maxwin >> sysctl_tcp_app_win),
					       4 * tp->advmss);
	}

	/* Force reservation of one segment. */
	if (sysctl_tcp_app_win &&
	    tp->window_clamp > 2 * tp->advmss &&
	    tp->window_clamp + tp->advmss > maxwin)
		tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss);

	tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

/* 5. Recalculate window clamp after socket hit its memory bounds. */
static void tcp_clamp_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_ack.quick = 0;

	if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
	    !tcp_under_memory_pressure(sk) &&
	    sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
		sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
				    sysctl_tcp_rmem[2]);
	}
	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
}

/* Initialize RCV_MSS value.
 * RCV_MSS is our guess about the MSS used by the peer.
 * We don't have any direct information about the MSS.
 * It's better to underestimate the RCV_MSS rather than overestimate.
 * Overestimations make us ACK less frequently than needed.
 * Underestimations are easier to detect and fix by tcp_measure_rcv_mss().
 */
void tcp_initialize_rcv_mss(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);

	hint = min(hint, tp->rcv_wnd / 2);
	hint = min(hint, TCP_MSS_DEFAULT);
	hint = max(hint, TCP_MIN_MSS);

	inet_csk(sk)->icsk_ack.rcv_mss = hint;
}
EXPORT_SYMBOL(tcp_initialize_rcv_mss);

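/* E.g. (illustrative): with advmss = 1460, the hint is still clamped to
 * at most TCP_MSS_DEFAULT (536) and at least TCP_MIN_MSS, a deliberate
 * underestimate; tcp_measure_rcv_mss() then raises rcv_mss as soon as
 * full-sized segments are actually observed.
 */
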
/* Receiver "autotuning" code.
 *
 * The algorithm for RTT estimation w/o timestamps is based on
 * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL.
 * <http://public.lanl.gov/radiant/pubs.html#DRS>
 *
 * More detail on this code can be found at
 * <http://staff.psc.edu/jheffner/>,
 * though this reference is out of date. A new paper
 * is pending.
 */
static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
{
	u32 new_sample = tp->rcv_rtt_est.rtt;
	long m = sample;

	if (m == 0)
		m = 1;

	if (new_sample != 0) {
		/* If we sample in larger samples in the non-timestamp
		 * case, we could grossly overestimate the RTT especially
		 * with chatty applications or bulk transfer apps which
		 * are stalled on filesystem I/O.
		 *
		 * Also, since we are only going for a minimum in the
		 * non-timestamp case, we do not smooth things out
		 * else with timestamps disabled convergence takes too
		 * long.
		 */
		if (!win_dep) {
			m -= (new_sample >> 3);
			new_sample += m;
		} else {
			m <<= 3;
			if (m < new_sample)
				new_sample = m;
		}
	} else {
		/* No previous measure. */
		new_sample = m << 3;
	}

	if (tp->rcv_rtt_est.rtt != new_sample)
		tp->rcv_rtt_est.rtt = new_sample;
}

static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
{
	if (tp->rcv_rtt_est.time == 0)
		goto new_measure;
	if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
		return;
	tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rcv_rtt_est.time, 1);

new_measure:
	tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
	tp->rcv_rtt_est.time = tcp_time_stamp;
}

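/* Illustration of the two update modes (made-up numbers; the estimate
 * is kept shifted left by 3): a first 40-tick sample stores 320. In
 * the win_dep (per-window, no-timestamp) mode, a later 30-tick sample
 * replaces it with 240 purely because 240 < 320 - a minimum filter.
 * In the !win_dep (timestamp) mode, the same sample instead moves the
 * estimate by m - (320 >> 3) = -10, to 310, i.e. a 1/8-gain EWMA.
 */
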
static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
					  const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	if (tp->rx_opt.rcv_tsecr &&
	    (TCP_SKB_CB(skb)->end_seq -
	     TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss))
		tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0);
}

/*
 * This function should be called every time data is copied to user space.
 * It calculates the appropriate TCP receive buffer space.
 */
void tcp_rcv_space_adjust(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int time;
	int copied;

	time = tcp_time_stamp - tp->rcvq_space.time;
	if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0)
		return;

	/* Number of bytes copied to user in last RTT */
	copied = tp->copied_seq - tp->rcvq_space.seq;
	if (copied <= tp->rcvq_space.space)
		goto new_measure;

	/* A bit of theory :
	 * copied = bytes received in previous RTT, our base window
	 * To cope with packet losses, we need a 2x factor
	 * To cope with slow start, and sender growing its cwin by 100 %
	 * every RTT, we need a 4x factor, because the ACK we are sending
	 * now is for the next RTT, not the current one :
	 * <prev RTT . ><current RTT .. ><next RTT .... >
	 */

	if (sysctl_tcp_moderate_rcvbuf &&
	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
		int rcvwin, rcvmem, rcvbuf;

		/* minimal window to cope with packet losses, assuming
		 * steady state. Add some cushion because of small variations.
		 */
		rcvwin = (copied << 1) + 16 * tp->advmss;

		/* If rate increased by 25%,
		 *	assume slow start, rcvwin = 3 * copied
		 * If rate increased by 50%,
		 *	assume sender can use 2x growth, rcvwin = 4 * copied
		 */
		if (copied >=
		    tp->rcvq_space.space + (tp->rcvq_space.space >> 2)) {
			if (copied >=
			    tp->rcvq_space.space + (tp->rcvq_space.space >> 1))
				rcvwin <<= 1;
			else
				rcvwin += (rcvwin >> 1);
		}

		rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER);
		while (tcp_win_from_space(rcvmem) < tp->advmss)
			rcvmem += 128;

		rcvbuf = min(rcvwin / tp->advmss * rcvmem, sysctl_tcp_rmem[2]);
		if (rcvbuf > sk->sk_rcvbuf) {
			sk->sk_rcvbuf = rcvbuf;

			/* Make the window clamp follow along. */
			tp->window_clamp = rcvwin;
		}
	}
	tp->rcvq_space.space = copied;

new_measure:
	tp->rcvq_space.seq = tp->copied_seq;
	tp->rcvq_space.time = tcp_time_stamp;
}

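/* Worked example (illustrative): if ~1 MB was copied to user space in
 * the last RTT and the rate grew by less than 25%, rcvwin becomes
 * 2 MB + 16 * advmss. rcvbuf then scales that window by the
 * truesize/payload ratio of a full segment (often 1.5-2x), so the
 * request lands in the 3-4 MB range before the sysctl_tcp_rmem[2] cap.
 */
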
/* There is something which you must keep in mind when you analyze the
 * behavior of the tp->ato delayed ack timeout interval. When a
 * connection starts up, we want to ack as quickly as possible. The
 * problem is that "good" TCP's do slow start at the beginning of data
 * transmission. This means that until we send the first few ACK's the
 * sender will sit on his end and only queue most of his data, because
 * he can only send snd_cwnd unacked packets at any given time. For
 * each ACK we send, he increments snd_cwnd and transmits more of his
 * queue. -DaveM
 */
static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 now;

	inet_csk_schedule_ack(sk);

	tcp_measure_rcv_mss(sk, skb);

	tcp_rcv_rtt_measure(tp);

	now = tcp_time_stamp;

	if (!icsk->icsk_ack.ato) {
		/* The _first_ data packet received, initialize
		 * delayed ACK engine.
		 */
		tcp_incr_quickack(sk);
		icsk->icsk_ack.ato = TCP_ATO_MIN;
	} else {
		int m = now - icsk->icsk_ack.lrcvtime;

		if (m <= TCP_ATO_MIN / 2) {
			/* The fastest case is the first. */
			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
		} else if (m < icsk->icsk_ack.ato) {
			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;
			if (icsk->icsk_ack.ato > icsk->icsk_rto)
				icsk->icsk_ack.ato = icsk->icsk_rto;
		} else if (m > icsk->icsk_rto) {
			/* Too long gap. Apparently sender failed to
			 * restart window, so that we send ACKs quickly.
			 */
			tcp_incr_quickack(sk);
			sk_mem_reclaim(sk);
		}
	}
	icsk->icsk_ack.lrcvtime = now;

	tcp_ecn_check_ce(tp, skb);

	if (skb->len >= 128)
		tcp_grow_window(sk, skb);
}

/* Called to compute a smoothed rtt estimate. The data fed to this
 * routine either comes from timestamps, or from segments that were
 * known _not_ to have been retransmitted [see Karn/Partridge
 * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
 * piece by Van Jacobson.
 * NOTE: the next three routines used to be one big routine.
 * To save cycles in the RFC 1323 implementation it was better to break
 * it up into three procedures. -- erics
 */
static void tcp_rtt_estimator(struct sock *sk, long mrtt_us)
{
	struct tcp_sock *tp = tcp_sk(sk);
	long m = mrtt_us; /* RTT */
	u32 srtt = tp->srtt_us;

	/*	The following amusing code comes from Jacobson's
	 *	article in SIGCOMM '88.  Note that rtt and mdev
	 *	are scaled versions of rtt and mean deviation.
	 *	This is designed to be as fast as possible
	 *	m stands for "measurement".
	 *
	 *	On a 1990 paper the rto value is changed to:
	 *	RTO = rtt + 4 * mdev
	 *
	 * Funny. This algorithm seems to be very broken.
	 * These formulae increase RTO, when it should be decreased, increase
	 * too slowly, when it should be increased quickly, decrease too quickly
	 * etc. I guess in BSD RTO takes ONE value, so that it absolutely
	 * does not matter how to _calculate_ it. Seems it was a trap
	 * that VJ failed to avoid. 8)
	 */
	if (srtt != 0) {
		m -= (srtt >> 3);	/* m is now error in rtt est */
		srtt += m;		/* rtt = 7/8 rtt + 1/8 new */
		if (m < 0) {
			m = -m;		/* m is now abs(error) */
			m -= (tp->mdev_us >> 2);   /* similar update on mdev */
			/* This is similar to one of Eifel findings.
			 * Eifel blocks mdev updates when rtt decreases.
			 * This solution is a bit different: we use finer gain
			 * for mdev in this case (alpha*beta).
			 * Like Eifel it also prevents growth of rto,
			 * but also it limits too fast rto decreases,
			 * happening in pure Eifel.
			 */
			if (m > 0)
				m >>= 3;
		} else {
			m -= (tp->mdev_us >> 2);   /* similar update on mdev */
		}
		tp->mdev_us += m;		/* mdev = 3/4 mdev + 1/4 new */
		if (tp->mdev_us > tp->mdev_max_us) {
			tp->mdev_max_us = tp->mdev_us;
			if (tp->mdev_max_us > tp->rttvar_us)
				tp->rttvar_us = tp->mdev_max_us;
		}
		if (after(tp->snd_una, tp->rtt_seq)) {
			if (tp->mdev_max_us < tp->rttvar_us)
				tp->rttvar_us -= (tp->rttvar_us - tp->mdev_max_us) >> 2;
			tp->rtt_seq = tp->snd_nxt;
			tp->mdev_max_us = tcp_rto_min_us(sk);
		}
	} else {
		/* no previous measure. */
		srtt = m << 3;		/* take the measured time to be rtt */
		tp->mdev_us = m << 1;	/* make sure rto = 3*rtt */
		tp->rttvar_us = max(tp->mdev_us, tcp_rto_min_us(sk));
		tp->mdev_max_us = tp->rttvar_us;
		tp->rtt_seq = tp->snd_nxt;
	}
	tp->srtt_us = max(1U, srtt);
}

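/* Worked example of the scaled arithmetic (illustrative numbers):
 * srtt_us holds 8*srtt and mdev_us holds 4*mdev. A first 100 ms sample
 * stores srtt_us = 800000 and mdev_us = 200000, giving
 * RTO = srtt + 4*mdev = 300 ms = 3*rtt. A second 120 ms sample yields
 * m = 120000 - (800000 >> 3) = 20000, so srtt_us = 820000 (102.5 ms)
 * and mdev_us = 200000 + (20000 - (200000 >> 2)) = 170000 (42.5 ms),
 * everything in shifts, no divisions.
 */
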
/* Set the sk_pacing_rate to allow proper sizing of TSO packets.
 * Note: TCP stack does not yet implement pacing.
 * FQ packet scheduler can be used to implement cheap but effective
 * TCP pacing, to smooth the burst on large writes when packets
 * in flight is significantly lower than cwnd (or rwin)
 */
int sysctl_tcp_pacing_ss_ratio __read_mostly = 200;
int sysctl_tcp_pacing_ca_ratio __read_mostly = 120;

static void tcp_update_pacing_rate(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u64 rate;

	/* set sk_pacing_rate to 200 % of current rate (mss * cwnd / srtt) */
	rate = (u64)tp->mss_cache * ((USEC_PER_SEC / 100) << 3);

	/* current rate is (cwnd * mss) / srtt
	 * In Slow Start [1], set sk_pacing_rate to 200 % of the current rate.
	 * In Congestion Avoidance phase, set it to 120 % of the current rate.
	 *
	 * [1] : Normal Slow Start condition is (tp->snd_cwnd < tp->snd_ssthresh)
	 *	 If snd_cwnd >= (tp->snd_ssthresh / 2), we are approaching
	 *	 end of slow start and should slow down.
	 */
	if (tp->snd_cwnd < tp->snd_ssthresh / 2)
		rate *= sysctl_tcp_pacing_ss_ratio;
	else
		rate *= sysctl_tcp_pacing_ca_ratio;

	rate *= max(tp->snd_cwnd, tp->packets_out);

	if (likely(tp->srtt_us))
		do_div(rate, tp->srtt_us);

	/* ACCESS_ONCE() is needed because sch_fq fetches sk_pacing_rate
	 * without any lock. We want to make sure compiler wont store
	 * intermediate values in this location.
	 */
	ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
						sk->sk_max_pacing_rate);
}

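/* Worked example (illustrative): mss_cache = 1448, snd_cwnd = 10 and
 * srtt_us = 200000 (8 * 25 ms) give a raw rate of
 * 10 * 1448 / 25 ms ~ 579 KB/s. In slow start the 200% ratio doubles
 * that to ~1.16 MB/s before the sk_max_pacing_rate clamp applies.
 */
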
/* Calculate rto without backoff.  This is the second half of Van Jacobson's
 * routine referred to above.
 */
static void tcp_set_rto(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	/* Old crap is replaced with new one. 8)
	 *
	 * More seriously:
	 * 1. If rtt variance happened to be less than 50msec, it is hallucination.
	 *    It cannot be less due to utterly erratic ACK generation made
	 *    at least by solaris and freebsd. "Erratic ACKs" has _nothing_
	 *    to do with delayed acks, because at cwnd>2 true delack timeout
	 *    is invisible. Actually, Linux-2.4 also generates erratic
	 *    ACKs in some circumstances.
	 */
	inet_csk(sk)->icsk_rto = __tcp_set_rto(tp);

	/* 2. Fixups made earlier cannot be right.
	 *    If we do not estimate RTO correctly without them,
	 *    all the algo is pure shit and should be replaced
	 *    with correct one. That is exactly what we pretend to do.
	 */

	/* NOTE: clamping at TCP_RTO_MIN is not required, current algo
	 * guarantees that rto is higher.
	 */
	tcp_bound_rto(sk);
}

__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
{
	__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);

	if (!cwnd)
		cwnd = TCP_INIT_CWND;
	return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
}

/*
 * Packet counting of FACK is based on in-order assumptions, therefore TCP
 * disables it when reordering is detected
 */
void tcp_disable_fack(struct tcp_sock *tp)
{
	/* RFC3517 uses different metric in lost marker => reset on change */
	if (tcp_is_fack(tp))
		tp->lost_skb_hint = NULL;
	tp->rx_opt.sack_ok &= ~TCP_FACK_ENABLED;
}

/* Take note that the peer is sending D-SACKs */
static void tcp_dsack_seen(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= TCP_DSACK_SEEN;
}

static void tcp_update_reordering(struct sock *sk, const int metric,
				  const int ts)
{
	struct tcp_sock *tp = tcp_sk(sk);
	if (metric > tp->reordering) {
		int mib_idx;

		tp->reordering = min(sysctl_tcp_max_reordering, metric);

		/* This exciting event is worth to be remembered. 8) */
		if (ts)
			mib_idx = LINUX_MIB_TCPTSREORDER;
		else if (tcp_is_reno(tp))
			mib_idx = LINUX_MIB_TCPRENOREORDER;
		else if (tcp_is_fack(tp))
			mib_idx = LINUX_MIB_TCPFACKREORDER;
		else
			mib_idx = LINUX_MIB_TCPSACKREORDER;

		NET_INC_STATS(sock_net(sk), mib_idx);
#if FASTRETRANS_DEBUG > 1
		pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
			 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
			 tp->reordering,
			 tp->fackets_out,
			 tp->sacked_out,
			 tp->undo_marker ? tp->undo_retrans : 0);
#endif
		tcp_disable_fack(tp);
	}

	tp->rack.reord = 1;
}

/* This must be called before lost_out is incremented */
static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
{
	if (!tp->retransmit_skb_hint ||
	    before(TCP_SKB_CB(skb)->seq,
		   TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
		tp->retransmit_skb_hint = skb;
}

/* Sum the number of packets on the wire we have marked as lost.
 * There are two cases we care about here:
 * a) Packet hasn't been marked lost (nor retransmitted),
 *    and this is the first loss.
 * b) Packet has been marked both lost and retransmitted,
 *    and this means we think it was lost again.
 */
static void tcp_sum_lost(struct tcp_sock *tp, struct sk_buff *skb)
{
	__u8 sacked = TCP_SKB_CB(skb)->sacked;

	if (!(sacked & TCPCB_LOST) ||
	    ((sacked & TCPCB_LOST) && (sacked & TCPCB_SACKED_RETRANS)))
		tp->lost += tcp_skb_pcount(skb);
}

static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb)
{
	if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
		tcp_verify_retransmit_hint(tp, skb);

		tp->lost_out += tcp_skb_pcount(skb);
		tcp_sum_lost(tp, skb);
		TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
	}
}

void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb)
{
	tcp_verify_retransmit_hint(tp, skb);

	tcp_sum_lost(tp, skb);
	if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
		tp->lost_out += tcp_skb_pcount(skb);
		TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
	}
}

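/* Example (illustrative): a segment never marked LOST bumps tp->lost
 * once via case a) when first marked; if it is retransmitted and later
 * deemed lost again while still LOST|SACKED_RETRANS, case b) counts it
 * again, so tp->lost is a cumulative loss count rather than the number
 * of currently-lost packets.
 */
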
/* This procedure tags the retransmission queue when SACKs arrive.
 *
 * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
 * Packets in queue with these bits set are counted in variables
 * sacked_out, retrans_out and lost_out, correspondingly.
 *
 * Valid combinations are:
 * Tag  InFlight	Description
 * 0	1		- orig segment is in flight.
 * S	0		- nothing flies, orig reached receiver.
 * L	0		- nothing flies, orig lost by net.
 * R	2		- both orig and retransmit are in flight.
 * L|R	1		- orig is lost, retransmit is in flight.
 * S|R  1		- orig reached receiver, retrans is still in flight.
 * (L|S|R is logically valid, it could occur when L|R is sacked,
 *  but it is equivalent to plain S and code short-circuits it to S.
 *  L|S is logically invalid, it would mean -1 packet in flight 8))
 *
 * These 6 states form finite state machine, controlled by the following events:
 * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
 * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
 * 3. Loss detection event of two flavors:
 *	A. Scoreboard estimator decided the packet is lost.
 *	   A'. Reno "three dupacks" marks head of queue lost.
 *	   A''. Its FACK modification, head until snd.fack is lost.
 *	B. SACK arrives sacking SND.NXT at the moment, when the
 *	   segment was retransmitted.
 * 4. D-SACK added new rule: D-SACK changes any tag to S.
 *
 * It is pleasant to note, that state diagram turns out to be commutative,
 * so that we are allowed not to be bothered by order of our actions,
 * when multiple events arrive simultaneously. (see the function below).
 *
 * Reordering detection.
 * --------------------
 * Reordering metric is maximal distance, which a packet can be displaced
 * in packet stream. With SACKs we can estimate it:
 *
 * 1. SACK fills old hole and the corresponding segment was not
 *    ever retransmitted -> reordering. Alas, we cannot use it
 *    when segment was retransmitted.
 * 2. The last flaw is solved with D-SACK. D-SACK arrives
 *    for retransmitted and already SACKed segment -> reordering..
 * Both of these heuristics are not used in Loss state, when we cannot
 * account for retransmits accurately.
 *
 * SACK block validation.
 * ----------------------
 *
 * SACK block range validation checks that the received SACK block fits to
 * the expected sequence limits, i.e., it is between SND.UNA and SND.NXT.
 * Note that SND.UNA is not included to the range though being valid because
 * it means that the receiver is rather inconsistent with itself reporting
 * SACK reneging when it should advance SND.UNA. Such a SACK block is
 * perfectly valid, however, in light of RFC2018 which explicitly states
 * that "SACK block MUST reflect the newest segment.  Even if the newest
 * segment is going to be discarded ...", not that it looks very clever
 * in case of head skb. Due to potential receiver driven attacks, we
 * choose to avoid immediate execution of a walk in write queue due to
 * reneging and defer head skb's loss recovery to standard loss recovery
 * procedure that will eventually trigger (nothing forbids us doing this).
 *
 * Implements also blockage to start_seq wrap-around. Problem lies in the
 * fact that though start_seq (s) is before end_seq (i.e., not reversed),
 * there's no guarantee that it will be before snd_nxt (n). The problem
 * happens when start_seq resides between end_seq wrap (e_w) and snd_nxt
 * wrap (s_w):
 *
 *         <- outs wnd ->                          <- wrapzone ->
 *         u     e      n                         u_w   e_w  s n_w
 *         |     |      |                          |     |   |  |
 * |<------------+------+----- TCP seqno space --------------+---------->|
 * ...-- <2^31 ->|                                           |<--------...
 * ...---- >2^31 ------>|                                    |<--------...
 *
 * Current code wouldn't be vulnerable but it's better still to discard such
 * crazy SACK blocks. Doing this check for start_seq alone closes somewhat
 * similar case (end_seq after snd_nxt wrap) as earlier reversed check in
 * snd_nxt wrap -> snd_una region will then become "well defined", i.e.,
 * equal to the ideal case (infinite seqno space without wrap caused issues).
 *
 * With D-SACK the lower bound is extended to cover sequence space below
 * SND.UNA down to undo_marker, which is the last point of interest. Yet
 * again, D-SACK block must not go across snd_una (for the same reason as
 * for the normal SACK blocks, explained above). But there all simplicity
 * ends, TCP might receive valid D-SACKs below that. As long as they reside
 * fully below undo_marker they do not affect behavior in anyway and can
 * therefore be safely ignored. In rare cases (which are more or less
 * theoretical ones), the D-SACK will nicely cross that boundary due to skb
 * fragmentation and packet reordering past skb's retransmission. To consider
 * them correctly, the acceptable range must be extended even more though
 * the exact amount is rather hard to quantify. However, tp->max_window can
 * be used as an exaggerated estimate.
 */
static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack,
				   u32 start_seq, u32 end_seq)
{
	/* Too far in future, or reversed (interpretation is ambiguous) */
	if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq))
		return false;

	/* Nasty start_seq wrap-around check (see comments above) */
	if (!before(start_seq, tp->snd_nxt))
		return false;

	/* In outstanding window? ...This is valid exit for D-SACKs too.
	 * start_seq == snd_una is non-sensical (see comments above)
	 */
	if (after(start_seq, tp->snd_una))
		return true;

	if (!is_dsack || !tp->undo_marker)
		return false;

	/* ...Then it's D-SACK, and must reside below snd_una completely */
	if (after(end_seq, tp->snd_una))
		return false;

	if (!before(start_seq, tp->undo_marker))
		return true;

	/* Too old */
	if (!after(end_seq, tp->undo_marker))
		return false;

	/* Undo_marker boundary crossing (overestimates a lot). Known already:
	 *   start_seq < undo_marker and end_seq >= undo_marker.
	 */
	return !before(start_seq, end_seq - tp->max_window);
}

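/* Example (illustrative sequence numbers): with snd_una = 1000 and
 * snd_nxt = 5000, a SACK of [2000,3000) is accepted at the
 * after(start_seq, snd_una) exit; a D-SACK of [500,800) with
 * undo_marker = 400 is accepted only because it sits fully below
 * snd_una and not below undo_marker; any block with end_seq = 6000 is
 * rejected outright as beyond snd_nxt.
 */
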
static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
			    struct tcp_sack_block_wire *sp, int num_sacks,
			    u32 prior_snd_una)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
	u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
	bool dup_sack = false;

	if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
		dup_sack = true;
		tcp_dsack_seen(tp);
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
	} else if (num_sacks > 1) {
		u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
		u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);

		if (!after(end_seq_0, end_seq_1) &&
		    !before(start_seq_0, start_seq_1)) {
			dup_sack = true;
			tcp_dsack_seen(tp);
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPDSACKOFORECV);
		}
	}

	/* D-SACK for already forgotten data... Do dumb counting. */
	if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 &&
	    !after(end_seq_0, prior_snd_una) &&
	    after(end_seq_0, tp->undo_marker))
		tp->undo_retrans--;

	return dup_sack;
}

struct tcp_sacktag_state {
	int	reord;
	int	fack_count;
	/* Timestamps for earliest and latest never-retransmitted segment
	 * that was SACKed. RTO needs the earliest RTT to stay conservative,
	 * but congestion control should still get an accurate delay signal.
	 */
	struct skb_mstamp first_sackt;
	struct skb_mstamp last_sackt;
	struct skb_mstamp ack_time; /* Timestamp when the S/ACK was received */
	struct rate_sample *rate;
	int	flag;
};

/* Check if skb is fully within the SACK block. In presence of GSO skbs,
 * the incoming SACK may not exactly match but we can find smaller MSS
 * aligned portion of it that matches. Therefore we might need to fragment
 * which may fail and create some hassle (caller must handle error case
 * returns).
 *
 * FIXME: this could be merged to shift decision code
 */
static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
				 u32 start_seq, u32 end_seq)
{
	int err;
	bool in_sack;
	unsigned int pkt_len;
	unsigned int mss;

	in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
		  !before(end_seq, TCP_SKB_CB(skb)->end_seq);

	if (tcp_skb_pcount(skb) > 1 && !in_sack &&
	    after(TCP_SKB_CB(skb)->end_seq, start_seq)) {
		mss = tcp_skb_mss(skb);
		in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);

		if (!in_sack) {
			pkt_len = start_seq - TCP_SKB_CB(skb)->seq;
			if (pkt_len < mss)
				pkt_len = mss;
		} else {
			pkt_len = end_seq - TCP_SKB_CB(skb)->seq;
			if (pkt_len < mss)
				return -EINVAL;
		}

		/* Round if necessary so that SACKs cover only full MSSes
		 * and/or the remaining small portion (if present)
		 */
		if (pkt_len > mss) {
			unsigned int new_len = (pkt_len / mss) * mss;
			if (!in_sack && new_len < pkt_len) {
				new_len += mss;
				if (new_len >= skb->len)
					return 0;
			}
			pkt_len = new_len;
		}
		err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC);
		if (err < 0)
			return err;
	}

	return in_sack;
}

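/* Example (illustrative): a GSO skb covering [1000,11000) with
 * mss = 1000 hit by SACK block [3000,11000) is not fully covered;
 * pkt_len = 3000 - 1000 = 2000, so tcp_fragment() splits two MSS off
 * the head and the remainder then matches the block exactly.
 */
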
/* Mark the given newly-SACKed range as such, adjusting counters and hints. */
static u8 tcp_sacktag_one(struct sock *sk,
			  struct tcp_sacktag_state *state, u8 sacked,
			  u32 start_seq, u32 end_seq,
			  int dup_sack, int pcount,
			  const struct skb_mstamp *xmit_time)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int fack_count = state->fack_count;

	/* Account D-SACK for retransmitted packet. */
	if (dup_sack && (sacked & TCPCB_RETRANS)) {
		if (tp->undo_marker && tp->undo_retrans > 0 &&
		    after(end_seq, tp->undo_marker))
			tp->undo_retrans--;
		if (sacked & TCPCB_SACKED_ACKED)
			state->reord = min(fack_count, state->reord);
	}

	/* Nothing to do; acked frame is about to be dropped (was ACKed). */
	if (!after(end_seq, tp->snd_una))
		return sacked;

	if (!(sacked & TCPCB_SACKED_ACKED)) {
		tcp_rack_advance(tp, sacked, end_seq,
				 xmit_time, &state->ack_time);

		if (sacked & TCPCB_SACKED_RETRANS) {
			/* If the segment is not tagged as lost,
			 * we do not clear RETRANS, believing
			 * that retransmission is still in flight.
			 */
			if (sacked & TCPCB_LOST) {
				sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
				tp->lost_out -= pcount;
				tp->retrans_out -= pcount;
			}
		} else {
			if (!(sacked & TCPCB_RETRANS)) {
				/* New sack for not retransmitted frame,
				 * which was in hole. It is reordering.
				 */
				if (before(start_seq,
					   tcp_highest_sack_seq(tp)))
					state->reord = min(fack_count,
							   state->reord);
				if (!after(end_seq, tp->high_seq))
					state->flag |= FLAG_ORIG_SACK_ACKED;
				if (state->first_sackt.v64 == 0)
					state->first_sackt = *xmit_time;
				state->last_sackt = *xmit_time;
			}

			if (sacked & TCPCB_LOST) {
				sacked &= ~TCPCB_LOST;
				tp->lost_out -= pcount;
			}
		}

		sacked |= TCPCB_SACKED_ACKED;
		state->flag |= FLAG_DATA_SACKED;
		tp->sacked_out += pcount;
		tp->delivered += pcount;  /* Out-of-order packets delivered */

		fack_count += pcount;

		/* Lost marker hint past SACKed? Tweak RFC3517 cnt */
		if (!tcp_is_fack(tp) && tp->lost_skb_hint &&
		    before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
			tp->lost_cnt_hint += pcount;

		if (fack_count > tp->fackets_out)
			tp->fackets_out = fack_count;
	}

	/* D-SACK. We can detect redundant retransmission in S|R and plain R
	 * frames and clear it. undo_retrans is decreased above, L|R frames
	 * are accounted above as well.
	 */
	if (dup_sack && (sacked & TCPCB_SACKED_RETRANS)) {
		sacked &= ~TCPCB_SACKED_RETRANS;
		tp->retrans_out -= pcount;
	}

	return sacked;
}

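/* Example tag transition (illustrative): an L|R segment that is newly
 * SACKed first drops both bits (lost_out and retrans_out each shrink
 * by pcount) and then gains S - the L|R -> S short-circuit described
 * in the state machine comment above.
 */
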
/* Shift newly-SACKed bytes from this skb to the immediately previous
 * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
 */
static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
			    struct tcp_sacktag_state *state,
			    unsigned int pcount, int shifted, int mss,
			    bool dup_sack)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
	u32 start_seq = TCP_SKB_CB(skb)->seq;	/* start of newly-SACKed */
	u32 end_seq = start_seq + shifted;	/* end of newly-SACKed */

	BUG_ON(!pcount);

	/* Adjust counters and hints for the newly sacked sequence
	 * range but discard the return value since prev is already
	 * marked. We must tag the range first because the seq
	 * advancement below implicitly advances
	 * tcp_highest_sack_seq() when skb is highest_sack.
	 */
	tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
			start_seq, end_seq, dup_sack, pcount,
			&skb->skb_mstamp);
	tcp_rate_skb_delivered(sk, skb, state->rate);

	if (skb == tp->lost_skb_hint)
		tp->lost_cnt_hint += pcount;

	TCP_SKB_CB(prev)->end_seq += shifted;
	TCP_SKB_CB(skb)->seq += shifted;

	tcp_skb_pcount_add(prev, pcount);
	BUG_ON(tcp_skb_pcount(skb) < pcount);
	tcp_skb_pcount_add(skb, -pcount);

	/* When we're adding to gso_segs == 1, gso_size will be zero,
	 * in theory this shouldn't be necessary but as long as DSACK
	 * code can come after this skb later on it's better to keep
	 * setting gso_size to something.
	 */
	if (!TCP_SKB_CB(prev)->tcp_gso_size)
		TCP_SKB_CB(prev)->tcp_gso_size = mss;

	/* CHECKME: To clear or not to clear? Mimics normal skb currently */
	if (tcp_skb_pcount(skb) <= 1)
		TCP_SKB_CB(skb)->tcp_gso_size = 0;

	/* Difference in this won't matter, both ACKed by the same cumul. ACK */
	TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);

	if (skb->len > 0) {
		BUG_ON(!tcp_skb_pcount(skb));
		NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTED);
		return false;
	}

	/* Whole SKB was eaten :-) */

	if (skb == tp->retransmit_skb_hint)
		tp->retransmit_skb_hint = prev;
	if (skb == tp->lost_skb_hint) {
		tp->lost_skb_hint = prev;
		tp->lost_cnt_hint -= tcp_skb_pcount(prev);
	}

	TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
	TCP_SKB_CB(prev)->eor = TCP_SKB_CB(skb)->eor;
	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
		TCP_SKB_CB(prev)->end_seq++;

	if (skb == tcp_highest_sack(sk))
		tcp_advance_highest_sack(sk, skb);

	tcp_skb_collapse_tstamp(prev, skb);
	if (unlikely(TCP_SKB_CB(prev)->tx.delivered_mstamp.v64))
		TCP_SKB_CB(prev)->tx.delivered_mstamp.v64 = 0;

	tcp_unlink_write_queue(skb, sk);
	sk_wmem_free_skb(sk, skb);

	NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKMERGED);

	return true;
}

/* I wish gso_size would have a bit more sane initialization than
 * something-or-zero which complicates things
 */
static int tcp_skb_seglen(const struct sk_buff *skb)
{
	return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb);
}

/* Shifting pages past head area doesn't work */
static int skb_can_shift(const struct sk_buff *skb)
{
	return !skb_headlen(skb) && skb_is_nonlinear(skb);
}

/* Try collapsing SACK blocks spanning across multiple skbs to a single
 * skb.
 */
static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
					  struct tcp_sacktag_state *state,
					  u32 start_seq, u32 end_seq,
					  bool dup_sack)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *prev;
	int mss;
	int pcount = 0;
	int len;
	int in_sack;

	if (!sk_can_gso(sk))
		goto fallback;

	/* Normally R but no L won't result in plain S */
	if (!dup_sack &&
	    (TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS)
		goto fallback;
	if (!skb_can_shift(skb))
		goto fallback;
	/* This frame is about to be dropped (was ACKed). */
	if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
		goto fallback;

	/* Can only happen with delayed DSACK + discard craziness */
	if (unlikely(skb == tcp_write_queue_head(sk)))
		goto fallback;
	prev = tcp_write_queue_prev(sk, skb);

	if ((TCP_SKB_CB(prev)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED)
		goto fallback;

	if (!tcp_skb_can_collapse_to(prev))
		goto fallback;

	in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
		  !before(end_seq, TCP_SKB_CB(skb)->end_seq);

	if (in_sack) {
		len = skb->len;
		pcount = tcp_skb_pcount(skb);
		mss = tcp_skb_seglen(skb);

		/* TODO: Fix DSACKs to not fragment already SACKed and we can
		 * drop this restriction as unnecessary
		 */
		if (mss != tcp_skb_seglen(prev))
			goto fallback;
	} else {
		if (!after(TCP_SKB_CB(skb)->end_seq, start_seq))
			goto noop;
		/* CHECKME: This is non-MSS split case only?, this will
		 * cause skipped skbs due to advancing loop btw, original
		 * has that feature too
		 */
		if (tcp_skb_pcount(skb) <= 1)
			goto noop;

		in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);
		if (!in_sack) {
			/* TODO: head merge to next could be attempted here
			 * if (!after(TCP_SKB_CB(skb)->end_seq, end_seq)),
			 * though it might not be worth the additional hassle
			 *
			 * ...we can probably just fallback to what was done
			 * previously. We could try merging non-SACKed ones
			 * as well but it probably isn't going to buy off
			 * because later SACKs might again split them, and
			 * it would make skb timestamp tracking considerably
			 * harder problem.
			 */
			goto fallback;
		}

		len = end_seq - TCP_SKB_CB(skb)->seq;
		BUG_ON(len < 0);
		BUG_ON(len > skb->len);

		/* MSS boundaries should be honoured or else pcount will
		 * severely break even though it makes things a bit trickier.
		 * Optimize common case to avoid most of the divides
		 */
		mss = tcp_skb_mss(skb);

		/* TODO: Fix DSACKs to not fragment already SACKed and we can
		 * drop this restriction as unnecessary
		 */
		if (mss != tcp_skb_seglen(prev))
			goto fallback;

		if (len == mss) {
			pcount = 1;
		} else if (len < mss) {
			goto noop;
		} else {
			pcount = len / mss;
			len = pcount * mss;
		}
	}

	/* tcp_sacktag_one() won't SACK-tag ranges below snd_una */
	if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
		goto fallback;

	if (!skb_shift(prev, skb, len))
		goto fallback;
	if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
		goto out;

	/* Hole filled allows collapsing with the next as well, this is very
	 * useful when hole on every nth skb pattern happens
	 */
	if (prev == tcp_write_queue_tail(sk))
		goto out;
	skb = tcp_write_queue_next(sk, prev);

	if (!skb_can_shift(skb) ||
	    (skb == tcp_send_head(sk)) ||
	    ((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) ||
	    (mss != tcp_skb_seglen(skb)))
		goto out;

	len = skb->len;
	if (skb_shift(prev, skb, len)) {
		pcount += tcp_skb_pcount(skb);
		tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0);
	}

out:
	state->fack_count += pcount;
	return prev;

noop:
	return skb;

fallback:
	NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
	return NULL;
}

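/* Example (illustrative): prev covers [1000,2000) and is already
 * SACKed; skb covers [2000,5000) with mss = 1000 and SACK block
 * [2000,4000) arrives. len is rounded to 2 * mss = 2000, skb_shift()
 * moves those bytes into prev and tcp_shifted_skb() retags them, so a
 * single queue entry now represents the whole SACKed span.
 */
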
static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
					struct tcp_sack_block *next_dup,
					struct tcp_sacktag_state *state,
					u32 start_seq, u32 end_seq,
					bool dup_sack_in)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *tmp;

	tcp_for_write_queue_from(skb, sk) {
		int in_sack = 0;
		bool dup_sack = dup_sack_in;

		if (skb == tcp_send_head(sk))
			break;

		/* queue is in-order => we can short-circuit the walk early */
		if (!before(TCP_SKB_CB(skb)->seq, end_seq))
			break;

		if (next_dup &&
		    before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) {
			in_sack = tcp_match_skb_to_sack(sk, skb,
							next_dup->start_seq,
							next_dup->end_seq);
			if (in_sack > 0)
				dup_sack = true;
		}

		/* skb reference here is a bit tricky to get right, since
		 * shifting can eat and free both this skb and the next,
		 * so not even _safe variant of the loop is enough.
		 */
		if (in_sack <= 0) {
			tmp = tcp_shift_skb_data(sk, skb, state,
						 start_seq, end_seq, dup_sack);
			if (tmp) {
				if (tmp != skb) {
					skb = tmp;
					continue;
				}

				in_sack = 0;
			} else {
				in_sack = tcp_match_skb_to_sack(sk, skb,
								start_seq,
								end_seq);
			}
		}

		if (unlikely(in_sack < 0))
			break;

		if (in_sack) {
			TCP_SKB_CB(skb)->sacked =
				tcp_sacktag_one(sk,
						state,
						TCP_SKB_CB(skb)->sacked,
						TCP_SKB_CB(skb)->seq,
						TCP_SKB_CB(skb)->end_seq,
						dup_sack,
						tcp_skb_pcount(skb),
						&skb->skb_mstamp);
			tcp_rate_skb_delivered(sk, skb, state->rate);

			if (!before(TCP_SKB_CB(skb)->seq,
				    tcp_highest_sack_seq(tp)))
				tcp_advance_highest_sack(sk, skb);
		}

		state->fack_count += tcp_skb_pcount(skb);
	}
	return skb;
}

1597
1598/* Avoid all extra work that is being done by sacktag while walking in
1599 * a normal way
1600 */
1601static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
a1197f5a
IJ
1602 struct tcp_sacktag_state *state,
1603 u32 skip_to_seq)
68f8353b
IJ
1604{
1605 tcp_for_write_queue_from(skb, sk) {
1606 if (skb == tcp_send_head(sk))
1607 break;
1608
e8bae275 1609 if (after(TCP_SKB_CB(skb)->end_seq, skip_to_seq))
68f8353b 1610 break;
d152a7d8 1611
a1197f5a 1612 state->fack_count += tcp_skb_pcount(skb);
68f8353b
IJ
1613 }
1614 return skb;
1615}
1616
static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
						struct sock *sk,
						struct tcp_sack_block *next_dup,
						struct tcp_sacktag_state *state,
						u32 skip_to_seq)
{
	if (!next_dup)
		return skb;

	if (before(next_dup->start_seq, skip_to_seq)) {
		skb = tcp_sacktag_skip(skb, sk, state, next_dup->start_seq);
		skb = tcp_sacktag_walk(skb, sk, NULL, state,
				       next_dup->start_seq, next_dup->end_seq,
				       1);
	}

	return skb;
}

static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache)
{
	return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
}

1640
1da177e4 1641static int
cf533ea5 1642tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
196da974 1643 u32 prior_snd_una, struct tcp_sacktag_state *state)
1da177e4
LT
1644{
1645 struct tcp_sock *tp = tcp_sk(sk);
cf533ea5
ED
1646 const unsigned char *ptr = (skb_transport_header(ack_skb) +
1647 TCP_SKB_CB(ack_skb)->sacked);
fd6dad61 1648 struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
4389dded 1649 struct tcp_sack_block sp[TCP_NUM_SACKS];
68f8353b
IJ
1650 struct tcp_sack_block *cache;
1651 struct sk_buff *skb;
4389dded 1652 int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3);
fd6dad61 1653 int used_sacks;
a2a385d6 1654 bool found_dup_sack = false;
68f8353b 1655 int i, j;
fda03fbb 1656 int first_sack_index;
1da177e4 1657
196da974
KKJ
1658 state->flag = 0;
1659 state->reord = tp->packets_out;
a1197f5a 1660
d738cd8f 1661 if (!tp->sacked_out) {
de83c058
IJ
1662 if (WARN_ON(tp->fackets_out))
1663 tp->fackets_out = 0;
6859d494 1664 tcp_highest_sack_reset(sk);
d738cd8f 1665 }
1da177e4 1666
1ed83465 1667 found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
d06e021d 1668 num_sacks, prior_snd_una);
b9f64820 1669 if (found_dup_sack) {
196da974 1670 state->flag |= FLAG_DSACKING_ACK;
b9f64820
YC
1671 tp->delivered++; /* A spurious retransmission is delivered */
1672 }
6f74651a
BE
1673
1674 /* Eliminate too old ACKs, but take into
1675 * account more or less fresh ones, they can
1676 * contain valid SACK info.
1677 */
1678 if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
1679 return 0;
1680