net/ipv4/tcp_recovery.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/tcp.h>
#include <net/tcp.h>

static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        tcp_skb_mark_lost_uncond_verify(tp, skb);
        if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
                /* Account for retransmits that are lost again */
                TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
                tp->retrans_out -= tcp_skb_pcount(skb);
                NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
                              tcp_skb_pcount(skb));
        }
}

static bool tcp_rack_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
        return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}

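/* Illustrative sketch (not part of the original file, hypothetical values):
 * tcp_rack_sent_after() orders two transmissions primarily by send
 * timestamp and uses end_seq only to break ties, which can happen when
 * segments leave in the same burst and share a timestamp:
 *
 *   tcp_rack_sent_after(1000, 999, 10, 20)  == true   (strictly later stamp)
 *   tcp_rack_sent_after(1000, 1000, 20, 10) == true   (same stamp, later seq)
 *   tcp_rack_sent_after(1000, 1000, 10, 20) == false
 */
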
/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
 *
 * Marks a packet lost if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK,
 * but they look at different metrics:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
 * The advantage of RACK is that it applies to both original and
 * retransmitted packets and therefore is robust against tail losses.
 * Another advantage is being more resilient to reordering by simply
 * allowing some "settling delay", instead of tweaking the dupthresh.
 *
 * When tcp_rack_detect_loss() detects some packets are lost and we
 * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
 * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
 * make us enter the CA_Recovery state.
 */
static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
{
        struct tcp_sock *tp = tcp_sk(sk);
        u32 min_rtt = tcp_min_rtt(tp);
        struct sk_buff *skb, *n;
        u32 reo_wnd;

        *reo_timeout = 0;
        /* To be more reordering resilient, allow min_rtt/4 settling delay
         * (lower-bounded to 1000uS). We use min_rtt instead of the smoothed
         * RTT because reordering is often a path property and less related
         * to queuing or delayed ACKs.
         */
        reo_wnd = 1000;
        if ((tp->rack.reord || inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery) &&
            min_rtt != ~0U) {
                reo_wnd = max((min_rtt >> 2) * tp->rack.reo_wnd_steps, reo_wnd);
                reo_wnd = min(reo_wnd, tp->srtt_us >> 3);
        }

        list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue,
                                 tcp_tsorted_anchor) {
                struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
                s32 remaining;

                /* Skip ones marked lost but not yet retransmitted */
                if ((scb->sacked & TCPCB_LOST) &&
                    !(scb->sacked & TCPCB_SACKED_RETRANS))
                        continue;

                if (!tcp_rack_sent_after(tp->rack.mstamp, skb->skb_mstamp,
                                         tp->rack.end_seq, scb->end_seq))
                        break;

                /* A packet is lost if it has not been s/acked beyond
                 * the recent RTT plus the reordering window.
                 */
                remaining = tp->rack.rtt_us + reo_wnd -
                            tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp);
                if (remaining <= 0) {
                        tcp_rack_mark_skb_lost(sk, skb);
                        list_del_init(&skb->tcp_tsorted_anchor);
                } else {
                        /* Record maximum wait time */
                        *reo_timeout = max_t(u32, *reo_timeout, remaining);
                }
        }
}

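/* Illustrative sketch with hypothetical numbers (not taken from the file):
 * how the reordering window and the "remaining" test above interact,
 * assuming reordering has been seen (or we are not yet in recovery) so the
 * scaled window applies:
 *
 *   min_rtt = 40000 us, reo_wnd_steps = 1, srtt ~ 50000 us
 *     reo_wnd = max(40000/4 * 1, 1000) = 10000 us (below the srtt cap)
 *   rack.rtt_us = 41000 us
 *   a packet sent 45000 us before tp->tcp_mstamp:
 *     remaining = 41000 + 10000 - 45000 = 6000 > 0
 *     -> not lost yet; the reorder timer is armed for ~6 ms
 *   a packet sent 52000 us ago:
 *     remaining = 41000 + 10000 - 52000 = -1000 <= 0
 *     -> marked lost via tcp_rack_mark_skb_lost()
 */
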
void tcp_rack_mark_lost(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        u32 timeout;

        if (!tp->rack.advanced)
                return;

        /* Reset the advanced flag to avoid unnecessary queue scanning */
        tp->rack.advanced = 0;
        tcp_rack_detect_loss(sk, &timeout);
        if (timeout) {
                timeout = usecs_to_jiffies(timeout) + TCP_TIMEOUT_MIN;
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
                                          timeout, inet_csk(sk)->icsk_rto);
        }
}

/* Record the most recently (re)sent time among the (s)acked packets.
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
                      u64 xmit_time)
{
        u32 rtt_us;

        if (tp->rack.mstamp &&
            !tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
                                 end_seq, tp->rack.end_seq))
                return;

        rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
        if (sacked & TCPCB_RETRANS) {
                /* If the sacked packet was retransmitted, it's ambiguous
                 * whether the retransmission or the original (or a prior
                 * retransmission) was sacked.
                 *
                 * If the original is lost, there is no ambiguity. Otherwise
                 * we assume the original can be delayed up to aRTT + min_rtt.
                 * The aRTT term is bounded by the fast recovery or timeout,
                 * so it's at least one RTT (i.e., the retransmission is at
                 * least an RTT later).
                 */
                if (rtt_us < tcp_min_rtt(tp))
                        return;
        }
        tp->rack.rtt_us = rtt_us;
        tp->rack.mstamp = xmit_time;
        tp->rack.end_seq = end_seq;
        tp->rack.advanced = 1;
}

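/* Hypothetical example of the ambiguity filter above (numbers are
 * illustrative only): with min_rtt = 40000 us, a segment is retransmitted
 * and an ACK covering it arrives 3000 us later. rtt_us = 3000 < min_rtt,
 * so the ACK almost certainly reports the original transmission rather
 * than the retransmission, and RACK.xmit_time / RACK.RTT are left
 * unchanged. Had the ACK arrived 45000 us after the retransmission,
 * rtt_us >= min_rtt and the retransmit time would be accepted.
 */
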
/* We have waited long enough to accommodate reordering. Mark the expired
 * packets lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        u32 timeout, prior_inflight;

        prior_inflight = tcp_packets_in_flight(tp);
        tcp_rack_detect_loss(sk, &timeout);
        if (prior_inflight != tcp_packets_in_flight(tp)) {
                if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
                        tcp_enter_recovery(sk, false);
                        if (!inet_csk(sk)->icsk_ca_ops->cong_control)
                                tcp_cwnd_reduction(sk, 1, 0);
                }
                tcp_xmit_retransmit_queue(sk);
        }
        if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
                tcp_rearm_rto(sk);
}

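/* Illustrative timeline (hypothetical timings) tying the reorder timer to
 * this handler:
 *
 *   t = 0 ms    packet P and, just after it, packet Q are sent
 *   t = 46 ms   Q is SACKed; with RACK.rtt ~ 46 ms and reo_wnd ~ 10 ms, P
 *               still has ~10 ms before it expires, so tcp_rack_mark_lost()
 *               arms ICSK_TIME_REO_TIMEOUT instead of marking P lost now
 *   t = 56 ms   the timer fires; tcp_rack_reo_timeout() re-runs
 *               tcp_rack_detect_loss(), marks P lost, enters recovery if
 *               needed and retransmits it
 */
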
/* Update RACK's reo_wnd based on DSACKs and the number of recoveries.
 *
 * If a DSACK is received, increment reo_wnd by min_rtt/4 (upper-bounded
 * by srtt), since there is a possibility that the spurious retransmission
 * was due to a reordering delay longer than reo_wnd.
 *
 * Persist the current reo_wnd value for TCP_RACK_RECOVERY_THRESH (16)
 * successful recoveries (which accounts for a full DSACK-based loss
 * recovery undo). After that, reset it to the default (min_rtt/4).
 *
 * reo_wnd is incremented at most once per RTT, so that the new DSACK we
 * are reacting to is (approximately) due to a spurious retransmission
 * sent after reo_wnd was last updated.
 *
 * reo_wnd is tracked in steps (of min_rtt/4) rather than as an absolute
 * value, to account for changes in the RTT.
 */
void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_STATIC_REO_WND ||
            !rs->prior_delivered)
                return;

        /* Disregard DSACK if a rtt has not passed since we adjusted reo_wnd */
        if (before(rs->prior_delivered, tp->rack.last_delivered))
                tp->rack.dsack_seen = 0;

        /* Adjust the reo_wnd if update is pending */
        if (tp->rack.dsack_seen) {
                tp->rack.reo_wnd_steps = min_t(u32, 0xFF,
                                               tp->rack.reo_wnd_steps + 1);
                tp->rack.dsack_seen = 0;
                tp->rack.last_delivered = tp->delivered;
                tp->rack.reo_wnd_persist = TCP_RACK_RECOVERY_THRESH;
        } else if (!tp->rack.reo_wnd_persist) {
                tp->rack.reo_wnd_steps = 1;
        }
}
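
/* Illustrative sequence (hypothetical; the reo_wnd_persist bookkeeping on
 * successful recoveries happens outside this file):
 *
 *   start:   reo_wnd_steps = 1            -> reo_wnd = min_rtt/4
 *   a DSACK arrives at least one RTT after the last adjustment:
 *            reo_wnd_steps = 2, reo_wnd_persist = 16
 *   further DSACKs within that same RTT are ignored (dsack_seen is cleared)
 *   after 16 successful recoveries with no new DSACK-driven increase,
 *   reo_wnd_persist reaches 0 and reo_wnd_steps falls back to 1
 */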