#include <linux/tcp.h>
#include <net/tcp.h>

int sysctl_tcp_recovery __read_mostly = TCP_RACK_LOSS_DETECTION;

static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_skb_mark_lost_uncond_verify(tp, skb);
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
		/* Account for retransmits that are lost again */
		TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
		tp->retrans_out -= tcp_skb_pcount(skb);
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
			      tcp_skb_pcount(skb));
	}
}
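
/* Returns true if the skb timestamped t1 was sent after the one
 * timestamped t2, falling back to sequence order on a timestamp tie
 * (e.g. two skbs transmitted in the same burst can share an
 * skb_mstamp, in which case the higher end_seq is the later send).
 */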
static bool tcp_rack_sent_after(const struct skb_mstamp *t1,
				const struct skb_mstamp *t2,
				u32 seq1, u32 seq2)
{
	return skb_mstamp_after(t1, t2) ||
	       (t1->v64 == t2->v64 && after(seq1, seq2));
}

/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
 *
 * Marks a packet lost if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK
 * but they look at different metrics:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
 * The advantage of RACK is that it applies to both original and
 * retransmitted packets and therefore is robust against tail losses.
 * Another advantage is being more resilient to reordering by simply
 * allowing some "settling delay", instead of tweaking the dupthresh.
 *
 * When tcp_rack_detect_loss() detects some packets are lost and we
 * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
 * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
 * make us enter the CA_Recovery state.
 */
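
/* Illustration with made-up numbers: if P1..P5 are sent 1ms apart and
 * only P5 is (s)acked, dupthresh sees just one duplicate ACK and FACK
 * sees a four-packet gap below the highest SACKed sequence, while RACK
 * observes that P1..P4 were all sent before the delivered P5 and marks
 * them lost once they are older than P5's send time plus the
 * reordering window.
 */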
static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	u32 reo_wnd;

	*reo_timeout = 0;
	/* To be more reordering resilient, allow min_rtt/4 settling delay
	 * (lower-bounded to 1000uS). We use min_rtt instead of the smoothed
	 * RTT because reordering is often a path property and less related
	 * to queuing or delayed ACKs.
	 */
	reo_wnd = 1000;
	if ((tp->rack.reord || !tp->lost_out) && tcp_min_rtt(tp) != ~0U)
		reo_wnd = max(tcp_min_rtt(tp) >> 2, reo_wnd);
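
	/* E.g. (illustrative numbers) a 12ms min_rtt yields a 3ms settling
	 * delay, while any min_rtt below 4ms keeps the 1000us floor.
	 */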

	tcp_for_write_queue(skb, sk) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);

		if (skb == tcp_send_head(sk))
			break;

		/* Skip ones already (s)acked */
		if (!after(scb->end_seq, tp->snd_una) ||
		    scb->sacked & TCPCB_SACKED_ACKED)
			continue;

		if (tcp_rack_sent_after(&tp->rack.mstamp, &skb->skb_mstamp,
					tp->rack.end_seq, scb->end_seq)) {
			/* Step 3 in draft-cheng-tcpm-rack-00.txt:
			 * A packet is lost if its elapsed time is beyond
			 * the recent RTT plus the reordering window.
			 */
			u32 elapsed = skb_mstamp_us_delta(&tp->tcp_mstamp,
							  &skb->skb_mstamp);
			s32 remaining = tp->rack.rtt_us + reo_wnd - elapsed;
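
			/* Worked example (illustrative numbers): with
			 * rack.rtt_us = 50000 and reo_wnd = 2000, an skb
			 * sent 60000us ago has remaining = -8000 and is
			 * marked lost below, whereas one sent 40000us ago
			 * still has a 12000us reordering budget.
			 */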
			if (remaining < 0) {
				tcp_rack_mark_skb_lost(sk, skb);
				continue;
			}

			/* Skip ones marked lost but not yet retransmitted */
			if ((scb->sacked & TCPCB_LOST) &&
			    !(scb->sacked & TCPCB_SACKED_RETRANS))
				continue;

			/* Record maximum wait time (+1 to avoid 0) */
			*reo_timeout = max_t(u32, *reo_timeout, 1 + remaining);

		} else if (!(scb->sacked & TCPCB_RETRANS)) {
			/* Original data are sent sequentially so stop early
			 * b/c the rest are all sent after rack_sent
			 */
			break;
		}
	}
}

void tcp_rack_mark_lost(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout;

	if (!tp->rack.advanced)
		return;

	/* Reset the advanced flag to avoid unnecessary queue scanning */
	tp->rack.advanced = 0;
	tcp_rack_detect_loss(sk, &timeout);
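	/* A nonzero timeout means some packets are still inside the
	 * reordering window; arm the REO timer to re-check them once
	 * that window has passed.
	 */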
	if (timeout) {
		timeout = usecs_to_jiffies(timeout + TCP_REO_TIMEOUT_MIN);
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
					  timeout, inet_csk(sk)->icsk_rto);
	}
}

/* Record the most recently (re)sent time among the (s)acked packets.
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
		      const struct skb_mstamp *xmit_time)
{
	u32 rtt_us;

	if (tp->rack.mstamp.v64 &&
	    !tcp_rack_sent_after(xmit_time, &tp->rack.mstamp,
				 end_seq, tp->rack.end_seq))
		return;

	rtt_us = skb_mstamp_us_delta(&tp->tcp_mstamp, xmit_time);
	if (sacked & TCPCB_RETRANS) {
		/* If the sacked packet was retransmitted, it's ambiguous
		 * whether the retransmission or the original (or the prior
		 * retransmission) was sacked.
		 *
		 * If the original is lost, there is no ambiguity. Otherwise
		 * we assume the original can be delayed up to aRTT + min_rtt.
		 * The aRTT term is bounded by the fast recovery or timeout,
		 * so it's at least one RTT (i.e., the retransmission is sent
		 * at least an RTT later).
		 */
		if (rtt_us < tcp_min_rtt(tp))
			return;
	}
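	/* Example (illustrative numbers): if min_rtt is 10000us but the
	 * (s)ack arrives only 3000us after the retransmission, it almost
	 * certainly acknowledges the original transmission, so the 3000us
	 * sample is discarded above rather than shrinking RACK.RTT.
	 */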
	tp->rack.rtt_us = rtt_us;
	tp->rack.mstamp = *xmit_time;
	tp->rack.end_seq = end_seq;
	tp->rack.advanced = 1;
}

/* We have waited long enough to accommodate reordering. Mark the expired
 * packets lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout, prior_inflight;

	prior_inflight = tcp_packets_in_flight(tp);
	skb_mstamp_get(&tp->tcp_mstamp);
	tcp_rack_detect_loss(sk, &timeout);
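	/* Marking packets lost raises tp->lost_out (and may lower
	 * tp->retrans_out), so a change in the in-flight count means
	 * tcp_rack_detect_loss() found new losses to repair.
	 */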
	if (prior_inflight != tcp_packets_in_flight(tp)) {
		if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
			tcp_enter_recovery(sk, false);
			if (!inet_csk(sk)->icsk_ca_ops->cong_control)
				tcp_cwnd_reduction(sk, 1, 0);
		}
		tcp_xmit_retransmit_queue(sk);
	}
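	/* The REO timeout fires off the shared xmit timer slot; unless a
	 * retransmission timer is already pending, restore the RTO.
	 */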
	if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
		tcp_rearm_rto(sk);
}