// SPDX-License-Identifier: GPL-2.0
#include <linux/tcp.h>
#include <net/tcp.h>

static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_skb_mark_lost_uncond_verify(tp, skb);
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
		/* Account for retransmits that are lost again */
		TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
		tp->retrans_out -= tcp_skb_pcount(skb);
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
			      tcp_skb_pcount(skb));
	}
}

static bool tcp_rack_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}
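/* Note (illustrative): timestamps are in microseconds, so two skbs sent
 * back to back can carry equal transmit times; the after(seq1, seq2)
 * check above then breaks the tie by sequence order.
 */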

/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
 *
 * Marks a packet lost, if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK
 * but they look at different metrics:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
 * The advantage of RACK is it applies to both original and retransmitted
 * packet and therefore is robust against tail losses. Another advantage
 * is being more resilient to reordering by simply allowing some
 * "settling delay", instead of tweaking the dupthresh.
 *
 * When tcp_rack_detect_loss() detects some packets are lost and we
 * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
 * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
 * make us enter the CA_Recovery state.
 */
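/* Illustrative contrast: if P1 is dropped and only P2 (sent later) is
 * SACKed, dupthresh-based detection still waits for three duplicate
 * ACKs, whereas RACK can mark P1 lost as soon as P1 has been outstanding
 * for rack.rtt_us + reo_wnd beyond its transmit time.
 */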
static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 min_rtt = tcp_min_rtt(tp);
	struct sk_buff *skb, *n;
	u32 reo_wnd;

	*reo_timeout = 0;
	/* To be more reordering resilient, allow min_rtt/4 settling delay
	 * (lower-bounded to 1000uS). We use min_rtt instead of the smoothed
	 * RTT because reordering is often a path property and less related
	 * to queuing or delayed ACKs.
	 */
	reo_wnd = 1000;
	if ((tp->rack.reord || inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery) &&
	    min_rtt != ~0U) {
		reo_wnd = max((min_rtt >> 2) * tp->rack.reo_wnd_steps, reo_wnd);
		reo_wnd = min(reo_wnd, tp->srtt_us >> 3);
	}
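	/* Worked example (illustrative, assuming the adaptive branch above
	 * is taken): with min_rtt = 40000us and reo_wnd_steps = 1,
	 * reo_wnd = max(40000 >> 2, 1000) = 10000us, capped at srtt. The
	 * loop below then marks a packet lost once it has gone un-(s)acked
	 * for rack.rtt_us + 10000us after its (re)transmission.
	 */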

	list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue,
				 tcp_tsorted_anchor) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
		s32 remaining;

		/* Skip ones marked lost but not yet retransmitted */
		if ((scb->sacked & TCPCB_LOST) &&
		    !(scb->sacked & TCPCB_SACKED_RETRANS))
			continue;

		if (!tcp_rack_sent_after(tp->rack.mstamp, skb->skb_mstamp,
					 tp->rack.end_seq, scb->end_seq))
			break;

		/* A packet is lost if it has not been s/acked beyond
		 * the recent RTT plus the reordering window.
		 */
		remaining = tp->rack.rtt_us + reo_wnd -
			    tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp);
		if (remaining <= 0) {
			tcp_rack_mark_skb_lost(sk, skb);
			list_del_init(&skb->tcp_tsorted_anchor);
		} else {
			/* Record maximum wait time */
			*reo_timeout = max_t(u32, *reo_timeout, remaining);
		}
	}
}

void tcp_rack_mark_lost(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout;

	if (!tp->rack.advanced)
		return;

	/* Reset the advanced flag to avoid unnecessary queue scanning */
	tp->rack.advanced = 0;
	tcp_rack_detect_loss(sk, &timeout);
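	/* A non-zero timeout means some packets have not yet been
	 * outstanding for rack.rtt_us + reo_wnd; arm the reordering timer
	 * so tcp_rack_reo_timeout() re-evaluates them once the residual
	 * wait (plus TCP_TIMEOUT_MIN) expires.
	 */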
	if (timeout) {
		timeout = usecs_to_jiffies(timeout) + TCP_TIMEOUT_MIN;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
					  timeout, inet_csk(sk)->icsk_rto);
	}
}

/* Record the most recently (re)sent time among the (s)acked packets
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt
 */
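/* Called from the ACK processing path for each newly (cumulatively or
 * selectively) acked skb; xmit_time is that skb's most recent transmit
 * timestamp.
 */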
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
		      u64 xmit_time)
{
	u32 rtt_us;

	rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
	if (rtt_us < tcp_min_rtt(tp) && (sacked & TCPCB_RETRANS)) {
		/* If the sacked packet was retransmitted, it's ambiguous
		 * whether the retransmission or the original (or the prior
		 * retransmission) was sacked.
		 *
		 * If the original is lost, there is no ambiguity. Otherwise
		 * we assume the original can be delayed up to aRTT + min_rtt.
		 * the aRTT term is bounded by the fast recovery or timeout,
		 * so it's at least one RTT (i.e., retransmission is at least
		 * an RTT later).
		 */
		return;
	}
	tp->rack.advanced = 1;
	tp->rack.rtt_us = rtt_us;
	if (tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
				end_seq, tp->rack.end_seq)) {
		tp->rack.mstamp = xmit_time;
		tp->rack.end_seq = end_seq;
	}
}

/* We have waited long enough to accommodate reordering. Mark the expired
 * packets lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout, prior_inflight;

	prior_inflight = tcp_packets_in_flight(tp);
	tcp_rack_detect_loss(sk, &timeout);
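	/* If tcp_rack_detect_loss() marked anything lost, lost_out grew and
	 * packets_in_flight() drops below its prior value: enter recovery
	 * if needed and retransmit the newly marked packets.
	 */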
	if (prior_inflight != tcp_packets_in_flight(tp)) {
		if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
			tcp_enter_recovery(sk, false);
			if (!inet_csk(sk)->icsk_ca_ops->cong_control)
				tcp_cwnd_reduction(sk, 1, 0);
		}
		tcp_xmit_retransmit_queue(sk);
	}
	if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
		tcp_rearm_rto(sk);
}

/* Updates the RACK's reo_wnd based on DSACK and no. of recoveries.
 *
 * If DSACK is received, increment reo_wnd by min_rtt/4 (upper bounded
 * by srtt), since there is possibility that spurious retransmission was
 * due to reordering delay longer than reo_wnd.
 *
 * Persist the current reo_wnd value for TCP_RACK_RECOVERY_THRESH (16)
 * no. of successful recoveries (accounts for full DSACK-based loss
 * recovery undo). After that, reset it to default (min_rtt/4).
 *
 * At max, reo_wnd is incremented only once per rtt. So that the new
 * DSACK on which we are reacting, is due to the spurious retx (approx)
 * after the reo_wnd has been updated last time.
 *
 * reo_wnd is tracked in terms of steps (of min_rtt/4), rather than
 * absolute value to account for change in rtt.
 */
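/* Illustrative: with min_rtt = 40ms, each DSACK-triggered step widens
 * reo_wnd by roughly 10ms (one min_rtt/4 step, still capped by srtt in
 * tcp_rack_detect_loss()); after TCP_RACK_RECOVERY_THRESH recoveries
 * without a fresh DSACK, reo_wnd_steps falls back to 1.
 */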
void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_STATIC_REO_WND ||
	    !rs->prior_delivered)
		return;

	/* Disregard DSACK if a rtt has not passed since we adjusted reo_wnd */
	if (before(rs->prior_delivered, tp->rack.last_delivered))
		tp->rack.dsack_seen = 0;

	/* Adjust the reo_wnd if update is pending */
	if (tp->rack.dsack_seen) {
		tp->rack.reo_wnd_steps = min_t(u32, 0xFF,
					       tp->rack.reo_wnd_steps + 1);
		tp->rack.dsack_seen = 0;
		tp->rack.last_delivered = tp->delivered;
		tp->rack.reo_wnd_persist = TCP_RACK_RECOVERY_THRESH;
	} else if (!tp->rack.reo_wnd_persist) {
		tp->rack.reo_wnd_steps = 1;
	}
}