/* Bottleneck Bandwidth and RTT (BBR) congestion control
 *
 * BBR congestion control computes the sending rate based on the delivery
 * rate (throughput) estimated from ACKs. In a nutshell:
 *
 * On each ACK, update our model of the network path:
 *   bottleneck_bandwidth = windowed_max(delivered / elapsed, 10 round trips)
 *   min_rtt = windowed_min(rtt, 10 seconds)
 * pacing_rate = pacing_gain * bottleneck_bandwidth
 * cwnd = max(cwnd_gain * bottleneck_bandwidth * min_rtt, 4)
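 *
 * A worked example with illustrative numbers only: if ACKs indicate ~8,333
 * packets delivered per second (~100 Mbit/sec with 1500-byte packets) and
 * the windowed min_rtt is 40 ms, the BDP is about 333 packets; with a
 * cwnd_gain of 2 the cwnd target is ~667 packets, and with a pacing_gain of
 * 1 the pacing rate stays at the estimated ~8,333 packets per second.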
 *
 * The core algorithm does not react directly to packet losses or delays,
 * although BBR may adjust the size of next send per ACK when loss is
 * observed, or adjust the sending rate if it estimates there is a
 * traffic policer, in order to keep the drop rate reasonable.
 *
 * Here is a state transition diagram for BBR:
 *
 *             |
 *             V
 *    +---> STARTUP  ----+
 *    |        |         |
 *    |        V         |
 *    |      DRAIN   ----+
 *    |        |         |
 *    |        V         |
 *    +---> PROBE_BW ----+
 *    |      ^    |      |
 *    |      |    |      |
 *    |      +----+      |
 *    |                  |
 *    +---- PROBE_RTT <--+
 *
 * A BBR flow starts in STARTUP, and ramps up its sending rate quickly.
 * When it estimates the pipe is full, it enters DRAIN to drain the queue.
 * In steady state a BBR flow only uses PROBE_BW and PROBE_RTT.
 * A long-lived BBR flow spends the vast majority of its time remaining
 * (repeatedly) in PROBE_BW, fully probing and utilizing the pipe's bandwidth
 * in a fair manner, with a small, bounded queue. *If* a flow has been
 * continuously sending for the entire min_rtt window, and hasn't seen an RTT
 * sample that matches or decreases its min_rtt estimate for 10 seconds, then
 * it briefly enters PROBE_RTT to cut inflight to a minimum value to re-probe
 * the path's two-way propagation delay (min_rtt). When exiting PROBE_RTT, if
 * we estimated that we reached the full bw of the pipe then we enter PROBE_BW;
 * otherwise we enter STARTUP to try to fill the pipe.
 *
 * BBR is described in detail in:
 *   "BBR: Congestion-Based Congestion Control",
 *   Neal Cardwell, Yuchung Cheng, C. Stephen Gunn, Soheil Hassas Yeganeh,
 *   Van Jacobson. ACM Queue, Vol. 14 No. 5, September-October 2016.
 *
 * There is a public e-mail list for discussing BBR development and testing:
 *   https://groups.google.com/forum/#!forum/bbr-dev
 *
 * NOTE: BBR *must* be used with the fq qdisc ("man tc-fq") with pacing enabled,
 * since pacing is integral to the BBR design and implementation.
 * BBR without pacing would not function properly, and may incur unnecessarily
 * high packet loss rates.
 */
#include <linux/module.h>
#include <net/tcp.h>
#include <linux/inet_diag.h>
#include <linux/inet.h>
#include <linux/random.h>
#include <linux/win_minmax.h>

/* Scale factor for rate in pkt/uSec unit to avoid truncation in bandwidth
 * estimation. The rate unit ~= (1500 bytes / 1 usec / 2^24) ~= 715 bps.
 * This handles bandwidths from 0.06pps (715bps) to 256Mpps (3Tbps) in a u32.
 * Since the minimum window is >=4 packets, the lower bound isn't
 * an issue. The upper bound isn't an issue with existing technologies.
 */
#define BW_SCALE 24
#define BW_UNIT (1 << BW_SCALE)

#define BBR_SCALE 8	/* scaling factor for fractions in BBR (e.g. gains) */
#define BBR_UNIT (1 << BBR_SCALE)
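
/* For example, with BBR_SCALE = 8 a gain of 1.0 is stored as BBR_UNIT = 256,
 * so a pacing_gain of ~2.885 is stored as roughly 739 and applied as
 * (value * gain) >> BBR_SCALE. Likewise, with BW_SCALE = 24 a bandwidth of
 * 0.01 pkts/uSec (10,000 pkts/sec) is stored as about 0.01 * 2^24 ~= 167,772.
 */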

/* BBR has the following modes for deciding how fast to send: */
enum bbr_mode {
	BBR_STARTUP,	/* ramp up sending rate rapidly to fill pipe */
	BBR_DRAIN,	/* drain any queue created during startup */
	BBR_PROBE_BW,	/* discover, share bw: pace around estimated bw */
	BBR_PROBE_RTT,	/* cut inflight to min to probe min_rtt */
};

/* BBR congestion control block */
struct bbr {
	u32	min_rtt_us;		/* min RTT in min_rtt_win_sec window */
	u32	min_rtt_stamp;		/* timestamp of min_rtt_us */
	u32	probe_rtt_done_stamp;	/* end time for BBR_PROBE_RTT mode */
	struct minmax bw;	/* Max recent delivery rate in pkts/uS << 24 */
	u32	rtt_cnt;	    /* count of packet-timed rounds elapsed */
	u32	next_rtt_delivered; /* scb->tx.delivered at end of round */
	struct skb_mstamp cycle_mstamp;	 /* time of this cycle phase start */
	u32	mode:3,		     /* current bbr_mode in state machine */
		prev_ca_state:3,     /* CA state on previous ACK */
		packet_conservation:1,	/* use packet conservation? */
		restore_cwnd:1,	     /* decided to revert cwnd to old value */
		round_start:1,	     /* start of packet-timed tx->ack round? */
		tso_segs_goal:7,     /* segments we want in each skb we send */
		idle_restart:1,	     /* restarting after idle? */
		probe_rtt_round_done:1,	 /* a BBR_PROBE_RTT round at 4 pkts? */
		unused:5,
		lt_is_sampling:1,    /* taking long-term ("LT") samples now? */
		lt_rtt_cnt:7,	     /* round trips in long-term interval */
		lt_use_bw:1;	     /* use lt_bw as our bw estimate? */
	u32	lt_bw;		     /* LT est delivery rate in pkts/uS << 24 */
	u32	lt_last_delivered;   /* LT intvl start: tp->delivered */
	u32	lt_last_stamp;	     /* LT intvl start: tp->delivered_mstamp */
	u32	lt_last_lost;	     /* LT intvl start: tp->lost */
	u32	pacing_gain:10,	/* current gain for setting pacing rate */
		cwnd_gain:10,	/* current gain for setting cwnd */
		full_bw_cnt:3,	/* number of rounds without large bw gains */
		cycle_idx:3,	/* current index in pacing_gain cycle array */
		unused_b:6;
	u32	prior_cwnd;	/* prior cwnd upon entering loss recovery */
	u32	full_bw;	/* recent bw, to estimate if pipe is full */
};

#define CYCLE_LEN	8	/* number of phases in a pacing gain cycle */

/* Window length of bw filter (in rounds): */
static const int bbr_bw_rtts = CYCLE_LEN + 2;
/* Window length of min_rtt filter (in sec): */
static const u32 bbr_min_rtt_win_sec = 10;
/* Minimum time (in ms) spent at bbr_cwnd_min_target in BBR_PROBE_RTT mode: */
static const u32 bbr_probe_rtt_mode_ms = 200;
/* Skip TSO below the following bandwidth (bits/sec): */
static const int bbr_min_tso_rate = 1200000;

/* We use a high_gain value of 2/ln(2) because it's the smallest pacing gain
 * that will allow a smoothly increasing pacing rate that will double each RTT
 * and send the same number of packets per RTT that an un-paced, slow-starting
 * Reno or CUBIC flow would:
 */
static const int bbr_high_gain  = BBR_UNIT * 2885 / 1000 + 1;
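/* With BBR_UNIT = 256, the expression above evaluates to 739, i.e. a gain of
 * roughly 2.886 in fixed point, just above 2/ln(2) ~= 2.885.
 */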
/* The pacing gain of 1/high_gain in BBR_DRAIN is calculated to typically drain
 * the queue created in BBR_STARTUP in a single round:
 */
static const int bbr_drain_gain = BBR_UNIT * 1000 / 2885;
/* The gain for deriving steady-state cwnd tolerates delayed/stretched ACKs: */
static const int bbr_cwnd_gain  = BBR_UNIT * 2;
/* The pacing_gain values for the PROBE_BW gain cycle, to discover/share bw: */
static const int bbr_pacing_gain[] = {
	BBR_UNIT * 5 / 4,	/* probe for more available bw */
	BBR_UNIT * 3 / 4,	/* drain queue and/or yield bw to other flows */
	BBR_UNIT, BBR_UNIT, BBR_UNIT,	/* cruise at 1.0*bw to utilize pipe, */
	BBR_UNIT, BBR_UNIT, BBR_UNIT	/* without creating excess queue... */
};
/* Randomize the starting gain cycling phase over N phases: */
static const u32 bbr_cycle_rand = 7;

/* Try to keep at least this many packets in flight, if things go smoothly. For
 * smooth functioning, a sliding window protocol ACKing every other packet
 * needs at least 4 packets in flight:
 */
static const u32 bbr_cwnd_min_target = 4;

/* To estimate if BBR_STARTUP mode (i.e. high_gain) has filled pipe... */
/* If bw has increased significantly (1.25x), there may be more bw available: */
static const u32 bbr_full_bw_thresh = BBR_UNIT * 5 / 4;
/* But after 3 rounds w/o significant bw growth, estimate pipe is full: */
static const u32 bbr_full_bw_cnt = 3;

/* "long-term" ("LT") bandwidth estimator parameters... */
/* The minimum number of rounds in an LT bw sampling interval: */
static const u32 bbr_lt_intvl_min_rtts = 4;
/* If lost/delivered ratio > 20%, interval is "lossy" and we may be policed: */
static const u32 bbr_lt_loss_thresh = 50;
/* If 2 intervals have a bw ratio <= 1/8, their bw is "consistent": */
static const u32 bbr_lt_bw_ratio = BBR_UNIT / 8;
/* If 2 intervals have a bw diff <= 4 Kbit/sec their bw is "consistent": */
static const u32 bbr_lt_bw_diff = 4000 / 8;
/* If we estimate we're policed, use lt_bw for this many round trips: */
static const u32 bbr_lt_bw_max_rtts = 48;
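
/* In these units, bbr_lt_loss_thresh of 50 corresponds to a loss ratio of
 * 50/256 ~= 20%, bbr_lt_bw_ratio of BBR_UNIT/8 corresponds to a 1/8 relative
 * bw difference, and bbr_lt_bw_diff of 4000/8 = 500 is in bytes per second
 * (4 Kbit/sec), matching the bytes-per-second value computed by
 * bbr_rate_bytes_per_sec() below.
 */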

/* Do we estimate that STARTUP filled the pipe? */
static bool bbr_full_bw_reached(const struct sock *sk)
{
	const struct bbr *bbr = inet_csk_ca(sk);

	return bbr->full_bw_cnt >= bbr_full_bw_cnt;
}

/* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE. */
static u32 bbr_max_bw(const struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);

	return minmax_get(&bbr->bw);
}

/* Return the estimated bandwidth of the path, in pkts/uS << BW_SCALE. */
static u32 bbr_bw(const struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);

	return bbr->lt_use_bw ? bbr->lt_bw : bbr_max_bw(sk);
}

/* Return rate in bytes per second, optionally with a gain.
 * The order here is chosen carefully to avoid overflow of u64. This should
 * work for input rates of up to 2.9Tbit/sec and gain of 2.89x.
 */
static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
{
	rate *= tcp_mss_to_mtu(sk, tcp_sk(sk)->mss_cache);
	rate *= gain;
	rate >>= BBR_SCALE;
	rate *= USEC_PER_SEC;
	return rate >> BW_SCALE;
}
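
/* For example (illustrative numbers): a bw of ~139,810 in pkts/uS << BW_SCALE
 * units is ~0.00833 pkts/uSec, i.e. ~8,333 pkts/sec. With a 1500-byte MTU and
 * a gain of BBR_UNIT (1.0), bbr_rate_bytes_per_sec() yields ~12,500,000
 * bytes/sec, i.e. ~100 Mbit/sec.
 */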

/* Pace using current bw estimate and a gain factor. In order to help drive the
 * network toward lower queues while maintaining high utilization and low
 * latency, the average pacing rate aims to be slightly (~1%) lower than the
 * estimated bandwidth. This is an important aspect of the design. In this
 * implementation this slightly lower pacing rate is achieved implicitly by not
 * including link-layer headers in the packet size used for the pacing rate.
 */
static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
{
	struct bbr *bbr = inet_csk_ca(sk);
	u64 rate = bw;

	rate = bbr_rate_bytes_per_sec(sk, rate, gain);
	rate = min_t(u64, rate, sk->sk_max_pacing_rate);
	if (bbr->mode != BBR_STARTUP || rate > sk->sk_pacing_rate)
		sk->sk_pacing_rate = rate;
}

/* Return count of segments we want in the skbs we send, or 0 for default. */
static u32 bbr_tso_segs_goal(struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);

	return bbr->tso_segs_goal;
}

static void bbr_set_tso_segs_goal(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	u32 min_segs;

	min_segs = sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
	bbr->tso_segs_goal = min(tcp_tso_autosize(sk, tp->mss_cache, min_segs),
				 0x7FU);
}
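
/* Note that sk_pacing_rate is in bytes/sec while bbr_min_tso_rate is in
 * bits/sec, hence the >> 3 above: pacing below 1.2 Mbit/sec (150,000
 * bytes/sec) uses a goal of 1 segment per skb. The 0x7FU cap keeps the goal
 * within the 7-bit tso_segs_goal field of struct bbr.
 */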

/* Save "last known good" cwnd so we can restore it after losses or PROBE_RTT */
static void bbr_save_cwnd(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);

	if (bbr->prev_ca_state < TCP_CA_Recovery && bbr->mode != BBR_PROBE_RTT)
		bbr->prior_cwnd = tp->snd_cwnd;	 /* this cwnd is good enough */
	else  /* loss recovery or BBR_PROBE_RTT have temporarily cut cwnd */
		bbr->prior_cwnd = max(bbr->prior_cwnd, tp->snd_cwnd);
}

static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);

	if (event == CA_EVENT_TX_START && tp->app_limited) {
		bbr->idle_restart = 1;
		/* Avoid pointless buffer overflows: pace at est. bw if we don't
		 * need more speed (we're restarting from idle and app-limited).
		 */
		if (bbr->mode == BBR_PROBE_BW)
			bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT);
	}
}

/* Find target cwnd. Right-size the cwnd based on min RTT and the
 * estimated bottleneck bandwidth:
 *
 * cwnd = bw * min_rtt * gain = BDP * gain
 *
 * The key factor, gain, controls the amount of queue. While a small gain
 * builds a smaller queue, it becomes more vulnerable to noise in RTT
 * measurements (e.g., delayed ACKs or other ACK compression effects). This
 * noise may cause BBR to under-estimate the rate.
 *
 * To achieve full performance in high-speed paths, we budget enough cwnd to
 * fit full-sized skbs in-flight on both end hosts to fully utilize the path:
 *   - one skb in sending host Qdisc,
 *   - one skb in sending host TSO/GSO engine
 *   - one skb being received by receiver host LRO/GRO/delayed-ACK engine
 * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
 * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
 * which allows 2 outstanding 2-packet sequences, to try to keep pipe
 * full even with ACK-every-other-packet delayed ACKs.
 */
static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain)
{
	struct bbr *bbr = inet_csk_ca(sk);
	u32 cwnd;
	u64 w;

	/* If we've never had a valid RTT sample, cap cwnd at the initial
	 * default. This should only happen when the connection is not using TCP
	 * timestamps and has retransmitted all of the SYN/SYNACK/data packets
	 * ACKed so far. In this case, an RTO can cut cwnd to 1, in which
	 * case we need to slow-start up toward something safe: TCP_INIT_CWND.
	 */
	if (unlikely(bbr->min_rtt_us == ~0U))	 /* no valid RTT samples yet? */
		return TCP_INIT_CWND;  /* be safe: cap at default initial cwnd*/

	w = (u64)bw * bbr->min_rtt_us;

	/* Apply a gain to the given value, then remove the BW_SCALE shift. */
	cwnd = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;

	/* Allow enough full-sized skbs in flight to utilize end systems. */
	cwnd += 3 * bbr->tso_segs_goal;

	/* Reduce delayed ACKs by rounding up cwnd to the next even number. */
	cwnd = (cwnd + 1) & ~1U;

	return cwnd;
}
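
/* As a rough worked example: with bw ~139,810 (i.e. ~8,333 pkts/sec), a
 * min_rtt of 40,000 us and a gain of 2*BBR_UNIT, the BDP is ~333 packets and
 * the computed cwnd is ~667 packets before adding 3 * tso_segs_goal and
 * rounding up to an even number.
 */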

/* An optimization in BBR to reduce losses: On the first round of recovery, we
 * follow the packet conservation principle: send P packets per P packets acked.
 * After that, we slow-start and send at most 2*P packets per P packets acked.
 * After recovery finishes, or upon undo, we restore the cwnd we had when
 * recovery started (capped by the target cwnd based on estimated BDP).
 *
 * TODO(ycheng/ncardwell): implement a rate-based approach.
 */
static bool bbr_set_cwnd_to_recover_or_restore(
	struct sock *sk, const struct rate_sample *rs, u32 acked, u32 *new_cwnd)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	u8 prev_state = bbr->prev_ca_state, state = inet_csk(sk)->icsk_ca_state;
	u32 cwnd = tp->snd_cwnd;

	/* An ACK for P pkts should release at most 2*P packets. We do this
	 * in two steps. First, here we deduct the number of lost packets.
	 * Then, in bbr_set_cwnd() we slow start up toward the target cwnd.
	 */
	if (rs->losses > 0)
		cwnd = max_t(s32, cwnd - rs->losses, 1);

	if (state == TCP_CA_Recovery && prev_state != TCP_CA_Recovery) {
		/* Starting 1st round of Recovery, so do packet conservation. */
		bbr->packet_conservation = 1;
		bbr->next_rtt_delivered = tp->delivered;  /* start round now */
		/* Cut unused cwnd from app behavior, TSQ, or TSO deferral: */
		cwnd = tcp_packets_in_flight(tp) + acked;
	} else if (prev_state >= TCP_CA_Recovery && state < TCP_CA_Recovery) {
		/* Exiting loss recovery; restore cwnd saved before recovery. */
		bbr->restore_cwnd = 1;
		bbr->packet_conservation = 0;
	}
	bbr->prev_ca_state = state;

	if (bbr->restore_cwnd) {
		/* Restore cwnd after exiting loss recovery or PROBE_RTT. */
		cwnd = max(cwnd, bbr->prior_cwnd);
		bbr->restore_cwnd = 0;
	}

	if (bbr->packet_conservation) {
		*new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked);
		return true;	/* yes, using packet conservation */
	}
	*new_cwnd = cwnd;
	return false;
}

/* Slow-start up toward target cwnd (if bw estimate is growing, or packet loss
 * has drawn us down below target), or snap down to target if we're above it.
 */
static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
			 u32 acked, u32 bw, int gain)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	u32 cwnd = 0, target_cwnd = 0;

	if (!acked)
		return;

	if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd))
		goto done;

	/* If we're below target cwnd, slow start cwnd toward target cwnd. */
	target_cwnd = bbr_target_cwnd(sk, bw, gain);
	if (bbr_full_bw_reached(sk))  /* only cut cwnd if we filled the pipe */
		cwnd = min(cwnd + acked, target_cwnd);
	else if (cwnd < target_cwnd || tp->delivered < TCP_INIT_CWND)
		cwnd = cwnd + acked;
	cwnd = max(cwnd, bbr_cwnd_min_target);

done:
	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);	/* apply global cap */
	if (bbr->mode == BBR_PROBE_RTT)  /* drain queue, refresh min_rtt */
		tp->snd_cwnd = min(tp->snd_cwnd, bbr_cwnd_min_target);
}

/* End cycle phase if it's time and/or we hit the phase's in-flight target. */
static bool bbr_is_next_cycle_phase(struct sock *sk,
				    const struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	bool is_full_length =
		skb_mstamp_us_delta(&tp->delivered_mstamp, &bbr->cycle_mstamp) >
		bbr->min_rtt_us;
	u32 inflight, bw;

	/* The pacing_gain of 1.0 paces at the estimated bw to try to fully
	 * use the pipe without increasing the queue.
	 */
	if (bbr->pacing_gain == BBR_UNIT)
		return is_full_length;		/* just use wall clock time */

	inflight = rs->prior_in_flight;  /* what was in-flight before ACK? */
	bw = bbr_max_bw(sk);

	/* A pacing_gain > 1.0 probes for bw by trying to raise inflight to at
	 * least pacing_gain*BDP; this may take more than min_rtt if min_rtt is
	 * small (e.g. on a LAN). We do not persist if packets are lost, since
	 * a path with small buffers may not hold that much.
	 */
	if (bbr->pacing_gain > BBR_UNIT)
		return is_full_length &&
			(rs->losses ||  /* perhaps pacing_gain*BDP won't fit */
			 inflight >= bbr_target_cwnd(sk, bw, bbr->pacing_gain));

	/* A pacing_gain < 1.0 tries to drain extra queue we added if bw
	 * probing didn't find more bw. If inflight falls to match BDP then we
	 * estimate queue is drained; persisting would underutilize the pipe.
	 */
	return is_full_length ||
		inflight <= bbr_target_cwnd(sk, bw, BBR_UNIT);
}

static void bbr_advance_cycle_phase(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);

	bbr->cycle_idx = (bbr->cycle_idx + 1) & (CYCLE_LEN - 1);
	bbr->cycle_mstamp = tp->delivered_mstamp;
	bbr->pacing_gain = bbr_pacing_gain[bbr->cycle_idx];
}

/* Gain cycling: cycle pacing gain to converge to fair share of available bw. */
static void bbr_update_cycle_phase(struct sock *sk,
				   const struct rate_sample *rs)
{
	struct bbr *bbr = inet_csk_ca(sk);

	if ((bbr->mode == BBR_PROBE_BW) && !bbr->lt_use_bw &&
	    bbr_is_next_cycle_phase(sk, rs))
		bbr_advance_cycle_phase(sk);
}

static void bbr_reset_startup_mode(struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);

	bbr->mode = BBR_STARTUP;
	bbr->pacing_gain = bbr_high_gain;
	bbr->cwnd_gain	 = bbr_high_gain;
}

static void bbr_reset_probe_bw_mode(struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);

	bbr->mode = BBR_PROBE_BW;
	bbr->pacing_gain = BBR_UNIT;
	bbr->cwnd_gain = bbr_cwnd_gain;
	bbr->cycle_idx = CYCLE_LEN - 1 - prandom_u32_max(bbr_cycle_rand);
	bbr_advance_cycle_phase(sk);	/* flip to next phase of gain cycle */
}
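
/* With CYCLE_LEN = 8 and bbr_cycle_rand = 7, cycle_idx above is set to a
 * value in [1, 7] and then advanced once, so the initial PROBE_BW phase is
 * drawn from every slot of bbr_pacing_gain[] except index 1 (the 3/4 drain
 * phase).
 */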

static void bbr_reset_mode(struct sock *sk)
{
	if (!bbr_full_bw_reached(sk))
		bbr_reset_startup_mode(sk);
	else
		bbr_reset_probe_bw_mode(sk);
}

/* Start a new long-term sampling interval. */
static void bbr_reset_lt_bw_sampling_interval(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);

	bbr->lt_last_stamp = tp->delivered_mstamp.stamp_jiffies;
	bbr->lt_last_delivered = tp->delivered;
	bbr->lt_last_lost = tp->lost;
	bbr->lt_rtt_cnt = 0;
}

/* Completely reset long-term bandwidth sampling. */
static void bbr_reset_lt_bw_sampling(struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);

	bbr->lt_bw = 0;
	bbr->lt_use_bw = 0;
	bbr->lt_is_sampling = false;
	bbr_reset_lt_bw_sampling_interval(sk);
}

/* Long-term bw sampling interval is done. Estimate whether we're policed. */
static void bbr_lt_bw_interval_done(struct sock *sk, u32 bw)
{
	struct bbr *bbr = inet_csk_ca(sk);
	u32 diff;

	if (bbr->lt_bw) {  /* do we have bw from a previous interval? */
		/* Is new bw close to the lt_bw from the previous interval? */
		diff = abs(bw - bbr->lt_bw);
		if ((diff * BBR_UNIT <= bbr_lt_bw_ratio * bbr->lt_bw) ||
		    (bbr_rate_bytes_per_sec(sk, diff, BBR_UNIT) <=
		     bbr_lt_bw_diff)) {
			/* All criteria are met; estimate we're policed. */
			bbr->lt_bw = (bw + bbr->lt_bw) >> 1;  /* avg 2 intvls */
			bbr->lt_use_bw = 1;
			bbr->pacing_gain = BBR_UNIT;  /* try to avoid drops */
			bbr->lt_rtt_cnt = 0;
			return;
		}
	}
	bbr->lt_bw = bw;
	bbr_reset_lt_bw_sampling_interval(sk);
}

/* Token-bucket traffic policers are common (see "An Internet-Wide Analysis of
 * Traffic Policing", SIGCOMM 2016). BBR detects token-bucket policers and
 * explicitly models their policed rate, to reduce unnecessary losses. We
 * estimate that we're policed if we see 2 consecutive sampling intervals with
 * consistent throughput and high packet loss. If we think we're being policed,
 * set lt_bw to the "long-term" average delivery rate from those 2 intervals.
 */
static void bbr_lt_bw_sampling(struct sock *sk, const struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	u32 lost, delivered;
	u64 bw;
	s32 t;

	if (bbr->lt_use_bw) {	/* already using long-term rate, lt_bw? */
		if (bbr->mode == BBR_PROBE_BW && bbr->round_start &&
		    ++bbr->lt_rtt_cnt >= bbr_lt_bw_max_rtts) {
			bbr_reset_lt_bw_sampling(sk);    /* stop using lt_bw */
			bbr_reset_probe_bw_mode(sk);  /* restart gain cycling */
		}
		return;
	}

	/* Wait for the first loss before sampling, to let the policer exhaust
	 * its tokens and estimate the steady-state rate allowed by the policer.
	 * Starting samples earlier includes bursts that over-estimate the bw.
	 */
	if (!bbr->lt_is_sampling) {
		if (!rs->losses)
			return;
		bbr_reset_lt_bw_sampling_interval(sk);
		bbr->lt_is_sampling = true;
	}

	/* To avoid underestimates, reset sampling if we run out of data. */
	if (rs->is_app_limited) {
		bbr_reset_lt_bw_sampling(sk);
		return;
	}

	if (bbr->round_start)
		bbr->lt_rtt_cnt++;	/* count round trips in this interval */
	if (bbr->lt_rtt_cnt < bbr_lt_intvl_min_rtts)
		return;		/* sampling interval needs to be longer */
	if (bbr->lt_rtt_cnt > 4 * bbr_lt_intvl_min_rtts) {
		bbr_reset_lt_bw_sampling(sk);  /* interval is too long */
		return;
	}

	/* End sampling interval when a packet is lost, so we estimate the
	 * policer tokens were exhausted. Stopping the sampling before the
	 * tokens are exhausted under-estimates the policed rate.
	 */
	if (!rs->losses)
		return;

	/* Calculate packets lost and delivered in sampling interval. */
	lost = tp->lost - bbr->lt_last_lost;
	delivered = tp->delivered - bbr->lt_last_delivered;
	/* Is loss rate (lost/delivered) >= lt_loss_thresh? If not, wait. */
	if (!delivered || (lost << BBR_SCALE) < bbr_lt_loss_thresh * delivered)
		return;

	/* Find average delivery rate in this sampling interval. */
	t = (s32)(tp->delivered_mstamp.stamp_jiffies - bbr->lt_last_stamp);
	if (t < 1)
		return;		/* interval is less than one jiffy, so wait */
	t = jiffies_to_usecs(t);
	/* Interval long enough for jiffies_to_usecs() to return a bogus 0? */
	if (t < 1) {
		bbr_reset_lt_bw_sampling(sk);  /* interval too long; reset */
		return;
	}
	bw = (u64)delivered * BW_UNIT;
	do_div(bw, t);
	bbr_lt_bw_interval_done(sk, bw);
}

/* Estimate the bandwidth based on how fast packets are delivered */
static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	u64 bw;

	bbr->round_start = 0;
	if (rs->delivered < 0 || rs->interval_us <= 0)
		return; /* Not a valid observation */

	/* See if we've reached the next RTT */
	if (!before(rs->prior_delivered, bbr->next_rtt_delivered)) {
		bbr->next_rtt_delivered = tp->delivered;
		bbr->rtt_cnt++;
		bbr->round_start = 1;
		bbr->packet_conservation = 0;
	}

	bbr_lt_bw_sampling(sk, rs);

	/* Divide delivered by the interval to find a (lower bound) bottleneck
	 * bandwidth sample. Delivered is in packets and interval_us in uS and
	 * ratio will be <<1 for most connections. So delivered is first scaled.
	 */
	bw = (u64)rs->delivered * BW_UNIT;
	do_div(bw, rs->interval_us);

	/* If this sample is application-limited, it is likely to have a very
	 * low delivered count that represents application behavior rather than
	 * the available network rate. Such a sample could drag down estimated
	 * bw, causing needless slow-down. Thus, to continue to send at the
	 * last measured network rate, we filter out app-limited samples unless
	 * they describe the path bw at least as well as our bw model.
	 *
	 * So the goal during app-limited phase is to proceed with the best
	 * network rate no matter how long. We automatically leave this
	 * phase when app writes faster than the network can deliver :)
	 */
	if (!rs->is_app_limited || bw >= bbr_max_bw(sk)) {
		/* Incorporate new sample into our max bw filter. */
		minmax_running_max(&bbr->bw, bbr_bw_rtts, bbr->rtt_cnt, bw);
	}
}
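
/* For instance (illustrative numbers): a rate sample with rs->delivered = 10
 * packets over rs->interval_us = 1000 us yields bw = 10 * BW_UNIT / 1000 ~=
 * 167,772, i.e. 0.01 pkts/uSec or 10,000 pkts/sec (~120 Mbit/sec with
 * 1500-byte packets).
 */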

/* Estimate when the pipe is full, using the change in delivery rate: BBR
 * estimates that STARTUP filled the pipe if the estimated bw hasn't changed by
 * at least bbr_full_bw_thresh (25%) after bbr_full_bw_cnt (3) non-app-limited
 * rounds. Why 3 rounds: 1: rwin autotuning grows the rwin, 2: we fill the
 * higher rwin, 3: we get higher delivery rate samples. Or transient
 * cross-traffic or radio noise can go away. CUBIC Hystart shares a similar
 * design goal, but uses delay and inter-ACK spacing instead of bandwidth.
 */
static void bbr_check_full_bw_reached(struct sock *sk,
				      const struct rate_sample *rs)
{
	struct bbr *bbr = inet_csk_ca(sk);
	u32 bw_thresh;

	if (bbr_full_bw_reached(sk) || !bbr->round_start || rs->is_app_limited)
		return;

	bw_thresh = (u64)bbr->full_bw * bbr_full_bw_thresh >> BBR_SCALE;
	if (bbr_max_bw(sk) >= bw_thresh) {
		bbr->full_bw = bbr_max_bw(sk);
		bbr->full_bw_cnt = 0;
		return;
	}
	++bbr->full_bw_cnt;
}

/* If pipe is probably full, drain the queue and then enter steady-state. */
static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs)
{
	struct bbr *bbr = inet_csk_ca(sk);

	if (bbr->mode == BBR_STARTUP && bbr_full_bw_reached(sk)) {
		bbr->mode = BBR_DRAIN;	/* drain queue we created */
		bbr->pacing_gain = bbr_drain_gain;	/* pace slow to drain */
		bbr->cwnd_gain = bbr_high_gain;	/* maintain cwnd */
	}	/* fall through to check if in-flight is already small: */
	if (bbr->mode == BBR_DRAIN &&
	    tcp_packets_in_flight(tcp_sk(sk)) <=
	    bbr_target_cwnd(sk, bbr_max_bw(sk), BBR_UNIT))
		bbr_reset_probe_bw_mode(sk);  /* we estimate queue is drained */
}

/* The goal of PROBE_RTT mode is to have BBR flows cooperatively and
 * periodically drain the bottleneck queue, to converge to measure the true
 * min_rtt (unloaded propagation delay). This allows the flows to keep queues
 * small (reducing queuing delay and packet loss) and achieve fairness among
 * BBR flows.
 *
 * The min_rtt filter window is 10 seconds. When the min_rtt estimate expires,
 * we enter PROBE_RTT mode and cap the cwnd at bbr_cwnd_min_target=4 packets.
 * After at least bbr_probe_rtt_mode_ms=200ms and at least one packet-timed
 * round trip elapsed with that flight size <= 4, we leave PROBE_RTT mode and
 * re-enter the previous mode. BBR uses 200ms to approximately bound the
 * performance penalty of PROBE_RTT's cwnd capping to roughly 2% (200ms/10s).
 *
 * Note that flows need only pay 2% if they are busy sending over the last 10
 * seconds. Interactive applications (e.g., Web, RPCs, video chunks) often have
 * natural silences or low-rate periods within 10 seconds where the rate is low
 * enough for long enough to drain its queue in the bottleneck. We pick up
 * these min RTT measurements opportunistically with our min_rtt filter. :-)
 */
static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	bool filter_expired;

	/* Track min RTT seen in the min_rtt_win_sec filter window: */
	filter_expired = after(tcp_time_stamp,
			       bbr->min_rtt_stamp + bbr_min_rtt_win_sec * HZ);
	if (rs->rtt_us >= 0 &&
	    (rs->rtt_us <= bbr->min_rtt_us || filter_expired)) {
		bbr->min_rtt_us = rs->rtt_us;
		bbr->min_rtt_stamp = tcp_time_stamp;
	}

	if (bbr_probe_rtt_mode_ms > 0 && filter_expired &&
	    !bbr->idle_restart && bbr->mode != BBR_PROBE_RTT) {
		bbr->mode = BBR_PROBE_RTT;  /* dip, drain queue */
		bbr->pacing_gain = BBR_UNIT;
		bbr->cwnd_gain = BBR_UNIT;
		bbr_save_cwnd(sk);  /* note cwnd so we can restore it */
		bbr->probe_rtt_done_stamp = 0;
	}

	if (bbr->mode == BBR_PROBE_RTT) {
		/* Ignore low rate samples during this mode. */
		tp->app_limited =
			(tp->delivered + tcp_packets_in_flight(tp)) ? : 1;
		/* Maintain min packets in flight for max(200 ms, 1 round). */
		if (!bbr->probe_rtt_done_stamp &&
		    tcp_packets_in_flight(tp) <= bbr_cwnd_min_target) {
			bbr->probe_rtt_done_stamp = tcp_time_stamp +
				msecs_to_jiffies(bbr_probe_rtt_mode_ms);
			bbr->probe_rtt_round_done = 0;
			bbr->next_rtt_delivered = tp->delivered;
		} else if (bbr->probe_rtt_done_stamp) {
			if (bbr->round_start)
				bbr->probe_rtt_round_done = 1;
			if (bbr->probe_rtt_round_done &&
			    after(tcp_time_stamp, bbr->probe_rtt_done_stamp)) {
				bbr->min_rtt_stamp = tcp_time_stamp;
				bbr->restore_cwnd = 1;	/* snap to prior_cwnd */
				bbr_reset_mode(sk);
			}
		}
	}
	bbr->idle_restart = 0;
}

static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
{
	bbr_update_bw(sk, rs);
	bbr_update_cycle_phase(sk, rs);
	bbr_check_full_bw_reached(sk, rs);
	bbr_check_drain(sk, rs);
	bbr_update_min_rtt(sk, rs);
}

static void bbr_main(struct sock *sk, const struct rate_sample *rs)
{
	struct bbr *bbr = inet_csk_ca(sk);
	u32 bw;

	bbr_update_model(sk, rs);

	bw = bbr_bw(sk);
	bbr_set_pacing_rate(sk, bw, bbr->pacing_gain);
	bbr_set_tso_segs_goal(sk);
	bbr_set_cwnd(sk, rs, rs->acked_sacked, bw, bbr->cwnd_gain);
}

static void bbr_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	u64 bw;

	bbr->prior_cwnd = 0;
	bbr->tso_segs_goal = 0;	 /* default segs per skb until first ACK */
	bbr->rtt_cnt = 0;
	bbr->next_rtt_delivered = 0;
	bbr->prev_ca_state = TCP_CA_Open;
	bbr->packet_conservation = 0;

	bbr->probe_rtt_done_stamp = 0;
	bbr->probe_rtt_round_done = 0;
	bbr->min_rtt_us = tcp_min_rtt(tp);
	bbr->min_rtt_stamp = tcp_time_stamp;

	minmax_reset(&bbr->bw, bbr->rtt_cnt, 0);  /* init max bw to 0 */

	/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
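	/* For example (illustrative numbers), with snd_cwnd = 10 packets and
	 * srtt = 40 ms this is roughly 250 pkts/sec, so with high_gain the
	 * initial pacing rate is about 2.89 * 250 ~= 722 pkts/sec; if srtt is
	 * not yet available, 1 ms is assumed below.
	 */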
	bw = (u64)tp->snd_cwnd * BW_UNIT;
	do_div(bw, (tp->srtt_us >> 3) ? : USEC_PER_MSEC);
	sk->sk_pacing_rate = 0;		/* force an update of sk_pacing_rate */
	bbr_set_pacing_rate(sk, bw, bbr_high_gain);

	bbr->restore_cwnd = 0;
	bbr->round_start = 0;
	bbr->idle_restart = 0;
	bbr->full_bw = 0;
	bbr->full_bw_cnt = 0;
	bbr->cycle_mstamp.v64 = 0;
	bbr->cycle_idx = 0;
	bbr_reset_lt_bw_sampling(sk);
	bbr_reset_startup_mode(sk);
}

static u32 bbr_sndbuf_expand(struct sock *sk)
{
	/* Provision 3 * cwnd since BBR may slow-start even during recovery. */
	return 3;
}

/* In theory BBR does not need to undo the cwnd since it does not
 * always reduce cwnd on losses (see bbr_main()). Keep it for now.
 */
static u32 bbr_undo_cwnd(struct sock *sk)
{
	return tcp_sk(sk)->snd_cwnd;
}

/* Entering loss recovery, so save cwnd for when we exit or undo recovery. */
static u32 bbr_ssthresh(struct sock *sk)
{
	bbr_save_cwnd(sk);
	return TCP_INFINITE_SSTHRESH;	 /* BBR does not use ssthresh */
}

static size_t bbr_get_info(struct sock *sk, u32 ext, int *attr,
			   union tcp_cc_info *info)
{
	if (ext & (1 << (INET_DIAG_BBRINFO - 1)) ||
	    ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct bbr *bbr = inet_csk_ca(sk);
		u64 bw = bbr_bw(sk);

		bw = bw * tp->mss_cache * USEC_PER_SEC >> BW_SCALE;
		memset(&info->bbr, 0, sizeof(info->bbr));
		info->bbr.bbr_bw_lo		= (u32)bw;
		info->bbr.bbr_bw_hi		= (u32)(bw >> 32);
		info->bbr.bbr_min_rtt		= bbr->min_rtt_us;
		info->bbr.bbr_pacing_gain	= bbr->pacing_gain;
		info->bbr.bbr_cwnd_gain		= bbr->cwnd_gain;
		*attr = INET_DIAG_BBRINFO;
		return sizeof(info->bbr);
	}
	return 0;
}

static void bbr_set_state(struct sock *sk, u8 new_state)
{
	struct bbr *bbr = inet_csk_ca(sk);

	if (new_state == TCP_CA_Loss) {
		struct rate_sample rs = { .losses = 1 };

		bbr->prev_ca_state = TCP_CA_Loss;
		bbr->full_bw = 0;
		bbr->round_start = 1;	/* treat RTO like end of a round */
		bbr_lt_bw_sampling(sk, &rs);
	}
}

static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "bbr",
	.owner		= THIS_MODULE,
	.init		= bbr_init,
	.cong_control	= bbr_main,
	.sndbuf_expand	= bbr_sndbuf_expand,
	.undo_cwnd	= bbr_undo_cwnd,
	.cwnd_event	= bbr_cwnd_event,
	.ssthresh	= bbr_ssthresh,
	.tso_segs_goal	= bbr_tso_segs_goal,
	.get_info	= bbr_get_info,
	.set_state	= bbr_set_state,
};

static int __init bbr_register(void)
{
	BUILD_BUG_ON(sizeof(struct bbr) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_bbr_cong_ops);
}

static void __exit bbr_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_bbr_cong_ops);
}

module_init(bbr_register);
module_exit(bbr_unregister);

MODULE_AUTHOR("Van Jacobson <vanj@google.com>");
MODULE_AUTHOR("Neal Cardwell <ncardwell@google.com>");
MODULE_AUTHOR("Yuchung Cheng <ycheng@google.com>");
MODULE_AUTHOR("Soheil Hassas Yeganeh <soheil@google.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("TCP BBR (Bottleneck Bandwidth and RTT)");