net/ipv4/tcp_hybla.c
/*
 * TCP HYBLA
 *
 * TCP-HYBLA Congestion control algorithm, based on:
 *   C. Caini, R. Firrincieli, "TCP Hybla: A TCP Enhancement
 *   for Heterogeneous Networks",
 *   International Journal of Satellite Communications and Networking,
 *   September 2004
 *   Daniele Lacamera
 *   root at danielinux.net
 */

#include <linux/config.h>
#include <linux/module.h>
#include <net/tcp.h>

/* Tcp Hybla structure. */
struct hybla {
        u8      hybla_en;
        u32     snd_cwnd_cents; /* Fractional part (<1 segment) of the cwnd
                                 * increment, kept <<7 */
        u32     rho;            /* Rho parameter, integer part */
        u32     rho2;           /* Rho * Rho, integer part */
        u32     rho_3ls;        /* Rho parameter, <<3 */
        u32     rho2_7ls;       /* Rho^2, <<7 */
        u32     minrtt;         /* Minimum smoothed round trip time value seen */
};
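
/*
 * Fixed-point conventions used throughout this file: rho is kept both as an
 * integer (rho) and shifted left by 3 (rho_3ls, units of 1/8), while rho^2
 * and all cwnd increments are kept shifted left by 7 (units of 1/128 of a
 * segment).  snd_cwnd_cents accumulates the sub-segment leftovers.
 */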

/* Hybla reference round trip time (default = 1/40 sec = 25 ms),
   expressed in milliseconds (converted to jiffies when used) */
static int rtt0 = 25;
module_param(rtt0, int, 0644);
MODULE_PARM_DESC(rtt0, "reference round trip time (ms)");

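/*
 * Typical usage, as a sketch (assuming the module is built as tcp_hybla.ko):
 *   modprobe tcp_hybla rtt0=25
 *   sysctl -w net.ipv4.tcp_congestion_control=hybla
 * With the 0644 permissions above, rtt0 is normally also visible under
 * /sys/module/tcp_hybla/parameters/rtt0.
 */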

/* This is called to refresh values for hybla parameters */
static inline void hybla_recalc_param(struct sock *sk)
{
        struct hybla *ca = inet_csk_ca(sk);

        /* tp->srtt is kept in jiffies, shifted left by 3, so this yields
         * rho_3ls = (srtt / rtt0) << 3, clamped to a minimum of 8 (rho >= 1).
         */
        ca->rho_3ls = max_t(u32, tcp_sk(sk)->srtt / msecs_to_jiffies(rtt0), 8);
        ca->rho = ca->rho_3ls >> 3;
        ca->rho2_7ls = (ca->rho_3ls * ca->rho_3ls) << 1;
        ca->rho2 = ca->rho2_7ls >> 7;
}
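
/*
 * Worked example (an illustrative sketch, assuming HZ = 1000 so that
 * msecs_to_jiffies(25) == 25, and a path with srtt = 200 ms, i.e.
 * tp->srtt == 200 << 3 == 1600):
 *   rho_3ls  = 1600 / 25      = 64    (rho in 1/8 units)
 *   rho      = 64 >> 3        = 8
 *   rho2_7ls = (64 * 64) << 1 = 8192  (rho^2 in 1/128 units)
 *   rho2     = 8192 >> 7      = 64
 */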

static void hybla_init(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct hybla *ca = inet_csk_ca(sk);

        ca->rho = 0;
        ca->rho2 = 0;
        ca->rho_3ls = 0;
        ca->rho2_7ls = 0;
        ca->snd_cwnd_cents = 0;
        ca->hybla_en = 1;
        tp->snd_cwnd = 2;
        tp->snd_cwnd_clamp = 65535;

        /* 1st Rho measurement based on initial srtt */
        hybla_recalc_param(sk);

        /* set minimum rtt as this is the 1st ever seen */
        ca->minrtt = tp->srtt;
        tp->snd_cwnd = ca->rho;
}
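
/*
 * Note on the initialization above: the initial congestion window ends up
 * equal to rho, so a connection whose RTT is N times the 25 ms reference
 * starts with a proportionally larger window.  This matches Hybla's aim of
 * compensating long round trip times, per the referenced paper.
 */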

/*
 * Track the congestion state: Hybla's modified increments apply only in the
 * TCP_CA_Open state; otherwise hybla_cong_avoid() falls back to plain Reno.
 */
static void hybla_state(struct sock *sk, u8 ca_state)
{
        struct hybla *ca = inet_csk_ca(sk);

        ca->hybla_en = (ca_state == TCP_CA_Open);
}

static inline u32 hybla_fraction(u32 odds)
{
        static const u32 fractions[] = {
                128, 139, 152, 165, 181, 197, 215, 234,
        };

        return (odds < ARRAY_SIZE(fractions)) ? fractions[odds] : 128;
}
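
/*
 * The fractions[] table above holds 2^(i/8) in <<7 fixed point, i.e.
 * fractions[i] ~= 2^(i/8) * 128 (for instance fractions[4] == 181, close to
 * sqrt(2) * 128 ~= 181.02).  hybla_fraction() therefore turns the fractional
 * 1/8th bits of rho into the matching power-of-two factor, still scaled <<7.
 */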

/* TCP Hybla main routine.
 * This is the algorithm behavior:
 *    o Recalc Hybla parameters if min_rtt has changed
 *    o Give cwnd a new value based on the model proposed
 *    o remember increments <1
 */
static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
                             u32 in_flight, int flag)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct hybla *ca = inet_csk_ca(sk);
        u32 increment, odd, rho_fractions;
        int is_slowstart = 0;

        /* Recalculate rho only if this srtt is the lowest */
        if (tp->srtt < ca->minrtt) {
                hybla_recalc_param(sk);
                ca->minrtt = tp->srtt;
        }

        if (!ca->hybla_en)
                return tcp_reno_cong_avoid(sk, ack, rtt, in_flight, flag);

        if (in_flight < tp->snd_cwnd)
                return;

        if (ca->rho == 0)
                hybla_recalc_param(sk);

        rho_fractions = ca->rho_3ls - (ca->rho << 3);

        if (tp->snd_cwnd < tp->snd_ssthresh) {
                /*
                 * slow start
                 *      INC = 2^RHO - 1
                 * This is done by splitting the rho parameter
                 * into 2 parts: an integer part and a fraction part.
                 * Increment<<7 is estimated by doing:
                 *      [2^(int+fract)]<<7
                 * that is equal to:
                 *      (2^int) * [(2^fract)<<7]
                 * 2^int is straightly computed as 1<<int,
                 * while we will use hybla_fraction() to
                 * calculate 2^fract in a <<7 value.
                 */
                is_slowstart = 1;
                increment = ((1 << ca->rho) * hybla_fraction(rho_fractions))
                        - 128;
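
                /*
                 * Illustrative sketch: with rho_3ls = 20 (rho = 2,
                 * rho_fractions = 4), increment = (1 << 2) * 181 - 128 = 596,
                 * i.e. roughly (2^2.5 - 1) segments per ACK in <<7 units
                 * (596 / 128 ~= 4.66).
                 */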
        } else {
                /*
                 * congestion avoidance
                 *      INC = RHO^2 / W
                 * as long as increment is estimated as (rho^2<<7)/window
                 * it already is <<7 and we can easily count its fractions.
                 */
                increment = ca->rho2_7ls / tp->snd_cwnd;
                if (increment < 128)
                        tp->snd_cwnd_cnt++;
        }
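
        /*
         * Illustrative sketch: with rho = 8 (rho2_7ls = 8192) and
         * snd_cwnd = 100, increment = 8192 / 100 = 81, less than one full
         * segment (128), so it is accumulated in snd_cwnd_cents below and
         * cwnd grows by one roughly every 128/81 ~= 1.6 ACKs.
         */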

        odd = increment % 128;
        tp->snd_cwnd += increment >> 7;
        ca->snd_cwnd_cents += odd;

        /* when the accumulated fraction reaches 128 (one full segment),
         * increase cwnd by 1. */
        while (ca->snd_cwnd_cents >= 128) {
                tp->snd_cwnd++;
                ca->snd_cwnd_cents -= 128;
                tp->snd_cwnd_cnt = 0;
        }

        /* clamp down slowstart cwnd to ssthresh value. */
        if (is_slowstart)
                tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);

        tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tp->snd_cwnd_clamp);
}
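
/*
 * Net effect of the two branches above: in slow start cwnd is multiplied by
 * roughly 2^rho per RTT instead of 2, and in congestion avoidance it grows
 * by about rho^2 segments per RTT instead of 1, so the growth rate measured
 * in real time no longer depends on how long the round trip actually is.
 */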

static struct tcp_congestion_ops tcp_hybla = {
        .init           = hybla_init,
        .ssthresh       = tcp_reno_ssthresh,
        .min_cwnd       = tcp_reno_min_cwnd,
        .cong_avoid     = hybla_cong_avoid,
        .set_state      = hybla_state,

        .owner          = THIS_MODULE,
        .name           = "hybla"
};

static int __init hybla_register(void)
{
        /* the per-socket congestion-control private area must be large
         * enough to hold struct hybla */
        BUG_ON(sizeof(struct hybla) > ICSK_CA_PRIV_SIZE);
        return tcp_register_congestion_control(&tcp_hybla);
}

static void __exit hybla_unregister(void)
{
        tcp_unregister_congestion_control(&tcp_hybla);
}

module_init(hybla_register);
module_exit(hybla_unregister);

MODULE_AUTHOR("Daniele Lacamera");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Hybla");