/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP but has grown to include most of the functionality
	of a full blown network emulator like NISTnet. It can delay
	packets and add random jitter (and correlation). The random
	distribution can also be loaded from a table to provide
	normal, Pareto, or experimental curves. Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification; that can be handled by
	layering other disciplines. It does not need to do bandwidth
	control either, since that can be handled by using token
	bucket or other rate control.

	Correlated Loss Generator models

	Added generation of correlated loss according to the
	"Gilbert-Elliot" model, a 4-state Markov model.

	References:
	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	and intuitive loss model for packet networks and its implementation
	in the Netem module in the Linux kernel", available in [1]

	Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		 Fabio Ludovici <fabio.ludovici at yahoo.it>
*/

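/* Illustrative usage (not part of the original source; exact option
 * spellings depend on the iproute2 version in use):
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *	tc qdisc change dev eth0 root netem loss 0.3% 25%
 *	tc qdisc change dev eth0 root netem loss gemodel 1%
 */
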
struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	s64 latency;
	s64 jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

	struct tc_netem_slot slot_config;
	struct slotstate {
		u64 slot_next;
		s32 packets_left;
		s32 bytes_left;
	} slot;

};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and skb->tstamp,
 * and skb->next & skb->prev are scratch space for a qdisc,
 * we save skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	u64	time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}

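/* Illustrative note (not in the original source): get_crandom() computes
 * a fixed-point convex blend of fresh entropy and the previous output,
 * roughly
 *
 *	answer = (value * (2^32 - rho) + last * rho) / 2^32
 *
 * (the code uses rho + 1), so rho == 0 yields uncorrelated values while
 * rho near 2^32 makes each value closely track the previous one, giving
 * the configured correlation.
 */
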
/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
	}

	return false;
}

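/* Illustrative note (not in the original source): the GE parameters are
 * probabilities pre-scaled by userspace to the full u32 range. In
 * GOOD_STATE a packet is lost with probability a4 (1-k); in BAD_STATE
 * it is lost with probability 1 - a3 (i.e. 1-h). Two independent random
 * draws drive the state transition and the per-packet loss decision.
 */
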
static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4-state loss model algorithm (used also for GI model).
		 * Extracts a value from the 4-state Markov loss generator;
		 * if it indicates a loss, the packet is dropped.
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm.
		 * Extracts a value from the Gilbert-Elliot loss generator;
		 * if it indicates a loss, the packet is dropped.
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static s64 tabledist(s64 mu, s32 sigma,
		     struct crndstate *state,
		     const struct disttable *dist)
{
	s64 x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2 * sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}

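/* Illustrative note (not in the original source): NETEM_DIST_SCALE is
 * 8192, so tabledist() returns approximately mu + t * sigma / 8192 for
 * a table entry t; an entry of 8192 contributes about one sigma. The
 * split into sigma / SCALE and sigma % SCALE helps avoid overflow and
 * keeps the rounding symmetric around zero.
 */
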
static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
{
	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	return div64_u64(len * NSEC_PER_SEC, q->rate);
}

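/* Illustrative arithmetic (not in the original source): q->rate counts
 * bytes per second, so at rate = 1,000,000 a 1500 byte packet takes
 * 1500 * NSEC_PER_SEC / 1000000 = 1,500,000 ns (1.5 ms) to serialize.
 * With cell_size = 53 (ATM-like framing) the length is first rounded
 * up to ceil(1500 / 53) = 29 cells, i.e. 29 * 53 = 1537 bytes.
 */
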
static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p = rb_first(&q->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	u64 tnext = netem_skb_cb(nskb)->time_to_send;
	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = rb_to_skb(parent);
		if (tnext >= netem_skb_cb(skb)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&nskb->rbnode, parent, p);
	rb_insert_color(&nskb->rbnode, &q->t_root);
	sch->q.qlen++;
}

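/* Illustrative note (not in the original source): the tfifo is an rbtree
 * keyed by time_to_send rather than a plain list, because jitter can make
 * a later arrival due out earlier; insertion stays O(log n), and since
 * equal keys descend to the right, packets with the same deadline keep
 * their FIFO order.
 */
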
/* netem can't properly corrupt a megapacket (like we get from GSO), so
 * when we statistically choose to corrupt one, we instead segment it,
 * returning the first packet to be corrupted, and re-enqueue the
 * remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}

static void netem_enqueue_skb_head(struct qdisc_skb_head *qh, struct sk_buff *skb)
{
	skb->next = qh->head;

	if (!qh->head)
		qh->tail = skb;
	qh->head = skb;
	qh->qlen++;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int prev_len = qdisc_pkt_len(skb);
	int count = 1;
	int rc = NET_XMIT_SUCCESS;
	int rc_drop = NET_XMIT_DROP;

	/* Do not fool qdisc_drop_all() */
	skb->prev = NULL;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter || q->rate)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root_bh(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
		rc_drop = NET_XMIT_SUCCESS;
	}

	/*
	 * Randomized packet corruption.
	 * Make copy if needed since we are modifying
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (skb_is_gso(skb)) {
			segs = netem_segment(skb, sch, to_free);
			if (!segs)
				return rc_drop;
			qdisc_skb_cb(segs)->pkt_len = segs->len;
		} else {
			segs = skb;
		}

		skb = segs;
		segs = segs->next;

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			goto finish_segs;
		}

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(sch->q.qlen >= sch->limit)) {
		qdisc_drop_all(skb, sch, to_free);
		return rc_drop;
	}

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		u64 now;
		s64 delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = ktime_get_ns();

		if (q->rate) {
			struct netem_skb_cb *last = NULL;

			if (sch->q.tail)
				last = netem_skb_cb(sch->q.tail);
			if (q->t_root.rb_node) {
				struct sk_buff *t_skb;
				struct netem_skb_cb *t_last;

				t_skb = skb_rb_last(&q->t_root);
				t_last = netem_skb_cb(t_skb);
				if (!last ||
				    t_last->time_to_send > last->time_to_send) {
					last = t_last;
				}
			}

			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= last->time_to_send - now;
				delay = max_t(s64, 0, delay);
				now = last->time_to_send;
			}

			delay += packet_time_ns(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = ktime_get_ns();
		q->counter = 0;

		netem_enqueue_skb_head(&sch->q, skb);
		sch->qstats.requeues++;
	}

finish_segs:
	if (segs) {
		unsigned int len, last_len;
		int nb = 0;

		len = skb->len;

		while (segs) {
			skb2 = segs->next;
			segs->next = NULL;
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		qdisc_tree_reduce_backlog(sch, -nb, prev_len - len);
	}
	return NET_XMIT_SUCCESS;
}

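/* Illustrative note (not in the original source): with e.g. "reorder 25%
 * gap 5", every 5th packet (when q->counter reaches q->gap - 1) becomes
 * a reorder candidate; with probability 25% it is queued at the head
 * with time_to_send = now, overtaking the delayed packets ahead of it,
 * and otherwise it takes the normal delayed path like the other four.
 */
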
/* Delay the next round by scheduling a new future slot with a fresh
 * budget of bytes and packets.
 */

static void get_slot_next(struct netem_sched_data *q, u64 now)
{
	q->slot.slot_next = now + q->slot_config.min_delay +
		(prandom_u32() *
		 (q->slot_config.max_delay -
		  q->slot_config.min_delay) >> 32);
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
}

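/* Illustrative note (not in the original source): slots model links that
 * deliver in bursts (e.g. WiFi aggregation). prandom_u32() scaled by
 * (max_delay - min_delay) >> 32 is uniform in [0, max - min), so the
 * next slot opens uniformly within [now + min_delay, now + max_delay).
 */
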
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct rb_node *p;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
deliver:
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	p = rb_first(&q->t_root);
	if (p) {
		u64 time_to_send;
		u64 now = ktime_get_ns();

		skb = rb_to_skb(p);

		/* is there more time remaining? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (q->slot.slot_next && q->slot.slot_next < time_to_send)
			get_slot_next(q, now);

		if (time_to_send <= now && q->slot.slot_next <= now) {
			rb_erase(p, &q->t_root);
			sch->q.qlen--;
			qdisc_qstats_backlog_dec(sch, skb);
			skb->next = NULL;
			skb->prev = NULL;
			/* skb->dev shares skb->rbnode area,
			 * we need to restore its value.
			 */
			skb->dev = qdisc_dev(sch);

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (skb->tc_redirected && skb->tc_from_ingress)
				skb->tstamp = 0;
#endif

			if (q->slot.slot_next) {
				q->slot.packets_left--;
				q->slot.bytes_left -= qdisc_pkt_len(skb);
				if (q->slot.packets_left <= 0 ||
				    q->slot.bytes_left <= 0)
					get_slot_next(q, now);
			}

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS &&
				    net_xmit_drop_count(err)) {
					qdisc_qstats_drop(sch);
					qdisc_tree_reduce_backlog(sch, 1,
								  pkt_len);
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   max(time_to_send,
					       q->slot.slot_next));
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */

static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (!n || n > NETEM_DIST_MAX)
		return -EINVAL;

	d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(q->delay_dist, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}

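/* Illustrative note (not in the original source): the delay distribution
 * table is generated by userspace (iproute2 ships normal, pareto and
 * paretonormal tables, typically installed under /usr/lib/tc) and is
 * uploaded via TCA_NETEM_DELAY_DIST when "delay ... distribution <name>"
 * is requested.
 */
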
static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_slot *c = nla_data(attr);

	q->slot_config = *c;
	if (q->slot_config.max_packets == 0)
		q->slot_config.max_packets = INT_MAX;
	if (q->slot_config.max_bytes == 0)
		q->slot_config.max_bytes = INT_MAX;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
	if (q->slot_config.min_delay | q->slot_config.max_delay)
		q->slot.slot_next = ktime_get_ns();
	else
		q->slot.slot_next = 0;
}

static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
	[TCA_NETEM_SLOT]	= { .len = sizeof(struct tc_netem_slot) },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

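/* Illustrative note (not in the original source): netem's TCA_OPTIONS
 * carries a fixed struct tc_netem_qopt followed by optional netlink
 * attributes, a layout kept for compatibility with pre-nlattr
 * userspace; parse_attr() skips the fixed part and parses the rest.
 */
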
/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret) {
			/* recover clg and loss_model, in case
			 * q->clg and q->loss_model were modified
			 * in get_loss_clg()
			 */
			q->clg = old_clg;
			q->loss_model = old_loss_model;
			return ret;
		}
	}

	sch->limit = qopt->limit;

	q->latency = PSCHED_TICKS2NS(qopt->latency);
	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_LATENCY64])
		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);

	if (tb[TCA_NETEM_JITTER64])
		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	if (tb[TCA_NETEM_SLOT])
		get_slot(q, tb[TCA_NETEM_SLOT]);

	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (!opt)
		return -EINVAL;

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;
	struct tc_netem_slot slot;

	qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency),
			     UINT_MAX);
	qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter),
			    UINT_MAX);
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	if (q->slot_config.min_delay | q->slot_config.max_delay) {
		slot = q->slot_config;
		if (slot.max_packets == INT_MAX)
			slot.max_packets = 0;
		if (slot.max_bytes == INT_MAX)
			slot.max_bytes = 0;
		if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
			goto nla_put_failure;
	}

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.find		=	netem_find,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");