Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * net/sched/sch_netem.c Network emulator | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or | |
5 | * modify it under the terms of the GNU General Public License | |
6 | * as published by the Free Software Foundation; either version | |
798b6b19 | 7 | * 2 of the License. |
1da177e4 LT |
8 | * |
9 | * Many of the algorithms and ideas for this came from | |
10297b99 | 10 | * NIST Net which is not copyrighted. |
1da177e4 LT |
11 | * |
12 | * Authors: Stephen Hemminger <shemminger@osdl.org> | |
13 | * Catalin(ux aka Dino) BOIE <catab at umbrella dot ro> | |
14 | */ | |
15 | ||
b7f080cf | 16 | #include <linux/mm.h> |
1da177e4 | 17 | #include <linux/module.h> |
5a0e3ad6 | 18 | #include <linux/slab.h> |
1da177e4 LT |
19 | #include <linux/types.h> |
20 | #include <linux/kernel.h> | |
21 | #include <linux/errno.h> | |
1da177e4 | 22 | #include <linux/skbuff.h> |
78776d3f | 23 | #include <linux/vmalloc.h> |
1da177e4 | 24 | #include <linux/rtnetlink.h> |
90b41a1c | 25 | #include <linux/reciprocal_div.h> |
aec0a40a | 26 | #include <linux/rbtree.h> |
1da177e4 | 27 | |
dc5fc579 | 28 | #include <net/netlink.h> |
1da177e4 | 29 | #include <net/pkt_sched.h> |
e4ae004b | 30 | #include <net/inet_ecn.h> |
1da177e4 | 31 | |
250a65f7 | 32 | #define VERSION "1.3" |
eb229c4c | 33 | |
1da177e4 LT |
34 | /* Network Emulation Queuing algorithm. |
35 | ==================================== | |
36 | ||
37 | Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based | |
38 | Network Emulation Tool" | |
39 | [2] Luigi Rizzo, DummyNet for FreeBSD | |
40 | ||
41 | ---------------------------------------------------------------- | |
42 | ||
43 | This started out as a simple way to delay outgoing packets to | |
44 | test TCP but has grown to include most of the functionality | |
45 | of a full blown network emulator like NISTnet. It can delay | |
46 | packets and add random jitter (and correlation). The random | |
47 | distribution can be loaded from a table as well to provide | |
48 | normal, Pareto, or experimental curves. Packet loss, | |
49 | duplication, and reordering can also be emulated. | |
50 | ||
51 | This qdisc does not do classification; that can be handled by | |
52 | layering other disciplines. It does not need to do bandwidth | |
53 | control either, since that can be handled by using a token | |
54 | bucket or other rate control. | |
661b7972 | 55 | |
56 | Correlated Loss Generator models | |
57 | ||
58 | Added generation of correlated loss according to a 4-state | |
59 | Markov model and the "Gilbert-Elliot" model. | |
60 | ||
61 | References: | |
62 | [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG | |
63 | [2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general | |
64 | and intuitive loss model for packet networks and its implementation | |
65 | in the Netem module in the Linux kernel", available in [1] | |
66 | ||
67 | Authors: Stefano Salsano <stefano.salsano at uniroma2.it> | |
68 | Fabio Ludovici <fabio.ludovici at yahoo.it> | |
1da177e4 LT |
69 | */ |
70 | ||
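The knobs described above reach this qdisc packed in a struct tc_netem_qopt carried in TCA_OPTIONS, optionally followed by nested attributes such as TCA_NETEM_CORR or TCA_NETEM_LOSS (parsed in netem_change() below). A minimal, hypothetical user-space sketch of building the basic request -- us_to_ticks() is only a stand-in for tc's own tick conversion (the real ratio comes from /proc/net/psched via iproute2's tc_core code), and the exact encoding is tc's business, not this file's:

```c
#include <linux/pkt_sched.h>
#include <stdint.h>

/* hypothetical stand-in for iproute2's tick conversion */
static uint32_t us_to_ticks(uint32_t usec, double ticks_per_usec)
{
	return (uint32_t)(usec * ticks_per_usec);
}

/* roughly "delay 100ms 10ms loss 1% limit 1000" as tc would encode it */
static void fill_netem_qopt(struct tc_netem_qopt *qopt, double ticks_per_usec)
{
	qopt->limit     = 1000;                                    /* queue limit, packets */
	qopt->latency   = us_to_ticks(100 * 1000, ticks_per_usec); /* base delay 100 ms */
	qopt->jitter    = us_to_ticks(10 * 1000, ticks_per_usec);  /* +/- 10 ms jitter */
	qopt->loss      = (uint32_t)(0.01 * 0xffffffffu);          /* 1% as a fraction of 2^32-1 */
	qopt->gap       = 0;                                       /* no forced reordering */
	qopt->duplicate = 0;                                       /* no duplication */
}
```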
71 | struct netem_sched_data { | |
aec0a40a ED |
72 | /* internal t(ime)fifo qdisc uses t_root and sch->limit */ |
73 | struct rb_root t_root; | |
50612537 ED |
74 | |
75 | /* optional qdisc for classful handling (NULL at netem init) */ | |
1da177e4 | 76 | struct Qdisc *qdisc; |
50612537 | 77 | |
59cb5c67 | 78 | struct qdisc_watchdog watchdog; |
1da177e4 | 79 | |
b407621c SH |
80 | psched_tdiff_t latency; |
81 | psched_tdiff_t jitter; | |
82 | ||
1da177e4 | 83 | u32 loss; |
e4ae004b | 84 | u32 ecn; |
1da177e4 LT |
85 | u32 limit; |
86 | u32 counter; | |
87 | u32 gap; | |
1da177e4 | 88 | u32 duplicate; |
0dca51d3 | 89 | u32 reorder; |
c865e5d9 | 90 | u32 corrupt; |
6a031f67 | 91 | u64 rate; |
90b41a1c HPP |
92 | s32 packet_overhead; |
93 | u32 cell_size; | |
809fa972 | 94 | struct reciprocal_value cell_size_reciprocal; |
90b41a1c | 95 | s32 cell_overhead; |
1da177e4 LT |
96 | |
97 | struct crndstate { | |
b407621c SH |
98 | u32 last; |
99 | u32 rho; | |
c865e5d9 | 100 | } delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor; |
1da177e4 LT |
101 | |
102 | struct disttable { | |
103 | u32 size; | |
104 | s16 table[0]; | |
105 | } *delay_dist; | |
661b7972 | 106 | |
107 | enum { | |
108 | CLG_RANDOM, | |
109 | CLG_4_STATES, | |
110 | CLG_GILB_ELL, | |
111 | } loss_model; | |
112 | ||
a6e2fe17 YY |
113 | enum { |
114 | TX_IN_GAP_PERIOD = 1, | |
115 | TX_IN_BURST_PERIOD, | |
116 | LOST_IN_GAP_PERIOD, | |
117 | LOST_IN_BURST_PERIOD, | |
118 | } _4_state_model; | |
119 | ||
c045a734 YY |
120 | enum { |
121 | GOOD_STATE = 1, | |
122 | BAD_STATE, | |
123 | } GE_state_model; | |
124 | ||
661b7972 | 125 | /* Correlated Loss Generation models */ |
126 | struct clgstate { | |
127 | /* state of the Markov chain */ | |
128 | u8 state; | |
129 | ||
130 | /* 4-states and Gilbert-Elliot models */ | |
131 | u32 a1; /* p13 for 4-states or p for GE */ | |
132 | u32 a2; /* p31 for 4-states or r for GE */ | |
133 | u32 a3; /* p32 for 4-states or h for GE */ | |
134 | u32 a4; /* p14 for 4-states or 1-k for GE */ | |
135 | u32 a5; /* p23 used only in 4-states */ | |
136 | } clg; | |
137 | ||
1da177e4 LT |
138 | }; |
139 | ||
50612537 ED |
140 | /* Time stamp put into socket buffer control block |
141 | * Only valid when skbs are in our internal t(ime)fifo queue. | |
56b17425 ED |
142 | * |
143 | * As skb->rbnode uses the same storage as skb->next, skb->prev and skb->tstamp, | |
144 | * and skb->next & skb->prev are scratch space for a qdisc, | |
145 | * we save the skb->tstamp value in skb->cb[] before destroying it. | |
50612537 | 146 | */ |
1da177e4 LT |
147 | struct netem_skb_cb { |
148 | psched_time_t time_to_send; | |
aec0a40a | 149 | ktime_t tstamp_save; |
1da177e4 LT |
150 | }; |
151 | ||
aec0a40a ED |
152 | |
153 | static struct sk_buff *netem_rb_to_skb(struct rb_node *rb) | |
154 | { | |
56b17425 | 155 | return container_of(rb, struct sk_buff, rbnode); |
aec0a40a ED |
156 | } |
157 | ||
5f86173b JK |
158 | static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb) |
159 | { | |
aec0a40a | 160 | /* we assume we can use skb next/prev/tstamp as storage for rb_node */ |
16bda13d | 161 | qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb)); |
175f9c1b | 162 | return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data; |
5f86173b JK |
163 | } |
164 | ||
1da177e4 LT |
165 | /* init_crandom - initialize correlated random number generator |
166 | * Use entropy source for initial seed. | |
167 | */ | |
168 | static void init_crandom(struct crndstate *state, unsigned long rho) | |
169 | { | |
170 | state->rho = rho; | |
63862b5b | 171 | state->last = prandom_u32(); |
1da177e4 LT |
172 | } |
173 | ||
174 | /* get_crandom - correlated random number generator | |
175 | * Next number depends on last value. | |
176 | * rho is scaled to avoid floating point. | |
177 | */ | |
b407621c | 178 | static u32 get_crandom(struct crndstate *state) |
1da177e4 LT |
179 | { |
180 | u64 value, rho; | |
181 | unsigned long answer; | |
182 | ||
bb2f8cc0 | 183 | if (state->rho == 0) /* no correlation */ |
63862b5b | 184 | return prandom_u32(); |
1da177e4 | 185 | |
63862b5b | 186 | value = prandom_u32(); |
1da177e4 LT |
187 | rho = (u64)state->rho + 1; |
188 | answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32; | |
189 | state->last = answer; | |
190 | return answer; | |
191 | } | |
192 | ||
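get_crandom() above is a first-order autoregressive blend in 32-bit fixed point: with rho scaled to [0, 2^32), each output is roughly rho/2^32 parts the previous value and the rest a fresh random draw. A minimal user-space sketch of the same arithmetic (random() standing in for prandom_u32()):

```c
#include <stdint.h>
#include <stdlib.h>

static uint32_t demo_last;

/* same fixed-point blend as get_crandom(); rho in [0, 0xffffffff] */
static uint32_t crandom_demo(uint32_t rho)
{
	uint64_t value = (uint32_t)random();   /* stand-in for prandom_u32() */
	uint64_t r = (uint64_t)rho + 1;
	uint32_t answer = (value * ((1ULL << 32) - r) + (uint64_t)demo_last * r) >> 32;

	demo_last = answer;                    /* next call correlates with this result */
	return answer;
}
```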
661b7972 | 193 | /* loss_4state - 4-state model loss generator |
194 | * Generates losses according to the 4-state Markov chain adopted in | |
195 | * the GI (General and Intuitive) loss model. | |
196 | */ | |
197 | static bool loss_4state(struct netem_sched_data *q) | |
198 | { | |
199 | struct clgstate *clg = &q->clg; | |
63862b5b | 200 | u32 rnd = prandom_u32(); |
661b7972 | 201 | |
202 | /* | |
25985edc | 203 | * Compares rnd with the transition |
661b7972 | 204 | * probabilities out of the current state, then decides the |
205 | * next state and whether the next packet has to be transmitted or lost. | |
206 | * The four states correspond to: | |
a6e2fe17 YY |
207 | * TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period |
208 | * LOST_IN_BURST_PERIOD => isolated losses within a gap period | |
209 | * LOST_IN_GAP_PERIOD => lost packets within a burst period | |
210 | * TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period | |
661b7972 | 211 | */ |
212 | switch (clg->state) { | |
a6e2fe17 | 213 | case TX_IN_GAP_PERIOD: |
661b7972 | 214 | if (rnd < clg->a4) { |
a6e2fe17 | 215 | clg->state = LOST_IN_BURST_PERIOD; |
661b7972 | 216 | return true; |
ab6c27be | 217 | } else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) { |
a6e2fe17 | 218 | clg->state = LOST_IN_GAP_PERIOD; |
661b7972 | 219 | return true; |
a6e2fe17 YY |
220 | } else if (clg->a1 + clg->a4 < rnd) { |
221 | clg->state = TX_IN_GAP_PERIOD; | |
222 | } | |
661b7972 | 223 | |
224 | break; | |
a6e2fe17 | 225 | case TX_IN_BURST_PERIOD: |
661b7972 | 226 | if (rnd < clg->a5) { |
a6e2fe17 | 227 | clg->state = LOST_IN_GAP_PERIOD; |
661b7972 | 228 | return true; |
a6e2fe17 YY |
229 | } else { |
230 | clg->state = TX_IN_BURST_PERIOD; | |
231 | } | |
661b7972 | 232 | |
233 | break; | |
a6e2fe17 | 234 | case LOST_IN_GAP_PERIOD: |
661b7972 | 235 | if (rnd < clg->a3) |
a6e2fe17 | 236 | clg->state = TX_IN_BURST_PERIOD; |
661b7972 | 237 | else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) { |
a6e2fe17 | 238 | clg->state = TX_IN_GAP_PERIOD; |
661b7972 | 239 | } else if (clg->a2 + clg->a3 < rnd) { |
a6e2fe17 | 240 | clg->state = LOST_IN_GAP_PERIOD; |
661b7972 | 241 | return true; |
242 | } | |
243 | break; | |
a6e2fe17 YY |
244 | case LOST_IN_BURST_PERIOD: |
245 | clg->state = TX_IN_GAP_PERIOD; | |
661b7972 | 246 | break; |
247 | } | |
248 | ||
249 | return false; | |
250 | } | |
251 | ||
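Reading the switch above together with the a1..a5 mapping in struct clgstate (a1 = p13, a2 = p31, a3 = p32, a4 = p14, a5 = p23, each a u32 interpreted as a fraction of 2^32), the per-packet transitions are, as a sketch:

$$
\begin{aligned}
&\Pr(1\to4)=p_{14}, \quad \Pr(1\to3)=p_{13}, \quad \Pr(1\to1)=1-p_{13}-p_{14}\\
&\Pr(2\to3)=p_{23}, \quad \Pr(2\to2)=1-p_{23}\\
&\Pr(3\to2)=p_{32}, \quad \Pr(3\to1)=p_{31}, \quad \Pr(3\to3)=1-p_{31}-p_{32}\\
&\Pr(4\to1)=1
\end{aligned}
$$

with 1 = TX_IN_GAP_PERIOD, 2 = TX_IN_BURST_PERIOD, 3 = LOST_IN_GAP_PERIOD, 4 = LOST_IN_BURST_PERIOD. A packet is reported lost whenever the chain enters state 3 or 4 (including 3 staying in 3); state 4 always returns to state 1 on the next packet, which is what makes its losses isolated.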
252 | /* loss_gilb_ell - Gilbert-Elliot model loss generator | |
253 | * Generates losses according to the Gilbert-Elliot loss model or | |
254 | * its special cases (Gilbert or Simple Gilbert) | |
255 | * | |
25985edc | 256 | * Compares a random number with the transition |
661b7972 | 257 | * probabilities out of the current state, then decides the |
25985edc | 258 | * next state. A second random number is extracted and the comparison |
661b7972 | 259 | * with the loss probability of the current state decides whether the next |
260 | * packet will be transmitted or lost. | |
261 | */ | |
262 | static bool loss_gilb_ell(struct netem_sched_data *q) | |
263 | { | |
264 | struct clgstate *clg = &q->clg; | |
265 | ||
266 | switch (clg->state) { | |
c045a734 | 267 | case GOOD_STATE: |
63862b5b | 268 | if (prandom_u32() < clg->a1) |
c045a734 | 269 | clg->state = BAD_STATE; |
63862b5b | 270 | if (prandom_u32() < clg->a4) |
661b7972 | 271 | return true; |
7c2781fa | 272 | break; |
c045a734 | 273 | case BAD_STATE: |
63862b5b | 274 | if (prandom_u32() < clg->a2) |
c045a734 | 275 | clg->state = GOOD_STATE; |
63862b5b | 276 | if (prandom_u32() > clg->a3) |
661b7972 | 277 | return true; |
278 | } | |
279 | ||
280 | return false; | |
281 | } | |
282 | ||
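For the Gilbert-Elliot case the a1..a4 values map back to the usual model parameters (again u32 values read as fractions of 2^32); roughly:

$$
\Pr(\text{GOOD}\to\text{BAD}) = p,\qquad
\Pr(\text{BAD}\to\text{GOOD}) = r,\qquad
\Pr(\text{loss}\mid\text{GOOD}) = 1-k,\qquad
\Pr(\text{loss}\mid\text{BAD}) = 1-h
$$

where a1 = p, a2 = r, a3 = h and a4 = 1-k, and the state transition and the loss decision are taken from independent random draws.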
283 | static bool loss_event(struct netem_sched_data *q) | |
284 | { | |
285 | switch (q->loss_model) { | |
286 | case CLG_RANDOM: | |
287 | /* Random packet drop 0 => none, ~0 => all */ | |
288 | return q->loss && q->loss >= get_crandom(&q->loss_cor); | |
289 | ||
290 | case CLG_4_STATES: | |
291 | /* 4-state loss model algorithm (also used for the GI model) | |
292 | * Extracts a value from the 4-state Markov loss generator; | |
293 | * if it is 1, a packet is dropped and, if needed, the event is | |
294 | * written to the kernel logs | |
295 | */ | |
296 | return loss_4state(q); | |
297 | ||
298 | case CLG_GILB_ELL: | |
299 | /* Gilbert-Elliot loss model algorithm | |
300 | * Extracts a value from the Gilbert-Elliot loss generator; | |
301 | * if it is 1, a packet is dropped and, if needed, the event is | |
302 | * written to the kernel logs | |
303 | */ | |
304 | return loss_gilb_ell(q); | |
305 | } | |
306 | ||
307 | return false; /* not reached */ | |
308 | } | |
309 | ||
310 | ||
1da177e4 LT |
311 | /* tabledist - return a pseudo-randomly distributed value with mean mu and |
312 | * std deviation sigma. Uses table lookup to approximate the desired | |
313 | * distribution, and a uniformly-distributed pseudo-random source. | |
314 | */ | |
b407621c SH |
315 | static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma, |
316 | struct crndstate *state, | |
317 | const struct disttable *dist) | |
1da177e4 | 318 | { |
b407621c SH |
319 | psched_tdiff_t x; |
320 | long t; | |
321 | u32 rnd; | |
1da177e4 LT |
322 | |
323 | if (sigma == 0) | |
324 | return mu; | |
325 | ||
326 | rnd = get_crandom(state); | |
327 | ||
328 | /* default uniform distribution */ | |
10297b99 | 329 | if (dist == NULL) |
1da177e4 LT |
330 | return (rnd % (2*sigma)) - sigma + mu; |
331 | ||
332 | t = dist->table[rnd % dist->size]; | |
333 | x = (sigma % NETEM_DIST_SCALE) * t; | |
334 | if (x >= 0) | |
335 | x += NETEM_DIST_SCALE/2; | |
336 | else | |
337 | x -= NETEM_DIST_SCALE/2; | |
338 | ||
339 | return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu; | |
340 | } | |
341 | ||
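Ignoring the rounding term, the two halves of the return statement in tabledist() above recombine into a single scaled table lookup; as a sketch, with NETEM_DIST_SCALE the fixed-point unit of the table entries:

$$
\text{value} \;\approx\; \mu \;+\; \frac{t \cdot \sigma}{\text{NETEM\_DIST\_SCALE}},
\qquad t = \text{dist->table}[\,\text{rnd} \bmod \text{dist->size}\,]
$$

so a table generated for a unit-variance distribution yields approximately that distribution scaled by sigma and shifted by mu; the split into `x / NETEM_DIST_SCALE` and `(sigma / NETEM_DIST_SCALE) * t` only avoids 64-bit overflow in the intermediate product.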
90b41a1c | 342 | static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q) |
7bc0f28c | 343 | { |
90b41a1c | 344 | u64 ticks; |
fc33cc72 | 345 | |
90b41a1c HPP |
346 | len += q->packet_overhead; |
347 | ||
348 | if (q->cell_size) { | |
349 | u32 cells = reciprocal_divide(len, q->cell_size_reciprocal); | |
350 | ||
351 | if (len > cells * q->cell_size) /* extra cell needed for remainder */ | |
352 | cells++; | |
353 | len = cells * (q->cell_size + q->cell_overhead); | |
354 | } | |
355 | ||
356 | ticks = (u64)len * NSEC_PER_SEC; | |
357 | ||
358 | do_div(ticks, q->rate); | |
fc33cc72 | 359 | return PSCHED_NS2TICKS(ticks); |
7bc0f28c HPP |
360 | } |
361 | ||
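Assuming q->rate is in bytes per second (as user space configures it via TCA_NETEM_RATE / TCA_NETEM_RATE64), packet_len_2_sched_time() above computes the wire time of one packet, with optional per-packet and per-cell overheads; as a sketch:

$$
\text{cells} = \left\lceil \frac{\text{len} + \text{packet\_overhead}}{\text{cell\_size}} \right\rceil,
\qquad
t_{\text{ns}} = \frac{\text{cells}\cdot(\text{cell\_size}+\text{cell\_overhead})\cdot 10^{9}}{\text{rate}}
$$

When cell_size is 0, len + packet_overhead is used directly; the result is then converted from nanoseconds to psched ticks.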
ff704050 | 362 | static void tfifo_reset(struct Qdisc *sch) |
363 | { | |
364 | struct netem_sched_data *q = qdisc_priv(sch); | |
365 | struct rb_node *p; | |
366 | ||
367 | while ((p = rb_first(&q->t_root))) { | |
368 | struct sk_buff *skb = netem_rb_to_skb(p); | |
369 | ||
370 | rb_erase(p, &q->t_root); | |
371 | skb->next = NULL; | |
372 | skb->prev = NULL; | |
373 | kfree_skb(skb); | |
374 | } | |
375 | } | |
376 | ||
960fb66e | 377 | static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) |
50612537 | 378 | { |
aec0a40a | 379 | struct netem_sched_data *q = qdisc_priv(sch); |
50612537 | 380 | psched_time_t tnext = netem_skb_cb(nskb)->time_to_send; |
aec0a40a | 381 | struct rb_node **p = &q->t_root.rb_node, *parent = NULL; |
50612537 | 382 | |
aec0a40a ED |
383 | while (*p) { |
384 | struct sk_buff *skb; | |
50612537 | 385 | |
aec0a40a ED |
386 | parent = *p; |
387 | skb = netem_rb_to_skb(parent); | |
960fb66e | 388 | if (tnext >= netem_skb_cb(skb)->time_to_send) |
aec0a40a ED |
389 | p = &parent->rb_right; |
390 | else | |
391 | p = &parent->rb_left; | |
50612537 | 392 | } |
56b17425 ED |
393 | rb_link_node(&nskb->rbnode, parent, p); |
394 | rb_insert_color(&nskb->rbnode, &q->t_root); | |
aec0a40a | 395 | sch->q.qlen++; |
50612537 ED |
396 | } |
397 | ||
6071bd1a NH |
398 | /* netem can't properly corrupt a megapacket (like we get from GSO), so when | |
399 | * we statistically choose to corrupt one, we instead segment it, returning | |
400 | * the first packet to be corrupted, and re-enqueue the remaining frames | |
401 | */ | |
402 | static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch) | |
403 | { | |
404 | struct sk_buff *segs; | |
405 | netdev_features_t features = netif_skb_features(skb); | |
406 | ||
407 | segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); | |
408 | ||
409 | if (IS_ERR_OR_NULL(segs)) { | |
410 | qdisc_reshape_fail(skb, sch); | |
411 | return NULL; | |
412 | } | |
413 | consume_skb(skb); | |
414 | return segs; | |
415 | } | |
416 | ||
0afb51e7 SH |
417 | /* |
418 | * Insert one skb into qdisc. | |
419 | * Note: parent depends on return value to account for queue length. | |
420 | * NET_XMIT_DROP: queue length didn't change. | |
421 | * NET_XMIT_SUCCESS: one skb was queued. | |
422 | */ | |
1da177e4 LT |
423 | static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) |
424 | { | |
425 | struct netem_sched_data *q = qdisc_priv(sch); | |
89e1df74 GC |
426 | /* We don't fill cb now as skb_unshare() may invalidate it */ |
427 | struct netem_skb_cb *cb; | |
0afb51e7 | 428 | struct sk_buff *skb2; |
6071bd1a NH |
429 | struct sk_buff *segs = NULL; |
430 | unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb); | |
431 | int nb = 0; | |
0afb51e7 | 432 | int count = 1; |
6071bd1a | 433 | int rc = NET_XMIT_SUCCESS; |
1da177e4 | 434 | |
0afb51e7 SH |
435 | /* Random duplication */ |
436 | if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)) | |
437 | ++count; | |
438 | ||
661b7972 | 439 | /* Drop packet? */ |
e4ae004b ED |
440 | if (loss_event(q)) { |
441 | if (q->ecn && INET_ECN_set_ce(skb)) | |
25331d6c | 442 | qdisc_qstats_drop(sch); /* mark packet */ |
e4ae004b ED |
443 | else |
444 | --count; | |
445 | } | |
0afb51e7 | 446 | if (count == 0) { |
25331d6c | 447 | qdisc_qstats_drop(sch); |
1da177e4 | 448 | kfree_skb(skb); |
c27f339a | 449 | return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; |
1da177e4 LT |
450 | } |
451 | ||
5a308f40 ED |
452 | /* If a delay is expected, orphan the skb. (orphaning usually takes |
453 | * place at TX completion time, so _before_ the link transit delay) | |
5a308f40 ED |
454 | */ |
455 | if (q->latency || q->jitter) | |
f2f872f9 | 456 | skb_orphan_partial(skb); |
4e8a5201 | 457 | |
0afb51e7 SH |
458 | /* |
459 | * If we need to duplicate packet, then re-insert at top of the | |
460 | * qdisc tree, since parent queuer expects that only one | |
461 | * skb will be queued. | |
462 | */ | |
463 | if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) { | |
7698b4fc | 464 | struct Qdisc *rootq = qdisc_root(sch); |
0afb51e7 | 465 | u32 dupsave = q->duplicate; /* prevent duplicating a dup... */ |
0afb51e7 | 466 | |
b396cca6 ED |
467 | q->duplicate = 0; |
468 | rootq->enqueue(skb2, rootq); | |
0afb51e7 | 469 | q->duplicate = dupsave; |
1da177e4 LT |
470 | } |
471 | ||
c865e5d9 SH |
472 | /* |
473 | * Randomized packet corruption. | |
474 | * Make copy if needed since we are modifying | |
475 | * If packet is going to be hardware checksummed, then | |
476 | * do it now in software before we mangle it. | |
477 | */ | |
478 | if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) { | |
6071bd1a NH |
479 | if (skb_is_gso(skb)) { |
480 | segs = netem_segment(skb, sch); | |
481 | if (!segs) | |
482 | return NET_XMIT_DROP; | |
483 | } else { | |
484 | segs = skb; | |
485 | } | |
486 | ||
487 | skb = segs; | |
488 | segs = segs->next; | |
489 | ||
f64f9e71 JP |
490 | if (!(skb = skb_unshare(skb, GFP_ATOMIC)) || |
491 | (skb->ip_summed == CHECKSUM_PARTIAL && | |
6071bd1a NH |
492 | skb_checksum_help(skb))) { |
493 | rc = qdisc_drop(skb, sch); | |
494 | goto finish_segs; | |
495 | } | |
c865e5d9 | 496 | |
63862b5b AH |
497 | skb->data[prandom_u32() % skb_headlen(skb)] ^= |
498 | 1<<(prandom_u32() % 8); | |
c865e5d9 SH |
499 | } |
500 | ||
960fb66e ED |
501 | if (unlikely(skb_queue_len(&sch->q) >= sch->limit)) |
502 | return qdisc_reshape_fail(skb, sch); | |
503 | ||
25331d6c | 504 | qdisc_qstats_backlog_inc(sch, skb); |
960fb66e | 505 | |
5f86173b | 506 | cb = netem_skb_cb(skb); |
cc7ec456 | 507 | if (q->gap == 0 || /* not doing reordering */ |
a42b4799 | 508 | q->counter < q->gap - 1 || /* inside last reordering gap */ |
f64f9e71 | 509 | q->reorder < get_crandom(&q->reorder_cor)) { |
0f9f32ac | 510 | psched_time_t now; |
07aaa115 SH |
511 | psched_tdiff_t delay; |
512 | ||
513 | delay = tabledist(q->latency, q->jitter, | |
514 | &q->delay_cor, q->delay_dist); | |
515 | ||
3bebcda2 | 516 | now = psched_get_time(); |
7bc0f28c HPP |
517 | |
518 | if (q->rate) { | |
aec0a40a | 519 | struct sk_buff *last; |
7bc0f28c | 520 | |
aec0a40a ED |
521 | if (!skb_queue_empty(&sch->q)) |
522 | last = skb_peek_tail(&sch->q); | |
523 | else | |
524 | last = netem_rb_to_skb(rb_last(&q->t_root)); | |
525 | if (last) { | |
7bc0f28c | 526 | /* |
a13d3104 JN |
527 | * Last packet in queue is reference point (now), |
528 | * calculate this time bonus and subtract | |
7bc0f28c HPP |
529 | * from delay. |
530 | */ | |
aec0a40a | 531 | delay -= netem_skb_cb(last)->time_to_send - now; |
a13d3104 | 532 | delay = max_t(psched_tdiff_t, 0, delay); |
aec0a40a | 533 | now = netem_skb_cb(last)->time_to_send; |
7bc0f28c | 534 | } |
a13d3104 | 535 | |
8cfd88d6 | 536 | delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q); |
7bc0f28c HPP |
537 | } |
538 | ||
7c59e25f | 539 | cb->time_to_send = now + delay; |
aec0a40a | 540 | cb->tstamp_save = skb->tstamp; |
1da177e4 | 541 | ++q->counter; |
960fb66e | 542 | tfifo_enqueue(skb, sch); |
1da177e4 | 543 | } else { |
10297b99 | 544 | /* |
0dca51d3 SH |
545 | * Do re-ordering by putting one out of N packets at the front |
546 | * of the queue. | |
547 | */ | |
3bebcda2 | 548 | cb->time_to_send = psched_get_time(); |
0dca51d3 | 549 | q->counter = 0; |
8ba25dad | 550 | |
50612537 | 551 | __skb_queue_head(&sch->q, skb); |
eb101924 | 552 | sch->qstats.requeues++; |
378a2f09 | 553 | } |
1da177e4 | 554 | |
6071bd1a NH |
555 | finish_segs: |
556 | if (segs) { | |
557 | while (segs) { | |
558 | skb2 = segs->next; | |
559 | segs->next = NULL; | |
560 | qdisc_skb_cb(segs)->pkt_len = segs->len; | |
561 | last_len = segs->len; | |
562 | rc = qdisc_enqueue(segs, sch); | |
563 | if (rc != NET_XMIT_SUCCESS) { | |
564 | if (net_xmit_drop_count(rc)) | |
565 | qdisc_qstats_drop(sch); | |
566 | } else { | |
567 | nb++; | |
568 | len += last_len; | |
569 | } | |
570 | segs = skb2; | |
571 | } | |
572 | sch->q.qlen += nb; | |
573 | if (nb > 1) | |
574 | qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len); | |
575 | } | |
10f6dfcf | 576 | return NET_XMIT_SUCCESS; |
1da177e4 LT |
577 | } |
578 | ||
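Putting the delay/rate branch of netem_enqueue() above into one expression (a sketch that ignores cell quantization): with D the jittered delay drawn from tabledist() and t_last the time_to_send of the most recently queued packet, the new packet is stamped

$$
t_{\text{send}} \;=\; \max(t_{\text{last}},\ \text{now} + D) \;+\; \frac{\text{len} + \text{packet\_overhead}}{\text{rate}}
$$

when a rate is configured (just now + D when the queue is empty or no rate is set). Packets picked by the reorder logic skip this entirely and are queued at the head with time_to_send = now.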
cc7ec456 | 579 | static unsigned int netem_drop(struct Qdisc *sch) |
1da177e4 LT |
580 | { |
581 | struct netem_sched_data *q = qdisc_priv(sch); | |
50612537 | 582 | unsigned int len; |
1da177e4 | 583 | |
50612537 | 584 | len = qdisc_queue_drop(sch); |
aec0a40a ED |
585 | |
586 | if (!len) { | |
587 | struct rb_node *p = rb_first(&q->t_root); | |
588 | ||
589 | if (p) { | |
590 | struct sk_buff *skb = netem_rb_to_skb(p); | |
591 | ||
592 | rb_erase(p, &q->t_root); | |
593 | sch->q.qlen--; | |
594 | skb->next = NULL; | |
595 | skb->prev = NULL; | |
25331d6c | 596 | qdisc_qstats_backlog_dec(sch, skb); |
aec0a40a ED |
597 | kfree_skb(skb); |
598 | } | |
599 | } | |
50612537 ED |
600 | if (!len && q->qdisc && q->qdisc->ops->drop) |
601 | len = q->qdisc->ops->drop(q->qdisc); | |
602 | if (len) | |
25331d6c | 603 | qdisc_qstats_drop(sch); |
50612537 | 604 | |
1da177e4 LT |
605 | return len; |
606 | } | |
607 | ||
1da177e4 LT |
608 | static struct sk_buff *netem_dequeue(struct Qdisc *sch) |
609 | { | |
610 | struct netem_sched_data *q = qdisc_priv(sch); | |
611 | struct sk_buff *skb; | |
aec0a40a | 612 | struct rb_node *p; |
1da177e4 | 613 | |
fd245a4a | 614 | if (qdisc_is_throttled(sch)) |
11274e5a SH |
615 | return NULL; |
616 | ||
50612537 | 617 | tfifo_dequeue: |
aec0a40a | 618 | skb = __skb_dequeue(&sch->q); |
771018e7 | 619 | if (skb) { |
25331d6c | 620 | qdisc_qstats_backlog_dec(sch, skb); |
0ad2a836 | 621 | deliver: |
aec0a40a ED |
622 | qdisc_unthrottled(sch); |
623 | qdisc_bstats_update(sch, skb); | |
624 | return skb; | |
625 | } | |
626 | p = rb_first(&q->t_root); | |
627 | if (p) { | |
36b7bfe0 ED |
628 | psched_time_t time_to_send; |
629 | ||
aec0a40a | 630 | skb = netem_rb_to_skb(p); |
0f9f32ac SH |
631 | |
632 | /* is there still time remaining before this packet's send time? */ |
36b7bfe0 ED |
633 | time_to_send = netem_skb_cb(skb)->time_to_send; |
634 | if (time_to_send <= psched_get_time()) { | |
aec0a40a ED |
635 | rb_erase(p, &q->t_root); |
636 | ||
637 | sch->q.qlen--; | |
0ad2a836 | 638 | qdisc_qstats_backlog_dec(sch, skb); |
aec0a40a ED |
639 | skb->next = NULL; |
640 | skb->prev = NULL; | |
641 | skb->tstamp = netem_skb_cb(skb)->tstamp_save; | |
03c05f0d | 642 | |
8caf1539 JP |
643 | #ifdef CONFIG_NET_CLS_ACT |
644 | /* | |
645 | * If it's at ingress let's pretend the delay is | |
646 | * from the network (tstamp will be updated). | |
647 | */ | |
648 | if (G_TC_FROM(skb->tc_verd) & AT_INGRESS) | |
649 | skb->tstamp.tv64 = 0; | |
650 | #endif | |
10f6dfcf | 651 | |
50612537 | 652 | if (q->qdisc) { |
21de12ee | 653 | unsigned int pkt_len = qdisc_pkt_len(skb); |
50612537 ED |
654 | int err = qdisc_enqueue(skb, q->qdisc); |
655 | ||
21de12ee ED |
656 | if (err != NET_XMIT_SUCCESS && |
657 | net_xmit_drop_count(err)) { | |
658 | qdisc_qstats_drop(sch); | |
659 | qdisc_tree_reduce_backlog(sch, 1, | |
660 | pkt_len); | |
50612537 ED |
661 | } |
662 | goto tfifo_dequeue; | |
663 | } | |
aec0a40a | 664 | goto deliver; |
07aaa115 | 665 | } |
11274e5a | 666 | |
50612537 ED |
667 | if (q->qdisc) { |
668 | skb = q->qdisc->ops->dequeue(q->qdisc); | |
669 | if (skb) | |
670 | goto deliver; | |
671 | } | |
36b7bfe0 | 672 | qdisc_watchdog_schedule(&q->watchdog, time_to_send); |
0f9f32ac SH |
673 | } |
674 | ||
50612537 ED |
675 | if (q->qdisc) { |
676 | skb = q->qdisc->ops->dequeue(q->qdisc); | |
677 | if (skb) | |
678 | goto deliver; | |
679 | } | |
0f9f32ac | 680 | return NULL; |
1da177e4 LT |
681 | } |
682 | ||
1da177e4 LT |
683 | static void netem_reset(struct Qdisc *sch) |
684 | { | |
685 | struct netem_sched_data *q = qdisc_priv(sch); | |
686 | ||
50612537 | 687 | qdisc_reset_queue(sch); |
ff704050 | 688 | tfifo_reset(sch); |
50612537 ED |
689 | if (q->qdisc) |
690 | qdisc_reset(q->qdisc); | |
59cb5c67 | 691 | qdisc_watchdog_cancel(&q->watchdog); |
1da177e4 LT |
692 | } |
693 | ||
6373a9a2 | 694 | static void dist_free(struct disttable *d) |
695 | { | |
4cb28970 | 696 | kvfree(d); |
6373a9a2 | 697 | } |
698 | ||
1da177e4 LT |
699 | /* |
700 | * Distribution data is a variable size payload containing | |
701 | * signed 16 bit values. | |
702 | */ | |
1e90474c | 703 | static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr) |
1da177e4 LT |
704 | { |
705 | struct netem_sched_data *q = qdisc_priv(sch); | |
6373a9a2 | 706 | size_t n = nla_len(attr)/sizeof(__s16); |
1e90474c | 707 | const __s16 *data = nla_data(attr); |
7698b4fc | 708 | spinlock_t *root_lock; |
1da177e4 LT |
709 | struct disttable *d; |
710 | int i; | |
6373a9a2 | 711 | size_t s; |
1da177e4 | 712 | |
df173bda | 713 | if (n > NETEM_DIST_MAX) |
1da177e4 LT |
714 | return -EINVAL; |
715 | ||
6373a9a2 | 716 | s = sizeof(struct disttable) + n * sizeof(s16); |
bb52c7ac | 717 | d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN); |
6373a9a2 | 718 | if (!d) |
719 | d = vmalloc(s); | |
1da177e4 LT |
720 | if (!d) |
721 | return -ENOMEM; | |
722 | ||
723 | d->size = n; | |
724 | for (i = 0; i < n; i++) | |
725 | d->table[i] = data[i]; | |
10297b99 | 726 | |
102396ae | 727 | root_lock = qdisc_root_sleeping_lock(sch); |
7698b4fc DM |
728 | |
729 | spin_lock_bh(root_lock); | |
bb52c7ac | 730 | swap(q->delay_dist, d); |
7698b4fc | 731 | spin_unlock_bh(root_lock); |
bb52c7ac ED |
732 | |
733 | dist_free(d); | |
1da177e4 LT |
734 | return 0; |
735 | } | |
736 | ||
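The tables consumed by get_dist_table() are normally produced by iproute2's maketable (the normal, pareto and paretonormal .dist files) and may hold up to NETEM_DIST_MAX entries, each a signed offset in units of sigma/NETEM_DIST_SCALE. A tiny, hypothetical sketch of hand-building such a table for TCA_NETEM_DELAY_DIST -- a linear ramp approximating a uniform distribution over [-sigma, +sigma]:

```c
#include <stdint.h>

#define DEMO_ENTRIES 9   /* real tables typically hold a few thousand entries */

/* fill a ramp from -NETEM_DIST_SCALE to +NETEM_DIST_SCALE (8192 in pkt_sched.h),
 * i.e. from -sigma to +sigma in tabledist()'s units
 */
static void build_uniform_table(int16_t table[DEMO_ENTRIES])
{
	for (int i = 0; i < DEMO_ENTRIES; i++)
		table[i] = (int16_t)(-8192 + i * (2 * 8192) / (DEMO_ENTRIES - 1));
}
```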
49545a77 | 737 | static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr) |
1da177e4 | 738 | { |
1e90474c | 739 | const struct tc_netem_corr *c = nla_data(attr); |
1da177e4 | 740 | |
1da177e4 LT |
741 | init_crandom(&q->delay_cor, c->delay_corr); |
742 | init_crandom(&q->loss_cor, c->loss_corr); | |
743 | init_crandom(&q->dup_cor, c->dup_corr); | |
1da177e4 LT |
744 | } |
745 | ||
49545a77 | 746 | static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr) |
0dca51d3 | 747 | { |
1e90474c | 748 | const struct tc_netem_reorder *r = nla_data(attr); |
0dca51d3 | 749 | |
0dca51d3 SH |
750 | q->reorder = r->probability; |
751 | init_crandom(&q->reorder_cor, r->correlation); | |
0dca51d3 SH |
752 | } |
753 | ||
49545a77 | 754 | static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr) |
c865e5d9 | 755 | { |
1e90474c | 756 | const struct tc_netem_corrupt *r = nla_data(attr); |
c865e5d9 | 757 | |
c865e5d9 SH |
758 | q->corrupt = r->probability; |
759 | init_crandom(&q->corrupt_cor, r->correlation); | |
c865e5d9 SH |
760 | } |
761 | ||
49545a77 | 762 | static void get_rate(struct netem_sched_data *q, const struct nlattr *attr) |
7bc0f28c | 763 | { |
7bc0f28c HPP |
764 | const struct tc_netem_rate *r = nla_data(attr); |
765 | ||
766 | q->rate = r->rate; | |
90b41a1c HPP |
767 | q->packet_overhead = r->packet_overhead; |
768 | q->cell_size = r->cell_size; | |
809fa972 | 769 | q->cell_overhead = r->cell_overhead; |
90b41a1c HPP |
770 | if (q->cell_size) |
771 | q->cell_size_reciprocal = reciprocal_value(q->cell_size); | |
809fa972 HFS |
772 | else |
773 | q->cell_size_reciprocal = (struct reciprocal_value) { 0 }; | |
7bc0f28c HPP |
774 | } |
775 | ||
49545a77 | 776 | static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr) |
661b7972 | 777 | { |
661b7972 | 778 | const struct nlattr *la; |
779 | int rem; | |
780 | ||
781 | nla_for_each_nested(la, attr, rem) { | |
782 | u16 type = nla_type(la); | |
783 | ||
833fa743 | 784 | switch (type) { |
661b7972 | 785 | case NETEM_LOSS_GI: { |
786 | const struct tc_netem_gimodel *gi = nla_data(la); | |
787 | ||
2494654d | 788 | if (nla_len(la) < sizeof(struct tc_netem_gimodel)) { |
661b7972 | 789 | pr_info("netem: incorrect gi model size\n"); |
790 | return -EINVAL; | |
791 | } | |
792 | ||
793 | q->loss_model = CLG_4_STATES; | |
794 | ||
3fbac2a8 | 795 | q->clg.state = TX_IN_GAP_PERIOD; |
661b7972 | 796 | q->clg.a1 = gi->p13; |
797 | q->clg.a2 = gi->p31; | |
798 | q->clg.a3 = gi->p32; | |
799 | q->clg.a4 = gi->p14; | |
800 | q->clg.a5 = gi->p23; | |
801 | break; | |
802 | } | |
803 | ||
804 | case NETEM_LOSS_GE: { | |
805 | const struct tc_netem_gemodel *ge = nla_data(la); | |
806 | ||
2494654d | 807 | if (nla_len(la) < sizeof(struct tc_netem_gemodel)) { |
808 | pr_info("netem: incorrect ge model size\n"); | |
661b7972 | 809 | return -EINVAL; |
810 | } | |
811 | ||
812 | q->loss_model = CLG_GILB_ELL; | |
3fbac2a8 | 813 | q->clg.state = GOOD_STATE; |
661b7972 | 814 | q->clg.a1 = ge->p; |
815 | q->clg.a2 = ge->r; | |
816 | q->clg.a3 = ge->h; | |
817 | q->clg.a4 = ge->k1; | |
818 | break; | |
819 | } | |
820 | ||
821 | default: | |
822 | pr_info("netem: unknown loss type %u\n", type); | |
823 | return -EINVAL; | |
824 | } | |
825 | } | |
826 | ||
827 | return 0; | |
828 | } | |
829 | ||
27a3421e PM |
830 | static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = { |
831 | [TCA_NETEM_CORR] = { .len = sizeof(struct tc_netem_corr) }, | |
832 | [TCA_NETEM_REORDER] = { .len = sizeof(struct tc_netem_reorder) }, | |
833 | [TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) }, | |
7bc0f28c | 834 | [TCA_NETEM_RATE] = { .len = sizeof(struct tc_netem_rate) }, |
661b7972 | 835 | [TCA_NETEM_LOSS] = { .type = NLA_NESTED }, |
e4ae004b | 836 | [TCA_NETEM_ECN] = { .type = NLA_U32 }, |
6a031f67 | 837 | [TCA_NETEM_RATE64] = { .type = NLA_U64 }, |
27a3421e PM |
838 | }; |
839 | ||
2c10b32b TG |
840 | static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla, |
841 | const struct nla_policy *policy, int len) | |
842 | { | |
843 | int nested_len = nla_len(nla) - NLA_ALIGN(len); | |
844 | ||
661b7972 | 845 | if (nested_len < 0) { |
846 | pr_info("netem: invalid attributes len %d\n", nested_len); | |
2c10b32b | 847 | return -EINVAL; |
661b7972 | 848 | } |
849 | ||
2c10b32b TG |
850 | if (nested_len >= nla_attr_size(0)) |
851 | return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len), | |
852 | nested_len, policy); | |
661b7972 | 853 | |
2c10b32b TG |
854 | memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); |
855 | return 0; | |
856 | } | |
857 | ||
c865e5d9 | 858 | /* Parse netlink message to set options */ |
1e90474c | 859 | static int netem_change(struct Qdisc *sch, struct nlattr *opt) |
1da177e4 LT |
860 | { |
861 | struct netem_sched_data *q = qdisc_priv(sch); | |
b03f4672 | 862 | struct nlattr *tb[TCA_NETEM_MAX + 1]; |
1da177e4 | 863 | struct tc_netem_qopt *qopt; |
54a4b05c YY |
864 | struct clgstate old_clg; |
865 | int old_loss_model = CLG_RANDOM; | |
1da177e4 | 866 | int ret; |
10297b99 | 867 | |
b03f4672 | 868 | if (opt == NULL) |
1da177e4 LT |
869 | return -EINVAL; |
870 | ||
2c10b32b TG |
871 | qopt = nla_data(opt); |
872 | ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt)); | |
b03f4672 PM |
873 | if (ret < 0) |
874 | return ret; | |
875 | ||
54a4b05c YY |
876 | /* backup q->clg and q->loss_model */ |
877 | old_clg = q->clg; | |
878 | old_loss_model = q->loss_model; | |
879 | ||
880 | if (tb[TCA_NETEM_LOSS]) { | |
49545a77 | 881 | ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]); |
54a4b05c YY |
882 | if (ret) { |
883 | q->loss_model = old_loss_model; | |
884 | return ret; | |
885 | } | |
886 | } else { | |
887 | q->loss_model = CLG_RANDOM; | |
888 | } | |
889 | ||
890 | if (tb[TCA_NETEM_DELAY_DIST]) { | |
891 | ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]); | |
892 | if (ret) { | |
893 | /* recover clg and loss_model, in case | |
894 | * q->clg and q->loss_model were modified | |
895 | * in get_loss_clg() | |
896 | */ | |
897 | q->clg = old_clg; | |
898 | q->loss_model = old_loss_model; | |
899 | return ret; | |
900 | } | |
901 | } | |
902 | ||
50612537 | 903 | sch->limit = qopt->limit; |
10297b99 | 904 | |
1da177e4 LT |
905 | q->latency = qopt->latency; |
906 | q->jitter = qopt->jitter; | |
907 | q->limit = qopt->limit; | |
908 | q->gap = qopt->gap; | |
0dca51d3 | 909 | q->counter = 0; |
1da177e4 LT |
910 | q->loss = qopt->loss; |
911 | q->duplicate = qopt->duplicate; | |
912 | ||
bb2f8cc0 SH |
913 | /* for compatibility with earlier versions: | |
914 | * if gap is set, we need to assume 100% reorder probability | |
0dca51d3 | 915 | */ |
a362e0a7 SH |
916 | if (q->gap) |
917 | q->reorder = ~0; | |
0dca51d3 | 918 | |
265eb67f | 919 | if (tb[TCA_NETEM_CORR]) |
49545a77 | 920 | get_correlation(q, tb[TCA_NETEM_CORR]); |
1da177e4 | 921 | |
265eb67f | 922 | if (tb[TCA_NETEM_REORDER]) |
49545a77 | 923 | get_reorder(q, tb[TCA_NETEM_REORDER]); |
1da177e4 | 924 | |
265eb67f | 925 | if (tb[TCA_NETEM_CORRUPT]) |
49545a77 | 926 | get_corrupt(q, tb[TCA_NETEM_CORRUPT]); |
1da177e4 | 927 | |
7bc0f28c | 928 | if (tb[TCA_NETEM_RATE]) |
49545a77 | 929 | get_rate(q, tb[TCA_NETEM_RATE]); |
7bc0f28c | 930 | |
6a031f67 YY |
931 | if (tb[TCA_NETEM_RATE64]) |
932 | q->rate = max_t(u64, q->rate, | |
933 | nla_get_u64(tb[TCA_NETEM_RATE64])); | |
934 | ||
e4ae004b ED |
935 | if (tb[TCA_NETEM_ECN]) |
936 | q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]); | |
937 | ||
661b7972 | 938 | return ret; |
1da177e4 LT |
939 | } |
940 | ||
1e90474c | 941 | static int netem_init(struct Qdisc *sch, struct nlattr *opt) |
1da177e4 LT |
942 | { |
943 | struct netem_sched_data *q = qdisc_priv(sch); | |
944 | int ret; | |
945 | ||
946 | if (!opt) | |
947 | return -EINVAL; | |
948 | ||
59cb5c67 | 949 | qdisc_watchdog_init(&q->watchdog, sch); |
1da177e4 | 950 | |
661b7972 | 951 | q->loss_model = CLG_RANDOM; |
1da177e4 | 952 | ret = netem_change(sch, opt); |
50612537 | 953 | if (ret) |
250a65f7 | 954 | pr_info("netem: change failed\n"); |
1da177e4 LT |
955 | return ret; |
956 | } | |
957 | ||
958 | static void netem_destroy(struct Qdisc *sch) | |
959 | { | |
960 | struct netem_sched_data *q = qdisc_priv(sch); | |
961 | ||
59cb5c67 | 962 | qdisc_watchdog_cancel(&q->watchdog); |
50612537 ED |
963 | if (q->qdisc) |
964 | qdisc_destroy(q->qdisc); | |
6373a9a2 | 965 | dist_free(q->delay_dist); |
1da177e4 LT |
966 | } |
967 | ||
661b7972 | 968 | static int dump_loss_model(const struct netem_sched_data *q, |
969 | struct sk_buff *skb) | |
970 | { | |
971 | struct nlattr *nest; | |
972 | ||
973 | nest = nla_nest_start(skb, TCA_NETEM_LOSS); | |
974 | if (nest == NULL) | |
975 | goto nla_put_failure; | |
976 | ||
977 | switch (q->loss_model) { | |
978 | case CLG_RANDOM: | |
979 | /* legacy loss model */ | |
980 | nla_nest_cancel(skb, nest); | |
981 | return 0; /* no data */ | |
982 | ||
983 | case CLG_4_STATES: { | |
984 | struct tc_netem_gimodel gi = { | |
985 | .p13 = q->clg.a1, | |
986 | .p31 = q->clg.a2, | |
987 | .p32 = q->clg.a3, | |
988 | .p14 = q->clg.a4, | |
989 | .p23 = q->clg.a5, | |
990 | }; | |
991 | ||
1b34ec43 DM |
992 | if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi)) |
993 | goto nla_put_failure; | |
661b7972 | 994 | break; |
995 | } | |
996 | case CLG_GILB_ELL: { | |
997 | struct tc_netem_gemodel ge = { | |
998 | .p = q->clg.a1, | |
999 | .r = q->clg.a2, | |
1000 | .h = q->clg.a3, | |
1001 | .k1 = q->clg.a4, | |
1002 | }; | |
1003 | ||
1b34ec43 DM |
1004 | if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge)) |
1005 | goto nla_put_failure; | |
661b7972 | 1006 | break; |
1007 | } | |
1008 | } | |
1009 | ||
1010 | nla_nest_end(skb, nest); | |
1011 | return 0; | |
1012 | ||
1013 | nla_put_failure: | |
1014 | nla_nest_cancel(skb, nest); | |
1015 | return -1; | |
1016 | } | |
1017 | ||
1da177e4 LT |
1018 | static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) |
1019 | { | |
1020 | const struct netem_sched_data *q = qdisc_priv(sch); | |
861d7f74 | 1021 | struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb); |
1da177e4 LT |
1022 | struct tc_netem_qopt qopt; |
1023 | struct tc_netem_corr cor; | |
0dca51d3 | 1024 | struct tc_netem_reorder reorder; |
c865e5d9 | 1025 | struct tc_netem_corrupt corrupt; |
7bc0f28c | 1026 | struct tc_netem_rate rate; |
1da177e4 LT |
1027 | |
1028 | qopt.latency = q->latency; | |
1029 | qopt.jitter = q->jitter; | |
1030 | qopt.limit = q->limit; | |
1031 | qopt.loss = q->loss; | |
1032 | qopt.gap = q->gap; | |
1033 | qopt.duplicate = q->duplicate; | |
1b34ec43 DM |
1034 | if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt)) |
1035 | goto nla_put_failure; | |
1da177e4 LT |
1036 | |
1037 | cor.delay_corr = q->delay_cor.rho; | |
1038 | cor.loss_corr = q->loss_cor.rho; | |
1039 | cor.dup_corr = q->dup_cor.rho; | |
1b34ec43 DM |
1040 | if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor)) |
1041 | goto nla_put_failure; | |
0dca51d3 SH |
1042 | |
1043 | reorder.probability = q->reorder; | |
1044 | reorder.correlation = q->reorder_cor.rho; | |
1b34ec43 DM |
1045 | if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder)) |
1046 | goto nla_put_failure; | |
0dca51d3 | 1047 | |
c865e5d9 SH |
1048 | corrupt.probability = q->corrupt; |
1049 | corrupt.correlation = q->corrupt_cor.rho; | |
1b34ec43 DM |
1050 | if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt)) |
1051 | goto nla_put_failure; | |
c865e5d9 | 1052 | |
6a031f67 | 1053 | if (q->rate >= (1ULL << 32)) { |
2a51c1e8 ND |
1054 | if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate, |
1055 | TCA_NETEM_PAD)) | |
6a031f67 YY |
1056 | goto nla_put_failure; |
1057 | rate.rate = ~0U; | |
1058 | } else { | |
1059 | rate.rate = q->rate; | |
1060 | } | |
90b41a1c HPP |
1061 | rate.packet_overhead = q->packet_overhead; |
1062 | rate.cell_size = q->cell_size; | |
1063 | rate.cell_overhead = q->cell_overhead; | |
1b34ec43 DM |
1064 | if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate)) |
1065 | goto nla_put_failure; | |
7bc0f28c | 1066 | |
e4ae004b ED |
1067 | if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn)) |
1068 | goto nla_put_failure; | |
1069 | ||
661b7972 | 1070 | if (dump_loss_model(q, skb) != 0) |
1071 | goto nla_put_failure; | |
1072 | ||
861d7f74 | 1073 | return nla_nest_end(skb, nla); |
1da177e4 | 1074 | |
1e90474c | 1075 | nla_put_failure: |
861d7f74 | 1076 | nlmsg_trim(skb, nla); |
1da177e4 LT |
1077 | return -1; |
1078 | } | |
1079 | ||
10f6dfcf | 1080 | static int netem_dump_class(struct Qdisc *sch, unsigned long cl, |
1081 | struct sk_buff *skb, struct tcmsg *tcm) | |
1082 | { | |
1083 | struct netem_sched_data *q = qdisc_priv(sch); | |
1084 | ||
50612537 | 1085 | if (cl != 1 || !q->qdisc) /* only one class */ |
10f6dfcf | 1086 | return -ENOENT; |
1087 | ||
1088 | tcm->tcm_handle |= TC_H_MIN(1); | |
1089 | tcm->tcm_info = q->qdisc->handle; | |
1090 | ||
1091 | return 0; | |
1092 | } | |
1093 | ||
1094 | static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, | |
1095 | struct Qdisc **old) | |
1096 | { | |
1097 | struct netem_sched_data *q = qdisc_priv(sch); | |
1098 | ||
86a7996c | 1099 | *old = qdisc_replace(sch, new, &q->qdisc); |
10f6dfcf | 1100 | return 0; |
1101 | } | |
1102 | ||
1103 | static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg) | |
1104 | { | |
1105 | struct netem_sched_data *q = qdisc_priv(sch); | |
1106 | return q->qdisc; | |
1107 | } | |
1108 | ||
1109 | static unsigned long netem_get(struct Qdisc *sch, u32 classid) | |
1110 | { | |
1111 | return 1; | |
1112 | } | |
1113 | ||
1114 | static void netem_put(struct Qdisc *sch, unsigned long arg) | |
1115 | { | |
1116 | } | |
1117 | ||
1118 | static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker) | |
1119 | { | |
1120 | if (!walker->stop) { | |
1121 | if (walker->count >= walker->skip) | |
1122 | if (walker->fn(sch, 1, walker) < 0) { | |
1123 | walker->stop = 1; | |
1124 | return; | |
1125 | } | |
1126 | walker->count++; | |
1127 | } | |
1128 | } | |
1129 | ||
1130 | static const struct Qdisc_class_ops netem_class_ops = { | |
1131 | .graft = netem_graft, | |
1132 | .leaf = netem_leaf, | |
1133 | .get = netem_get, | |
1134 | .put = netem_put, | |
1135 | .walk = netem_walk, | |
1136 | .dump = netem_dump_class, | |
1137 | }; | |
1138 | ||
20fea08b | 1139 | static struct Qdisc_ops netem_qdisc_ops __read_mostly = { |
1da177e4 | 1140 | .id = "netem", |
10f6dfcf | 1141 | .cl_ops = &netem_class_ops, |
1da177e4 LT |
1142 | .priv_size = sizeof(struct netem_sched_data), |
1143 | .enqueue = netem_enqueue, | |
1144 | .dequeue = netem_dequeue, | |
77be155c | 1145 | .peek = qdisc_peek_dequeued, |
1da177e4 LT |
1146 | .drop = netem_drop, |
1147 | .init = netem_init, | |
1148 | .reset = netem_reset, | |
1149 | .destroy = netem_destroy, | |
1150 | .change = netem_change, | |
1151 | .dump = netem_dump, | |
1152 | .owner = THIS_MODULE, | |
1153 | }; | |
1154 | ||
1155 | ||
1156 | static int __init netem_module_init(void) | |
1157 | { | |
eb229c4c | 1158 | pr_info("netem: version " VERSION "\n"); |
1da177e4 LT |
1159 | return register_qdisc(&netem_qdisc_ops); |
1160 | } | |
1161 | static void __exit netem_module_exit(void) | |
1162 | { | |
1163 | unregister_qdisc(&netem_qdisc_ops); | |
1164 | } | |
1165 | module_init(netem_module_init) | |
1166 | module_exit(netem_module_exit) | |
1167 | MODULE_LICENSE("GPL"); |