/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/* Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP, but has grown to include most of the functionality
	of a full-blown network emulator like NISTnet. It can delay
	packets and add random jitter (and correlation). The random
	distribution can also be loaded from a table, to provide
	normal, Pareto, or experimental curves. Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification; that can be handled by
	layering other disciplines. It does not need to do bandwidth
	control either, since that can be handled by using token
	bucket or other rate control.

	Correlated Loss Generator models

	Added generation of correlated loss according to the
	"Gilbert-Elliot" model, a 4-state Markov model.

	References:
	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	and intuitive loss model for packet networks and its implementation
	in the Netem module in the Linux kernel", available in [1]

	Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		 Fabio Ludovici <fabio.ludovici at yahoo.it>
*/

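/* Usage sketch (illustrative only, not part of the kernel build): the
 * iproute2 "tc" front end fills in the netlink attributes this module
 * parses, so a typical configuration might look like:
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *	tc qdisc change dev eth0 root netem loss 0.3% 25%
 *
 * The first command adds 100ms of delay with +/- 10ms of jitter, each
 * random element depending 25% on the previous one; the second drops
 * 0.3% of packets with 25% correlation. The device name and numbers
 * here are examples; see tc-netem(8) for the authoritative syntax.
 */
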
struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and skb->tstamp,
 * and skb->next & skb->prev are scratch space for a qdisc,
 * we save skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	psched_time_t	time_to_send;
	ktime_t		tstamp_save;
};

static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
{
	return rb_entry(rb, struct sk_buff, rbnode);
}

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
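
/* A worked example of the blend above (illustrative numbers only): rho
 * is a correlation coefficient scaled to 2^32, so with rho ~ 2^31
 * (i.e. 0.5),
 *
 *	answer = (value * (2^32 - rho) + last * rho) >> 32
 *	       ~= value/2 + last/2
 *
 * each output is roughly the average of a fresh random draw and the
 * previous output, so successive samples drift instead of jumping.
 */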

/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}
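
/* Usage sketch (illustrative, tc-netem(8) syntax): the a1..a5 transition
 * probabilities above arrive from userspace as p13/p31/p32/p23/p14, e.g.
 *
 *	tc qdisc add dev eth0 root netem loss state 1% 10% 2% 10% 1%
 *
 * where the five percentages map to P13, P31, P32, P23 and P14 in that
 * order. The numbers are examples only, not recommended settings.
 */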

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
	}

	return false;
}
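
/* Usage sketch (illustrative, tc-netem(8) syntax): the Gilbert-Elliot
 * parameters map to the "gemodel" option, e.g.
 *
 *	tc qdisc add dev eth0 root netem loss gemodel 1% 10% 70% 0.1%
 *
 * supplying p, r, 1-h and 1-k in that order; giving only p and r yields
 * the Simple Gilbert special case. The numbers are examples only.
 */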

static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4-state loss model algorithm (also used for the GI model):
		 * extracts a value from the Markov 4-state loss generator;
		 * a true result means this packet is lost.
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm:
		 * extracts a value from the Gilbert-Elliot loss generator;
		 * a true result means this packet is lost.
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
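
/* A worked example of the scaling above (illustrative numbers): with
 * mu = 100ms, sigma = 10ms and no table loaded, the result is uniform
 * in [mu - sigma, mu + sigma). With a table, t = dist->table[rnd % size]
 * is a signed sample in units of NETEM_DIST_SCALE, so the value returned
 * is approximately mu + t * sigma / NETEM_DIST_SCALE; the division is
 * split into remainder and quotient parts so the intermediate product
 * cannot overflow for large sigma.
 */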

static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
{
	u64 ticks;

	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	ticks = (u64)len * NSEC_PER_SEC;

	do_div(ticks, q->rate);
	return PSCHED_NS2TICKS(ticks);
}
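
/* A worked example (illustrative numbers): at rate = 125000 bytes/s
 * (1 Mbit/s), with no packet_overhead and no cell framing, a 1500 byte
 * packet gives ticks = 1500 * NSEC_PER_SEC / 125000 = 12ms of
 * transmission time before the conversion to scheduler ticks.
 */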

static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p;

	while ((p = rb_first(&q->t_root))) {
		struct sk_buff *skb = netem_rb_to_skb(p);

		rb_erase(p, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = netem_rb_to_skb(parent);
		if (tnext >= netem_skb_cb(skb)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&nskb->rbnode, parent, p);
	rb_insert_color(&nskb->rbnode, &q->t_root);
	sch->q.qlen++;
}

/* netem can't properly corrupt a megapacket (like we get from GSO), so when
 * we statistically choose to corrupt one, we segment it instead, returning
 * the first packet to be corrupted, and re-enqueue the remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}

static void netem_enqueue_skb_head(struct qdisc_skb_head *qh, struct sk_buff *skb)
{
	skb->next = qh->head;

	if (!qh->head)
		qh->tail = skb;
	qh->head = skb;
	qh->qlen++;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
	int nb = 0;
	int count = 1;
	int rc = NET_XMIT_SUCCESS;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate the packet, re-insert the duplicate at the
	 * top of the qdisc tree, since the parent qdisc expects that only
	 * one skb will be queued per enqueue call.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed, since we are going to modify the packet.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (skb_is_gso(skb)) {
			segs = netem_segment(skb, sch, to_free);
			if (!segs)
				return NET_XMIT_DROP;
		} else {
			segs = skb;
		}

		skb = segs;
		segs = segs->next;

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			goto finish_segs;
		}

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop(skb, sch, to_free);

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();

		if (q->rate) {
			struct sk_buff *last;

			if (sch->q.qlen)
				last = sch->q.tail;
			else
				last = netem_rb_to_skb(rb_last(&q->t_root));
			if (last) {
				/*
				 * The last packet in the queue is the
				 * reference point (now); calculate the time
				 * bonus it gives us and subtract that from
				 * the delay.
				 */
				delay -= netem_skb_cb(last)->time_to_send - now;
				delay = max_t(psched_tdiff_t, 0, delay);
				now = netem_skb_cb(last)->time_to_send;
			}

			delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		cb->tstamp_save = skb->tstamp;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		netem_enqueue_skb_head(&sch->q, skb);
		sch->qstats.requeues++;
	}

finish_segs:
	if (segs) {
		while (segs) {
			skb2 = segs->next;
			segs->next = NULL;
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		sch->q.qlen += nb;
		if (nb > 1)
			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	}
	return NET_XMIT_SUCCESS;
}
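
/* Usage sketch (illustrative, tc-netem(8) syntax): the duplication and
 * corruption paths above are driven by e.g.
 *
 *	tc qdisc change dev eth0 root netem duplicate 1% 25%
 *	tc qdisc change dev eth0 root netem corrupt 0.1%
 *
 * The corrupt case flips a single random bit in the (possibly
 * resegmented) packet, after any pending hardware checksum has been
 * resolved in software.
 */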

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct rb_node *p;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
deliver:
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	p = rb_first(&q->t_root);
	if (p) {
		psched_time_t time_to_send;

		skb = netem_rb_to_skb(p);

		/* is it time to send this packet yet? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (time_to_send <= psched_get_time()) {
			rb_erase(p, &q->t_root);

			sch->q.qlen--;
			qdisc_qstats_backlog_dec(sch, skb);
			skb->next = NULL;
			skb->prev = NULL;
			skb->tstamp = netem_skb_cb(skb)->tstamp_save;

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
				skb->tstamp = 0;
#endif

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS &&
				    net_xmit_drop_count(err)) {
					qdisc_qstats_drop(sch);
					qdisc_tree_reduce_backlog(sch, 1,
								  pkt_len);
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}
		qdisc_watchdog_schedule(&q->watchdog, time_to_send);
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}
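
/* Note on the dequeue/watchdog interplay above: when the earliest skb in
 * the tfifo is not yet due, netem_dequeue() returns NULL and arms the
 * qdisc watchdog for that skb's time_to_send; the watchdog then
 * reschedules the qdisc, so dequeue runs again once the deadline has
 * passed.
 */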

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable-size payload containing
 * signed 16-bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;
	size_t s;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	s = sizeof(struct disttable) + n * sizeof(s16);
	d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
	if (!d)
		d = vmalloc(s);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(q->delay_dist, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}
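
/* The table itself is generated in userspace; iproute2, for instance,
 * ships precomputed normal, pareto and paretonormal tables that tc
 * passes down in TCA_NETEM_DELAY_DIST when the user requests e.g.
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 20ms distribution normal
 *
 * (an illustrative invocation; the distribution files and their location
 * are an iproute2 packaging detail, not something this module dictates).
 */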

static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
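
/* Layout handled by parse_attr() above, sketched for reference: netem's
 * TCA_OPTIONS payload is a fixed struct tc_netem_qopt header, then
 * NLA_ALIGN padding, then optional nested attributes:
 *
 *	+----------------------+---------+----------------+---
 *	| struct tc_netem_qopt | padding | TCA_NETEM_CORR | ...
 *	+----------------------+---------+----------------+---
 *
 * which is why the nested attributes are parsed starting at
 * nla_data(nla) + NLA_ALIGN(len) rather than with a plain nested parse.
 */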

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret) {
			/* recover clg and loss_model, in case
			 * q->clg and q->loss_model were modified
			 * in get_loss_clg()
			 */
			q->clg = old_clg;
			q->loss_model = old_loss_model;
			return ret;
		}
	}

	sch->limit = qopt->limit;

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions:
	 * if gap is set, we need to assume 100% reorder probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");