/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"
/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP but has grown to include most of the functionality
	of a full blown network emulator like NISTnet. It can delay
	packets and add random jitter (and correlation). The random
	distribution can also be loaded from a table to provide
	normal, Pareto, or experimental curves. Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification; that can be handled by
	layering other disciplines. It does not need to do bandwidth
	control either, since that can be handled by using token
	bucket or other rate control.

	Correlated Loss Generator models

	Added generation of correlated loss according to the
	"Gilbert-Elliot" model, a 4-state Markov model.

	References:
	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	and intuitive loss model for packet networks and its implementation
	in the Netem module in the Linux kernel", available in [1]

	Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		 Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
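/* Example configuration from user space (illustrative only, assuming a
 * standard iproute2 "tc" binary; this block is not part of the original
 * sources):
 *
 *	# 100ms delay with +/-10ms uniform jitter, 25% correlated
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *
 *	# add 0.1% random loss and 1% duplication
 *	tc qdisc change dev eth0 root netem delay 100ms 10ms 25% \
 *		loss 0.1% duplicate 1%
 */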
struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	s64 latency;
	s64 jitter;
	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

	struct tc_netem_slot slot_config;
	struct slotstate {
		u64 slot_next;
		s32 packets_left;
		s32 bytes_left;
	} slot;
};
/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and skb->tstamp,
 * and skb->next & skb->prev are scratch space for a qdisc,
 * we save skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	u64	time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}
/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}
/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
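/* The mixing step above is 32.32 fixed point arithmetic for
 *
 *	answer = (1 - rho) * value + rho * last
 *
 * with rho scaled so that 2^32 corresponds to 1.0. Worked example
 * (ours, not from the original sources): rho = 0x80000000 weighs the
 * fresh random value and the previous output roughly equally, so
 * successive outputs drift instead of jumping around independently.
 */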
/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}
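/* Worked example (illustrative numbers, not from the original
 * sources): the probabilities a1..a5 are u32 values scaled so that ~0
 * means 1.0. With a4 = 0.01 * UINT_MAX and a1 = 0.05 * UINT_MAX, a
 * packet sent in TX_IN_GAP_PERIOD is an isolated loss with
 * probability 1% (rnd < a4), starts a loss burst with probability 5%
 * (a4 < rnd < a1 + a4), and is transmitted normally otherwise.
 */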
/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
		break;
	}

	return false;
}
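/* Sketch of the parameter mapping (following the clgstate comments
 * above): a1 = p (good->bad), a2 = r (bad->good), a3 = h (probability
 * that a bad-state packet still gets through), a4 = 1-k (loss
 * probability in the good state). Setting a4 = 0 gives the classic
 * Gilbert model, and additionally a3 = 0 gives the Simple Gilbert
 * model where every bad-state packet is lost.
 */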
static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4state loss model algorithm (used also for GI model)
		 * Extracts a value from the markov 4 state loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm
		 * Extracts a value from the Gilbert-Elliot loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}
/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static s64 tabledist(s64 mu, s32 sigma,
		     struct crndstate *state,
		     const struct disttable *dist)
{
	s64 x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2 * sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
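/* Worked example (ours, with illustrative numbers): table entries are
 * samples of the target distribution normalized to zero mean and a
 * standard deviation of NETEM_DIST_SCALE (8192 in linux/pkt_sched.h).
 * With sigma = 10000 and t = -4096 (-0.5 std deviations) the result
 * is about mu + t * sigma / NETEM_DIST_SCALE = mu - 5000. Splitting
 * sigma into its NETEM_DIST_SCALE quotient and remainder, with the
 * +/- NETEM_DIST_SCALE/2 rounding term, avoids overflow and
 * truncation bias in the scaled multiply.
 */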
static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
{
	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	return div64_u64(len * NSEC_PER_SEC, q->rate);
}
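/* Worked example (ours): emulating ATM-like framing with
 * cell_size = 48 and cell_overhead = 5, a 100 byte packet needs
 * ceil(100/48) = 3 cells and is charged 3 * (48 + 5) = 159 bytes of
 * wire time; at a rate of 125000 bytes/s (1 Mbit/s) that comes to
 * 159 / 125000 s ~= 1.27 ms.
 */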
static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p = rb_first(&q->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}
}
static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	u64 tnext = netem_skb_cb(nskb)->time_to_send;
	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = rb_to_skb(parent);
		if (tnext >= netem_skb_cb(skb)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&nskb->rbnode, parent, p);
	rb_insert_color(&nskb->rbnode, &q->t_root);
	sch->q.qlen++;
}
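/* Note (ours): ties on time_to_send go to the right subtree, so
 * packets with equal send times are dequeued by rb_first() in their
 * enqueue order, e.g. packets queued with t = 5, 3, 5 come out as 3,
 * first 5, second 5 - the tfifo stays FIFO within a timestamp.
 */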
/* netem can't properly corrupt a megapacket (like we get from GSO), so
 * when we statistically choose to corrupt one, we instead segment it,
 * returning the first packet to be corrupted, and re-enqueue the
 * remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}
static void netem_enqueue_skb_head(struct qdisc_skb_head *qh, struct sk_buff *skb)
{
	skb->next = qh->head;

	if (!qh->head)
		qh->tail = skb;
	qh->head = skb;
	qh->qlen++;
}
/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
	int nb = 0;
	int count = 1;
	int rc = NET_XMIT_SUCCESS;
	int rc_drop = NET_XMIT_DROP;

	/* Do not fool qdisc_drop_all() */
	skb->prev = NULL;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}
	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter || q->rate)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
		rc_drop = NET_XMIT_SUCCESS;
	}
	/*
	 * Randomized packet corruption.
	 * Make copy if needed since we are modifying
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (skb_is_gso(skb)) {
			segs = netem_segment(skb, sch, to_free);
			if (!segs)
				return rc_drop;
		} else {
			segs = skb;
		}

		skb = segs;
		segs = segs->next;

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			goto finish_segs;
		}

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}
	if (unlikely(sch->q.qlen >= sch->limit)) {
		qdisc_drop_all(skb, sch, to_free);
		return rc_drop;
	}

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		u64 now;
		s64 delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = ktime_get_ns();

		if (q->rate) {
			struct netem_skb_cb *last = NULL;

			if (sch->q.tail)
				last = netem_skb_cb(sch->q.tail);
			if (q->t_root.rb_node) {
				struct sk_buff *t_skb;
				struct netem_skb_cb *t_last;

				t_skb = skb_rb_last(&q->t_root);
				t_last = netem_skb_cb(t_skb);
				if (!last ||
				    t_last->time_to_send > last->time_to_send)
					last = t_last;
			}

			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= last->time_to_send - now;
				delay = max_t(s64, 0, delay);
				now = last->time_to_send;
			}

			delay += packet_time_ns(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = ktime_get_ns();
		q->counter = 0;

		netem_enqueue_skb_head(&sch->q, skb);
		sch->qstats.requeues++;
	}
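	/* Illustrative user space trigger for this branch (example
	 * command, not from the original sources):
	 *
	 *	tc qdisc change dev eth0 root netem \
	 *		delay 10ms reorder 25% gap 5
	 *
	 * every 5th packet that also passes the 25% check is sent
	 * immediately via the head of the queue; all others take the
	 * delayed tfifo path above.
	 */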
finish_segs:
	if (segs) {
		while (segs) {
			skb2 = segs->next;
			segs->next = NULL;
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		sch->q.qlen += nb;
		if (nb > 1)
			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	}
	return NET_XMIT_SUCCESS;
}
/* Delay the next round with a new future slot with a
 * correct number of bytes and packets.
 */
static void get_slot_next(struct netem_sched_data *q, u64 now)
{
	q->slot.slot_next = now + q->slot_config.min_delay +
		(prandom_u32() *
		 (q->slot_config.max_delay -
		  q->slot_config.min_delay) >> 32);
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
}
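/* Sketch (ours, with illustrative numbers): with min_delay = 1ms and
 * max_delay = 3ms, each new slot opens a uniform 1-3ms after now --
 * prandom_u32() is scaled into the delay range by the 32-bit
 * multiply-and-shift -- and the packet/byte budgets are refilled from
 * slot_config. This can approximate duty-cycled links where frames
 * are released in bursts.
 */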
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct rb_node *p;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
deliver:
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	p = rb_first(&q->t_root);
	if (p) {
		u64 time_to_send;
		u64 now = ktime_get_ns();

		skb = rb_to_skb(p);

		/* if more time remaining? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (q->slot.slot_next && q->slot.slot_next < time_to_send)
			get_slot_next(q, now);

		if (time_to_send <= now && q->slot.slot_next <= now) {
			rb_erase(p, &q->t_root);
			sch->q.qlen--;
			qdisc_qstats_backlog_dec(sch, skb);
			skb->next = NULL;
			skb->prev = NULL;
			/* skb->dev shares skb->rbnode area,
			 * we need to restore its value.
			 */
			skb->dev = qdisc_dev(sch);

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (skb->tc_redirected && skb->tc_from_ingress)
				skb->tstamp = 0;
#endif

			if (q->slot.slot_next) {
				q->slot.packets_left--;
				q->slot.bytes_left -= qdisc_pkt_len(skb);
				if (q->slot.packets_left <= 0 ||
				    q->slot.bytes_left <= 0)
					get_slot_next(q, now);
			}

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS &&
				    net_xmit_drop_count(err)) {
					qdisc_qstats_drop(sch);
					qdisc_tree_reduce_backlog(sch, 1,
								  pkt_len);
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   max(time_to_send,
					       q->slot.slot_next));
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}
static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}
static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(q->delay_dist, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}
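/* Note (ours): the table payload comes from user space. iproute2
 * ships sample distribution files (normal.dist, pareto.dist, ...,
 * typically installed under /usr/lib/tc/) holding signed 16-bit
 * values normalized to NETEM_DIST_SCALE, which tc passes down in
 * TCA_NETEM_DELAY_DIST; tabledist() above then scales them by the
 * configured jitter.
 */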
static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_slot *c = nla_data(attr);

	q->slot_config = *c;
	if (q->slot_config.max_packets == 0)
		q->slot_config.max_packets = INT_MAX;
	if (q->slot_config.max_bytes == 0)
		q->slot_config.max_bytes = INT_MAX;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
	if (q->slot_config.min_delay | q->slot_config.max_delay)
		q->slot.slot_next = ktime_get_ns();
	else
		q->slot.slot_next = 0;
}
static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}
static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}
static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}
static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}
static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}
static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
	[TCA_NETEM_SLOT]	= { .len = sizeof(struct tc_netem_slot) },
};
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
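/* Layout sketch (ours) of what parse_attr() walks: netem keeps a
 * fixed struct tc_netem_qopt at the start of TCA_OPTIONS for old
 * userspace compatibility, with real netlink attributes packed after
 * it:
 *
 *	TCA_OPTIONS: [ struct tc_netem_qopt | pad | nlattr | nlattr ... ]
 *	                                      ^ nla_data(nla) + NLA_ALIGN(len)
 */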
/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret) {
			/* recover clg and loss_model, in case
			 * q->clg and q->loss_model were modified
			 * in get_loss_clg()
			 */
			q->clg = old_clg;
			q->loss_model = old_loss_model;
			return ret;
		}
	}

	sch->limit = qopt->limit;

	q->latency = PSCHED_TICKS2NS(qopt->latency);
	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_LATENCY64])
		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);

	if (tb[TCA_NETEM_JITTER64])
		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	if (tb[TCA_NETEM_SLOT])
		get_slot(q, tb[TCA_NETEM_SLOT]);

	return ret;
}
static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (!opt)
		return -EINVAL;

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}
static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
}
static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;
	struct tc_netem_slot slot;

	qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency),
			     UINT_MAX);
	qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter),
			    UINT_MAX);
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	if (q->slot_config.min_delay | q->slot_config.max_delay) {
		slot = q->slot_config;
		if (slot.max_packets == INT_MAX)
			slot.max_packets = 0;
		if (slot.max_bytes == INT_MAX)
			slot.max_bytes = 0;
		if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
			goto nla_put_failure;
	}

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}
static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}
static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}
static unsigned long netem_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}
static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.find		=	netem_find,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};
static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}

static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}

module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");