/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/* Network Emulation Queuing algorithm.
   ====================================

   Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		Network Emulation Tool"
	    [2] Luigi Rizzo, DummyNet for FreeBSD

   ----------------------------------------------------------------

   This started out as a simple way to delay outgoing packets to
   test TCP but has grown to include most of the functionality
   of a full blown network emulator like NISTnet. It can delay
   packets and add random jitter (and correlation). The random
   distribution can be loaded from a table as well to provide
   normal, Pareto, or experimental curves. Packet loss,
   duplication, and reordering can also be emulated.

   This qdisc does not do classification; that can be handled by
   layering other disciplines. It does not need to do bandwidth
   control either, since that can be handled by using token
   bucket or other rate control.

   Correlated Loss Generator models

   Added generation of correlated loss according to the
   "Gilbert-Elliot" model, a 4-state Markov model.

   References:
   [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
   [2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
   and intuitive loss model for packet networks and its implementation
   in the Netem module in the Linux kernel", available in [1]

   Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
	    Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
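
/* Illustrative usage (added comment, not part of the original source):
 * netem is normally configured from user space with the tc(8) tool, e.g.
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *	tc qdisc change dev eth0 root netem loss 0.3% duplicate 1%
 *
 * The first command adds 100ms of delay with +/-10ms of jitter and 25%
 * delay correlation; the second adds random loss and duplication.
 */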

struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	s64 latency;
	s64 jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

	struct tc_netem_slot slot_config;
	struct slotstate {
		u64 slot_next;
		s32 packets_left;
		s32 bytes_left;
	} slot;

};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and skb->tstamp,
 * and skb->next & skb->prev are scratch space for a qdisc,
 * we save skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	u64	time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
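
/* Worked example (added comment, not part of the original source):
 * the update above is a fixed-point blend
 *
 *	answer = (value * (2^32 - rho) + last * rho) / 2^32
 *
 * so with rho = 0x80000000 (correlation 0.5) each output is the average
 * of a fresh random draw and the previous output, producing a sequence
 * whose successive values are positively correlated.
 */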

/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
	}

	return false;
}
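
/* Parameter recap (added comment, not part of the original source): in
 * the clgstate mapping used above, a1 = p (Good->Bad transition), a2 = r
 * (Bad->Good transition), a3 = h (transmission probability while Bad)
 * and a4 = 1-k (loss probability while Good). For example, the Simple
 * Gilbert special case corresponds to h = 0 and k = 1: every packet in
 * the Bad state is lost and none in the Good state.
 */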

static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4state loss model algorithm (used also for GI model)
		 * Extracts a value from the markov 4 state loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm
		 * Extracts a value from the Gilbert-Elliot loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}


/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static s64 tabledist(s64 mu, s32 sigma,
		     struct crndstate *state,
		     const struct disttable *dist)
{
	s64 x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2 * sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
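
/* Worked example (added comment, not part of the original source): the
 * table entries are samples of the target distribution scaled by
 * NETEM_DIST_SCALE, so the result is effectively
 *
 *	mu + sigma * t / NETEM_DIST_SCALE
 *
 * computed in two halves to avoid 64-bit overflow. With mu = 100ms,
 * sigma = 10ms and a table entry t = NETEM_DIST_SCALE (one standard
 * deviation), the returned delay is roughly 110ms.
 */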

static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
{
	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size) /* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	return div64_u64(len * NSEC_PER_SEC, q->rate);
}
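
/* Worked example (added comment, not part of the original source):
 * q->rate is in bytes per second, so a 1500 byte packet on a 1 Gbit/s
 * emulated link (q->rate = 125000000) occupies the wire for
 *
 *	1500 * NSEC_PER_SEC / 125000000 = 12000 ns = 12 us
 *
 * before any cell padding or per-packet overhead is applied.
 */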

static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p = rb_first(&q->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	u64 tnext = netem_skb_cb(nskb)->time_to_send;
	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = rb_to_skb(parent);
		if (tnext >= netem_skb_cb(skb)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&nskb->rbnode, parent, p);
	rb_insert_color(&nskb->rbnode, &q->t_root);
	sch->q.qlen++;
}

/* netem can't properly corrupt a megapacket (like we get from GSO), so instead
 * when we statistically choose to corrupt one, we segment it, returning
 * the first packet to be corrupted, and re-enqueue the remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}

static void netem_enqueue_skb_head(struct qdisc_skb_head *qh, struct sk_buff *skb)
{
	skb->next = qh->head;

	if (!qh->head)
		qh->tail = skb;
	qh->head = skb;
	qh->qlen++;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
	int nb = 0;
	int count = 1;
	int rc = NET_XMIT_SUCCESS;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter || q->rate)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make copy if needed since we are modifying.
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (skb_is_gso(skb)) {
			segs = netem_segment(skb, sch, to_free);
			if (!segs)
				return NET_XMIT_DROP;
		} else {
			segs = skb;
		}

		skb = segs;
		segs = segs->next;

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			goto finish_segs;
		}

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop_all(skb, sch, to_free);

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		u64 now;
		s64 delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = ktime_get_ns();

		if (q->rate) {
			struct netem_skb_cb *last = NULL;

			if (sch->q.tail)
				last = netem_skb_cb(sch->q.tail);
			if (q->t_root.rb_node) {
				struct sk_buff *t_skb;
				struct netem_skb_cb *t_last;

				t_skb = skb_rb_last(&q->t_root);
				t_last = netem_skb_cb(t_skb);
				if (!last ||
				    t_last->time_to_send > last->time_to_send) {
					last = t_last;
				}
			}

			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract it
				 * from the delay.
				 */
				delay -= last->time_to_send - now;
				delay = max_t(s64, 0, delay);
				now = last->time_to_send;
			}

			delay += packet_time_ns(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = ktime_get_ns();
		q->counter = 0;

		netem_enqueue_skb_head(&sch->q, skb);
		sch->qstats.requeues++;
	}

finish_segs:
	if (segs) {
		while (segs) {
			skb2 = segs->next;
			segs->next = NULL;
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		sch->q.qlen += nb;
		if (nb > 1)
			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	}
	return NET_XMIT_SUCCESS;
}

/* Schedule the next transmission slot at a future time and refill
 * its packet and byte budgets.
 */
static void get_slot_next(struct netem_sched_data *q, u64 now)
{
	q->slot.slot_next = now + q->slot_config.min_delay +
		(prandom_u32() *
		 (q->slot_config.max_delay -
		  q->slot_config.min_delay) >> 32);
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
}
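
/* Note (added comment, not part of the original source): slots model
 * links that deliver packets in bursts, e.g. WiFi transmission
 * opportunities. slot_next is drawn uniformly from
 * [min_delay, max_delay] with the same 32-bit fixed-point trick as
 * get_crandom(): a random u32 scales the interval width and only the
 * top 32 bits of the product are kept.
 */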

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct rb_node *p;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
deliver:
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	p = rb_first(&q->t_root);
	if (p) {
		u64 time_to_send;
		u64 now = ktime_get_ns();

		skb = rb_to_skb(p);

		/* is there more time remaining? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (q->slot.slot_next && q->slot.slot_next < time_to_send)
			get_slot_next(q, now);

		if (time_to_send <= now && q->slot.slot_next <= now) {
			rb_erase(p, &q->t_root);
			sch->q.qlen--;
			qdisc_qstats_backlog_dec(sch, skb);
			skb->next = NULL;
			skb->prev = NULL;
			/* skb->dev shares skb->rbnode area,
			 * we need to restore its value.
			 */
			skb->dev = qdisc_dev(sch);

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (skb->tc_redirected && skb->tc_from_ingress)
				skb->tstamp = 0;
#endif

			if (q->slot.slot_next) {
				q->slot.packets_left--;
				q->slot.bytes_left -= qdisc_pkt_len(skb);
				if (q->slot.packets_left <= 0 ||
				    q->slot.bytes_left <= 0)
					get_slot_next(q, now);
			}

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS &&
				    net_xmit_drop_count(err)) {
					qdisc_qstats_drop(sch);
					qdisc_tree_reduce_backlog(sch, 1,
								  pkt_len);
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   max(time_to_send,
					       q->slot.slot_next));
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */

static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(q->delay_dist, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}

static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_slot *c = nla_data(attr);

	q->slot_config = *c;
	if (q->slot_config.max_packets == 0)
		q->slot_config.max_packets = INT_MAX;
	if (q->slot_config.max_bytes == 0)
		q->slot_config.max_bytes = INT_MAX;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
	if (q->slot_config.min_delay | q->slot_config.max_delay)
		q->slot.slot_next = ktime_get_ns();
	else
		q->slot.slot_next = 0;
}

static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
	[TCA_NETEM_SLOT]	= { .len = sizeof(struct tc_netem_slot) },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret) {
			/* recover clg and loss_model, in case
			 * q->clg and q->loss_model were modified
			 * in get_loss_clg()
			 */
			q->clg = old_clg;
			q->loss_model = old_loss_model;
			return ret;
		}
	}

	sch->limit = qopt->limit;

	q->latency = PSCHED_TICKS2NS(qopt->latency);
	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_LATENCY64])
		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);

	if (tb[TCA_NETEM_JITTER64])
		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	if (tb[TCA_NETEM_SLOT])
		get_slot(q, tb[TCA_NETEM_SLOT]);

	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (!opt)
		return -EINVAL;

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;
	struct tc_netem_slot slot;

	qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency),
			     UINT_MAX);
	qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter),
			    UINT_MAX);
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	if (q->slot_config.min_delay | q->slot_config.max_delay) {
		slot = q->slot_config;
		if (slot.max_packets == INT_MAX)
			slot.max_packets = 0;
		if (slot.max_bytes == INT_MAX)
			slot.max_bytes = 0;
		if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
			goto nla_put_failure;
	}

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.find		=	netem_find,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");