/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#define VERSION "1.2"

/* Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP but has grown to include most of the functionality
	of a full blown network emulator like NISTnet. It can delay
	packets and add random jitter (and correlation). The random
	distribution can be loaded from a table as well to provide
	normal, Pareto, or experimental curves. Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification; that can be handled by
	layering other disciplines. It does not need to do bandwidth
	control either, since that can be handled by using token
	bucket or other rate control.

	The simulator is limited by the Linux timer resolution
	and will create packet bursts on the HZ boundary (1ms).
*/

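/*
 * For orientation, a sketch of typical userspace usage via iproute2's
 * "tc" front end (the device name is only an example):
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *	tc qdisc change dev eth0 root netem loss 0.1% duplicate 1%
 *	tc qdisc change dev eth0 root netem delay 10ms reorder 25% 50%
 *
 * A trailing percentage after a value (or, for reorder, the second
 * percentage) is a correlation and feeds get_crandom() below.
 */
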
struct netem_sched_data {
	struct Qdisc	*qdisc;
	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;
};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
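
/*
 * In other words: writing r = rho / 2^32 (the code uses rho + 1 so that
 * r can reach 1.0 exactly), the shift-by-32 above is the fixed-point
 * form of
 *
 *	answer = (1 - r) * value + r * last
 *
 * an exponentially weighted blend of fresh entropy and the previous
 * output: rho == 0 gives independent values, rho == ~0 a nearly
 * constant sequence.
 */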

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
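
/*
 * Worked example of the fixed-point math above, assuming the usual
 * NETEM_DIST_SCALE of 8192: for sigma = 10000 and a table entry
 * t = 4096 (i.e. +0.5 standard deviations),
 *
 *	x  = (10000 % 8192) * 4096 = 7405568
 *	x += 8192/2				(rounding)
 *	x / 8192		   = 904
 *	(10000 / 8192) * 4096	   = 4096
 *
 * giving mu + 5000, i.e. mu + sigma/2.  Splitting sigma into quotient
 * and remainder keeps the multiply from overflowing for large sigma.
 */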

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	pr_debug("netem_enqueue skb=%p\n", skb);

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Random packet drop 0 => none, ~0 => all */
	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate the packet, then re-insert it at the top
	 * of the qdisc tree, since the parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = sch->dev->qdisc;
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		rootq->enqueue(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the data.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC))
		    || (skb->ip_summed == CHECKSUM_PARTIAL
			&& skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = (struct netem_skb_cb *)skb->cb;
	if (q->gap == 0			/* not doing reordering */
	    || q->counter < q->gap	/* inside last reordering gap */
	    || q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();
		cb->time_to_send = now + delay;
		++q->counter;
		ret = q->qdisc->enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;
		ret = q->qdisc->ops->requeue(skb, q->qdisc);
	}

	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
	} else
		sch->qstats.drops++;

	pr_debug("netem: enqueue ret %d\n", ret);
	return ret;
}
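
/*
 * A note on the count logic in netem_enqueue(): count starts at 1 (the
 * packet itself), duplication raises it to 2 and loss lowers it by 1,
 * so a packet that is both duplicated and lost passes through as a
 * single copy.  count == 0 frees the skb and returns NET_XMIT_BYPASS so
 * the parent does not account a queued packet; a duplicate re-enters at
 * the root qdisc with q->duplicate temporarily zeroed to keep the clone
 * from being duplicated again.
 */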

/* Requeue packets but don't change time stamp */
static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
		sch->q.qlen++;
		sch->qstats.requeues++;
	}

	return ret;
}

static unsigned int netem_drop(struct Qdisc* sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	smp_mb();
	if (sch->flags & TCQ_F_THROTTLED)
		return NULL;

	skb = q->qdisc->dequeue(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb
			= (const struct netem_skb_cb *)skb->cb;
		psched_time_t now = psched_get_time();

		/* has the packet's send time arrived? */
		if (cb->time_to_send <= now) {
			pr_debug("netem_dequeue: return skb=%p\n", skb);
			sch->q.qlen--;
			return skb;
		}

		if (unlikely(q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS)) {
			qdisc_tree_decrease_qlen(q->qdisc, 1);
			sch->qstats.drops++;
			printk(KERN_ERR "netem: %s could not requeue\n",
			       q->qdisc->ops->id);
		}

		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
	}

	return NULL;
}
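
/*
 * netem_dequeue() above doubles as a peek: the inner qdisc hands back
 * its earliest skb, and if that skb's time_to_send has not yet arrived
 * it is pushed back via ->requeue() and the qdisc watchdog is armed for
 * the deadline.  The TCQ_F_THROTTLED test short-circuits this dance
 * while the watchdog is already pending.
 */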

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	qdisc_watchdog_cancel(&q->watchdog);
}

/* Pass size change message down to embedded FIFO */
static int set_fifo_limit(struct Qdisc *q, int limit)
{
	struct rtattr *rta;
	int ret = -ENOMEM;

	/* Hack to avoid sending change message to non-FIFO */
	if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
		return 0;

	rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
	if (rta) {
		rta->rta_type = RTM_NEWQDISC;
		rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
		((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;

		ret = q->ops->change(q, rta);
		kfree(rta);
	}
	return ret;
}
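
/*
 * The rtattr fabricated in set_fifo_limit() mimics the TCA_OPTIONS
 * payload the embedded FIFO's ->change() would have received from
 * userspace, so netem can forward its own limit without a netlink
 * round trip.
 */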

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned long n = RTA_PAYLOAD(attr)/sizeof(__s16);
	const __s16 *data = RTA_DATA(attr);
	struct disttable *d;
	int i;

	if (n > 65536)
		return -EINVAL;

	d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	spin_lock_bh(&sch->dev->queue_lock);
	d = xchg(&q->delay_dist, d);
	spin_unlock_bh(&sch->dev->queue_lock);

	kfree(d);
	return 0;
}
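
/*
 * The distribution tables themselves are produced offline; iproute2
 * ships pre-computed normal, pareto and paretonormal tables (and a
 * "maketable" tool for custom ones) that tc passes down as the
 * TCA_NETEM_DELAY_DIST attribute parsed above.
 */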

static int get_correlation(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*c))
		return -EINVAL;

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
	return 0;
}

static int get_reorder(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*r))
		return -EINVAL;

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
	return 0;
}

static int get_corrupt(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*r))
		return -EINVAL;

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
	return 0;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = RTA_DATA(opt);
	ret = set_fifo_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_debug("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* For compatibility with earlier versions:
	 * if gap is set, assume 100% reordering probability.
	 */
	if (q->gap)
		q->reorder = ~0;

	/* Handle nested options after initial queue options.
	 * Should have put all options in nested format but too late now.
	 */
	if (RTA_PAYLOAD(opt) > sizeof(*qopt)) {
		struct rtattr *tb[TCA_NETEM_MAX];
		if (rtattr_parse(tb, TCA_NETEM_MAX,
				 RTA_DATA(opt) + sizeof(*qopt),
				 RTA_PAYLOAD(opt) - sizeof(*qopt)))
			return -EINVAL;

		if (tb[TCA_NETEM_CORR-1]) {
			ret = get_correlation(sch, tb[TCA_NETEM_CORR-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_DELAY_DIST-1]) {
			ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_REORDER-1]) {
			ret = get_reorder(sch, tb[TCA_NETEM_REORDER-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_CORRUPT-1]) {
			ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT-1]);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/*
 * Special case version of FIFO queue for use by netem.
 * It queues in order based on timestamps in skbs.
 */
struct fifo_sched_data {
	u32 limit;
	psched_time_t oldest;
};

static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	psched_time_t tnext = ((struct netem_skb_cb *)nskb->cb)->time_to_send;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		/* Optimize for add at tail */
		if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
			q->oldest = tnext;
			return qdisc_enqueue_tail(nskb, sch);
		}

		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb
				= (const struct netem_skb_cb *)skb->cb;

			if (tnext >= cb->time_to_send)
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += nskb->len;
		sch->bstats.bytes += nskb->len;
		sch->bstats.packets++;

		return NET_XMIT_SUCCESS;
	}

	return qdisc_reshape_fail(nskb, sch);
}
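
/*
 * tfifo_enqueue() keeps the queue sorted by time_to_send: monotonically
 * increasing timestamps take the O(1) tail path, and the reverse walk
 * only runs when jitter gives a later arrival an earlier deadline.
 * Deliberately reordered packets bypass this path entirely; netem pushes
 * them to the head through ->requeue() instead.
 */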

static int tfifo_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = RTA_DATA(opt);
		if (RTA_PAYLOAD(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, sch->dev->tx_queue_len, 1);

	q->oldest = PSCHED_PASTPERFECT;
	return 0;
}

static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	return -1;
}

static struct Qdisc_ops tfifo_qdisc_ops = {
	.id		=	"tfifo",
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	tfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.requeue	=	qdisc_requeue,
	.drop		=	qdisc_queue_drop,
	.init		=	tfifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	tfifo_init,
	.dump		=	tfifo_dump,
};

static int netem_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops,
				     TC_H_MAKE(sch->handle, 1));
	if (!q->qdisc) {
		pr_debug("netem: qdisc create failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		pr_debug("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
	kfree(q->delay_dist);
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *rta = (struct rtattr *) b;
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	RTA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	RTA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	RTA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	rta->rta_len = skb_tail_pointer(skb) - b;

	return skb->len;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1) 	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			      struct rtattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int netem_delete(struct Qdisc *sch, unsigned long arg)
{
	return -ENOSYS;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **netem_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.change		=	netem_change_class,
	.delete		=	netem_delete,
	.walk		=	netem_walk,
	.tcf_chain	=	netem_find_tcf,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.requeue	=	netem_requeue,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");