/*
 * net/sched/sch_netem.c  Network emulator
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License.
 *
 * Many of the algorithms and ideas for this came from
 * NIST Net, which is not copyrighted.
 *
 * Authors: Stephen Hemminger <shemminger@osdl.org>
 *          Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#define VERSION "1.2"

/* Network Emulation Queuing algorithm.
   ====================================

   Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		Network Emulation Tool"
	    [2] Luigi Rizzo, DummyNet for FreeBSD

   ----------------------------------------------------------------

   This started out as a simple way to delay outgoing packets to
   test TCP but has grown to include most of the functionality
   of a full blown network emulator like NISTnet. It can delay
   packets and add random jitter (and correlation). The random
   distribution can be loaded from a table as well to provide
   normal, Pareto, or experimental curves. Packet loss,
   duplication, and reordering can also be emulated.

   This qdisc does not do classification; that can be handled by
   layering other disciplines. It does not need to do bandwidth
   control either, since that can be handled by using token
   bucket or other rate control.

   The simulator is limited by the Linux timer resolution
   and will create packet bursts on the HZ boundary (1ms).
*/

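/* Example (illustrative, not part of the kernel interface): netem is
 * configured from user space with the tc utility from iproute2, e.g.
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25% loss 0.3%
 *
 * which adds 100ms of delay with +/-10ms of jitter (25% correlated with
 * the previous value) and drops about 0.3% of packets.  The exact option
 * set depends on the iproute2 version in use.
 */
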
struct netem_sched_data {
	struct Qdisc *qdisc;
	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32 size;
		s16 table[0];
	} *delay_dist;
};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t time_to_send;
};

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}

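/* Note on get_crandom(): the update above is a fixed-point weighted average,
 *
 *	answer = (value * (2^32 - rho) + last * rho) >> 32
 *
 * so rho/2^32 is the weight given to the previous output.  For example,
 * rho = 0x80000000 (correlation ~0.5) yields roughly (value + last)/2,
 * while state->rho == 0 short-circuits to an uncorrelated net_random().
 */
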
/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
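
/* The table path above computes, with rounding,
 *
 *	mu + (t * sigma) / NETEM_DIST_SCALE
 *
 * splitting sigma into quotient and remainder of NETEM_DIST_SCALE so the
 * intermediate product cannot overflow.  The table entries t are expected
 * to be deviates of the target distribution scaled by NETEM_DIST_SCALE.
 */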

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	pr_debug("netem_enqueue skb=%p\n", skb);

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Random packet drop 0 => none, ~0 => all */
	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = sch->dev->qdisc;
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		rootq->enqueue(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the data.
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC))
		    || (skb->ip_summed == CHECKSUM_PARTIAL
			&& skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

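	/* Decide whether this packet takes the normal delayed path or is
	 * reordered: after q->gap packets have been delayed, the next one is
	 * (with probability q->reorder) given an immediate time_to_send and
	 * pushed to the head of the queue, so it overtakes packets that are
	 * still waiting out their delay.
	 */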
	cb = (struct netem_skb_cb *)skb->cb;
	if (q->gap == 0			/* not doing reordering */
	    || q->counter < q->gap	/* inside last reordering gap */
	    || q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();
		cb->time_to_send = now + delay;
		++q->counter;
		ret = q->qdisc->enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;
		ret = q->qdisc->ops->requeue(skb, q->qdisc);
	}

	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
	} else
		sch->qstats.drops++;

	pr_debug("netem: enqueue ret %d\n", ret);
	return ret;
}

/* Requeue packets but don't change time stamp */
static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
		sch->q.qlen++;
		sch->qstats.requeues++;
	}

	return ret;
}

static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

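/* Dequeue the packet at the head of the child qdisc.  If its time_to_send
 * has not been reached yet, put it back and arm the watchdog timer for its
 * departure time instead of returning it.
 */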
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	smp_mb();
	if (sch->flags & TCQ_F_THROTTLED)
		return NULL;

	skb = q->qdisc->dequeue(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb
			= (const struct netem_skb_cb *)skb->cb;
		psched_time_t now = psched_get_time();

		/* is it time to send this packet? */
		if (cb->time_to_send <= now) {
			pr_debug("netem_dequeue: return skb=%p\n", skb);
			sch->q.qlen--;
			return skb;
		}

		if (unlikely(q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS)) {
			qdisc_tree_decrease_qlen(q->qdisc, 1);
			sch->qstats.drops++;
			printk(KERN_ERR "netem: %s could not requeue\n",
			       q->qdisc->ops->id);
		}

		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
	}

	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	qdisc_watchdog_cancel(&q->watchdog);
}

/* Pass size change message down to embedded FIFO */
static int set_fifo_limit(struct Qdisc *q, int limit)
{
	struct rtattr *rta;
	int ret = -ENOMEM;

	/* Hack to avoid sending change message to non-FIFO */
	if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
		return 0;

	rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
	if (rta) {
		rta->rta_type = RTM_NEWQDISC;
		rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
		((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;

		ret = q->ops->change(q, rta);
		kfree(rta);
	}
	return ret;
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned long n = RTA_PAYLOAD(attr)/sizeof(__s16);
	const __s16 *data = RTA_DATA(attr);
	struct disttable *d;
	int i;

	if (n > 65536)
		return -EINVAL;

	d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	spin_lock_bh(&sch->dev->queue_lock);
	d = xchg(&q->delay_dist, d);
	spin_unlock_bh(&sch->dev->queue_lock);

	kfree(d);
	return 0;
}
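
/* The delay distribution table is built in user space; iproute2 ships
 * precomputed tables (e.g. "normal" and "pareto") that tc loads with a
 * command along the lines of
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 20ms distribution normal
 *
 * (illustrative; the exact file names and syntax depend on the iproute2
 * build installed on the system).
 */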

static int get_correlation(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*c))
		return -EINVAL;

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
	return 0;
}

static int get_reorder(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*r))
		return -EINVAL;

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
	return 0;
}

static int get_corrupt(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*r))
		return -EINVAL;

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
	return 0;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = RTA_DATA(opt);
	ret = set_fifo_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_debug("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* For compatibility with earlier versions:
	 * if gap is set, we need to assume 100% probability.
	 */
	if (q->gap)
		q->reorder = ~0;

	/* Handle nested options after initial queue options.
	 * Should have put all options in nested format but too late now.
	 */
	if (RTA_PAYLOAD(opt) > sizeof(*qopt)) {
		struct rtattr *tb[TCA_NETEM_MAX];
		if (rtattr_parse(tb, TCA_NETEM_MAX,
				 RTA_DATA(opt) + sizeof(*qopt),
				 RTA_PAYLOAD(opt) - sizeof(*qopt)))
			return -EINVAL;

		if (tb[TCA_NETEM_CORR-1]) {
			ret = get_correlation(sch, tb[TCA_NETEM_CORR-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_DELAY_DIST-1]) {
			ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_REORDER-1]) {
			ret = get_reorder(sch, tb[TCA_NETEM_REORDER-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_CORRUPT-1]) {
			ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT-1]);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/*
 * Special case version of FIFO queue for use by netem.
 * It queues packets in order of their time_to_send timestamps.
 */
struct fifo_sched_data {
	u32 limit;
	psched_time_t oldest;
};

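/* Insert nskb so the queue stays sorted by time_to_send.  The common case
 * (non-decreasing timestamps) appends at the tail; otherwise the queue is
 * walked backwards from the tail to find the insertion point.
 */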
static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	psched_time_t tnext = ((struct netem_skb_cb *)nskb->cb)->time_to_send;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		/* Optimize for add at tail */
		if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
			q->oldest = tnext;
			return qdisc_enqueue_tail(nskb, sch);
		}

		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb
				= (const struct netem_skb_cb *)skb->cb;

			if (tnext >= cb->time_to_send)
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += nskb->len;
		sch->bstats.bytes += nskb->len;
		sch->bstats.packets++;

		return NET_XMIT_SUCCESS;
	}

	return qdisc_reshape_fail(nskb, sch);
}

static int tfifo_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = RTA_DATA(opt);
		if (RTA_PAYLOAD(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, sch->dev->tx_queue_len, 1);

	q->oldest = PSCHED_PASTPERFECT;
	return 0;
}

static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	return -1;
}

static struct Qdisc_ops tfifo_qdisc_ops = {
	.id		= "tfifo",
	.priv_size	= sizeof(struct fifo_sched_data),
	.enqueue	= tfifo_enqueue,
	.dequeue	= qdisc_dequeue_head,
	.requeue	= qdisc_requeue,
	.drop		= qdisc_queue_drop,
	.init		= tfifo_init,
	.reset		= qdisc_reset_queue,
	.change		= tfifo_init,
	.dump		= tfifo_dump,
};

static int netem_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops,
				     TC_H_MAKE(sch->handle, 1));
	if (!q->qdisc) {
		pr_debug("netem: qdisc create failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		pr_debug("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
	kfree(q->delay_dist);
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *rta = (struct rtattr *) b;
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	RTA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	RTA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	RTA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	rta->rta_len = skb_tail_pointer(skb) - b;

	return skb->len;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			      struct rtattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int netem_delete(struct Qdisc *sch, unsigned long arg)
{
	return -ENOSYS;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **netem_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static struct Qdisc_class_ops netem_class_ops = {
	.graft		= netem_graft,
	.leaf		= netem_leaf,
	.get		= netem_get,
	.put		= netem_put,
	.change		= netem_change_class,
	.delete		= netem_delete,
	.walk		= netem_walk,
	.tcf_chain	= netem_find_tcf,
	.dump		= netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops = {
	.id		= "netem",
	.cl_ops		= &netem_class_ops,
	.priv_size	= sizeof(struct netem_sched_data),
	.enqueue	= netem_enqueue,
	.dequeue	= netem_dequeue,
	.requeue	= netem_requeue,
	.drop		= netem_drop,
	.init		= netem_init,
	.reset		= netem_reset,
	.destroy	= netem_destroy,
	.change		= netem_change,
	.dump		= netem_dump,
	.owner		= THIS_MODULE,
};

static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}

static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}

module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");