/*
 * net/sched/sch_api.c	Packet scheduler API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new);
static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
			 struct Qdisc *q, unsigned long cl, int event);

/*

   Short review.
   -------------

   This file consists of two interrelated parts:

   1. the queueing discipline manager frontend.
   2. the traffic class manager frontend.

   Generally, a queueing discipline ("qdisc") is a black box which is
   able to enqueue packets and to dequeue them (when the device is
   ready to send something) in the order and at the times determined
   by the algorithm hidden inside it.

   qdiscs are divided into two categories:
   - "queues", which have no internal structure visible from outside.
   - "schedulers", which split all the packets into "traffic classes",
     using "packet classifiers" (see cls_api.c).

   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them, and so on recursively.

   The goal of the routines in this file is to translate the
   information supplied by the user in the form of handles into a
   form more intelligible to the kernel, to perform some sanity
   checks and the part of the work that is common to all qdiscs,
   and to provide rtnetlink notifications.

   All the real intelligent work is done inside the qdisc modules.



   Every discipline has two major routines: enqueue and dequeue.

   ---dequeue

   dequeue usually returns an skb to send. It is allowed to return NULL,
   but that does not mean the queue is empty; it only means that the
   discipline does not want to send anything at this time.
   The queue is really empty if q->q.qlen == 0.
   For complicated disciplines with multiple queues, q->q is not the
   real packet queue, but q->q.qlen must nevertheless be valid.

   ---enqueue

   enqueue returns 0 if the packet was enqueued successfully.
   If a packet (this one or another one) was dropped, it returns
   a non-zero error code:
   NET_XMIT_DROP	- this packet was dropped.
     Expected action: do not back off, but wait until the queue clears.
   NET_XMIT_CN		- this packet was probably enqueued, but another one was dropped.
     Expected action: back off or ignore.
   NET_XMIT_POLICED	- dropped by the policer.
     Expected action: back off or report an error to real-time apps.

   Auxiliary routines:

   ---requeue

   requeues a packet that was dequeued once. It is used for non-standard
   or just buggy devices, which can defer output even when
   netif_queue_stopped() == 0.

   ---reset

   returns the qdisc to its initial state: purges all buffers, clears
   all timers, counters (except statistics) etc.

   ---init

   initializes a newly created qdisc.

   ---destroy

   destroys resources allocated by init and during the lifetime of the
   qdisc.

   ---change

   changes qdisc parameters.
 */
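
/*
 * Illustrative sketch (not part of the original file): a minimal
 * classless, FIFO-style qdisc honouring the enqueue/dequeue contract
 * described above.  All "example_" identifiers are hypothetical;
 * see sch_fifo.c for a real implementation.
 */
static int example_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	if (skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len) {
		__skb_queue_tail(&sch->q, skb);	/* also bumps sch->q.qlen */
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
		return NET_XMIT_SUCCESS;
	}
	return qdisc_drop(skb, sch);	/* frees the skb, counts the drop */
}

static struct sk_buff *example_fifo_dequeue(struct Qdisc *sch)
{
	/* NULL only means "nothing to send right now"; real emptiness
	 * is signalled by sch->q.qlen == 0, as noted above. */
	return __skb_dequeue(&sch->q);
}

static struct Qdisc_ops example_fifo_qdisc_ops __read_mostly = {
	.id		= "example_fifo",
	.priv_size	= 0,
	.enqueue	= example_fifo_enqueue,
	.dequeue	= example_fifo_dequeue,
	.owner		= THIS_MODULE,
};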

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);


/************************************************
 *	Queueing disciplines manipulation.	*
 ************************************************/


/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;

/* Register/unregister queueing discipline */

int register_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int rc = -EEXIST;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (!strcmp(qops->id, q->id))
			goto out;

	if (qops->enqueue == NULL)
		qops->enqueue = noop_qdisc_ops.enqueue;
	if (qops->requeue == NULL)
		qops->requeue = noop_qdisc_ops.requeue;
	if (qops->dequeue == NULL)
		qops->dequeue = noop_qdisc_ops.dequeue;

	qops->next = NULL;
	*qp = qops;
	rc = 0;
out:
	write_unlock(&qdisc_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_qdisc);

int unregister_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int err = -ENOENT;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (q == qops)
			break;
	if (q) {
		*qp = q->next;
		q->next = NULL;
		err = 0;
	}
	write_unlock(&qdisc_mod_lock);
	return err;
}
EXPORT_SYMBOL(unregister_qdisc);
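
/*
 * Illustrative sketch: a qdisc module registers its ops table on load
 * and unregisters it on unload.  example_fifo_qdisc_ops is the
 * hypothetical ops table sketched earlier; real modules do exactly
 * this (see pktsched_init() below for the built-in fifos).
 */
static int __init example_fifo_module_init(void)
{
	return register_qdisc(&example_fifo_qdisc_ops);
}

static void __exit example_fifo_module_exit(void)
{
	unregister_qdisc(&example_fifo_qdisc_ops);
}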

/* We know the handle. Find the qdisc among all qdiscs attached to the
   device (root qdisc, all its children, children of children etc.)
 */

static struct Qdisc *__qdisc_lookup(struct netdev_queue *dev_queue, u32 handle)
{
	struct Qdisc *q;

	list_for_each_entry(q, &dev_queue->qdisc_list, list) {
		if (q->handle == handle)
			return q;
	}
	return NULL;
}

struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		struct Qdisc *q = __qdisc_lookup(txq, handle);
		if (q)
			return q;
	}
	return NULL;
}

static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
	unsigned long cl;
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = p->ops->cl_ops;

	if (cops == NULL)
		return NULL;
	cl = cops->get(p, classid);

	if (cl == 0)
		return NULL;
	leaf = cops->leaf(p, cl);
	cops->put(p, cl);
	return leaf;
}

/* Find queueing discipline by name */

static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
{
	struct Qdisc_ops *q = NULL;

	if (kind) {
		read_lock(&qdisc_mod_lock);
		for (q = qdisc_base; q; q = q->next) {
			if (nla_strcmp(kind, q->id) == 0) {
				if (!try_module_get(q->owner))
					q = NULL;
				break;
			}
		}
		read_unlock(&qdisc_mod_lock);
	}
	return q;
}

static struct qdisc_rate_table *qdisc_rtab_list;

struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
{
	struct qdisc_rate_table *rtab;

	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
		if (memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) == 0) {
			rtab->refcnt++;
			return rtab;
		}
	}

	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
	    nla_len(tab) != TC_RTAB_SIZE)
		return NULL;

	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
	if (rtab) {
		rtab->rate = *r;
		rtab->refcnt = 1;
		memcpy(rtab->data, nla_data(tab), 1024);
		rtab->next = qdisc_rtab_list;
		qdisc_rtab_list = rtab;
	}
	return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);

void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
	struct qdisc_rate_table *rtab, **rtabp;

	if (!tab || --tab->refcnt)
		return;

	for (rtabp = &qdisc_rtab_list; (rtab = *rtabp) != NULL; rtabp = &rtab->next) {
		if (rtab == tab) {
			*rtabp = rtab->next;
			kfree(rtab);
			return;
		}
	}
}
EXPORT_SYMBOL(qdisc_put_rtab);
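
/*
 * Illustrative sketch: how a shaping qdisc typically pairs
 * qdisc_get_rtab()/qdisc_put_rtab() when (re)configuring a rate.  The
 * attribute pointers would come from the qdisc's TCA_OPTIONS; the
 * "example_" names are hypothetical (see sch_tbf.c for a real user).
 */
static int example_set_rate(struct qdisc_rate_table **rtabp,
			    struct tc_ratespec *r, struct nlattr *tab)
{
	struct qdisc_rate_table *rtab = qdisc_get_rtab(r, tab);

	if (rtab == NULL)
		return -EINVAL;
	qdisc_put_rtab(*rtabp);	/* drop any previous table (NULL-safe) */
	*rtabp = rtab;
	return 0;
}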

static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						 timer);
	struct netdev_queue *txq = wd->qdisc->dev_queue;

	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
	smp_wmb();
	netif_schedule_queue(txq);

	return HRTIMER_NORESTART;
}

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
	hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	wd->timer.function = qdisc_watchdog;
	wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init);

void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
{
	ktime_t time;

	wd->qdisc->flags |= TCQ_F_THROTTLED;
	time = ktime_set(0, 0);
	time = ktime_add_ns(time, PSCHED_US2NS(expires));
	hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule);

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
	hrtimer_cancel(&wd->timer);
	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);
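
/*
 * Illustrative sketch: a typical consumer of the watchdog above.  A
 * shaping qdisc's ->dequeue() returns NULL while the head packet is
 * not yet due and arms the watchdog; the timer then clears
 * TCQ_F_THROTTLED and reschedules the queue.  struct
 * example_shaper_data and its fields are hypothetical.
 */
struct example_shaper_data {
	struct qdisc_watchdog	watchdog;
	struct tcf_proto	*filter_list;
	psched_time_t		next_due;	/* when the head packet may go out */
};

static struct sk_buff *example_shaper_dequeue(struct Qdisc *sch)
{
	struct example_shaper_data *q = qdisc_priv(sch);

	if (skb_queue_empty(&sch->q))
		return NULL;
	if (psched_get_time() < q->next_due) {
		/* Throttled, not empty: sch->q.qlen stays non-zero. */
		qdisc_watchdog_schedule(&q->watchdog, q->next_due);
		return NULL;
	}
	return __skb_dequeue(&sch->q);
}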

struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head), i;
	struct hlist_head *h;

	if (size <= PAGE_SIZE)
		h = kmalloc(size, GFP_KERNEL);
	else
		h = (struct hlist_head *)
			__get_free_pages(GFP_KERNEL, get_order(size));

	if (h != NULL) {
		for (i = 0; i < n; i++)
			INIT_HLIST_HEAD(&h[i]);
	}
	return h;
}

static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head);

	if (size <= PAGE_SIZE)
		kfree(h);
	else
		free_pages((unsigned long)h, get_order(size));
}

void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *n, *next;
	struct hlist_head *nhash, *ohash;
	unsigned int nsize, nmask, osize;
	unsigned int i, h;

	/* Rehash when load factor exceeds 0.75 */
	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
		return;
	nsize = clhash->hashsize * 2;
	nmask = nsize - 1;
	nhash = qdisc_class_hash_alloc(nsize);
	if (nhash == NULL)
		return;

	ohash = clhash->hash;
	osize = clhash->hashsize;

	sch_tree_lock(sch);
	for (i = 0; i < osize; i++) {
		hlist_for_each_entry_safe(cl, n, next, &ohash[i], hnode) {
			h = qdisc_class_hash(cl->classid, nmask);
			hlist_add_head(&cl->hnode, &nhash[h]);
		}
	}
	clhash->hash = nhash;
	clhash->hashsize = nsize;
	clhash->hashmask = nmask;
	sch_tree_unlock(sch);

	qdisc_class_hash_free(ohash, osize);
}
EXPORT_SYMBOL(qdisc_class_hash_grow);

int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
{
	unsigned int size = 4;

	clhash->hash = qdisc_class_hash_alloc(size);
	if (clhash->hash == NULL)
		return -ENOMEM;
	clhash->hashsize = size;
	clhash->hashmask = size - 1;
	clhash->hashelems = 0;
	return 0;
}
EXPORT_SYMBOL(qdisc_class_hash_init);

void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
	qdisc_class_hash_free(clhash->hash, clhash->hashsize);
}
EXPORT_SYMBOL(qdisc_class_hash_destroy);

void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	unsigned int h;

	INIT_HLIST_NODE(&cl->hnode);
	h = qdisc_class_hash(cl->classid, clhash->hashmask);
	hlist_add_head(&cl->hnode, &clhash->hash[h]);
	clhash->hashelems++;
}
EXPORT_SYMBOL(qdisc_class_hash_insert);

void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	hlist_del(&cl->hnode);
	clhash->hashelems--;
}
EXPORT_SYMBOL(qdisc_class_hash_remove);
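
/*
 * Illustrative sketch: how a classful qdisc uses the hash helpers
 * above.  The per-class structure embeds Qdisc_class_common, classes
 * are found with qdisc_class_find(), and after each insertion the
 * table is given a chance to grow.  "example_class" is hypothetical;
 * see sch_htb.c for a real user.
 */
struct example_class {
	struct Qdisc_class_common common;	/* classid + hash linkage */
	/* ... scheduler-specific per-class state ... */
};

static struct example_class *example_class_lookup(struct Qdisc_class_hash *clhash,
						  u32 classid)
{
	struct Qdisc_class_common *cc = qdisc_class_find(clhash, classid);

	return cc ? container_of(cc, struct example_class, common) : NULL;
}

static void example_class_add(struct Qdisc *sch, struct Qdisc_class_hash *clhash,
			      struct example_class *cl)
{
	qdisc_class_hash_insert(clhash, &cl->common);
	qdisc_class_hash_grow(sch, clhash);	/* rehash above 0.75 load */
}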

/* Allocate a unique handle from the space managed by the kernel */

static u32 qdisc_alloc_handle(struct net_device *dev)
{
	int i = 0x10000;
	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

	do {
		autohandle += TC_H_MAKE(0x10000U, 0);
		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
			autohandle = TC_H_MAKE(0x80000000U, 0);
	} while (qdisc_lookup(dev, autohandle) && --i > 0);

	return i > 0 ? autohandle : 0;
}

/* Attach toplevel qdisc to device dev */

static struct Qdisc *
dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
{
	struct netdev_queue *dev_queue;
	struct Qdisc *oqdisc;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	qdisc_lock_tree(dev);
	if (qdisc && qdisc->flags & TCQ_F_INGRESS) {
		dev_queue = &dev->rx_queue;
		oqdisc = dev_queue->qdisc;
		/* Prune old scheduler */
		if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) {
			/* delete */
			qdisc_reset(oqdisc);
			dev_queue->qdisc = NULL;
		} else {  /* new */
			dev_queue->qdisc = qdisc;
		}

	} else {
		dev_queue = netdev_get_tx_queue(dev, 0);
		oqdisc = dev_queue->qdisc_sleeping;

		/* Prune old scheduler */
		if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
			qdisc_reset(oqdisc);

		/* ... and graft new one */
		if (qdisc == NULL)
			qdisc = &noop_qdisc;
		dev_queue->qdisc_sleeping = qdisc;
		dev_queue->qdisc = &noop_qdisc;
	}

	qdisc_unlock_tree(dev);

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return oqdisc;
}

void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
{
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	u32 parentid;

	if (n == 0)
		return;
	while ((parentid = sch->parent)) {
		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
			return;

		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
		if (sch == NULL) {
			WARN_ON(parentid != TC_H_ROOT);
			return;
		}
		cops = sch->ops->cl_ops;
		if (cops->qlen_notify) {
			cl = cops->get(sch, parentid);
			cops->qlen_notify(sch, cl);
			cops->put(sch, cl);
		}
		sch->q.qlen -= n;
	}
}
EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
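
/*
 * Illustrative sketch: a qdisc that drops packets internally (e.g. on
 * a timer, outside enqueue/dequeue) must fix up its own qlen and then
 * tell its ancestors via qdisc_tree_decrease_qlen().  The drop-all
 * policy here is just a stand-in.
 */
static void example_purge(struct Qdisc *sch)
{
	unsigned int dropped = 0;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sch->q)) != NULL) {
		/* __skb_dequeue() already decremented sch->q.qlen */
		kfree_skb(skb);
		sch->qstats.drops++;
		dropped++;
	}
	/* ... but the ancestors' aggregate qlen must be fixed by hand. */
	qdisc_tree_decrease_qlen(sch, dropped);
}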

/* Graft qdisc "new" to class "classid" of qdisc "parent" or
   to device "dev".

   The old qdisc is not destroyed but returned in *old.
 */

static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
		       u32 classid,
		       struct Qdisc *new, struct Qdisc **old)
{
	int err = 0;
	struct Qdisc *q = *old;

	if (parent == NULL) {
		if (q && q->flags & TCQ_F_INGRESS) {
			*old = dev_graft_qdisc(dev, q);
		} else {
			*old = dev_graft_qdisc(dev, new);
		}
	} else {
		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;

		err = -EINVAL;

		if (cops) {
			unsigned long cl = cops->get(parent, classid);
			if (cl) {
				err = cops->graft(parent, cl, new, old);
				cops->put(parent, cl);
			}
		}
	}
	return err;
}

/*
   Allocate and initialize a new qdisc.

   Parameters are passed via opt.
 */

static struct Qdisc *
qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
	     u32 parent, u32 handle, struct nlattr **tca, int *errp)
{
	int err;
	struct nlattr *kind = tca[TCA_KIND];
	struct Qdisc *sch;
	struct Qdisc_ops *ops;

	ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_KMOD
	if (ops == NULL && kind != NULL) {
		char name[IFNAMSIZ];
		if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
			/* We dropped the RTNL semaphore in order to
			 * perform the module load.  So, even if we
			 * succeeded in loading the module we have to
			 * tell the caller to replay the request.  We
			 * indicate this using -EAGAIN.
			 * We replay the request because the device may
			 * go away in the mean time.
			 */
			rtnl_unlock();
			request_module("sch_%s", name);
			rtnl_lock();
			ops = qdisc_lookup_ops(kind);
			if (ops != NULL) {
				/* The replay will call qdisc_lookup_ops()
				 * again, so don't keep a reference here.
				 */
				module_put(ops->owner);
				err = -EAGAIN;
				goto err_out;
			}
		}
	}
#endif

	err = -ENOENT;
	if (ops == NULL)
		goto err_out;

	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch)) {
		err = PTR_ERR(sch);
		goto err_out2;
	}

	sch->parent = parent;

	if (handle == TC_H_INGRESS) {
		sch->flags |= TCQ_F_INGRESS;
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
	} else {
		if (handle == 0) {
			handle = qdisc_alloc_handle(dev);
			err = -ENOMEM;
			if (handle == 0)
				goto err_out3;
		}
	}

	sch->handle = handle;

	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
		if (tca[TCA_RATE]) {
			err = gen_new_estimator(&sch->bstats, &sch->rate_est,
						qdisc_root_lock(sch),
						tca[TCA_RATE]);
			if (err) {
				/*
				 * Any broken qdiscs that would require
				 * an ops->reset() here?  The qdisc was never
				 * in action so it shouldn't be necessary.
				 */
				if (ops->destroy)
					ops->destroy(sch);
				goto err_out3;
			}
		}
		qdisc_lock_tree(dev);
		list_add_tail(&sch->list, &dev_queue->qdisc_list);
		qdisc_unlock_tree(dev);

		return sch;
	}
err_out3:
	dev_put(dev);
	kfree((char *) sch - sch->padded);
err_out2:
	module_put(ops->owner);
err_out:
	*errp = err;
	return NULL;
}

static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
{
	if (tca[TCA_OPTIONS]) {
		int err;

		if (sch->ops->change == NULL)
			return -EINVAL;
		err = sch->ops->change(sch, tca[TCA_OPTIONS]);
		if (err)
			return err;
	}
	if (tca[TCA_RATE])
		gen_replace_estimator(&sch->bstats, &sch->rate_est,
				      qdisc_root_lock(sch), tca[TCA_RATE]);
	return 0;
}

struct check_loop_arg
{
	struct qdisc_walker	w;
	struct Qdisc		*p;
	int			depth;
};

static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);

static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
{
	struct check_loop_arg	arg;

	if (q->ops->cl_ops == NULL)
		return 0;

	arg.w.stop = arg.w.skip = arg.w.count = 0;
	arg.w.fn = check_loop_fn;
	arg.depth = depth;
	arg.p = p;
	q->ops->cl_ops->walk(q, &arg.w);
	return arg.w.stop ? -ELOOP : 0;
}

static int
check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
{
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct check_loop_arg *arg = (struct check_loop_arg *)w;

	leaf = cops->leaf(q, cl);
	if (leaf) {
		if (leaf == arg->p || arg->depth > 7)
			return -ELOOP;
		return check_loop(leaf, arg->p, arg->depth + 1);
	}
	return 0;
}

/*
 * Delete/get qdisc.
 */

static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = NLMSG_DATA(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid = tcm->tcm_parent;
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;
	int err;

	if (net != &init_net)
		return -EINVAL;

	if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
		return -ENODEV;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	if (clid) {
		if (clid != TC_H_ROOT) {
			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
				if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else { /* ingress */
				q = dev->rx_queue.qdisc;
			}
		} else {
			struct netdev_queue *dev_queue;
			dev_queue = netdev_get_tx_queue(dev, 0);
			q = dev_queue->qdisc_sleeping;
		}
		if (!q)
			return -ENOENT;

		if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
			return -EINVAL;
	} else {
		if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
			return -ENOENT;
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;

	if (n->nlmsg_type == RTM_DELQDISC) {
		if (!clid)
			return -EINVAL;
		if (q->handle == 0)
			return -ENOENT;
		if ((err = qdisc_graft(dev, p, clid, NULL, &q)) != 0)
			return err;
		if (q) {
			qdisc_notify(skb, n, clid, q, NULL);
			qdisc_lock_tree(dev);
			qdisc_destroy(q);
			qdisc_unlock_tree(dev);
		}
	} else {
		qdisc_notify(skb, n, clid, NULL, q);
	}
	return 0;
}

/*
   Create/change qdisc.
 */

static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm;
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q, *p;
	int err;

	if (net != &init_net)
		return -EINVAL;

replay:
	/* Reinit, just in case something touches this. */
	tcm = NLMSG_DATA(n);
	clid = tcm->tcm_parent;
	q = p = NULL;

	if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
		return -ENODEV;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	if (clid) {
		if (clid != TC_H_ROOT) {
			if (clid != TC_H_INGRESS) {
				if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else { /* ingress */
				q = dev->rx_queue.qdisc;
			}
		} else {
			struct netdev_queue *dev_queue;
			dev_queue = netdev_get_tx_queue(dev, 0);
			q = dev_queue->qdisc_sleeping;
		}

		/* It may be the default qdisc; ignore it. */
		if (q && q->handle == 0)
			q = NULL;

		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
			if (tcm->tcm_handle) {
				if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
					return -EEXIST;
				if (TC_H_MIN(tcm->tcm_handle))
					return -EINVAL;
				if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
					goto create_n_graft;
				if (n->nlmsg_flags & NLM_F_EXCL)
					return -EEXIST;
				if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
					return -EINVAL;
				if (q == p ||
				    (p && check_loop(q, p, 0)))
					return -ELOOP;
				atomic_inc(&q->refcnt);
				goto graft;
			} else {
				if (q == NULL)
					goto create_n_graft;

				/* This magic test requires explanation.
				 *
				 * We know that some child q is already
				 * attached to this parent and have a choice:
				 * either to change it or to create/graft a
				 * new one.
				 *
				 * 1. We are allowed to create/graft only
				 * if both CREATE and REPLACE flags are set.
				 *
				 * 2. If EXCL is set, the requestor meant
				 * that the qdisc tcm_handle is not expected
				 * to exist, so we choose create/graft too.
				 *
				 * 3. The last case is when no flags are set.
				 * Alas, it is a sort of hole in the API; we
				 * cannot decide what to do unambiguously.
				 * For now we select create/graft if the
				 * user gave a KIND which does not match the
				 * existing one.
				 */
				if ((n->nlmsg_flags & NLM_F_CREATE) &&
				    (n->nlmsg_flags & NLM_F_REPLACE) &&
				    ((n->nlmsg_flags & NLM_F_EXCL) ||
				     (tca[TCA_KIND] &&
				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
					goto create_n_graft;
			}
		}
	} else {
		if (!tcm->tcm_handle)
			return -EINVAL;
		q = qdisc_lookup(dev, tcm->tcm_handle);
	}

	/* Change qdisc parameters */
	if (q == NULL)
		return -ENOENT;
	if (n->nlmsg_flags & NLM_F_EXCL)
		return -EEXIST;
	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;
	err = qdisc_change(q, tca);
	if (err == 0)
		qdisc_notify(skb, n, clid, NULL, q);
	return err;

create_n_graft:
	if (!(n->nlmsg_flags & NLM_F_CREATE))
		return -ENOENT;
	if (clid == TC_H_INGRESS)
		q = qdisc_create(dev, &dev->rx_queue,
				 tcm->tcm_parent, tcm->tcm_parent,
				 tca, &err);
	else
		q = qdisc_create(dev, netdev_get_tx_queue(dev, 0),
				 tcm->tcm_parent, tcm->tcm_handle,
				 tca, &err);
	if (q == NULL) {
		if (err == -EAGAIN)
			goto replay;
		return err;
	}

graft:
	if (1) {
		struct Qdisc *old_q = NULL;
		err = qdisc_graft(dev, p, clid, q, &old_q);
		if (err) {
			if (q) {
				qdisc_lock_tree(dev);
				qdisc_destroy(q);
				qdisc_unlock_tree(dev);
			}
			return err;
		}
		qdisc_notify(skb, n, clid, old_q, q);
		if (old_q) {
			qdisc_lock_tree(dev);
			qdisc_destroy(old_q);
			qdisc_unlock_tree(dev);
		}
	}
	return 0;
}

static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			 u32 pid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
	tcm = NLMSG_DATA(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = atomic_read(&q->refcnt);
	NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
		goto nla_put_failure;
	q->qstats.qlen = q->q.qlen;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
					 TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, &q->qstats) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nlmsg_failure:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n,
			u32 clid, struct Qdisc *old, struct Qdisc *new)
{
	struct sk_buff *skb;
	u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (old && old->handle) {
		if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0)
			goto err_out;
	}
	if (new) {
		if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
			goto err_out;
	}

	if (skb->len)
		return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags & NLM_F_ECHO);

err_out:
	kfree_skb(skb);
	return -EINVAL;
}

static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int idx, q_idx;
	int s_idx, s_q_idx;
	struct net_device *dev;
	struct Qdisc *q;

	if (net != &init_net)
		return 0;

	s_idx = cb->args[0];
	s_q_idx = q_idx = cb->args[1];
	read_lock(&dev_base_lock);
	idx = 0;
	for_each_netdev(&init_net, dev) {
		struct netdev_queue *dev_queue;
		if (idx < s_idx)
			goto cont;
		if (idx > s_idx)
			s_q_idx = 0;
		q_idx = 0;
		dev_queue = netdev_get_tx_queue(dev, 0);
		list_for_each_entry(q, &dev_queue->qdisc_list, list) {
			if (q_idx < s_q_idx) {
				q_idx++;
				continue;
			}
			if (tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
				goto done;
			q_idx++;
		}
cont:
		idx++;
	}

done:
	read_unlock(&dev_base_lock);

	cb->args[0] = idx;
	cb->args[1] = q_idx;

	return skb->len;
}


/************************************************
 *	Traffic classes manipulation.		*
 ************************************************/



static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct tcmsg *tcm = NLMSG_DATA(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q = NULL;
	const struct Qdisc_class_ops *cops;
	unsigned long cl = 0;
	unsigned long new_cl;
	u32 pid = tcm->tcm_parent;
	u32 clid = tcm->tcm_handle;
	u32 qid = TC_H_MAJ(clid);
	int err;

	if (net != &init_net)
		return -EINVAL;

	if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
		return -ENODEV;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	/*
	   parent == TC_H_UNSPEC - unspecified parent.
	   parent == TC_H_ROOT   - class is root, which has no parent.
	   parent == X:0	 - parent is root class.
	   parent == X:Y	 - parent is a node in hierarchy.
	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.

	   handle == 0:0	 - generate handle from kernel pool.
	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
	   handle == X:Y	 - fully specified; nothing to resolve.
	   handle == X:0	 - root class.
	 */

	/* Step 1. Determine qdisc handle X:0 */

	dev_queue = netdev_get_tx_queue(dev, 0);
	if (pid != TC_H_ROOT) {
		u32 qid1 = TC_H_MAJ(pid);

		if (qid && qid1) {
			/* If both majors are known, they must be identical. */
			if (qid != qid1)
				return -EINVAL;
		} else if (qid1) {
			qid = qid1;
		} else if (qid == 0)
			qid = dev_queue->qdisc_sleeping->handle;

		/* Now qid is a genuine qdisc handle consistent with
		   both parent and child.

		   TC_H_MAJ(pid) may still be unspecified, so complete it now.
		 */
		if (pid)
			pid = TC_H_MAKE(qid, pid);
	} else {
		if (qid == 0)
			qid = dev_queue->qdisc_sleeping->handle;
	}

	/* OK. Locate qdisc */
	if ((q = qdisc_lookup(dev, qid)) == NULL)
		return -ENOENT;

	/* And check that it supports classes */
	cops = q->ops->cl_ops;
	if (cops == NULL)
		return -EINVAL;

	/* Now try to get class */
	if (clid == 0) {
		if (pid == TC_H_ROOT)
			clid = qid;
	} else
		clid = TC_H_MAKE(qid, clid);

	if (clid)
		cl = cops->get(q, clid);

	if (cl == 0) {
		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTCLASS || !(n->nlmsg_flags & NLM_F_CREATE))
			goto out;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTCLASS:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL)
				goto out;
			break;
		case RTM_DELTCLASS:
			err = cops->delete(q, cl);
			if (err == 0)
				tclass_notify(skb, n, q, cl, RTM_DELTCLASS);
			goto out;
		case RTM_GETTCLASS:
			err = tclass_notify(skb, n, q, cl, RTM_NEWTCLASS);
			goto out;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	new_cl = cl;
	err = cops->change(q, clid, pid, tca, &new_cl);
	if (err == 0)
		tclass_notify(skb, n, q, new_cl, RTM_NEWTCLASS);

out:
	if (cl)
		cops->put(q, cl);

	return err;
}


static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
			  unsigned long cl,
			  u32 pid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
	tcm = NLMSG_DATA(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = q->handle;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = 0;
	NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
					 TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nlmsg_failure:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
			 struct Qdisc *q, unsigned long cl, int event)
{
	struct sk_buff *skb;
	u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, pid, n->nlmsg_seq, 0, event) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags & NLM_F_ECHO);
}

struct qdisc_dump_args
{
	struct qdisc_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
};

static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
{
	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).pid,
			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
}

static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	int t;
	int s_t;
	struct net_device *dev;
	struct Qdisc *q;
	struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh);
	struct qdisc_dump_args arg;

	if (net != &init_net)
		return 0;

	if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
		return 0;
	if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
		return 0;

	s_t = cb->args[0];
	t = 0;

	dev_queue = netdev_get_tx_queue(dev, 0);
	list_for_each_entry(q, &dev_queue->qdisc_list, list) {
		if (t < s_t || !q->ops->cl_ops ||
		    (tcm->tcm_parent &&
		     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
			t++;
			continue;
		}
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) - sizeof(cb->args[0]));
		arg.w.fn = qdisc_class_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1];
		arg.w.count = 0;
		q->ops->cl_ops->walk(q, &arg.w);
		cb->args[1] = arg.w.count;
		if (arg.w.stop)
			break;
		t++;
	}

	cb->args[0] = t;

	dev_put(dev);
	return skb->len;
}

/* Main classifier routine: scans the classifier chain attached
   to this qdisc, (optionally) tests for the protocol and asks
   the specific classifiers.
 */
int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
		       struct tcf_result *res)
{
	__be16 protocol = skb->protocol;
	int err = 0;

	for (; tp; tp = tp->next) {
		if ((tp->protocol == protocol ||
		     tp->protocol == htons(ETH_P_ALL)) &&
		    (err = tp->classify(skb, tp, res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
			if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
				skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
#endif
			return err;
		}
	}
	return -1;
}
EXPORT_SYMBOL(tc_classify_compat);

int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
		struct tcf_result *res)
{
	int err = 0;
	__be16 protocol;
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_proto *otp = tp;
reclassify:
#endif
	protocol = skb->protocol;

	err = tc_classify_compat(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
	if (err == TC_ACT_RECLASSIFY) {
		u32 verd = G_TC_VERD(skb->tc_verd);
		tp = otp;

		if (verd++ >= MAX_REC_LOOP) {
			printk("rule prio %u protocol %02x reclassify loop, "
			       "packet dropped\n",
			       tp->prio & 0xffff, ntohs(tp->protocol));
			return TC_ACT_SHOT;
		}
		skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
		goto reclassify;
	}
#endif
	return err;
}
EXPORT_SYMBOL(tc_classify);
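
/*
 * Illustrative sketch: how a classful qdisc's ->enqueue() typically
 * drives its filter chain through tc_classify().  Mapping res to a
 * class and the NET_XMIT_* policy for stolen/queued packets are
 * simplified here; see sch_htb.c or sch_prio.c for the real patterns.
 * struct example_shaper_data is the hypothetical private data
 * sketched earlier.
 */
static int example_classify_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct example_shaper_data *q = qdisc_priv(sch);
	struct tcf_result res;
	int result = tc_classify(skb, q->filter_list, &res);

#ifdef CONFIG_NET_CLS_ACT
	switch (result) {
	case TC_ACT_QUEUED:
	case TC_ACT_STOLEN:
		return NET_XMIT_SUCCESS;	/* consumed by an action */
	case TC_ACT_SHOT:
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}
#endif
	if (result < 0)
		return qdisc_drop(skb, sch);	/* no filter matched */
	/* res.classid now selects the target class/child qdisc ... */
	__skb_queue_tail(&sch->q, skb);
	return NET_XMIT_SUCCESS;
}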

void tcf_destroy(struct tcf_proto *tp)
{
	tp->ops->destroy(tp);
	module_put(tp->ops->owner);
	kfree(tp);
}

void tcf_destroy_chain(struct tcf_proto **fl)
{
	struct tcf_proto *tp;

	while ((tp = *fl) != NULL) {
		*fl = tp->next;
		tcf_destroy(tp);
	}
}
EXPORT_SYMBOL(tcf_destroy_chain);
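
/*
 * Illustrative sketch: a classful qdisc's ->destroy() tears down its
 * filter chain with tcf_destroy_chain() (and cancels any pending
 * watchdog).  struct example_shaper_data is the hypothetical private
 * data sketched earlier.
 */
static void example_shaper_destroy(struct Qdisc *sch)
{
	struct example_shaper_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);	/* frees every tcf_proto */
	qdisc_watchdog_cancel(&q->watchdog);
}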

#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
	struct timespec ts;

	hrtimer_get_res(CLOCK_MONOTONIC, &ts);
	seq_printf(seq, "%08x %08x %08x %08x\n",
		   (u32)NSEC_PER_USEC, (u32)PSCHED_US2NS(1),
		   1000000,
		   (u32)NSEC_PER_SEC / (u32)ktime_to_ns(timespec_to_ktime(ts)));

	return 0;
}

static int psched_open(struct inode *inode, struct file *file)
{
	return single_open(file, psched_show, PDE(inode)->data);
}

static const struct file_operations psched_fops = {
	.owner		= THIS_MODULE,
	.open		= psched_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif

static int __init pktsched_init(void)
{
	register_qdisc(&pfifo_qdisc_ops);
	register_qdisc(&bfifo_qdisc_ops);
	proc_net_fops_create(&init_net, "psched", 0, &psched_fops);

	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc);
	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass);

	return 0;
}

subsys_initcall(pktsched_init);