/*
 * net/sched/sch_api.c	Packet scheduler API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/lockdep.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new);
static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
			 struct Qdisc *q, unsigned long cl, int event);
/*

   Short review.
   -------------

   This file consists of two interrelated parts:

   1. queueing disciplines manager frontend.
   2. traffic classes manager frontend.

   Generally, a queueing discipline ("qdisc") is a black box,
   which is able to enqueue packets and to dequeue them (when
   the device is ready to send something) in the order and at the
   times determined by the algorithm hidden inside it.

   qdiscs are divided into two categories:
   - "queues", which have no internal structure visible from the outside.
   - "schedulers", which split all packets into "traffic classes",
     using "packet classifiers" (see cls_api.c)

   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them, and so on.

   The goal of the routines in this file is to translate the
   information supplied by the user in the form of handles
   into a form more intelligible to the kernel, to perform some
   sanity checks and the parts of the work common to all qdiscs,
   and to provide rtnetlink notifications.

   All the real intelligent work is done inside the qdisc modules.



   Every discipline has two major routines: enqueue and dequeue.

   ---dequeue

   dequeue usually returns an skb to send. It is allowed to return NULL,
   but that does not mean the queue is empty; it only means that the
   discipline does not want to send anything this time.
   The queue is really empty only if q->q.qlen == 0.
   For complicated disciplines with multiple queues, q->q is not the
   real packet queue, but q->q.qlen must still be valid.

   ---enqueue

   enqueue returns 0 if the packet was enqueued successfully.
   If a packet (this one or another one) was dropped, it returns
   a non-zero error code:
   NET_XMIT_DROP	- this packet was dropped.
     Expected action: do not back off, but wait until the queue clears.
   NET_XMIT_CN		- probably this packet was enqueued, but another one was dropped.
     Expected action: back off or ignore.
   NET_XMIT_POLICED	- dropped by police.
     Expected action: back off or report an error to real-time apps.

   Auxiliary routines:

   ---requeue

   requeues a once-dequeued packet. It is used for non-standard or
   just plain buggy devices, which can defer output even when
   netif_queue_stopped() == 0.

   ---reset

   returns the qdisc to its initial state: purges all buffers, clears all
   timers, counters (except for statistics), etc.

   ---init

   initializes a newly created qdisc.

   ---destroy

   destroys the resources allocated by init and during the lifetime of the qdisc.

   ---change

   changes qdisc parameters.
 */
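
/*
 * Illustrative sketch (not part of the original file, compiled out):
 * a minimal classless "queue" honouring the enqueue/dequeue contract
 * described above.  It assumes the qdisc_enqueue_tail(),
 * qdisc_dequeue_head() and qdisc_drop() helpers from
 * <net/sch_generic.h> of this kernel generation; register_qdisc()
 * below fills in any hooks left NULL with the noop defaults.
 */
#if 0
static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	/* Accept until the device's tx_queue_len is reached ... */
	if (likely(skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len))
		return qdisc_enqueue_tail(skb, sch);	/* 0 on success */

	/* ... then drop; qdisc_drop() returns NET_XMIT_DROP */
	return qdisc_drop(skb, sch);
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	/* NULL only means "nothing to send right now" unless
	 * sch->q.qlen is also 0. */
	return qdisc_dequeue_head(sch);
}

static struct Qdisc_ops example_qdisc_ops __read_mostly = {
	.id		= "example",
	.enqueue	= example_enqueue,
	.dequeue	= example_dequeue,
	.owner		= THIS_MODULE,
};
#endif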

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);


/************************************************
 *	Queueing disciplines manipulation.	*
 ************************************************/


/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;

/* Register/unregister queueing discipline */

int register_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int rc = -EEXIST;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (!strcmp(qops->id, q->id))
			goto out;

	if (qops->enqueue == NULL)
		qops->enqueue = noop_qdisc_ops.enqueue;
	if (qops->requeue == NULL)
		qops->requeue = noop_qdisc_ops.requeue;
	if (qops->dequeue == NULL)
		qops->dequeue = noop_qdisc_ops.dequeue;

	qops->next = NULL;
	*qp = qops;
	rc = 0;
out:
	write_unlock(&qdisc_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_qdisc);

int unregister_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int err = -ENOENT;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (q == qops)
			break;
	if (q) {
		*qp = q->next;
		q->next = NULL;
		err = 0;
	}
	write_unlock(&qdisc_mod_lock);
	return err;
}
EXPORT_SYMBOL(unregister_qdisc);
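
/*
 * Illustrative sketch (not part of the original file, compiled out):
 * how a qdisc module typically uses the register/unregister pair above
 * from its module init/exit hooks.  example_qdisc_ops refers to the
 * hypothetical ops from the earlier sketch.
 */
#if 0
static int __init example_module_init(void)
{
	return register_qdisc(&example_qdisc_ops);
}

static void __exit example_module_exit(void)
{
	unregister_qdisc(&example_qdisc_ops);
}

module_init(example_module_init);
module_exit(example_module_exit);
MODULE_LICENSE("GPL");
#endif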

/* We know the handle. Find the qdisc among all qdiscs attached to the
   device (root qdisc, all its children, children of children, etc.)
 */

struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
{
	struct Qdisc *q;

	if (!(root->flags & TCQ_F_BUILTIN) &&
	    root->handle == handle)
		return root;

	list_for_each_entry(q, &root->list, list) {
		if (q->handle == handle)
			return q;
	}
	return NULL;
}

struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		struct Qdisc *q, *txq_root = txq->qdisc_sleeping;

		q = qdisc_match_from_root(txq_root, handle);
		if (q)
			return q;
	}
	return qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
}

static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
	unsigned long cl;
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = p->ops->cl_ops;

	if (cops == NULL)
		return NULL;
	cl = cops->get(p, classid);

	if (cl == 0)
		return NULL;
	leaf = cops->leaf(p, cl);
	cops->put(p, cl);
	return leaf;
}

/* Find queueing discipline by name */

static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
{
	struct Qdisc_ops *q = NULL;

	if (kind) {
		read_lock(&qdisc_mod_lock);
		for (q = qdisc_base; q; q = q->next) {
			if (nla_strcmp(kind, q->id) == 0) {
				if (!try_module_get(q->owner))
					q = NULL;
				break;
			}
		}
		read_unlock(&qdisc_mod_lock);
	}
	return q;
}

static struct qdisc_rate_table *qdisc_rtab_list;

struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
{
	struct qdisc_rate_table *rtab;

	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
		if (memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) == 0) {
			rtab->refcnt++;
			return rtab;
		}
	}

	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
	    nla_len(tab) != TC_RTAB_SIZE)
		return NULL;

	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
	if (rtab) {
		rtab->rate = *r;
		rtab->refcnt = 1;
		memcpy(rtab->data, nla_data(tab), 1024);
		rtab->next = qdisc_rtab_list;
		qdisc_rtab_list = rtab;
	}
	return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);

void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
	struct qdisc_rate_table *rtab, **rtabp;

	if (!tab || --tab->refcnt)
		return;

	for (rtabp = &qdisc_rtab_list; (rtab = *rtabp) != NULL; rtabp = &rtab->next) {
		if (rtab == tab) {
			*rtabp = rtab->next;
			kfree(rtab);
			return;
		}
	}
}
EXPORT_SYMBOL(qdisc_put_rtab);
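
/*
 * Illustrative sketch (not part of the original file, compiled out):
 * a typical user of the shared rate-table cache above, modelled on
 * what rate-based qdiscs such as sch_tbf do.  example_change() and its
 * parameters are hypothetical names.
 */
#if 0
static int example_change(struct Qdisc *sch, struct tc_ratespec *rate,
			  struct nlattr *rtab_attr)
{
	struct qdisc_rate_table *rtab;

	/* Look up (or create) a table shared with every other user of
	 * an identical tc_ratespec; this takes a reference. */
	rtab = qdisc_get_rtab(rate, rtab_attr);
	if (rtab == NULL)
		return -EINVAL;

	/* rtab->data[] would now drive the qdisc's transmit-time
	 * lookups; drop the reference on reconfigure/destroy. */
	qdisc_put_rtab(rtab);
	return 0;
}
#endif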

static LIST_HEAD(qdisc_stab_list);
static DEFINE_SPINLOCK(qdisc_stab_lock);

static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
	[TCA_STAB_DATA] = { .type = NLA_BINARY },
};

static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
{
	struct nlattr *tb[TCA_STAB_MAX + 1];
	struct qdisc_size_table *stab;
	struct tc_sizespec *s;
	unsigned int tsize = 0;
	u16 *tab = NULL;
	int err;

	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy);
	if (err < 0)
		return ERR_PTR(err);
	if (!tb[TCA_STAB_BASE])
		return ERR_PTR(-EINVAL);

	s = nla_data(tb[TCA_STAB_BASE]);

	if (s->tsize > 0) {
		if (!tb[TCA_STAB_DATA])
			return ERR_PTR(-EINVAL);
		tab = nla_data(tb[TCA_STAB_DATA]);
		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
	}

	if (!s || tsize != s->tsize || (!tab && tsize > 0))
		return ERR_PTR(-EINVAL);

	spin_lock_bh(&qdisc_stab_lock);

	list_for_each_entry(stab, &qdisc_stab_list, list) {
		if (memcmp(&stab->szopts, s, sizeof(*s)))
			continue;
		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
			continue;
		stab->refcnt++;
		spin_unlock_bh(&qdisc_stab_lock);
		return stab;
	}

	spin_unlock_bh(&qdisc_stab_lock);

	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	stab->refcnt = 1;
	stab->szopts = *s;
	if (tsize > 0)
		memcpy(stab->data, tab, tsize * sizeof(u16));

	spin_lock_bh(&qdisc_stab_lock);
	list_add_tail(&stab->list, &qdisc_stab_list);
	spin_unlock_bh(&qdisc_stab_lock);

	return stab;
}

void qdisc_put_stab(struct qdisc_size_table *tab)
{
	if (!tab)
		return;

	spin_lock_bh(&qdisc_stab_lock);

	if (--tab->refcnt == 0) {
		list_del(&tab->list);
		kfree(tab);
	}

	spin_unlock_bh(&qdisc_stab_lock);
}
EXPORT_SYMBOL(qdisc_put_stab);

static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_STAB);
	NLA_PUT(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts);
	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	return -1;
}

void qdisc_calculate_pkt_len(struct sk_buff *skb, struct qdisc_size_table *stab)
{
	int pkt_len, slot;

	pkt_len = skb->len + stab->szopts.overhead;
	if (unlikely(!stab->szopts.tsize))
		goto out;

	slot = pkt_len + stab->szopts.cell_align;
	if (unlikely(slot < 0))
		slot = 0;

	slot >>= stab->szopts.cell_log;
	if (likely(slot < stab->szopts.tsize))
		pkt_len = stab->data[slot];
	else
		pkt_len = stab->data[stab->szopts.tsize - 1] *
				(slot / stab->szopts.tsize) +
				stab->data[slot % stab->szopts.tsize];

	pkt_len <<= stab->szopts.size_log;
out:
	if (unlikely(pkt_len < 1))
		pkt_len = 1;
	qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
EXPORT_SYMBOL(qdisc_calculate_pkt_len);
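
/*
 * Worked example for qdisc_calculate_pkt_len() with a hypothetical
 * size table: overhead = 24, cell_align = 0, cell_log = 6,
 * size_log = 0, tsize = 512.  A 1000-byte skb gives
 * pkt_len = 1024 and slot = 1024 >> 6 = 16, so the qdisc is charged
 * stab->data[16] bytes for this packet; slots beyond tsize are
 * extrapolated from the last table entry.
 */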

static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						 timer);

	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
	smp_wmb();
	__netif_schedule(qdisc_root(wd->qdisc));

	return HRTIMER_NORESTART;
}

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
	hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	wd->timer.function = qdisc_watchdog;
	wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init);

void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
{
	ktime_t time;

	wd->qdisc->flags |= TCQ_F_THROTTLED;
	time = ktime_set(0, 0);
	time = ktime_add_ns(time, PSCHED_US2NS(expires));
	hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule);

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
	hrtimer_cancel(&wd->timer);
	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);
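
/*
 * Illustrative sketch (not part of the original file, compiled out):
 * how a shaping qdisc uses the watchdog above to defer dequeue until
 * it may send again, in the style of sch_tbf.  The shaper_sched_data
 * layout and next_send_time bookkeeping are hypothetical.
 */
#if 0
struct shaper_sched_data {
	struct qdisc_watchdog	watchdog;	/* set up via qdisc_watchdog_init() */
	psched_time_t		next_send_time;
};

static struct sk_buff *shaper_dequeue(struct Qdisc *sch)
{
	struct shaper_sched_data *q = qdisc_priv(sch);
	psched_time_t now = psched_get_time();

	if (now < q->next_send_time) {
		/* Too early: set TCQ_F_THROTTLED and arm the hrtimer
		 * so __netif_schedule() reruns us at the right time. */
		qdisc_watchdog_schedule(&q->watchdog, q->next_send_time);
		return NULL;
	}
	return qdisc_dequeue_head(sch);
}
#endif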

static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head), i;
	struct hlist_head *h;

	if (size <= PAGE_SIZE)
		h = kmalloc(size, GFP_KERNEL);
	else
		h = (struct hlist_head *)
			__get_free_pages(GFP_KERNEL, get_order(size));

	if (h != NULL) {
		for (i = 0; i < n; i++)
			INIT_HLIST_HEAD(&h[i]);
	}
	return h;
}

static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head);

	if (size <= PAGE_SIZE)
		kfree(h);
	else
		free_pages((unsigned long)h, get_order(size));
}

void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *n, *next;
	struct hlist_head *nhash, *ohash;
	unsigned int nsize, nmask, osize;
	unsigned int i, h;

	/* Rehash when load factor exceeds 0.75 */
	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
		return;
	nsize = clhash->hashsize * 2;
	nmask = nsize - 1;
	nhash = qdisc_class_hash_alloc(nsize);
	if (nhash == NULL)
		return;

	ohash = clhash->hash;
	osize = clhash->hashsize;

	sch_tree_lock(sch);
	for (i = 0; i < osize; i++) {
		hlist_for_each_entry_safe(cl, n, next, &ohash[i], hnode) {
			h = qdisc_class_hash(cl->classid, nmask);
			hlist_add_head(&cl->hnode, &nhash[h]);
		}
	}
	clhash->hash = nhash;
	clhash->hashsize = nsize;
	clhash->hashmask = nmask;
	sch_tree_unlock(sch);

	qdisc_class_hash_free(ohash, osize);
}
EXPORT_SYMBOL(qdisc_class_hash_grow);

int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
{
	unsigned int size = 4;

	clhash->hash = qdisc_class_hash_alloc(size);
	if (clhash->hash == NULL)
		return -ENOMEM;
	clhash->hashsize = size;
	clhash->hashmask = size - 1;
	clhash->hashelems = 0;
	return 0;
}
EXPORT_SYMBOL(qdisc_class_hash_init);

void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
	qdisc_class_hash_free(clhash->hash, clhash->hashsize);
}
EXPORT_SYMBOL(qdisc_class_hash_destroy);

void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	unsigned int h;

	INIT_HLIST_NODE(&cl->hnode);
	h = qdisc_class_hash(cl->classid, clhash->hashmask);
	hlist_add_head(&cl->hnode, &clhash->hash[h]);
	clhash->hashelems++;
}
EXPORT_SYMBOL(qdisc_class_hash_insert);

void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	hlist_del(&cl->hnode);
	clhash->hashelems--;
}
EXPORT_SYMBOL(qdisc_class_hash_remove);
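
/*
 * Illustrative sketch (not part of the original file, compiled out):
 * how a classful qdisc embeds struct Qdisc_class_common and drives the
 * hash helpers above, in the style of sch_htb.  All example_* names
 * are hypothetical.
 */
#if 0
struct example_class {
	struct Qdisc_class_common common;	/* classid + hash linkage */
};

struct example_sched_data {
	struct Qdisc_class_hash clhash;
};

static int example_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct example_sched_data *q = qdisc_priv(sch);

	return qdisc_class_hash_init(&q->clhash);	/* starts small */
}

static void example_attach_class(struct Qdisc *sch, struct example_class *cl,
				 u32 classid)
{
	struct example_sched_data *q = qdisc_priv(sch);

	cl->common.classid = classid;
	qdisc_class_hash_insert(&q->clhash, &cl->common);
	/* Doubles the table once the load factor passes 0.75 */
	qdisc_class_hash_grow(sch, &q->clhash);
}

static struct example_class *example_find(struct Qdisc *sch, u32 classid)
{
	struct example_sched_data *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	return clc ? container_of(clc, struct example_class, common) : NULL;
}
#endif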

/* Allocate a unique handle from the space managed by the kernel */

static u32 qdisc_alloc_handle(struct net_device *dev)
{
	int i = 0x10000;
	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

	do {
		autohandle += TC_H_MAKE(0x10000U, 0);
		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
			autohandle = TC_H_MAKE(0x80000000U, 0);
	} while (qdisc_lookup(dev, autohandle) && --i > 0);

	return i > 0 ? autohandle : 0;
}
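
/* Handles are 32-bit <major:minor> pairs with the minor in the low 16
 * bits, so adding TC_H_MAKE(0x10000U, 0) advances the major number:
 * the allocator above hands out 8001:0, 8002:0, ..., wrapping around
 * before it would collide with TC_H_ROOT. */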

/* Attach toplevel qdisc to device queue. */

static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
				     struct Qdisc *qdisc)
{
	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
	spinlock_t *root_lock;

	root_lock = qdisc_root_lock(oqdisc);
	spin_lock_bh(root_lock);

	/* Prune old scheduler */
	if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
		qdisc_reset(oqdisc);

	/* ... and graft new one */
	if (qdisc == NULL)
		qdisc = &noop_qdisc;
	dev_queue->qdisc_sleeping = qdisc;
	dev_queue->qdisc = &noop_qdisc;

	spin_unlock_bh(root_lock);

	return oqdisc;
}

void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
{
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	u32 parentid;

	if (n == 0)
		return;
	while ((parentid = sch->parent)) {
		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
			return;

		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
		if (sch == NULL) {
			WARN_ON(parentid != TC_H_ROOT);
			return;
		}
		cops = sch->ops->cl_ops;
		if (cops->qlen_notify) {
			cl = cops->get(sch, parentid);
			cops->qlen_notify(sch, cl);
			cops->put(sch, cl);
		}
		sch->q.qlen -= n;
	}
}
EXPORT_SYMBOL(qdisc_tree_decrease_qlen);

static void notify_and_destroy(struct sk_buff *skb, struct nlmsghdr *n, u32 clid,
			       struct Qdisc *old, struct Qdisc *new)
{
	if (new || old)
		qdisc_notify(skb, n, clid, old, new);

	if (old)
		qdisc_destroy(old);
}

/* Graft qdisc "new" to class "classid" of qdisc "parent" or
 * to device "dev".
 *
 * When appropriate, send a netlink notification using "skb"
 * and "n".
 *
 * On success, destroy old qdisc.
 */

static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
		       struct Qdisc *new, struct Qdisc *old)
{
	struct Qdisc *q = old;
	int err = 0;

	if (parent == NULL) {
		unsigned int i, num_q, ingress;

		ingress = 0;
		num_q = dev->num_tx_queues;
		if ((q && q->flags & TCQ_F_INGRESS) ||
		    (new && new->flags & TCQ_F_INGRESS)) {
			num_q = 1;
			ingress = 1;
		}

		if (dev->flags & IFF_UP)
			dev_deactivate(dev);

		for (i = 0; i < num_q; i++) {
			struct netdev_queue *dev_queue = &dev->rx_queue;

			if (!ingress)
				dev_queue = netdev_get_tx_queue(dev, i);

			old = dev_graft_qdisc(dev_queue, new);
			if (new && i > 0)
				atomic_inc(&new->refcnt);

			notify_and_destroy(skb, n, classid, old, new);
		}

		if (dev->flags & IFF_UP)
			dev_activate(dev);
	} else {
		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;

		err = -EINVAL;

		if (cops) {
			unsigned long cl = cops->get(parent, classid);
			if (cl) {
				err = cops->graft(parent, cl, new, &old);
				cops->put(parent, cl);
			}
		}
		if (!err)
			notify_and_destroy(skb, n, classid, old, new);
	}
	return err;
}

/* lockdep annotation is needed for ingress; egress gets it only for name */
static struct lock_class_key qdisc_tx_lock;
static struct lock_class_key qdisc_rx_lock;

/*
   Allocate and initialize new qdisc.

   Parameters are passed via opt.
 */

static struct Qdisc *
qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
	     u32 parent, u32 handle, struct nlattr **tca, int *errp)
{
	int err;
	struct nlattr *kind = tca[TCA_KIND];
	struct Qdisc *sch;
	struct Qdisc_ops *ops;
	struct qdisc_size_table *stab;

	ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_KMOD
	if (ops == NULL && kind != NULL) {
		char name[IFNAMSIZ];
		if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
			/* We dropped the RTNL semaphore in order to
			 * perform the module load.  So, even if we
			 * succeeded in loading the module we have to
			 * tell the caller to replay the request.  We
			 * indicate this using -EAGAIN.
			 * We replay the request because the device may
			 * go away in the mean time.
			 */
			rtnl_unlock();
			request_module("sch_%s", name);
			rtnl_lock();
			ops = qdisc_lookup_ops(kind);
			if (ops != NULL) {
				/* We will try qdisc_lookup_ops again,
				 * so don't keep a reference.
				 */
				module_put(ops->owner);
				err = -EAGAIN;
				goto err_out;
			}
		}
	}
#endif

	err = -ENOENT;
	if (ops == NULL)
		goto err_out;

	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch)) {
		err = PTR_ERR(sch);
		goto err_out2;
	}

	sch->parent = parent;

	if (handle == TC_H_INGRESS) {
		sch->flags |= TCQ_F_INGRESS;
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
		lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
	} else {
		if (handle == 0) {
			handle = qdisc_alloc_handle(dev);
			err = -ENOMEM;
			if (handle == 0)
				goto err_out3;
		}
		lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
	}

	sch->handle = handle;

	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
		if (tca[TCA_STAB]) {
			stab = qdisc_get_stab(tca[TCA_STAB]);
			if (IS_ERR(stab)) {
				err = PTR_ERR(stab);
				goto err_out3;
			}
			sch->stab = stab;
		}
		if (tca[TCA_RATE]) {
			err = gen_new_estimator(&sch->bstats, &sch->rate_est,
						qdisc_root_lock(sch),
						tca[TCA_RATE]);
			if (err) {
				/*
				 * Any broken qdiscs that would require
				 * a ops->reset() here?  The qdisc was never
				 * in action so it shouldn't be necessary.
				 */
				if (ops->destroy)
					ops->destroy(sch);
				goto err_out3;
			}
		}
		if ((parent != TC_H_ROOT) && !(sch->flags & TCQ_F_INGRESS))
			list_add_tail(&sch->list, &dev_queue->qdisc_sleeping->list);

		return sch;
	}
err_out3:
	qdisc_put_stab(sch->stab);
	dev_put(dev);
	kfree((char *) sch - sch->padded);
err_out2:
	module_put(ops->owner);
err_out:
	*errp = err;
	return NULL;
}
static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
{
	struct qdisc_size_table *stab = NULL;
	int err = 0;

	if (tca[TCA_OPTIONS]) {
		if (sch->ops->change == NULL)
			return -EINVAL;
		err = sch->ops->change(sch, tca[TCA_OPTIONS]);
		if (err)
			return err;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB]);
		if (IS_ERR(stab))
			return PTR_ERR(stab);
	}

	qdisc_put_stab(sch->stab);
	sch->stab = stab;

	if (tca[TCA_RATE])
		gen_replace_estimator(&sch->bstats, &sch->rate_est,
				      qdisc_root_lock(sch), tca[TCA_RATE]);
	return 0;
}

struct check_loop_arg
{
	struct qdisc_walker	w;
	struct Qdisc		*p;
	int			depth;
};

static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);

static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
{
	struct check_loop_arg arg;

	if (q->ops->cl_ops == NULL)
		return 0;

	arg.w.stop = arg.w.skip = arg.w.count = 0;
	arg.w.fn = check_loop_fn;
	arg.depth = depth;
	arg.p = p;
	q->ops->cl_ops->walk(q, &arg.w);
	return arg.w.stop ? -ELOOP : 0;
}

static int
check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
{
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct check_loop_arg *arg = (struct check_loop_arg *)w;

	leaf = cops->leaf(q, cl);
	if (leaf) {
		if (leaf == arg->p || arg->depth > 7)
			return -ELOOP;
		return check_loop(leaf, arg->p, arg->depth + 1);
	}
	return 0;
}
/*
 * Delete/get qdisc.
 */

static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = NLMSG_DATA(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid = tcm->tcm_parent;
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;
	int err;

	if (net != &init_net)
		return -EINVAL;

	if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
		return -ENODEV;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	if (clid) {
		if (clid != TC_H_ROOT) {
			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
				if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else { /* ingress */
				q = dev->rx_queue.qdisc_sleeping;
			}
		} else {
			struct netdev_queue *dev_queue;
			dev_queue = netdev_get_tx_queue(dev, 0);
			q = dev_queue->qdisc_sleeping;
		}
		if (!q)
			return -ENOENT;

		if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
			return -EINVAL;
	} else {
		if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
			return -ENOENT;
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;

	if (n->nlmsg_type == RTM_DELQDISC) {
		if (!clid)
			return -EINVAL;
		if (q->handle == 0)
			return -ENOENT;
		if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0)
			return err;
	} else {
		qdisc_notify(skb, n, clid, NULL, q);
	}
	return 0;
}

/*
 * Create/change qdisc.
 */

static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm;
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q, *p;
	int err;

	if (net != &init_net)
		return -EINVAL;

replay:
	/* Reinit, just in case something touches this. */
	tcm = NLMSG_DATA(n);
	clid = tcm->tcm_parent;
	q = p = NULL;

	if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
		return -ENODEV;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	if (clid) {
		if (clid != TC_H_ROOT) {
			if (clid != TC_H_INGRESS) {
				if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else { /* ingress */
				q = dev->rx_queue.qdisc_sleeping;
			}
		} else {
			struct netdev_queue *dev_queue;
			dev_queue = netdev_get_tx_queue(dev, 0);
			q = dev_queue->qdisc_sleeping;
		}

		/* It may be the default qdisc; ignore it. */
		if (q && q->handle == 0)
			q = NULL;

		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
			if (tcm->tcm_handle) {
				if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
					return -EEXIST;
				if (TC_H_MIN(tcm->tcm_handle))
					return -EINVAL;
				if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
					goto create_n_graft;
				if (n->nlmsg_flags & NLM_F_EXCL)
					return -EEXIST;
				if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
					return -EINVAL;
				if (q == p ||
				    (p && check_loop(q, p, 0)))
					return -ELOOP;
				atomic_inc(&q->refcnt);
				goto graft;
			} else {
				if (q == NULL)
					goto create_n_graft;

				/* This magic test requires explanation.
				 *
				 * We know that some child q is already
				 * attached to this parent and have a choice:
				 * either to change it or to create/graft a
				 * new one.
				 *
				 * 1. We are allowed to create/graft only
				 * if both the CREATE and REPLACE flags are
				 * set.
				 *
				 * 2. If EXCL is set, the requestor meant
				 * that the qdisc tcm_handle is not expected
				 * to exist, so we choose create/graft too.
				 *
				 * 3. The last case is when no flags are set.
				 * Alas, it is a sort of hole in the API; we
				 * cannot decide what to do unambiguously.
				 * For now we select create/graft if the
				 * user gave a KIND that does not match the
				 * existing one.
				 */
				if ((n->nlmsg_flags & NLM_F_CREATE) &&
				    (n->nlmsg_flags & NLM_F_REPLACE) &&
				    ((n->nlmsg_flags & NLM_F_EXCL) ||
				     (tca[TCA_KIND] &&
				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
					goto create_n_graft;
			}
		}
	} else {
		if (!tcm->tcm_handle)
			return -EINVAL;
		q = qdisc_lookup(dev, tcm->tcm_handle);
	}

	/* Change qdisc parameters */
	if (q == NULL)
		return -ENOENT;
	if (n->nlmsg_flags & NLM_F_EXCL)
		return -EEXIST;
	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;
	err = qdisc_change(q, tca);
	if (err == 0)
		qdisc_notify(skb, n, clid, NULL, q);
	return err;

create_n_graft:
	if (!(n->nlmsg_flags & NLM_F_CREATE))
		return -ENOENT;
	if (clid == TC_H_INGRESS)
		q = qdisc_create(dev, &dev->rx_queue,
				 tcm->tcm_parent, tcm->tcm_parent,
				 tca, &err);
	else
		q = qdisc_create(dev, netdev_get_tx_queue(dev, 0),
				 tcm->tcm_parent, tcm->tcm_handle,
				 tca, &err);
	if (q == NULL) {
		if (err == -EAGAIN)
			goto replay;
		return err;
	}

graft:
	err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
	if (err) {
		if (q)
			qdisc_destroy(q);
		return err;
	}

	return 0;
}

static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			 u32 pid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
	tcm = NLMSG_DATA(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = atomic_read(&q->refcnt);
	NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
		goto nla_put_failure;
	q->qstats.qlen = q->q.qlen;

	if (q->stab && qdisc_dump_stab(skb, q->stab) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
					 TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, &q->qstats) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nlmsg_failure:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n,
			u32 clid, struct Qdisc *old, struct Qdisc *new)
{
	struct sk_buff *skb;
	u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (old && old->handle) {
		if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0)
			goto err_out;
	}
	if (new) {
		if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
			goto err_out;
	}

	if (skb->len)
		return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags & NLM_F_ECHO);

err_out:
	kfree_skb(skb);
	return -EINVAL;
}

static bool tc_qdisc_dump_ignore(struct Qdisc *q)
{
	return (q->flags & TCQ_F_BUILTIN) ? true : false;
}

static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
			      struct netlink_callback *cb,
			      int *q_idx_p, int s_q_idx)
{
	int ret = 0, q_idx = *q_idx_p;
	struct Qdisc *q;

	if (!root)
		return 0;

	q = root;
	if (q_idx < s_q_idx) {
		q_idx++;
	} else {
		if (!tc_qdisc_dump_ignore(q) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}
	list_for_each_entry(q, &root->list, list) {
		if (q_idx < s_q_idx) {
			q_idx++;
			continue;
		}
		if (!tc_qdisc_dump_ignore(q) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

out:
	*q_idx_p = q_idx;
	return ret;
done:
	ret = -1;
	goto out;
}

static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int idx, q_idx;
	int s_idx, s_q_idx;
	struct net_device *dev;

	if (net != &init_net)
		return 0;

	s_idx = cb->args[0];
	s_q_idx = q_idx = cb->args[1];
	read_lock(&dev_base_lock);
	idx = 0;
	for_each_netdev(&init_net, dev) {
		struct netdev_queue *dev_queue;

		if (idx < s_idx)
			goto cont;
		if (idx > s_idx)
			s_q_idx = 0;
		q_idx = 0;

		dev_queue = netdev_get_tx_queue(dev, 0);
		if (tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, &q_idx, s_q_idx) < 0)
			goto done;

		dev_queue = &dev->rx_queue;
		if (tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, &q_idx, s_q_idx) < 0)
			goto done;

cont:
		idx++;
	}

done:
	read_unlock(&dev_base_lock);

	cb->args[0] = idx;
	cb->args[1] = q_idx;

	return skb->len;
}



/************************************************
 *	Traffic classes manipulation.		*
 ************************************************/



static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct tcmsg *tcm = NLMSG_DATA(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q = NULL;
	const struct Qdisc_class_ops *cops;
	unsigned long cl = 0;
	unsigned long new_cl;
	u32 pid = tcm->tcm_parent;
	u32 clid = tcm->tcm_handle;
	u32 qid = TC_H_MAJ(clid);
	int err;

	if (net != &init_net)
		return -EINVAL;

	if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
		return -ENODEV;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	/*
	   parent == TC_H_UNSPEC - unspecified parent.
	   parent == TC_H_ROOT   - class is root, which has no parent.
	   parent == X:0	 - parent is root class.
	   parent == X:Y	 - parent is a node in hierarchy.
	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.

	   handle == 0:0	 - generate handle from kernel pool.
	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
	   handle == X:Y	 - clear.
	   handle == X:0	 - root class.
	 */
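
	/* For example, "tc class add dev eth0 parent 1:1 classid 1:10 ..."
	 * (tc parses these IDs as hexadecimal) arrives here with
	 * tcm_parent == 0x00010001 and tcm_handle == 0x00010010,
	 * so qid starts out as 0x00010000. */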

	/* Step 1. Determine qdisc handle X:0 */

	dev_queue = netdev_get_tx_queue(dev, 0);
	if (pid != TC_H_ROOT) {
		u32 qid1 = TC_H_MAJ(pid);

		if (qid && qid1) {
			/* If both majors are known, they must be identical. */
			if (qid != qid1)
				return -EINVAL;
		} else if (qid1) {
			qid = qid1;
		} else if (qid == 0)
			qid = dev_queue->qdisc_sleeping->handle;

		/* Now qid is a genuine qdisc handle consistent with
		   both the parent and the child.

		   TC_H_MAJ(pid) may still be unspecified; complete it now.
		 */
		if (pid)
			pid = TC_H_MAKE(qid, pid);
	} else {
		if (qid == 0)
			qid = dev_queue->qdisc_sleeping->handle;
	}

	/* OK. Locate the qdisc. */
	if ((q = qdisc_lookup(dev, qid)) == NULL)
		return -ENOENT;

	/* And check that it supports classes */
	cops = q->ops->cl_ops;
	if (cops == NULL)
		return -EINVAL;

	/* Now try to get the class */
	if (clid == 0) {
		if (pid == TC_H_ROOT)
			clid = qid;
	} else
		clid = TC_H_MAKE(qid, clid);

	if (clid)
		cl = cops->get(q, clid);

	if (cl == 0) {
		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTCLASS || !(n->nlmsg_flags & NLM_F_CREATE))
			goto out;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTCLASS:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL)
				goto out;
			break;
		case RTM_DELTCLASS:
			err = cops->delete(q, cl);
			if (err == 0)
				tclass_notify(skb, n, q, cl, RTM_DELTCLASS);
			goto out;
		case RTM_GETTCLASS:
			err = tclass_notify(skb, n, q, cl, RTM_NEWTCLASS);
			goto out;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	new_cl = cl;
	err = cops->change(q, clid, pid, tca, &new_cl);
	if (err == 0)
		tclass_notify(skb, n, q, new_cl, RTM_NEWTCLASS);

out:
	if (cl)
		cops->put(q, cl);

	return err;
}


static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
			  unsigned long cl,
			  u32 pid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
	tcm = NLMSG_DATA(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = q->handle;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = 0;
	NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
					 TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nlmsg_failure:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
			 struct Qdisc *q, unsigned long cl, int event)
{
	struct sk_buff *skb;
	u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, pid, n->nlmsg_seq, 0, event) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags & NLM_F_ECHO);
}

struct qdisc_dump_args
{
	struct qdisc_walker	w;
	struct sk_buff		*skb;
	struct netlink_callback	*cb;
};

static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
{
	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).pid,
			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
}

static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
				struct tcmsg *tcm, struct netlink_callback *cb,
				int *t_p, int s_t)
{
	struct qdisc_dump_args arg;

	if (tc_qdisc_dump_ignore(q) ||
	    *t_p < s_t || !q->ops->cl_ops ||
	    (tcm->tcm_parent &&
	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
		(*t_p)++;
		return 0;
	}
	if (*t_p > s_t)
		memset(&cb->args[1], 0, sizeof(cb->args) - sizeof(cb->args[0]));
	arg.w.fn = qdisc_class_dump;
	arg.skb = skb;
	arg.cb = cb;
	arg.w.stop = 0;
	arg.w.skip = cb->args[1];
	arg.w.count = 0;
	q->ops->cl_ops->walk(q, &arg.w);
	cb->args[1] = arg.w.count;
	if (arg.w.stop)
		return -1;
	(*t_p)++;
	return 0;
}

static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
			       struct tcmsg *tcm, struct netlink_callback *cb,
			       int *t_p, int s_t)
{
	struct Qdisc *q;

	if (!root)
		return 0;

	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
		return -1;

	list_for_each_entry(q, &root->list, list) {
		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
	}

	return 0;
}

static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct net_device *dev;
	int t, s_t;

	if (net != &init_net)
		return 0;

	if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
		return 0;
	if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
		return 0;

	s_t = cb->args[0];
	t = 0;

	dev_queue = netdev_get_tx_queue(dev, 0);
	if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0)
		goto done;

	dev_queue = &dev->rx_queue;
	if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0)
		goto done;

done:
	cb->args[0] = t;

	dev_put(dev);
	return skb->len;
}

/* Main classifier routine: scans the classifier chain attached
   to this qdisc, (optionally) tests for the protocol, and asks the
   specific classifiers.
 */
int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
		       struct tcf_result *res)
{
	__be16 protocol = skb->protocol;
	int err = 0;

	for (; tp; tp = tp->next) {
		if ((tp->protocol == protocol ||
		     tp->protocol == htons(ETH_P_ALL)) &&
		    (err = tp->classify(skb, tp, res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
			if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
				skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
#endif
			return err;
		}
	}
	return -1;
}
EXPORT_SYMBOL(tc_classify_compat);

int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
		struct tcf_result *res)
{
	int err = 0;
	__be16 protocol;
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_proto *otp = tp;
reclassify:
#endif
	protocol = skb->protocol;

	err = tc_classify_compat(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
	if (err == TC_ACT_RECLASSIFY) {
		u32 verd = G_TC_VERD(skb->tc_verd);
		tp = otp;

		if (verd++ >= MAX_REC_LOOP) {
			printk("rule prio %u protocol %02x reclassify loop, "
			       "packet dropped\n",
			       tp->prio & 0xffff, ntohs(tp->protocol));
			return TC_ACT_SHOT;
		}
		skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
		goto reclassify;
	}
#endif
	return err;
}
EXPORT_SYMBOL(tc_classify);
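
/*
 * Illustrative sketch (not part of the original file, compiled out):
 * how a classful qdisc's enqueue path typically calls tc_classify()
 * to pick a class, in the style of sch_prio/sch_htb.  It assumes a
 * hypothetical "struct tcf_proto *filter_list" member added to the
 * example_sched_data sketched earlier; example_find() is from the
 * class-hash sketch.
 */
#if 0
static struct example_class *example_classify(struct sk_buff *skb,
					      struct Qdisc *sch)
{
	struct example_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	int err;

	err = tc_classify(skb, q->filter_list, &res);
	if (err < 0)
		return NULL;		/* no filter matched */
#ifdef CONFIG_NET_CLS_ACT
	switch (err) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
		return NULL;		/* an action consumed the packet */
	}
#endif
	/* res.classid now names the selected class */
	return example_find(sch, res.classid);
}
#endif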

void tcf_destroy(struct tcf_proto *tp)
{
	tp->ops->destroy(tp);
	module_put(tp->ops->owner);
	kfree(tp);
}

void tcf_destroy_chain(struct tcf_proto **fl)
{
	struct tcf_proto *tp;

	while ((tp = *fl) != NULL) {
		*fl = tp->next;
		tcf_destroy(tp);
	}
}
EXPORT_SYMBOL(tcf_destroy_chain);

#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
	struct timespec ts;

	hrtimer_get_res(CLOCK_MONOTONIC, &ts);
	seq_printf(seq, "%08x %08x %08x %08x\n",
		   (u32)NSEC_PER_USEC, (u32)PSCHED_US2NS(1),
		   1000000,
		   (u32)NSEC_PER_SEC / (u32)ktime_to_ns(timespec_to_ktime(ts)));

	return 0;
}

static int psched_open(struct inode *inode, struct file *file)
{
	return single_open(file, psched_show, PDE(inode)->data);
}

static const struct file_operations psched_fops = {
	.owner		= THIS_MODULE,
	.open		= psched_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif

static int __init pktsched_init(void)
{
	register_qdisc(&pfifo_qdisc_ops);
	register_qdisc(&bfifo_qdisc_ops);
	proc_net_fops_create(&init_net, "psched", 0, &psched_fops);

	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc);
	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass);

	return 0;
}

subsys_initcall(pktsched_init);