/*
 * net/sched/sch_api.c	Packet scheduler API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/hashtable.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new);
static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event);

/*

   Short review.
   -------------

   This file consists of two interrelated parts:

   1. The queueing discipline manager frontend.
   2. The traffic class manager frontend.

   Generally, a queueing discipline ("qdisc") is a black box that is
   able to enqueue packets and to dequeue them (when the device is
   ready to send something), in an order and at times determined by
   the algorithm hidden inside it.

   qdiscs are divided into two categories:
   - "queues", which have no internal structure visible from outside.
   - "schedulers", which split all packets into "traffic classes"
     using "packet classifiers" (see cls_api.c).

   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them, and so on recursively.

   The goal of the routines in this file is to translate the
   information supplied by the user in the form of handles into a form
   more intelligible to the kernel, to perform some sanity checks and
   the part of the work that is common to all qdiscs, and to provide
   rtnetlink notifications.

   All the real intelligent work is done inside the qdisc modules.



   Every discipline has two major routines: enqueue and dequeue.

   ---dequeue

   dequeue usually returns an skb to send. It is allowed to return NULL,
   but that does not mean the queue is empty; it just means that the
   discipline does not want to send anything at this time.
   The queue is really empty only if q->q.qlen == 0.
   For complicated disciplines with multiple queues, q->q is not the
   real packet queue, but q->q.qlen must nevertheless be valid.

   ---enqueue

   enqueue returns 0 if the packet was enqueued successfully.
   If a packet (this one or another one) was dropped, it returns
   a non-zero error code:
   NET_XMIT_DROP - this packet was dropped.
     Expected action: do not back off, but wait until the queue clears.
   NET_XMIT_CN - this packet was probably enqueued, but another one was dropped.
     Expected action: back off or ignore.

   Auxiliary routines:

   ---peek

   Like dequeue, but without removing the packet from the queue.

   ---reset

   Returns the qdisc to its initial state: purges all buffers, clears all
   timers, counters (except statistics), etc.

   ---init

   Initializes a newly created qdisc.

   ---destroy

   Destroys resources allocated by init and during the lifetime of the qdisc.

   ---change

   Changes qdisc parameters.
 */
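
/* Illustrative sketch (not part of this file): a minimal enqueue/dequeue
 * pair honouring the contract above, modelled on the pfifo qdisc in
 * sch_fifo.c. The "sketch_" names are hypothetical, used only to show the
 * expected return values.
 *
 *	static int sketch_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *				  struct sk_buff **to_free)
 *	{
 *		if (likely(sch->q.qlen < sch->limit))
 *			return qdisc_enqueue_tail(skb, sch);	// NET_XMIT_SUCCESS (0)
 *
 *		return qdisc_drop(skb, sch, to_free);		// NET_XMIT_DROP
 *	}
 *
 *	static struct sk_buff *sketch_dequeue(struct Qdisc *sch)
 *	{
 *		return qdisc_dequeue_head(sch);			// may be NULL
 *	}
 */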

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);


/************************************************
 *	Queueing disciplines manipulation.	*
 ************************************************/


/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;

/* Register/unregister queueing discipline */

int register_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int rc = -EEXIST;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (!strcmp(qops->id, q->id))
			goto out;

	if (qops->enqueue == NULL)
		qops->enqueue = noop_qdisc_ops.enqueue;
	if (qops->peek == NULL) {
		if (qops->dequeue == NULL)
			qops->peek = noop_qdisc_ops.peek;
		else
			goto out_einval;
	}
	if (qops->dequeue == NULL)
		qops->dequeue = noop_qdisc_ops.dequeue;

	if (qops->cl_ops) {
		const struct Qdisc_class_ops *cops = qops->cl_ops;

		if (!(cops->get && cops->put && cops->walk && cops->leaf))
			goto out_einval;

		if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf))
			goto out_einval;
	}

	qops->next = NULL;
	*qp = qops;
	rc = 0;
out:
	write_unlock(&qdisc_mod_lock);
	return rc;

out_einval:
	rc = -EINVAL;
	goto out;
}
EXPORT_SYMBOL(register_qdisc);
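
/* Usage sketch (assumption, not taken from this file): a scheduler module
 * typically registers its Qdisc_ops at module init and unregisters them on
 * exit. "sketch_qdisc_ops" is a hypothetical ops instance.
 *
 *	static int __init sketch_module_init(void)
 *	{
 *		return register_qdisc(&sketch_qdisc_ops);
 *	}
 *
 *	static void __exit sketch_module_exit(void)
 *	{
 *		unregister_qdisc(&sketch_qdisc_ops);
 *	}
 *	module_init(sketch_module_init);
 *	module_exit(sketch_module_exit);
 */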

int unregister_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int err = -ENOENT;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (q == qops)
			break;
	if (q) {
		*qp = q->next;
		q->next = NULL;
		err = 0;
	}
	write_unlock(&qdisc_mod_lock);
	return err;
}
EXPORT_SYMBOL(unregister_qdisc);

/* Get default qdisc if not otherwise specified */
void qdisc_get_default(char *name, size_t len)
{
	read_lock(&qdisc_mod_lock);
	strlcpy(name, default_qdisc_ops->id, len);
	read_unlock(&qdisc_mod_lock);
}

static struct Qdisc_ops *qdisc_lookup_default(const char *name)
{
	struct Qdisc_ops *q = NULL;

	for (q = qdisc_base; q; q = q->next) {
		if (!strcmp(name, q->id)) {
			if (!try_module_get(q->owner))
				q = NULL;
			break;
		}
	}

	return q;
}

/* Set new default qdisc to use */
int qdisc_set_default(const char *name)
{
	const struct Qdisc_ops *ops;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	write_lock(&qdisc_mod_lock);
	ops = qdisc_lookup_default(name);
	if (!ops) {
		/* Not found, drop lock and try to load module */
		write_unlock(&qdisc_mod_lock);
		request_module("sch_%s", name);
		write_lock(&qdisc_mod_lock);

		ops = qdisc_lookup_default(name);
	}

	if (ops) {
		/* Set new default */
		module_put(default_qdisc_ops->owner);
		default_qdisc_ops = ops;
	}
	write_unlock(&qdisc_mod_lock);

	return ops ? 0 : -ENOENT;
}

#ifdef CONFIG_NET_SCH_DEFAULT
/* Set default value from kernel config */
static int __init sch_default_qdisc(void)
{
	return qdisc_set_default(CONFIG_DEFAULT_NET_SCH);
}
late_initcall(sch_default_qdisc);
#endif

/* We know the handle. Find the qdisc among all qdiscs attached to the
 * device (root qdisc, all its children, children of children, etc.)
 * Note: caller either uses rtnl or rcu_read_lock()
 */

static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
{
	struct Qdisc *q;

	if (!qdisc_dev(root))
		return (root->handle == handle ? root : NULL);

	if (!(root->flags & TCQ_F_BUILTIN) &&
	    root->handle == handle)
		return root;

	hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle) {
		if (q->handle == handle)
			return q;
	}
	return NULL;
}

void qdisc_hash_add(struct Qdisc *q, bool invisible)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		ASSERT_RTNL();
		hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
		if (invisible)
			q->flags |= TCQ_F_INVISIBLE;
	}
}
EXPORT_SYMBOL(qdisc_hash_add);

void qdisc_hash_del(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		ASSERT_RTNL();
		hash_del_rcu(&q->hash);
	}
}
EXPORT_SYMBOL(qdisc_hash_del);

struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
	struct Qdisc *q;

	if (!handle)
		return NULL;
	q = qdisc_match_from_root(dev->qdisc, handle);
	if (q)
		goto out;

	if (dev_ingress_queue(dev))
		q = qdisc_match_from_root(
			dev_ingress_queue(dev)->qdisc_sleeping,
			handle);
out:
	return q;
}

static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
	unsigned long cl;
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = p->ops->cl_ops;

	if (cops == NULL)
		return NULL;
	cl = cops->get(p, classid);

	if (cl == 0)
		return NULL;
	leaf = cops->leaf(p, cl);
	cops->put(p, cl);
	return leaf;
}

/* Find queueing discipline by name */

static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
{
	struct Qdisc_ops *q = NULL;

	if (kind) {
		read_lock(&qdisc_mod_lock);
		for (q = qdisc_base; q; q = q->next) {
			if (nla_strcmp(kind, q->id) == 0) {
				if (!try_module_get(q->owner))
					q = NULL;
				break;
			}
		}
		read_unlock(&qdisc_mod_lock);
	}
	return q;
}

/* The linklayer setting was not transferred from older iproute2
 * versions, and the rate table lookup system has been dropped from
 * the kernel. To stay backward compatible with older iproute2 tc
 * utilities, we detect the linklayer setting by detecting whether the
 * rate table was modified.
 *
 * For linklayer ATM table entries, the rate table will be aligned to
 * 48 bytes, thus some table entries will contain the same value. The
 * mpu (min packet unit) is also encoded into the old rate table, thus
 * starting from the mpu we find the low and high table entries for
 * mapping this cell. If these entries contain the same value, then
 * the rate table has been modified for linklayer ATM.
 *
 * This is done by rounding the mpu up to the nearest 48-byte cell/entry,
 * then rounding up to the next cell, calculating the table entry one
 * below that, and comparing the two.
 */
static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
{
	int low       = roundup(r->mpu, 48);
	int high      = roundup(low+1, 48);
	int cell_low  = low >> r->cell_log;
	int cell_high = (high >> r->cell_log) - 1;

	/* rtab is too inaccurate at rates > 100Mbit/s */
	if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
		pr_debug("TC linklayer: Giving up ATM detection\n");
		return TC_LINKLAYER_ETHERNET;
	}

	if ((cell_high > cell_low) && (cell_high < 256)
	    && (rtab[cell_low] == rtab[cell_high])) {
		pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
			 cell_low, cell_high, rtab[cell_high]);
		return TC_LINKLAYER_ATM;
	}
	return TC_LINKLAYER_ETHERNET;
}
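
/* Worked example (illustrative assumption, not from this file): with
 * mpu = 200 and cell_log = 3, low = roundup(200, 48) = 240 and
 * high = roundup(241, 48) = 288, giving cell_low = 240 >> 3 = 30 and
 * cell_high = (288 >> 3) - 1 = 35. If rtab[30] == rtab[35], the table was
 * built for 48-byte ATM cells and TC_LINKLAYER_ATM is returned.
 */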

static struct qdisc_rate_table *qdisc_rtab_list;

struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
					struct nlattr *tab)
{
	struct qdisc_rate_table *rtab;

	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
	    nla_len(tab) != TC_RTAB_SIZE)
		return NULL;

	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
		if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
		    !memcmp(&rtab->data, nla_data(tab), 1024)) {
			rtab->refcnt++;
			return rtab;
		}
	}

	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
	if (rtab) {
		rtab->rate = *r;
		rtab->refcnt = 1;
		memcpy(rtab->data, nla_data(tab), 1024);
		if (r->linklayer == TC_LINKLAYER_UNAWARE)
			r->linklayer = __detect_linklayer(r, rtab->data);
		rtab->next = qdisc_rtab_list;
		qdisc_rtab_list = rtab;
	}
	return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);

void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
	struct qdisc_rate_table *rtab, **rtabp;

	if (!tab || --tab->refcnt)
		return;

	for (rtabp = &qdisc_rtab_list;
	     (rtab = *rtabp) != NULL;
	     rtabp = &rtab->next) {
		if (rtab == tab) {
			*rtabp = rtab->next;
			kfree(rtab);
			return;
		}
	}
}
EXPORT_SYMBOL(qdisc_put_rtab);

static LIST_HEAD(qdisc_stab_list);

static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
	[TCA_STAB_DATA] = { .type = NLA_BINARY },
};

static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
{
	struct nlattr *tb[TCA_STAB_MAX + 1];
	struct qdisc_size_table *stab;
	struct tc_sizespec *s;
	unsigned int tsize = 0;
	u16 *tab = NULL;
	int err;

	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy, NULL);
	if (err < 0)
		return ERR_PTR(err);
	if (!tb[TCA_STAB_BASE])
		return ERR_PTR(-EINVAL);

	s = nla_data(tb[TCA_STAB_BASE]);

	if (s->tsize > 0) {
		if (!tb[TCA_STAB_DATA])
			return ERR_PTR(-EINVAL);
		tab = nla_data(tb[TCA_STAB_DATA]);
		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
	}

	if (tsize != s->tsize || (!tab && tsize > 0))
		return ERR_PTR(-EINVAL);

	list_for_each_entry(stab, &qdisc_stab_list, list) {
		if (memcmp(&stab->szopts, s, sizeof(*s)))
			continue;
		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
			continue;
		stab->refcnt++;
		return stab;
	}

	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	stab->refcnt = 1;
	stab->szopts = *s;
	if (tsize > 0)
		memcpy(stab->data, tab, tsize * sizeof(u16));

	list_add_tail(&stab->list, &qdisc_stab_list);

	return stab;
}

static void stab_kfree_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct qdisc_size_table, rcu));
}

void qdisc_put_stab(struct qdisc_size_table *tab)
{
	if (!tab)
		return;

	if (--tab->refcnt == 0) {
		list_del(&tab->list);
		call_rcu_bh(&tab->rcu, stab_kfree_rcu);
	}
}
EXPORT_SYMBOL(qdisc_put_stab);

static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_STAB);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	return -1;
}

void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab)
{
	int pkt_len, slot;

	pkt_len = skb->len + stab->szopts.overhead;
	if (unlikely(!stab->szopts.tsize))
		goto out;

	slot = pkt_len + stab->szopts.cell_align;
	if (unlikely(slot < 0))
		slot = 0;

	slot >>= stab->szopts.cell_log;
	if (likely(slot < stab->szopts.tsize))
		pkt_len = stab->data[slot];
	else
		pkt_len = stab->data[stab->szopts.tsize - 1] *
				(slot / stab->szopts.tsize) +
				stab->data[slot % stab->szopts.tsize];

	pkt_len <<= stab->szopts.size_log;
out:
	if (unlikely(pkt_len < 1))
		pkt_len = 1;
	qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
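
/* Worked example (illustrative assumption): with overhead = 0,
 * cell_align = -1, cell_log = 6 and size_log = 6, a 64-byte packet maps to
 * slot (64 - 1) >> 6 = 0, so pkt_len becomes stab->data[0] << 6; if
 * stab->data[0] == 1, the packet is billed as a single full 64-byte cell.
 */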

void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
{
	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
		pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
			txt, qdisc->ops->id, qdisc->handle >> 16);
		qdisc->flags |= TCQ_F_WARN_NONWC;
	}
}
EXPORT_SYMBOL(qdisc_warn_nonwc);

static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						 timer);

	rcu_read_lock();
	__netif_schedule(qdisc_root(wd->qdisc));
	rcu_read_unlock();

	return HRTIMER_NORESTART;
}

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
	hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	wd->timer.function = qdisc_watchdog;
	wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init);

void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
{
	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc_root_sleeping(wd->qdisc)->state))
		return;

	if (wd->last_expires == expires)
		return;

	wd->last_expires = expires;
	hrtimer_start(&wd->timer,
		      ns_to_ktime(expires),
		      HRTIMER_MODE_ABS_PINNED);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
	hrtimer_cancel(&wd->timer);
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);

static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head), i;
	struct hlist_head *h;

	if (size <= PAGE_SIZE)
		h = kmalloc(size, GFP_KERNEL);
	else
		h = (struct hlist_head *)
			__get_free_pages(GFP_KERNEL, get_order(size));

	if (h != NULL) {
		for (i = 0; i < n; i++)
			INIT_HLIST_HEAD(&h[i]);
	}
	return h;
}

static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head);

	if (size <= PAGE_SIZE)
		kfree(h);
	else
		free_pages((unsigned long)h, get_order(size));
}

void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *next;
	struct hlist_head *nhash, *ohash;
	unsigned int nsize, nmask, osize;
	unsigned int i, h;

	/* Rehash when load factor exceeds 0.75 */
	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
		return;
	nsize = clhash->hashsize * 2;
	nmask = nsize - 1;
	nhash = qdisc_class_hash_alloc(nsize);
	if (nhash == NULL)
		return;

	ohash = clhash->hash;
	osize = clhash->hashsize;

	sch_tree_lock(sch);
	for (i = 0; i < osize; i++) {
		hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
			h = qdisc_class_hash(cl->classid, nmask);
			hlist_add_head(&cl->hnode, &nhash[h]);
		}
	}
	clhash->hash = nhash;
	clhash->hashsize = nsize;
	clhash->hashmask = nmask;
	sch_tree_unlock(sch);

	qdisc_class_hash_free(ohash, osize);
}
EXPORT_SYMBOL(qdisc_class_hash_grow);

int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
{
	unsigned int size = 4;

	clhash->hash = qdisc_class_hash_alloc(size);
	if (clhash->hash == NULL)
		return -ENOMEM;
	clhash->hashsize = size;
	clhash->hashmask = size - 1;
	clhash->hashelems = 0;
	return 0;
}
EXPORT_SYMBOL(qdisc_class_hash_init);

void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
	qdisc_class_hash_free(clhash->hash, clhash->hashsize);
}
EXPORT_SYMBOL(qdisc_class_hash_destroy);

void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	unsigned int h;

	INIT_HLIST_NODE(&cl->hnode);
	h = qdisc_class_hash(cl->classid, clhash->hashmask);
	hlist_add_head(&cl->hnode, &clhash->hash[h]);
	clhash->hashelems++;
}
EXPORT_SYMBOL(qdisc_class_hash_insert);

void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	hlist_del(&cl->hnode);
	clhash->hashelems--;
}
EXPORT_SYMBOL(qdisc_class_hash_remove);

/* Allocate a unique handle from the space managed by the kernel.
 * Possible range is [8000-FFFF]:0000 (0x8000 values)
 */
static u32 qdisc_alloc_handle(struct net_device *dev)
{
	int i = 0x8000;
	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

	do {
		autohandle += TC_H_MAKE(0x10000U, 0);
		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
			autohandle = TC_H_MAKE(0x80000000U, 0);
		if (!qdisc_lookup(dev, autohandle))
			return autohandle;
		cond_resched();
	} while (--i > 0);

	return 0;
}
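
/* Handle layout note (descriptive, summarising the facts used above): a
 * qdisc handle is a 32-bit value whose upper 16 bits are the major number
 * and lower 16 bits the minor, so TC_H_MAKE(0x80010000U, 0) is what tc
 * prints as "8001:". qdisc_alloc_handle() walks the kernel-managed major
 * range 0x8000-0xffff until it finds an unused handle, returning 0 if all
 * 0x8000 values are taken.
 */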

void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
			       unsigned int len)
{
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	u32 parentid;
	int drops;

	if (n == 0 && len == 0)
		return;
	drops = max_t(int, n, 0);
	rcu_read_lock();
	while ((parentid = sch->parent)) {
		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
			break;

		if (sch->flags & TCQ_F_NOPARENT)
			break;
		/* TODO: perform the search on a per txq basis */
		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
		if (sch == NULL) {
			WARN_ON_ONCE(parentid != TC_H_ROOT);
			break;
		}
		cops = sch->ops->cl_ops;
		if (cops->qlen_notify) {
			cl = cops->get(sch, parentid);
			cops->qlen_notify(sch, cl);
			cops->put(sch, cl);
		}
		sch->q.qlen -= n;
		sch->qstats.backlog -= len;
		__qdisc_qstats_drop(sch, drops);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(qdisc_tree_reduce_backlog);

static void notify_and_destroy(struct net *net, struct sk_buff *skb,
			       struct nlmsghdr *n, u32 clid,
			       struct Qdisc *old, struct Qdisc *new)
{
	if (new || old)
		qdisc_notify(net, skb, n, clid, old, new);

	if (old)
		qdisc_destroy(old);
}

/* Graft qdisc "new" to class "classid" of qdisc "parent" or
 * to device "dev".
 *
 * When appropriate, send a netlink notification using "skb"
 * and "n".
 *
 * On success, destroy the old qdisc.
 */

static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
		       struct Qdisc *new, struct Qdisc *old)
{
	struct Qdisc *q = old;
	struct net *net = dev_net(dev);
	int err = 0;

	if (parent == NULL) {
		unsigned int i, num_q, ingress;

		ingress = 0;
		num_q = dev->num_tx_queues;
		if ((q && q->flags & TCQ_F_INGRESS) ||
		    (new && new->flags & TCQ_F_INGRESS)) {
			num_q = 1;
			ingress = 1;
			if (!dev_ingress_queue(dev))
				return -ENOENT;
		}

		if (dev->flags & IFF_UP)
			dev_deactivate(dev);

		if (new && new->ops->attach)
			goto skip;

		for (i = 0; i < num_q; i++) {
			struct netdev_queue *dev_queue = dev_ingress_queue(dev);

			if (!ingress)
				dev_queue = netdev_get_tx_queue(dev, i);

			old = dev_graft_qdisc(dev_queue, new);
			if (new && i > 0)
				qdisc_refcount_inc(new);

			if (!ingress)
				qdisc_destroy(old);
		}

skip:
		if (!ingress) {
			notify_and_destroy(net, skb, n, classid,
					   dev->qdisc, new);
			if (new && !new->ops->attach)
				qdisc_refcount_inc(new);
			dev->qdisc = new ? : &noop_qdisc;

			if (new && new->ops->attach)
				new->ops->attach(new);
		} else {
			notify_and_destroy(net, skb, n, classid, old, new);
		}

		if (dev->flags & IFF_UP)
			dev_activate(dev);
	} else {
		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;

		err = -EOPNOTSUPP;
		if (cops && cops->graft) {
			unsigned long cl = cops->get(parent, classid);
			if (cl) {
				err = cops->graft(parent, cl, new, &old);
				cops->put(parent, cl);
			} else
				err = -ENOENT;
		}
		if (!err)
			notify_and_destroy(net, skb, n, classid, old, new);
	}
	return err;
}

/* lockdep annotation is needed for ingress; egress gets it only for name */
static struct lock_class_key qdisc_tx_lock;
static struct lock_class_key qdisc_rx_lock;

/*
   Allocate and initialize new qdisc.

   Parameters are passed via opt.
 */

static struct Qdisc *qdisc_create(struct net_device *dev,
				  struct netdev_queue *dev_queue,
				  struct Qdisc *p, u32 parent, u32 handle,
				  struct nlattr **tca, int *errp)
{
	int err;
	struct nlattr *kind = tca[TCA_KIND];
	struct Qdisc *sch;
	struct Qdisc_ops *ops;
	struct qdisc_size_table *stab;

	ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_MODULES
	if (ops == NULL && kind != NULL) {
		char name[IFNAMSIZ];
		if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
			/* We dropped the RTNL semaphore in order to
			 * perform the module load. So, even if we
			 * succeeded in loading the module we have to
			 * tell the caller to replay the request. We
			 * indicate this using -EAGAIN.
			 * We replay the request because the device may
			 * go away in the mean time.
			 */
			rtnl_unlock();
			request_module("sch_%s", name);
			rtnl_lock();
			ops = qdisc_lookup_ops(kind);
			if (ops != NULL) {
				/* We will try qdisc_lookup_ops again,
				 * so don't keep a reference.
				 */
				module_put(ops->owner);
				err = -EAGAIN;
				goto err_out;
			}
		}
	}
#endif

	err = -ENOENT;
	if (ops == NULL)
		goto err_out;

	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch)) {
		err = PTR_ERR(sch);
		goto err_out2;
	}

	sch->parent = parent;

	if (handle == TC_H_INGRESS) {
		sch->flags |= TCQ_F_INGRESS;
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
		lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
	} else {
		if (handle == 0) {
			handle = qdisc_alloc_handle(dev);
			err = -ENOMEM;
			if (handle == 0)
				goto err_out3;
		}
		lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
		if (!netif_is_multiqueue(dev))
			sch->flags |= TCQ_F_ONETXQUEUE;
	}

	sch->handle = handle;

	/* This exists to stay backward compatible with a userspace
	 * loophole that allowed userspace to get the IFF_NO_QUEUE
	 * facility on older kernels by setting tx_queue_len=0 (prior
	 * to qdisc init) and then forgetting to reinit tx_queue_len
	 * before attaching a qdisc again.
	 */
	if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
		netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
	}

	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
		if (qdisc_is_percpu_stats(sch)) {
			sch->cpu_bstats =
				netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
			if (!sch->cpu_bstats)
				goto err_out4;

			sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
			if (!sch->cpu_qstats)
				goto err_out4;
		}

		if (tca[TCA_STAB]) {
			stab = qdisc_get_stab(tca[TCA_STAB]);
			if (IS_ERR(stab)) {
				err = PTR_ERR(stab);
				goto err_out4;
			}
			rcu_assign_pointer(sch->stab, stab);
		}
		if (tca[TCA_RATE]) {
			seqcount_t *running;

			err = -EOPNOTSUPP;
			if (sch->flags & TCQ_F_MQROOT)
				goto err_out4;

			if ((sch->parent != TC_H_ROOT) &&
			    !(sch->flags & TCQ_F_INGRESS) &&
			    (!p || !(p->flags & TCQ_F_MQROOT)))
				running = qdisc_root_sleeping_running(sch);
			else
				running = &sch->running;

			err = gen_new_estimator(&sch->bstats,
						sch->cpu_bstats,
						&sch->rate_est,
						NULL,
						running,
						tca[TCA_RATE]);
			if (err)
				goto err_out4;
		}

		qdisc_hash_add(sch, false);

		return sch;
	}
	/* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
	if (ops->destroy)
		ops->destroy(sch);
err_out3:
	dev_put(dev);
	kfree((char *) sch - sch->padded);
err_out2:
	module_put(ops->owner);
err_out:
	*errp = err;
	return NULL;

err_out4:
	free_percpu(sch->cpu_bstats);
	free_percpu(sch->cpu_qstats);
	/*
	 * Any broken qdiscs that would require a ops->reset() here?
	 * The qdisc was never in action so it shouldn't be necessary.
	 */
	qdisc_put_stab(rtnl_dereference(sch->stab));
	if (ops->destroy)
		ops->destroy(sch);
	goto err_out3;
}

static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
{
	struct qdisc_size_table *ostab, *stab = NULL;
	int err = 0;

	if (tca[TCA_OPTIONS]) {
		if (sch->ops->change == NULL)
			return -EINVAL;
		err = sch->ops->change(sch, tca[TCA_OPTIONS]);
		if (err)
			return err;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB]);
		if (IS_ERR(stab))
			return PTR_ERR(stab);
	}

	ostab = rtnl_dereference(sch->stab);
	rcu_assign_pointer(sch->stab, stab);
	qdisc_put_stab(ostab);

	if (tca[TCA_RATE]) {
		/* NB: ignores errors from replace_estimator
		   because change can't be undone. */
		if (sch->flags & TCQ_F_MQROOT)
			goto out;
		gen_replace_estimator(&sch->bstats,
				      sch->cpu_bstats,
				      &sch->rate_est,
				      NULL,
				      qdisc_root_sleeping_running(sch),
				      tca[TCA_RATE]);
	}
out:
	return 0;
}

struct check_loop_arg {
	struct qdisc_walker	w;
	struct Qdisc		*p;
	int			depth;
};

static int check_loop_fn(struct Qdisc *q, unsigned long cl,
			 struct qdisc_walker *w);

static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
{
	struct check_loop_arg	arg;

	if (q->ops->cl_ops == NULL)
		return 0;

	arg.w.stop = arg.w.skip = arg.w.count = 0;
	arg.w.fn = check_loop_fn;
	arg.depth = depth;
	arg.p = p;
	q->ops->cl_ops->walk(q, &arg.w);
	return arg.w.stop ? -ELOOP : 0;
}

static int
check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
{
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct check_loop_arg *arg = (struct check_loop_arg *)w;

	leaf = cops->leaf(q, cl);
	if (leaf) {
		if (leaf == arg->p || arg->depth > 7)
			return -ELOOP;
		return check_loop(leaf, arg->p, arg->depth + 1);
	}
	return 0;
}

/*
 * Delete/get qdisc.
 */

static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;
	int err;

	if ((n->nlmsg_type != RTM_GETQDISC) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	clid = tcm->tcm_parent;
	if (clid) {
		if (clid != TC_H_ROOT) {
			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}
		if (!q)
			return -ENOENT;

		if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
			return -EINVAL;
	} else {
		q = qdisc_lookup(dev, tcm->tcm_handle);
		if (!q)
			return -ENOENT;
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;

	if (n->nlmsg_type == RTM_DELQDISC) {
		if (!clid)
			return -EINVAL;
		if (q->handle == 0)
			return -ENOENT;
		err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
		if (err != 0)
			return err;
	} else {
		qdisc_notify(net, skb, n, clid, NULL, q);
	}
	return 0;
}

/*
 * Create/change qdisc.
 */

static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm;
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q, *p;
	int err;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	/* Reinit, just in case something touches this. */
	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
	if (err < 0)
		return err;

	tcm = nlmsg_data(n);
	clid = tcm->tcm_parent;
	q = p = NULL;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	if (clid) {
		if (clid != TC_H_ROOT) {
			if (clid != TC_H_INGRESS) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue_create(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}

		/* It may be the default qdisc; ignore it. */
		if (q && q->handle == 0)
			q = NULL;

		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
			if (tcm->tcm_handle) {
				if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
					return -EEXIST;
				if (TC_H_MIN(tcm->tcm_handle))
					return -EINVAL;
				q = qdisc_lookup(dev, tcm->tcm_handle);
				if (!q)
					goto create_n_graft;
				if (n->nlmsg_flags & NLM_F_EXCL)
					return -EEXIST;
				if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
					return -EINVAL;
				if (q == p ||
				    (p && check_loop(q, p, 0)))
					return -ELOOP;
				qdisc_refcount_inc(q);
				goto graft;
			} else {
				if (!q)
					goto create_n_graft;

				/* This magic test requires explanation.
				 *
				 * We know that some child qdisc is already
				 * attached to this parent and we have a choice:
				 * either to change it or to create/graft a new one.
				 *
				 * 1. We are allowed to create/graft only
				 * if both the CREATE and REPLACE flags are set.
				 *
				 * 2. If EXCL is set, the requestor meant that
				 * the qdisc tcm_handle is not expected to
				 * exist, so we choose create/graft too.
				 *
				 * 3. The last case is when no flags are set.
				 * Alas, this is something of a hole in the API;
				 * we cannot decide what to do unambiguously.
				 * For now we select create/graft if the user
				 * gave a KIND that does not match the existing one.
				 */
				if ((n->nlmsg_flags & NLM_F_CREATE) &&
				    (n->nlmsg_flags & NLM_F_REPLACE) &&
				    ((n->nlmsg_flags & NLM_F_EXCL) ||
				     (tca[TCA_KIND] &&
				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
					goto create_n_graft;
			}
		}
	} else {
		if (!tcm->tcm_handle)
			return -EINVAL;
		q = qdisc_lookup(dev, tcm->tcm_handle);
	}

	/* Change qdisc parameters */
	if (q == NULL)
		return -ENOENT;
	if (n->nlmsg_flags & NLM_F_EXCL)
		return -EEXIST;
	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;
	err = qdisc_change(q, tca);
	if (err == 0)
		qdisc_notify(net, skb, n, clid, NULL, q);
	return err;

create_n_graft:
	if (!(n->nlmsg_flags & NLM_F_CREATE))
		return -ENOENT;
	if (clid == TC_H_INGRESS) {
		if (dev_ingress_queue(dev))
			q = qdisc_create(dev, dev_ingress_queue(dev), p,
					 tcm->tcm_parent, tcm->tcm_parent,
					 tca, &err);
		else
			err = -ENOENT;
	} else {
		struct netdev_queue *dev_queue;

		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
		else if (p)
			dev_queue = p->dev_queue;
		else
			dev_queue = netdev_get_tx_queue(dev, 0);

		q = qdisc_create(dev, dev_queue, p,
				 tcm->tcm_parent, tcm->tcm_handle,
				 tca, &err);
	}
	if (q == NULL) {
		if (err == -EAGAIN)
			goto replay;
		return err;
	}

graft:
	err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
	if (err) {
		if (q)
			qdisc_destroy(q);
		return err;
	}

	return 0;
}

static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			 u32 portid, u32 seq, u16 flags, int event)
{
	struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
	struct gnet_stats_queue __percpu *cpu_qstats = NULL;
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	struct qdisc_size_table *stab;
	__u32 qlen;

	cond_resched();
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = refcount_read(&q->refcnt);
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
		goto nla_put_failure;
	qlen = q->q.qlen;

	stab = rtnl_dereference(q->stab);
	if (stab && qdisc_dump_stab(skb, stab) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 NULL, &d, TCA_PAD) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto nla_put_failure;

	if (qdisc_is_percpu_stats(q)) {
		cpu_bstats = q->cpu_bstats;
		cpu_qstats = q->cpu_qstats;
	}

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q),
				  &d, cpu_bstats, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
{
	if (q->flags & TCQ_F_BUILTIN)
		return true;
	if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
		return true;

	return false;
}

static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (old && !tc_qdisc_dump_ignore(old, false)) {
		if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
				  0, RTM_DELQDISC) < 0)
			goto err_out;
	}
	if (new && !tc_qdisc_dump_ignore(new, false)) {
		if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
			goto err_out;
	}

	if (skb->len)
		return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				      n->nlmsg_flags & NLM_F_ECHO);

err_out:
	kfree_skb(skb);
	return -EINVAL;
}

static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
			      struct netlink_callback *cb,
			      int *q_idx_p, int s_q_idx, bool recur,
			      bool dump_invisible)
{
	int ret = 0, q_idx = *q_idx_p;
	struct Qdisc *q;
	int b;

	if (!root)
		return 0;

	q = root;
	if (q_idx < s_q_idx) {
		q_idx++;
	} else {
		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

	/* If dumping singletons, there is no qdisc_dev(root) and the singleton
	 * itself has already been dumped.
	 *
	 * If we've already dumped the top-level (ingress) qdisc above and the
	 * global qdisc hashtable, we don't want to hit it again.
	 */
	if (!qdisc_dev(root) || !recur)
		goto out;

	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
		if (q_idx < s_q_idx) {
			q_idx++;
			continue;
		}
		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

out:
	*q_idx_p = q_idx;
	return ret;
done:
	ret = -1;
	goto out;
}

static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int idx, q_idx;
	int s_idx, s_q_idx;
	struct net_device *dev;
	const struct nlmsghdr *nlh = cb->nlh;
	struct tcmsg *tcm = nlmsg_data(nlh);
	struct nlattr *tca[TCA_MAX + 1];
	int err;

	s_idx = cb->args[0];
	s_q_idx = q_idx = cb->args[1];

	idx = 0;
	ASSERT_RTNL();

	err = nlmsg_parse(nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
	if (err < 0)
		return err;

	for_each_netdev(net, dev) {
		struct netdev_queue *dev_queue;

		if (idx < s_idx)
			goto cont;
		if (idx > s_idx)
			s_q_idx = 0;
		q_idx = 0;

		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
				       true, tca[TCA_DUMP_INVISIBLE]) < 0)
			goto done;

		dev_queue = dev_ingress_queue(dev);
		if (dev_queue &&
		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
				       &q_idx, s_q_idx, false,
				       tca[TCA_DUMP_INVISIBLE]) < 0)
			goto done;

cont:
		idx++;
	}

done:
	cb->args[0] = idx;
	cb->args[1] = q_idx;

	return skb->len;
}



/************************************************
 *	Traffic classes manipulation.		*
 ************************************************/



static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q = NULL;
	const struct Qdisc_class_ops *cops;
	unsigned long cl = 0;
	unsigned long new_cl;
	u32 portid;
	u32 clid;
	u32 qid;
	int err;

	if ((n->nlmsg_type != RTM_GETTCLASS) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	/*
	   parent == TC_H_UNSPEC - unspecified parent.
	   parent == TC_H_ROOT   - class is root, which has no parent.
	   parent == X:0         - parent is root class.
	   parent == X:Y         - parent is a node in hierarchy.
	   parent == 0:Y         - parent is X:Y, where X:0 is qdisc.

	   handle == 0:0         - generate handle from kernel pool.
	   handle == 0:Y         - class is X:Y, where X:0 is qdisc.
	   handle == X:Y         - clear.
	   handle == X:0         - root class.
	 */
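
	/* Example mapping (illustrative assumption): for
	 * "tc class add dev eth0 parent 1: classid 1:10 ...", userspace
	 * sends tcm_parent = 0x00010000 ("1:0") and tcm_handle =
	 * 0x0001000a ("1:10"), so qid below resolves to the 1: qdisc.
	 */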

	/* Step 1. Determine qdisc handle X:0 */

	portid = tcm->tcm_parent;
	clid = tcm->tcm_handle;
	qid = TC_H_MAJ(clid);

	if (portid != TC_H_ROOT) {
		u32 qid1 = TC_H_MAJ(portid);

		if (qid && qid1) {
			/* If both majors are known, they must be identical. */
			if (qid != qid1)
				return -EINVAL;
		} else if (qid1) {
			qid = qid1;
		} else if (qid == 0)
			qid = dev->qdisc->handle;

		/* Now qid is a genuine qdisc handle consistent with both
		 * parent and child.
		 *
		 * TC_H_MAJ(portid) still may be unspecified, complete it now.
		 */
		if (portid)
			portid = TC_H_MAKE(qid, portid);
	} else {
		if (qid == 0)
			qid = dev->qdisc->handle;
	}

	/* OK. Locate qdisc */
	q = qdisc_lookup(dev, qid);
	if (!q)
		return -ENOENT;

	/* And check that it supports classes */
	cops = q->ops->cl_ops;
	if (cops == NULL)
		return -EINVAL;

	/* Now try to get class */
	if (clid == 0) {
		if (portid == TC_H_ROOT)
			clid = qid;
	} else
		clid = TC_H_MAKE(qid, clid);

	if (clid)
		cl = cops->get(q, clid);

	if (cl == 0) {
		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTCLASS ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			goto out;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTCLASS:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL)
				goto out;
			break;
		case RTM_DELTCLASS:
			err = -EOPNOTSUPP;
			if (cops->delete)
				err = cops->delete(q, cl);
			if (err == 0)
				tclass_notify(net, skb, n, q, cl,
					      RTM_DELTCLASS);
			goto out;
		case RTM_GETTCLASS:
			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
			goto out;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	new_cl = cl;
	err = -EOPNOTSUPP;
	if (cops->change)
		err = cops->change(q, clid, portid, tca, &new_cl);
	if (err == 0)
		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);

out:
	if (cl)
		cops->put(q, cl);

	return err;
}


static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
			  unsigned long cl,
			  u32 portid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

	cond_resched();
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = q->handle;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = 0;
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 NULL, &d, TCA_PAD) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

struct qdisc_dump_args {
	struct qdisc_walker	w;
	struct sk_buff		*skb;
	struct netlink_callback	*cb;
};

static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
			    struct qdisc_walker *arg)
{
	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			      RTM_NEWTCLASS);
}

static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
				struct tcmsg *tcm, struct netlink_callback *cb,
				int *t_p, int s_t)
{
	struct qdisc_dump_args arg;

	if (tc_qdisc_dump_ignore(q, false) ||
	    *t_p < s_t || !q->ops->cl_ops ||
	    (tcm->tcm_parent &&
	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
		(*t_p)++;
		return 0;
	}
	if (*t_p > s_t)
		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
	arg.w.fn = qdisc_class_dump;
	arg.skb = skb;
	arg.cb = cb;
	arg.w.stop = 0;
	arg.w.skip = cb->args[1];
	arg.w.count = 0;
	q->ops->cl_ops->walk(q, &arg.w);
	cb->args[1] = arg.w.count;
	if (arg.w.stop)
		return -1;
	(*t_p)++;
	return 0;
}

static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
			       struct tcmsg *tcm, struct netlink_callback *cb,
			       int *t_p, int s_t)
{
	struct Qdisc *q;
	int b;

	if (!root)
		return 0;

	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
		return -1;

	if (!qdisc_dev(root))
		return 0;

	if (tcm->tcm_parent) {
		q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
		if (q && tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
		return 0;
	}
	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
	}

	return 0;
}

static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct net_device *dev;
	int t, s_t;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return 0;
	dev = dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return 0;

	s_t = cb->args[0];
	t = 0;

	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
		goto done;

	dev_queue = dev_ingress_queue(dev);
	if (dev_queue &&
	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
				&t, s_t) < 0)
		goto done;

done:
	cb->args[0] = t;

	dev_put(dev);
	return skb->len;
}

#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "%08x %08x %08x %08x\n",
		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
		   1000000,
		   (u32)NSEC_PER_SEC / hrtimer_resolution);

	return 0;
}

static int psched_open(struct inode *inode, struct file *file)
{
	return single_open(file, psched_show, NULL);
}

static const struct file_operations psched_fops = {
	.owner = THIS_MODULE,
	.open = psched_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __net_init psched_net_init(struct net *net)
{
	struct proc_dir_entry *e;

	e = proc_create("psched", 0, net->proc_net, &psched_fops);
	if (e == NULL)
		return -ENOMEM;

	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
	remove_proc_entry("psched", net->proc_net);
}
#else
static int __net_init psched_net_init(struct net *net)
{
	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
}
#endif

static struct pernet_operations psched_net_ops = {
	.init = psched_net_init,
	.exit = psched_net_exit,
};

static int __init pktsched_init(void)
{
	int err;

	err = register_pernet_subsys(&psched_net_ops);
	if (err) {
		pr_err("pktsched_init: "
		       "cannot initialize per netns operations\n");
		return err;
	}

	register_qdisc(&pfifo_fast_ops);
	register_qdisc(&pfifo_qdisc_ops);
	register_qdisc(&bfifo_qdisc_ops);
	register_qdisc(&pfifo_head_drop_qdisc_ops);
	register_qdisc(&mq_qdisc_ops);
	register_qdisc(&noqueue_qdisc_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
		      NULL);
	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
		      NULL);

	return 0;
}

subsys_initcall(pktsched_init);