1 /*
2 * net/sched/sch_api.c Packet scheduler API.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 *
11 * Fixes:
12 *
13 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
14 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
15 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
16 */
17
18 #include <linux/module.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/string.h>
22 #include <linux/errno.h>
23 #include <linux/skbuff.h>
24 #include <linux/init.h>
25 #include <linux/proc_fs.h>
26 #include <linux/seq_file.h>
27 #include <linux/kmod.h>
28 #include <linux/list.h>
29 #include <linux/hrtimer.h>
30 #include <linux/lockdep.h>
31 #include <linux/slab.h>
32 #include <linux/hashtable.h>
33
34 #include <net/net_namespace.h>
35 #include <net/sock.h>
36 #include <net/netlink.h>
37 #include <net/pkt_sched.h>
38 #include <net/pkt_cls.h>
39
40 /*
41
42 Short review.
43 -------------
44
45 This file consists of two interrelated parts:
46
47 1. queueing disciplines manager frontend.
48 2. traffic classes manager frontend.
49
50 Generally, a queueing discipline ("qdisc") is a black box
51 which is able to enqueue packets and to dequeue them (when
52 the device is ready to send something) in an order and at times
53 determined by the algorithm hidden inside it.
54
55 Qdiscs are divided into two categories:
56 - "queues", which have no internal structure visible from outside.
57 - "schedulers", which split packets into "traffic classes",
58 using "packet classifiers" (see cls_api.c).
59
60 In turn, classes may have child qdiscs (as a rule, queues)
61 attached to them, and so on.
62
63 The goal of the routines in this file is to translate the
64 information supplied by the user in the form of handles
65 into a form more intelligible to the kernel, to perform sanity
66 checks and the parts of the work that are common to all qdiscs,
67 and to provide rtnetlink notifications.
68
69 All real intelligent work is done inside qdisc modules.
70
71
72
73 Every discipline has two major routines: enqueue and dequeue.
74
75 ---dequeue
76
77 dequeue usually returns an skb to send. It is allowed to return NULL,
78 but that does not mean the queue is empty; it just means that the
79 discipline does not want to send anything at this time.
80 The queue is really empty only if q->q.qlen == 0.
81 For complicated disciplines with multiple queues, q->q is not the
82 real packet queue, but q->q.qlen must nevertheless be valid.
83
84 ---enqueue
85
86 enqueue returns 0 if the packet was enqueued successfully.
87 If a packet (this one or another one) was dropped, it returns
88 a non-zero error code.
89 NET_XMIT_DROP - this packet was dropped.
90 Expected action: do not back off, but wait until the queue clears.
91 NET_XMIT_CN - this packet was probably enqueued, but another one was dropped.
92 Expected action: back off or ignore.
93
94 Auxiliary routines:
95
96 ---peek
97
98 like dequeue but without removing a packet from the queue
99
100 ---reset
101
102 returns the qdisc to its initial state: purges all buffers, clears all
103 timers, counters (except for statistics), etc.
104
105 ---init
106
107 initializes a newly created qdisc.
108
109 ---destroy
110
111 destroys resources allocated by init and during the lifetime of the qdisc.
112
113 ---change
114
115 changes qdisc parameters.
116 */
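/*
 * Illustrative sketch, not part of this file: roughly how a minimal
 * FIFO-style module plugs into the interface described above and into
 * register_qdisc() below.  The example_fifo_* names are hypothetical;
 * qdisc_enqueue_tail(), qdisc_dequeue_head(), qdisc_peek_head() and
 * qdisc_drop() are the generic helpers used by the in-tree fifo qdiscs.
 *
 *	static int example_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *					struct sk_buff **to_free)
 *	{
 *		if (likely(sch->q.qlen < sch->limit))
 *			return qdisc_enqueue_tail(skb, sch);
 *		return qdisc_drop(skb, sch, to_free);
 *	}
 *
 *	static struct Qdisc_ops example_fifo_qdisc_ops __read_mostly = {
 *		.id		= "example_fifo",
 *		.priv_size	= 0,
 *		.enqueue	= example_fifo_enqueue,
 *		.dequeue	= qdisc_dequeue_head,
 *		.peek		= qdisc_peek_head,
 *		.owner		= THIS_MODULE,
 *	};
 *
 * A real module would also provide .init/.change to set sch->limit, and
 * its module_init()/module_exit() would call
 * register_qdisc(&example_fifo_qdisc_ops) and
 * unregister_qdisc(&example_fifo_qdisc_ops).
 */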
117
118 /* Protects list of registered TC modules. It is pure SMP lock. */
119 static DEFINE_RWLOCK(qdisc_mod_lock);
120
121
122 /************************************************
123 * Queueing disciplines manipulation. *
124 ************************************************/
125
126
127 /* The list of all installed queueing disciplines. */
128
129 static struct Qdisc_ops *qdisc_base;
130
131 /* Register/unregister queueing discipline */
132
133 int register_qdisc(struct Qdisc_ops *qops)
134 {
135 struct Qdisc_ops *q, **qp;
136 int rc = -EEXIST;
137
138 write_lock(&qdisc_mod_lock);
139 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
140 if (!strcmp(qops->id, q->id))
141 goto out;
142
143 if (qops->enqueue == NULL)
144 qops->enqueue = noop_qdisc_ops.enqueue;
145 if (qops->peek == NULL) {
146 if (qops->dequeue == NULL)
147 qops->peek = noop_qdisc_ops.peek;
148 else
149 goto out_einval;
150 }
151 if (qops->dequeue == NULL)
152 qops->dequeue = noop_qdisc_ops.dequeue;
153
154 if (qops->cl_ops) {
155 const struct Qdisc_class_ops *cops = qops->cl_ops;
156
157 if (!(cops->find && cops->walk && cops->leaf))
158 goto out_einval;
159
160 if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf))
161 goto out_einval;
162 }
163
164 qops->next = NULL;
165 *qp = qops;
166 rc = 0;
167 out:
168 write_unlock(&qdisc_mod_lock);
169 return rc;
170
171 out_einval:
172 rc = -EINVAL;
173 goto out;
174 }
175 EXPORT_SYMBOL(register_qdisc);
176
177 int unregister_qdisc(struct Qdisc_ops *qops)
178 {
179 struct Qdisc_ops *q, **qp;
180 int err = -ENOENT;
181
182 write_lock(&qdisc_mod_lock);
183 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
184 if (q == qops)
185 break;
186 if (q) {
187 *qp = q->next;
188 q->next = NULL;
189 err = 0;
190 }
191 write_unlock(&qdisc_mod_lock);
192 return err;
193 }
194 EXPORT_SYMBOL(unregister_qdisc);
195
196 /* Get default qdisc if not otherwise specified */
197 void qdisc_get_default(char *name, size_t len)
198 {
199 read_lock(&qdisc_mod_lock);
200 strlcpy(name, default_qdisc_ops->id, len);
201 read_unlock(&qdisc_mod_lock);
202 }
203
204 static struct Qdisc_ops *qdisc_lookup_default(const char *name)
205 {
206 struct Qdisc_ops *q = NULL;
207
208 for (q = qdisc_base; q; q = q->next) {
209 if (!strcmp(name, q->id)) {
210 if (!try_module_get(q->owner))
211 q = NULL;
212 break;
213 }
214 }
215
216 return q;
217 }
218
219 /* Set new default qdisc to use */
220 int qdisc_set_default(const char *name)
221 {
222 const struct Qdisc_ops *ops;
223
224 if (!capable(CAP_NET_ADMIN))
225 return -EPERM;
226
227 write_lock(&qdisc_mod_lock);
228 ops = qdisc_lookup_default(name);
229 if (!ops) {
230 /* Not found, drop lock and try to load module */
231 write_unlock(&qdisc_mod_lock);
232 request_module("sch_%s", name);
233 write_lock(&qdisc_mod_lock);
234
235 ops = qdisc_lookup_default(name);
236 }
237
238 if (ops) {
239 /* Set new default */
240 module_put(default_qdisc_ops->owner);
241 default_qdisc_ops = ops;
242 }
243 write_unlock(&qdisc_mod_lock);
244
245 return ops ? 0 : -ENOENT;
246 }
247
248 #ifdef CONFIG_NET_SCH_DEFAULT
249 /* Set default value from kernel config */
250 static int __init sch_default_qdisc(void)
251 {
252 return qdisc_set_default(CONFIG_DEFAULT_NET_SCH);
253 }
254 late_initcall(sch_default_qdisc);
255 #endif
256
257 /* We know the handle. Find the qdisc among all qdiscs attached to the
258 * device (root qdisc, all its children, children of children, etc.)
259 * Note: the caller either holds the rtnl lock or rcu_read_lock()
260 */
261
262 static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
263 {
264 struct Qdisc *q;
265
266 if (!qdisc_dev(root))
267 return (root->handle == handle ? root : NULL);
268
269 if (!(root->flags & TCQ_F_BUILTIN) &&
270 root->handle == handle)
271 return root;
272
273 hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle) {
274 if (q->handle == handle)
275 return q;
276 }
277 return NULL;
278 }
279
280 void qdisc_hash_add(struct Qdisc *q, bool invisible)
281 {
282 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
283 ASSERT_RTNL();
284 hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
285 if (invisible)
286 q->flags |= TCQ_F_INVISIBLE;
287 }
288 }
289 EXPORT_SYMBOL(qdisc_hash_add);
290
291 void qdisc_hash_del(struct Qdisc *q)
292 {
293 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
294 ASSERT_RTNL();
295 hash_del_rcu(&q->hash);
296 }
297 }
298 EXPORT_SYMBOL(qdisc_hash_del);
299
300 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
301 {
302 struct Qdisc *q;
303
304 if (!handle)
305 return NULL;
306 q = qdisc_match_from_root(dev->qdisc, handle);
307 if (q)
308 goto out;
309
310 if (dev_ingress_queue(dev))
311 q = qdisc_match_from_root(
312 dev_ingress_queue(dev)->qdisc_sleeping,
313 handle);
314 out:
315 return q;
316 }
317
318 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
319 {
320 unsigned long cl;
321 struct Qdisc *leaf;
322 const struct Qdisc_class_ops *cops = p->ops->cl_ops;
323
324 if (cops == NULL)
325 return NULL;
326 cl = cops->find(p, classid);
327
328 if (cl == 0)
329 return NULL;
330 leaf = cops->leaf(p, cl);
331 return leaf;
332 }
333
334 /* Find queueing discipline by name */
335
336 static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
337 {
338 struct Qdisc_ops *q = NULL;
339
340 if (kind) {
341 read_lock(&qdisc_mod_lock);
342 for (q = qdisc_base; q; q = q->next) {
343 if (nla_strcmp(kind, q->id) == 0) {
344 if (!try_module_get(q->owner))
345 q = NULL;
346 break;
347 }
348 }
349 read_unlock(&qdisc_mod_lock);
350 }
351 return q;
352 }
353
354 /* The linklayer setting was not transferred from iproute2 in older
355 * versions, and the rate table lookup system has been dropped from
356 * the kernel. To stay backward compatible with older iproute2 tc
357 * utils, we detect the linklayer setting by detecting whether the rate
358 * table was modified.
359 *
360 * For linklayer ATM table entries, the rate table will be aligned to
361 * 48 bytes, thus some table entries will contain the same value. The
362 * mpu (min packet unit) is also encoded into the old rate table, thus
363 * starting from the mpu, we find the low and high table entries for
364 * mapping this cell. If these entries contain the same value, then
365 * the rate table has been modified for linklayer ATM.
366 *
367 * This is done by rounding the mpu up to the nearest 48-byte cell/entry,
368 * then rounding up to the next cell, calculating the table entry one
369 * below, and comparing the two.
370 */
371 static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
372 {
373 int low = roundup(r->mpu, 48);
374 int high = roundup(low+1, 48);
375 int cell_low = low >> r->cell_log;
376 int cell_high = (high >> r->cell_log) - 1;
377
378 /* rtab is too inaccurate at rates > 100Mbit/s */
379 if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
380 pr_debug("TC linklayer: Giving up ATM detection\n");
381 return TC_LINKLAYER_ETHERNET;
382 }
383
384 if ((cell_high > cell_low) && (cell_high < 256)
385 && (rtab[cell_low] == rtab[cell_high])) {
386 pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
387 cell_low, cell_high, rtab[cell_high]);
388 return TC_LINKLAYER_ATM;
389 }
390 return TC_LINKLAYER_ETHERNET;
391 }
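/*
 * Illustrative worked example with hypothetical parameters: for mpu = 0
 * and cell_log = 3, the code above computes low = roundup(0, 48) = 0,
 * high = roundup(1, 48) = 48, cell_low = 0 >> 3 = 0 and
 * cell_high = (48 >> 3) - 1 = 5.  If rtab[0] == rtab[5], packet sizes
 * from 1 up to 48 bytes all map to the same transmission cost, i.e. the
 * 48-byte cell alignment that identifies a linklayer ATM table.
 */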
392
393 static struct qdisc_rate_table *qdisc_rtab_list;
394
395 struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
396 struct nlattr *tab)
397 {
398 struct qdisc_rate_table *rtab;
399
400 if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
401 nla_len(tab) != TC_RTAB_SIZE)
402 return NULL;
403
404 for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
405 if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
406 !memcmp(&rtab->data, nla_data(tab), 1024)) {
407 rtab->refcnt++;
408 return rtab;
409 }
410 }
411
412 rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
413 if (rtab) {
414 rtab->rate = *r;
415 rtab->refcnt = 1;
416 memcpy(rtab->data, nla_data(tab), 1024);
417 if (r->linklayer == TC_LINKLAYER_UNAWARE)
418 r->linklayer = __detect_linklayer(r, rtab->data);
419 rtab->next = qdisc_rtab_list;
420 qdisc_rtab_list = rtab;
421 }
422 return rtab;
423 }
424 EXPORT_SYMBOL(qdisc_get_rtab);
425
426 void qdisc_put_rtab(struct qdisc_rate_table *tab)
427 {
428 struct qdisc_rate_table *rtab, **rtabp;
429
430 if (!tab || --tab->refcnt)
431 return;
432
433 for (rtabp = &qdisc_rtab_list;
434 (rtab = *rtabp) != NULL;
435 rtabp = &rtab->next) {
436 if (rtab == tab) {
437 *rtabp = rtab->next;
438 kfree(rtab);
439 return;
440 }
441 }
442 }
443 EXPORT_SYMBOL(qdisc_put_rtab);
444
445 static LIST_HEAD(qdisc_stab_list);
446
447 static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
448 [TCA_STAB_BASE] = { .len = sizeof(struct tc_sizespec) },
449 [TCA_STAB_DATA] = { .type = NLA_BINARY },
450 };
451
452 static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
453 {
454 struct nlattr *tb[TCA_STAB_MAX + 1];
455 struct qdisc_size_table *stab;
456 struct tc_sizespec *s;
457 unsigned int tsize = 0;
458 u16 *tab = NULL;
459 int err;
460
461 err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy, NULL);
462 if (err < 0)
463 return ERR_PTR(err);
464 if (!tb[TCA_STAB_BASE])
465 return ERR_PTR(-EINVAL);
466
467 s = nla_data(tb[TCA_STAB_BASE]);
468
469 if (s->tsize > 0) {
470 if (!tb[TCA_STAB_DATA])
471 return ERR_PTR(-EINVAL);
472 tab = nla_data(tb[TCA_STAB_DATA]);
473 tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
474 }
475
476 if (tsize != s->tsize || (!tab && tsize > 0))
477 return ERR_PTR(-EINVAL);
478
479 list_for_each_entry(stab, &qdisc_stab_list, list) {
480 if (memcmp(&stab->szopts, s, sizeof(*s)))
481 continue;
482 if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
483 continue;
484 stab->refcnt++;
485 return stab;
486 }
487
488 stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
489 if (!stab)
490 return ERR_PTR(-ENOMEM);
491
492 stab->refcnt = 1;
493 stab->szopts = *s;
494 if (tsize > 0)
495 memcpy(stab->data, tab, tsize * sizeof(u16));
496
497 list_add_tail(&stab->list, &qdisc_stab_list);
498
499 return stab;
500 }
501
502 static void stab_kfree_rcu(struct rcu_head *head)
503 {
504 kfree(container_of(head, struct qdisc_size_table, rcu));
505 }
506
507 void qdisc_put_stab(struct qdisc_size_table *tab)
508 {
509 if (!tab)
510 return;
511
512 if (--tab->refcnt == 0) {
513 list_del(&tab->list);
514 call_rcu_bh(&tab->rcu, stab_kfree_rcu);
515 }
516 }
517 EXPORT_SYMBOL(qdisc_put_stab);
518
519 static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
520 {
521 struct nlattr *nest;
522
523 nest = nla_nest_start(skb, TCA_STAB);
524 if (nest == NULL)
525 goto nla_put_failure;
526 if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
527 goto nla_put_failure;
528 nla_nest_end(skb, nest);
529
530 return skb->len;
531
532 nla_put_failure:
533 return -1;
534 }
535
536 void __qdisc_calculate_pkt_len(struct sk_buff *skb,
537 const struct qdisc_size_table *stab)
538 {
539 int pkt_len, slot;
540
541 pkt_len = skb->len + stab->szopts.overhead;
542 if (unlikely(!stab->szopts.tsize))
543 goto out;
544
545 slot = pkt_len + stab->szopts.cell_align;
546 if (unlikely(slot < 0))
547 slot = 0;
548
549 slot >>= stab->szopts.cell_log;
550 if (likely(slot < stab->szopts.tsize))
551 pkt_len = stab->data[slot];
552 else
553 pkt_len = stab->data[stab->szopts.tsize - 1] *
554 (slot / stab->szopts.tsize) +
555 stab->data[slot % stab->szopts.tsize];
556
557 pkt_len <<= stab->szopts.size_log;
558 out:
559 if (unlikely(pkt_len < 1))
560 pkt_len = 1;
561 qdisc_skb_cb(skb)->pkt_len = pkt_len;
562 }
563 EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
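/*
 * Illustrative worked example with a hypothetical size table: for
 * szopts = { .overhead = 24, .cell_align = 0, .cell_log = 6,
 * .size_log = 0, .tsize = 512 } and skb->len = 1000, the code above
 * computes pkt_len = 1000 + 24 = 1024 and slot = 1024 >> 6 = 16, so the
 * packet is accounted as stab->data[16] bytes rather than its wire
 * length.  Slots beyond the end of the table are extrapolated from the
 * last entry.
 */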
564
565 void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
566 {
567 if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
568 pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
569 txt, qdisc->ops->id, qdisc->handle >> 16);
570 qdisc->flags |= TCQ_F_WARN_NONWC;
571 }
572 }
573 EXPORT_SYMBOL(qdisc_warn_nonwc);
574
575 static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
576 {
577 struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
578 timer);
579
580 rcu_read_lock();
581 __netif_schedule(qdisc_root(wd->qdisc));
582 rcu_read_unlock();
583
584 return HRTIMER_NORESTART;
585 }
586
587 void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
588 {
589 hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
590 wd->timer.function = qdisc_watchdog;
591 wd->qdisc = qdisc;
592 }
593 EXPORT_SYMBOL(qdisc_watchdog_init);
594
595 void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
596 {
597 if (test_bit(__QDISC_STATE_DEACTIVATED,
598 &qdisc_root_sleeping(wd->qdisc)->state))
599 return;
600
601 if (wd->last_expires == expires)
602 return;
603
604 wd->last_expires = expires;
605 hrtimer_start(&wd->timer,
606 ns_to_ktime(expires),
607 HRTIMER_MODE_ABS_PINNED);
608 }
609 EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);
610
611 void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
612 {
613 hrtimer_cancel(&wd->timer);
614 }
615 EXPORT_SYMBOL(qdisc_watchdog_cancel);
616
617 static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
618 {
619 struct hlist_head *h;
620 unsigned int i;
621
622 h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL);
623
624 if (h != NULL) {
625 for (i = 0; i < n; i++)
626 INIT_HLIST_HEAD(&h[i]);
627 }
628 return h;
629 }
630
631 void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
632 {
633 struct Qdisc_class_common *cl;
634 struct hlist_node *next;
635 struct hlist_head *nhash, *ohash;
636 unsigned int nsize, nmask, osize;
637 unsigned int i, h;
638
639 /* Rehash when load factor exceeds 0.75 */
640 if (clhash->hashelems * 4 <= clhash->hashsize * 3)
641 return;
642 nsize = clhash->hashsize * 2;
643 nmask = nsize - 1;
644 nhash = qdisc_class_hash_alloc(nsize);
645 if (nhash == NULL)
646 return;
647
648 ohash = clhash->hash;
649 osize = clhash->hashsize;
650
651 sch_tree_lock(sch);
652 for (i = 0; i < osize; i++) {
653 hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
654 h = qdisc_class_hash(cl->classid, nmask);
655 hlist_add_head(&cl->hnode, &nhash[h]);
656 }
657 }
658 clhash->hash = nhash;
659 clhash->hashsize = nsize;
660 clhash->hashmask = nmask;
661 sch_tree_unlock(sch);
662
663 kvfree(ohash);
664 }
665 EXPORT_SYMBOL(qdisc_class_hash_grow);
666
667 int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
668 {
669 unsigned int size = 4;
670
671 clhash->hash = qdisc_class_hash_alloc(size);
672 if (clhash->hash == NULL)
673 return -ENOMEM;
674 clhash->hashsize = size;
675 clhash->hashmask = size - 1;
676 clhash->hashelems = 0;
677 return 0;
678 }
679 EXPORT_SYMBOL(qdisc_class_hash_init);
680
681 void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
682 {
683 kvfree(clhash->hash);
684 }
685 EXPORT_SYMBOL(qdisc_class_hash_destroy);
686
687 void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
688 struct Qdisc_class_common *cl)
689 {
690 unsigned int h;
691
692 INIT_HLIST_NODE(&cl->hnode);
693 h = qdisc_class_hash(cl->classid, clhash->hashmask);
694 hlist_add_head(&cl->hnode, &clhash->hash[h]);
695 clhash->hashelems++;
696 }
697 EXPORT_SYMBOL(qdisc_class_hash_insert);
698
699 void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
700 struct Qdisc_class_common *cl)
701 {
702 hlist_del(&cl->hnode);
703 clhash->hashelems--;
704 }
705 EXPORT_SYMBOL(qdisc_class_hash_remove);
706
707 /* Allocate a unique handle from the space managed by the kernel.
708 * Possible range is [8000-FFFF]:0000 (0x8000 values)
709 */
710 static u32 qdisc_alloc_handle(struct net_device *dev)
711 {
712 int i = 0x8000;
713 static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
714
715 do {
716 autohandle += TC_H_MAKE(0x10000U, 0);
717 if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
718 autohandle = TC_H_MAKE(0x80000000U, 0);
719 if (!qdisc_lookup(dev, autohandle))
720 return autohandle;
721 cond_resched();
722 } while (--i > 0);
723
724 return 0;
725 }
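/*
 * For illustration: autohandle starts at 8000:0000 and the major number
 * is bumped before each lookup, so the first automatically allocated
 * handles are 8001:0000, 8002:0000, ..., and the allocator gives up
 * (returns 0) after 0x8000 unsuccessful attempts.
 */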
726
727 void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
728 unsigned int len)
729 {
730 const struct Qdisc_class_ops *cops;
731 unsigned long cl;
732 u32 parentid;
733 bool notify;
734 int drops;
735
736 if (n == 0 && len == 0)
737 return;
738 drops = max_t(int, n, 0);
739 rcu_read_lock();
740 while ((parentid = sch->parent)) {
741 if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
742 break;
743
744 if (sch->flags & TCQ_F_NOPARENT)
745 break;
746 /* Notify parent qdisc only if child qdisc becomes empty.
747 *
748 * If child was empty even before update then backlog
749 * counter is screwed and we skip notification because
750 * parent class is already passive.
751 */
752 notify = !sch->q.qlen && !WARN_ON_ONCE(!n);
753 /* TODO: perform the search on a per txq basis */
754 sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
755 if (sch == NULL) {
756 WARN_ON_ONCE(parentid != TC_H_ROOT);
757 break;
758 }
759 cops = sch->ops->cl_ops;
760 if (notify && cops->qlen_notify) {
761 cl = cops->find(sch, parentid);
762 cops->qlen_notify(sch, cl);
763 }
764 sch->q.qlen -= n;
765 sch->qstats.backlog -= len;
766 __qdisc_qstats_drop(sch, drops);
767 }
768 rcu_read_unlock();
769 }
770 EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
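/*
 * Illustrative usage sketch, not taken from this file: a qdisc whose
 * limit was just reduced typically records its old counters, drops the
 * excess packets, and then propagates the difference up the tree:
 *
 *	unsigned int oldqlen = sch->q.qlen;
 *	unsigned int oldbacklog = sch->qstats.backlog;
 *
 *	... drop packets that no longer fit the new limit ...
 *
 *	qdisc_tree_reduce_backlog(sch, oldqlen - sch->q.qlen,
 *				  oldbacklog - sch->qstats.backlog);
 */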
771
772 static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
773 u32 portid, u32 seq, u16 flags, int event)
774 {
775 struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
776 struct gnet_stats_queue __percpu *cpu_qstats = NULL;
777 struct tcmsg *tcm;
778 struct nlmsghdr *nlh;
779 unsigned char *b = skb_tail_pointer(skb);
780 struct gnet_dump d;
781 struct qdisc_size_table *stab;
782 __u32 qlen;
783
784 cond_resched();
785 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
786 if (!nlh)
787 goto out_nlmsg_trim;
788 tcm = nlmsg_data(nlh);
789 tcm->tcm_family = AF_UNSPEC;
790 tcm->tcm__pad1 = 0;
791 tcm->tcm__pad2 = 0;
792 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
793 tcm->tcm_parent = clid;
794 tcm->tcm_handle = q->handle;
795 tcm->tcm_info = refcount_read(&q->refcnt);
796 if (nla_put_string(skb, TCA_KIND, q->ops->id))
797 goto nla_put_failure;
798 if (q->ops->dump && q->ops->dump(q, skb) < 0)
799 goto nla_put_failure;
800
801 qlen = qdisc_qlen_sum(q);
802
803 stab = rtnl_dereference(q->stab);
804 if (stab && qdisc_dump_stab(skb, stab) < 0)
805 goto nla_put_failure;
806
807 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
808 NULL, &d, TCA_PAD) < 0)
809 goto nla_put_failure;
810
811 if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
812 goto nla_put_failure;
813
814 if (qdisc_is_percpu_stats(q)) {
815 cpu_bstats = q->cpu_bstats;
816 cpu_qstats = q->cpu_qstats;
817 }
818
819 if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q),
820 &d, cpu_bstats, &q->bstats) < 0 ||
821 gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
822 gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
823 goto nla_put_failure;
824
825 if (gnet_stats_finish_copy(&d) < 0)
826 goto nla_put_failure;
827
828 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
829 return skb->len;
830
831 out_nlmsg_trim:
832 nla_put_failure:
833 nlmsg_trim(skb, b);
834 return -1;
835 }
836
837 static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
838 {
839 if (q->flags & TCQ_F_BUILTIN)
840 return true;
841 if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
842 return true;
843
844 return false;
845 }
846
847 static int qdisc_notify(struct net *net, struct sk_buff *oskb,
848 struct nlmsghdr *n, u32 clid,
849 struct Qdisc *old, struct Qdisc *new)
850 {
851 struct sk_buff *skb;
852 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
853
854 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
855 if (!skb)
856 return -ENOBUFS;
857
858 if (old && !tc_qdisc_dump_ignore(old, false)) {
859 if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
860 0, RTM_DELQDISC) < 0)
861 goto err_out;
862 }
863 if (new && !tc_qdisc_dump_ignore(new, false)) {
864 if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
865 old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
866 goto err_out;
867 }
868
869 if (skb->len)
870 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
871 n->nlmsg_flags & NLM_F_ECHO);
872
873 err_out:
874 kfree_skb(skb);
875 return -EINVAL;
876 }
877
878 static void notify_and_destroy(struct net *net, struct sk_buff *skb,
879 struct nlmsghdr *n, u32 clid,
880 struct Qdisc *old, struct Qdisc *new)
881 {
882 if (new || old)
883 qdisc_notify(net, skb, n, clid, old, new);
884
885 if (old)
886 qdisc_destroy(old);
887 }
888
889 /* Graft qdisc "new" to class "classid" of qdisc "parent" or
890 * to device "dev".
891 *
892 * When appropriate, send a netlink notification using "skb"
893 * and "n".
894 *
895 * On success, destroy the old qdisc.
896 */
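/*
 * For illustration: a plain "tc qdisc replace dev eth0 root <kind>"
 * reaches this function from tc_modify_qdisc() with parent == NULL and
 * classid == TC_H_ROOT, so the !parent branch below grafts the new qdisc
 * onto every tx queue (or onto the ingress queue for ingress qdiscs) and
 * the old root qdisc is destroyed.
 */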
897
898 static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
899 struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
900 struct Qdisc *new, struct Qdisc *old)
901 {
902 struct Qdisc *q = old;
903 struct net *net = dev_net(dev);
904 int err = 0;
905
906 if (parent == NULL) {
907 unsigned int i, num_q, ingress;
908
909 ingress = 0;
910 num_q = dev->num_tx_queues;
911 if ((q && q->flags & TCQ_F_INGRESS) ||
912 (new && new->flags & TCQ_F_INGRESS)) {
913 num_q = 1;
914 ingress = 1;
915 if (!dev_ingress_queue(dev))
916 return -ENOENT;
917 }
918
919 if (dev->flags & IFF_UP)
920 dev_deactivate(dev);
921
922 if (new && new->ops->attach)
923 goto skip;
924
925 for (i = 0; i < num_q; i++) {
926 struct netdev_queue *dev_queue = dev_ingress_queue(dev);
927
928 if (!ingress)
929 dev_queue = netdev_get_tx_queue(dev, i);
930
931 old = dev_graft_qdisc(dev_queue, new);
932 if (new && i > 0)
933 qdisc_refcount_inc(new);
934
935 if (!ingress)
936 qdisc_destroy(old);
937 }
938
939 skip:
940 if (!ingress) {
941 notify_and_destroy(net, skb, n, classid,
942 dev->qdisc, new);
943 if (new && !new->ops->attach)
944 qdisc_refcount_inc(new);
945 dev->qdisc = new ? : &noop_qdisc;
946
947 if (new && new->ops->attach)
948 new->ops->attach(new);
949 } else {
950 notify_and_destroy(net, skb, n, classid, old, new);
951 }
952
953 if (dev->flags & IFF_UP)
954 dev_activate(dev);
955 } else {
956 const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
957
958 /* Only support running class lockless if parent is lockless */
959 if (new && (new->flags & TCQ_F_NOLOCK) &&
960 parent && !(parent->flags & TCQ_F_NOLOCK))
961 new->flags &= ~TCQ_F_NOLOCK;
962
963 err = -EOPNOTSUPP;
964 if (cops && cops->graft) {
965 unsigned long cl = cops->find(parent, classid);
966
967 if (cl)
968 err = cops->graft(parent, cl, new, &old);
969 else
970 err = -ENOENT;
971 }
972 if (!err)
973 notify_and_destroy(net, skb, n, classid, old, new);
974 }
975 return err;
976 }
977
978 /* lockdep annotation is needed for ingress; egress gets it only for name */
979 static struct lock_class_key qdisc_tx_lock;
980 static struct lock_class_key qdisc_rx_lock;
981
982 /*
983 Allocate and initialize new qdisc.
984
985 Parameters are passed via opt.
986 */
987
988 static struct Qdisc *qdisc_create(struct net_device *dev,
989 struct netdev_queue *dev_queue,
990 struct Qdisc *p, u32 parent, u32 handle,
991 struct nlattr **tca, int *errp)
992 {
993 int err;
994 struct nlattr *kind = tca[TCA_KIND];
995 struct Qdisc *sch;
996 struct Qdisc_ops *ops;
997 struct qdisc_size_table *stab;
998
999 ops = qdisc_lookup_ops(kind);
1000 #ifdef CONFIG_MODULES
1001 if (ops == NULL && kind != NULL) {
1002 char name[IFNAMSIZ];
1003 if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
1004 /* We dropped the RTNL semaphore in order to
1005 * perform the module load. So, even if we
1006 * succeeded in loading the module we have to
1007 * tell the caller to replay the request. We
1008 * indicate this using -EAGAIN.
1009 * We replay the request because the device may
1010 * go away in the mean time.
1011 */
1012 rtnl_unlock();
1013 request_module("sch_%s", name);
1014 rtnl_lock();
1015 ops = qdisc_lookup_ops(kind);
1016 if (ops != NULL) {
1017 /* We will call qdisc_lookup_ops() again on replay,
1018 * so don't keep a reference here.
1019 */
1020 module_put(ops->owner);
1021 err = -EAGAIN;
1022 goto err_out;
1023 }
1024 }
1025 }
1026 #endif
1027
1028 err = -ENOENT;
1029 if (!ops)
1030 goto err_out;
1031
1032 sch = qdisc_alloc(dev_queue, ops);
1033 if (IS_ERR(sch)) {
1034 err = PTR_ERR(sch);
1035 goto err_out2;
1036 }
1037
1038 sch->parent = parent;
1039
1040 if (handle == TC_H_INGRESS) {
1041 sch->flags |= TCQ_F_INGRESS;
1042 handle = TC_H_MAKE(TC_H_INGRESS, 0);
1043 lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
1044 } else {
1045 if (handle == 0) {
1046 handle = qdisc_alloc_handle(dev);
1047 err = -ENOMEM;
1048 if (handle == 0)
1049 goto err_out3;
1050 }
1051 lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
1052 if (!netif_is_multiqueue(dev))
1053 sch->flags |= TCQ_F_ONETXQUEUE;
1054 }
1055
1056 sch->handle = handle;
1057
1058 /* This exists to stay backward compatible with a userspace
1059 * loophole which allowed userspace to get the IFF_NO_QUEUE
1060 * facility on older kernels by setting tx_queue_len=0 (prior
1061 * to qdisc init), and then forgetting to reinit tx_queue_len
1062 * before attaching a qdisc again.
1063 */
1064 if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
1065 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
1066 netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
1067 }
1068
1069 if (ops->init) {
1070 err = ops->init(sch, tca[TCA_OPTIONS]);
1071 if (err != 0)
1072 goto err_out5;
1073 }
1074
1075 if (qdisc_is_percpu_stats(sch)) {
1076 sch->cpu_bstats =
1077 netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
1078 if (!sch->cpu_bstats)
1079 goto err_out4;
1080
1081 sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
1082 if (!sch->cpu_qstats)
1083 goto err_out4;
1084 }
1085
1086 if (tca[TCA_STAB]) {
1087 stab = qdisc_get_stab(tca[TCA_STAB]);
1088 if (IS_ERR(stab)) {
1089 err = PTR_ERR(stab);
1090 goto err_out4;
1091 }
1092 rcu_assign_pointer(sch->stab, stab);
1093 }
1094 if (tca[TCA_RATE]) {
1095 seqcount_t *running;
1096
1097 err = -EOPNOTSUPP;
1098 if (sch->flags & TCQ_F_MQROOT)
1099 goto err_out4;
1100
1101 if (sch->parent != TC_H_ROOT &&
1102 !(sch->flags & TCQ_F_INGRESS) &&
1103 (!p || !(p->flags & TCQ_F_MQROOT)))
1104 running = qdisc_root_sleeping_running(sch);
1105 else
1106 running = &sch->running;
1107
1108 err = gen_new_estimator(&sch->bstats,
1109 sch->cpu_bstats,
1110 &sch->rate_est,
1111 NULL,
1112 running,
1113 tca[TCA_RATE]);
1114 if (err)
1115 goto err_out4;
1116 }
1117
1118 qdisc_hash_add(sch, false);
1119
1120 return sch;
1121
1122 err_out5:
1123 /* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
1124 if (ops->destroy)
1125 ops->destroy(sch);
1126 err_out3:
1127 dev_put(dev);
1128 kfree((char *) sch - sch->padded);
1129 err_out2:
1130 module_put(ops->owner);
1131 err_out:
1132 *errp = err;
1133 return NULL;
1134
1135 err_out4:
1136 free_percpu(sch->cpu_bstats);
1137 free_percpu(sch->cpu_qstats);
1138 /*
1139 * Any broken qdiscs that would require an ops->reset() here?
1140 * The qdisc was never in action so it shouldn't be necessary.
1141 */
1142 qdisc_put_stab(rtnl_dereference(sch->stab));
1143 if (ops->destroy)
1144 ops->destroy(sch);
1145 goto err_out3;
1146 }
1147
1148 static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
1149 {
1150 struct qdisc_size_table *ostab, *stab = NULL;
1151 int err = 0;
1152
1153 if (tca[TCA_OPTIONS]) {
1154 if (!sch->ops->change)
1155 return -EINVAL;
1156 err = sch->ops->change(sch, tca[TCA_OPTIONS]);
1157 if (err)
1158 return err;
1159 }
1160
1161 if (tca[TCA_STAB]) {
1162 stab = qdisc_get_stab(tca[TCA_STAB]);
1163 if (IS_ERR(stab))
1164 return PTR_ERR(stab);
1165 }
1166
1167 ostab = rtnl_dereference(sch->stab);
1168 rcu_assign_pointer(sch->stab, stab);
1169 qdisc_put_stab(ostab);
1170
1171 if (tca[TCA_RATE]) {
1172 /* NB: ignores errors from replace_estimator
1173 because change can't be undone. */
1174 if (sch->flags & TCQ_F_MQROOT)
1175 goto out;
1176 gen_replace_estimator(&sch->bstats,
1177 sch->cpu_bstats,
1178 &sch->rate_est,
1179 NULL,
1180 qdisc_root_sleeping_running(sch),
1181 tca[TCA_RATE]);
1182 }
1183 out:
1184 return 0;
1185 }
1186
1187 struct check_loop_arg {
1188 struct qdisc_walker w;
1189 struct Qdisc *p;
1190 int depth;
1191 };
1192
1193 static int check_loop_fn(struct Qdisc *q, unsigned long cl,
1194 struct qdisc_walker *w);
1195
1196 static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
1197 {
1198 struct check_loop_arg arg;
1199
1200 if (q->ops->cl_ops == NULL)
1201 return 0;
1202
1203 arg.w.stop = arg.w.skip = arg.w.count = 0;
1204 arg.w.fn = check_loop_fn;
1205 arg.depth = depth;
1206 arg.p = p;
1207 q->ops->cl_ops->walk(q, &arg.w);
1208 return arg.w.stop ? -ELOOP : 0;
1209 }
1210
1211 static int
1212 check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
1213 {
1214 struct Qdisc *leaf;
1215 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1216 struct check_loop_arg *arg = (struct check_loop_arg *)w;
1217
1218 leaf = cops->leaf(q, cl);
1219 if (leaf) {
1220 if (leaf == arg->p || arg->depth > 7)
1221 return -ELOOP;
1222 return check_loop(leaf, arg->p, arg->depth + 1);
1223 }
1224 return 0;
1225 }
1226
1227 /*
1228 * Delete/get qdisc.
1229 */
1230
1231 static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
1232 struct netlink_ext_ack *extack)
1233 {
1234 struct net *net = sock_net(skb->sk);
1235 struct tcmsg *tcm = nlmsg_data(n);
1236 struct nlattr *tca[TCA_MAX + 1];
1237 struct net_device *dev;
1238 u32 clid;
1239 struct Qdisc *q = NULL;
1240 struct Qdisc *p = NULL;
1241 int err;
1242
1243 if ((n->nlmsg_type != RTM_GETQDISC) &&
1244 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1245 return -EPERM;
1246
1247 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
1248 if (err < 0)
1249 return err;
1250
1251 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1252 if (!dev)
1253 return -ENODEV;
1254
1255 clid = tcm->tcm_parent;
1256 if (clid) {
1257 if (clid != TC_H_ROOT) {
1258 if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
1259 p = qdisc_lookup(dev, TC_H_MAJ(clid));
1260 if (!p)
1261 return -ENOENT;
1262 q = qdisc_leaf(p, clid);
1263 } else if (dev_ingress_queue(dev)) {
1264 q = dev_ingress_queue(dev)->qdisc_sleeping;
1265 }
1266 } else {
1267 q = dev->qdisc;
1268 }
1269 if (!q)
1270 return -ENOENT;
1271
1272 if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
1273 return -EINVAL;
1274 } else {
1275 q = qdisc_lookup(dev, tcm->tcm_handle);
1276 if (!q)
1277 return -ENOENT;
1278 }
1279
1280 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
1281 return -EINVAL;
1282
1283 if (n->nlmsg_type == RTM_DELQDISC) {
1284 if (!clid)
1285 return -EINVAL;
1286 if (q->handle == 0)
1287 return -ENOENT;
1288 err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
1289 if (err != 0)
1290 return err;
1291 } else {
1292 qdisc_notify(net, skb, n, clid, NULL, q);
1293 }
1294 return 0;
1295 }
1296
1297 /*
1298 * Create/change qdisc.
1299 */
1300
1301 static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
1302 struct netlink_ext_ack *extack)
1303 {
1304 struct net *net = sock_net(skb->sk);
1305 struct tcmsg *tcm;
1306 struct nlattr *tca[TCA_MAX + 1];
1307 struct net_device *dev;
1308 u32 clid;
1309 struct Qdisc *q, *p;
1310 int err;
1311
1312 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1313 return -EPERM;
1314
1315 replay:
1316 /* Reinit, just in case something touches this. */
1317 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
1318 if (err < 0)
1319 return err;
1320
1321 tcm = nlmsg_data(n);
1322 clid = tcm->tcm_parent;
1323 q = p = NULL;
1324
1325 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1326 if (!dev)
1327 return -ENODEV;
1328
1329
1330 if (clid) {
1331 if (clid != TC_H_ROOT) {
1332 if (clid != TC_H_INGRESS) {
1333 p = qdisc_lookup(dev, TC_H_MAJ(clid));
1334 if (!p)
1335 return -ENOENT;
1336 q = qdisc_leaf(p, clid);
1337 } else if (dev_ingress_queue_create(dev)) {
1338 q = dev_ingress_queue(dev)->qdisc_sleeping;
1339 }
1340 } else {
1341 q = dev->qdisc;
1342 }
1343
1344 /* It may be the default qdisc; ignore it */
1345 if (q && q->handle == 0)
1346 q = NULL;
1347
1348 if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
1349 if (tcm->tcm_handle) {
1350 if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
1351 return -EEXIST;
1352 if (TC_H_MIN(tcm->tcm_handle))
1353 return -EINVAL;
1354 q = qdisc_lookup(dev, tcm->tcm_handle);
1355 if (!q)
1356 goto create_n_graft;
1357 if (n->nlmsg_flags & NLM_F_EXCL)
1358 return -EEXIST;
1359 if (tca[TCA_KIND] &&
1360 nla_strcmp(tca[TCA_KIND], q->ops->id))
1361 return -EINVAL;
1362 if (q == p ||
1363 (p && check_loop(q, p, 0)))
1364 return -ELOOP;
1365 qdisc_refcount_inc(q);
1366 goto graft;
1367 } else {
1368 if (!q)
1369 goto create_n_graft;
1370
1371 /* This magic test requires explanation.
1372 *
1373 * We know that some child q is already
1374 * attached to this parent, and we have a choice:
1375 * either to change it or to create/graft a new one.
1376 *
1377 * 1. We are allowed to create/graft only
1378 * if CREATE and REPLACE flags are set.
1379 *
1380 * 2. If EXCL is set, the requestor wanted to say
1381 * that the qdisc tcm_handle is not expected
1382 * to exist, so we choose create/graft too.
1383 *
1384 * 3. The last case is when no flags are set.
1385 * Alas, it is a sort of hole in the API; we
1386 * cannot decide what to do unambiguously.
1387 * For now we select create/graft if the
1388 * user gave a KIND which does not match the existing one.
1389 */
1390 if ((n->nlmsg_flags & NLM_F_CREATE) &&
1391 (n->nlmsg_flags & NLM_F_REPLACE) &&
1392 ((n->nlmsg_flags & NLM_F_EXCL) ||
1393 (tca[TCA_KIND] &&
1394 nla_strcmp(tca[TCA_KIND], q->ops->id))))
1395 goto create_n_graft;
1396 }
1397 }
1398 } else {
1399 if (!tcm->tcm_handle)
1400 return -EINVAL;
1401 q = qdisc_lookup(dev, tcm->tcm_handle);
1402 }
1403
1404 /* Change qdisc parameters */
1405 if (!q)
1406 return -ENOENT;
1407 if (n->nlmsg_flags & NLM_F_EXCL)
1408 return -EEXIST;
1409 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
1410 return -EINVAL;
1411 err = qdisc_change(q, tca);
1412 if (err == 0)
1413 qdisc_notify(net, skb, n, clid, NULL, q);
1414 return err;
1415
1416 create_n_graft:
1417 if (!(n->nlmsg_flags & NLM_F_CREATE))
1418 return -ENOENT;
1419 if (clid == TC_H_INGRESS) {
1420 if (dev_ingress_queue(dev))
1421 q = qdisc_create(dev, dev_ingress_queue(dev), p,
1422 tcm->tcm_parent, tcm->tcm_parent,
1423 tca, &err);
1424 else
1425 err = -ENOENT;
1426 } else {
1427 struct netdev_queue *dev_queue;
1428
1429 if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
1430 dev_queue = p->ops->cl_ops->select_queue(p, tcm);
1431 else if (p)
1432 dev_queue = p->dev_queue;
1433 else
1434 dev_queue = netdev_get_tx_queue(dev, 0);
1435
1436 q = qdisc_create(dev, dev_queue, p,
1437 tcm->tcm_parent, tcm->tcm_handle,
1438 tca, &err);
1439 }
1440 if (q == NULL) {
1441 if (err == -EAGAIN)
1442 goto replay;
1443 return err;
1444 }
1445
1446 graft:
1447 err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
1448 if (err) {
1449 if (q)
1450 qdisc_destroy(q);
1451 return err;
1452 }
1453
1454 return 0;
1455 }
1456
1457 static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
1458 struct netlink_callback *cb,
1459 int *q_idx_p, int s_q_idx, bool recur,
1460 bool dump_invisible)
1461 {
1462 int ret = 0, q_idx = *q_idx_p;
1463 struct Qdisc *q;
1464 int b;
1465
1466 if (!root)
1467 return 0;
1468
1469 q = root;
1470 if (q_idx < s_q_idx) {
1471 q_idx++;
1472 } else {
1473 if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
1474 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1475 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1476 RTM_NEWQDISC) <= 0)
1477 goto done;
1478 q_idx++;
1479 }
1480
1481 /* If dumping singletons, there is no qdisc_dev(root) and the singleton
1482 * itself has already been dumped.
1483 *
1484 * If we've already dumped the top-level (ingress) qdisc above and the global
1485 * qdisc hashtable, we don't want to hit it again
1486 */
1487 if (!qdisc_dev(root) || !recur)
1488 goto out;
1489
1490 hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
1491 if (q_idx < s_q_idx) {
1492 q_idx++;
1493 continue;
1494 }
1495 if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
1496 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1497 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1498 RTM_NEWQDISC) <= 0)
1499 goto done;
1500 q_idx++;
1501 }
1502
1503 out:
1504 *q_idx_p = q_idx;
1505 return ret;
1506 done:
1507 ret = -1;
1508 goto out;
1509 }
1510
1511 static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
1512 {
1513 struct net *net = sock_net(skb->sk);
1514 int idx, q_idx;
1515 int s_idx, s_q_idx;
1516 struct net_device *dev;
1517 const struct nlmsghdr *nlh = cb->nlh;
1518 struct nlattr *tca[TCA_MAX + 1];
1519 int err;
1520
1521 s_idx = cb->args[0];
1522 s_q_idx = q_idx = cb->args[1];
1523
1524 idx = 0;
1525 ASSERT_RTNL();
1526
1527 err = nlmsg_parse(nlh, sizeof(struct tcmsg), tca, TCA_MAX, NULL, NULL);
1528 if (err < 0)
1529 return err;
1530
1531 for_each_netdev(net, dev) {
1532 struct netdev_queue *dev_queue;
1533
1534 if (idx < s_idx)
1535 goto cont;
1536 if (idx > s_idx)
1537 s_q_idx = 0;
1538 q_idx = 0;
1539
1540 if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
1541 true, tca[TCA_DUMP_INVISIBLE]) < 0)
1542 goto done;
1543
1544 dev_queue = dev_ingress_queue(dev);
1545 if (dev_queue &&
1546 tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
1547 &q_idx, s_q_idx, false,
1548 tca[TCA_DUMP_INVISIBLE]) < 0)
1549 goto done;
1550
1551 cont:
1552 idx++;
1553 }
1554
1555 done:
1556 cb->args[0] = idx;
1557 cb->args[1] = q_idx;
1558
1559 return skb->len;
1560 }
1561
1562
1563
1564 /************************************************
1565 * Traffic classes manipulation. *
1566 ************************************************/
1567
1568 static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1569 unsigned long cl,
1570 u32 portid, u32 seq, u16 flags, int event)
1571 {
1572 struct tcmsg *tcm;
1573 struct nlmsghdr *nlh;
1574 unsigned char *b = skb_tail_pointer(skb);
1575 struct gnet_dump d;
1576 const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
1577
1578 cond_resched();
1579 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1580 if (!nlh)
1581 goto out_nlmsg_trim;
1582 tcm = nlmsg_data(nlh);
1583 tcm->tcm_family = AF_UNSPEC;
1584 tcm->tcm__pad1 = 0;
1585 tcm->tcm__pad2 = 0;
1586 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1587 tcm->tcm_parent = q->handle;
1588 tcm->tcm_handle = q->handle;
1589 tcm->tcm_info = 0;
1590 if (nla_put_string(skb, TCA_KIND, q->ops->id))
1591 goto nla_put_failure;
1592 if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
1593 goto nla_put_failure;
1594
1595 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1596 NULL, &d, TCA_PAD) < 0)
1597 goto nla_put_failure;
1598
1599 if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
1600 goto nla_put_failure;
1601
1602 if (gnet_stats_finish_copy(&d) < 0)
1603 goto nla_put_failure;
1604
1605 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1606 return skb->len;
1607
1608 out_nlmsg_trim:
1609 nla_put_failure:
1610 nlmsg_trim(skb, b);
1611 return -1;
1612 }
1613
1614 static int tclass_notify(struct net *net, struct sk_buff *oskb,
1615 struct nlmsghdr *n, struct Qdisc *q,
1616 unsigned long cl, int event)
1617 {
1618 struct sk_buff *skb;
1619 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1620
1621 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1622 if (!skb)
1623 return -ENOBUFS;
1624
1625 if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
1626 kfree_skb(skb);
1627 return -EINVAL;
1628 }
1629
1630 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1631 n->nlmsg_flags & NLM_F_ECHO);
1632 }
1633
1634 static int tclass_del_notify(struct net *net,
1635 const struct Qdisc_class_ops *cops,
1636 struct sk_buff *oskb, struct nlmsghdr *n,
1637 struct Qdisc *q, unsigned long cl)
1638 {
1639 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1640 struct sk_buff *skb;
1641 int err = 0;
1642
1643 if (!cops->delete)
1644 return -EOPNOTSUPP;
1645
1646 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1647 if (!skb)
1648 return -ENOBUFS;
1649
1650 if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
1651 RTM_DELTCLASS) < 0) {
1652 kfree_skb(skb);
1653 return -EINVAL;
1654 }
1655
1656 err = cops->delete(q, cl);
1657 if (err) {
1658 kfree_skb(skb);
1659 return err;
1660 }
1661
1662 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1663 n->nlmsg_flags & NLM_F_ECHO);
1664 }
1665
1666 #ifdef CONFIG_NET_CLS
1667
1668 struct tcf_bind_args {
1669 struct tcf_walker w;
1670 u32 classid;
1671 unsigned long cl;
1672 };
1673
1674 static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
1675 {
1676 struct tcf_bind_args *a = (void *)arg;
1677
1678 if (tp->ops->bind_class) {
1679 struct Qdisc *q = tcf_block_q(tp->chain->block);
1680
1681 sch_tree_lock(q);
1682 tp->ops->bind_class(n, a->classid, a->cl);
1683 sch_tree_unlock(q);
1684 }
1685 return 0;
1686 }
1687
1688 static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
1689 unsigned long new_cl)
1690 {
1691 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1692 struct tcf_block *block;
1693 struct tcf_chain *chain;
1694 unsigned long cl;
1695
1696 cl = cops->find(q, portid);
1697 if (!cl)
1698 return;
1699 block = cops->tcf_block(q, cl);
1700 if (!block)
1701 return;
1702 list_for_each_entry(chain, &block->chain_list, list) {
1703 struct tcf_proto *tp;
1704
1705 for (tp = rtnl_dereference(chain->filter_chain);
1706 tp; tp = rtnl_dereference(tp->next)) {
1707 struct tcf_bind_args arg = {};
1708
1709 arg.w.fn = tcf_node_bind;
1710 arg.classid = clid;
1711 arg.cl = new_cl;
1712 tp->ops->walk(tp, &arg.w);
1713 }
1714 }
1715 }
1716
1717 #else
1718
1719 static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
1720 unsigned long new_cl)
1721 {
1722 }
1723
1724 #endif
1725
1726 static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
1727 struct netlink_ext_ack *extack)
1728 {
1729 struct net *net = sock_net(skb->sk);
1730 struct tcmsg *tcm = nlmsg_data(n);
1731 struct nlattr *tca[TCA_MAX + 1];
1732 struct net_device *dev;
1733 struct Qdisc *q = NULL;
1734 const struct Qdisc_class_ops *cops;
1735 unsigned long cl = 0;
1736 unsigned long new_cl;
1737 u32 portid;
1738 u32 clid;
1739 u32 qid;
1740 int err;
1741
1742 if ((n->nlmsg_type != RTM_GETTCLASS) &&
1743 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1744 return -EPERM;
1745
1746 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
1747 if (err < 0)
1748 return err;
1749
1750 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1751 if (!dev)
1752 return -ENODEV;
1753
1754 /*
1755 parent == TC_H_UNSPEC - unspecified parent.
1756 parent == TC_H_ROOT - class is root, which has no parent.
1757 parent == X:0 - parent is root class.
1758 parent == X:Y - parent is a node in hierarchy.
1759 parent == 0:Y - parent is X:Y, where X:0 is qdisc.
1760
1761 handle == 0:0 - generate handle from kernel pool.
1762 handle == 0:Y - class is X:Y, where X:0 is qdisc.
1763 handle == X:Y - clear.
1764 handle == X:0 - root class.
1765 */
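/*
 * Illustrative mapping for a hypothetical request: for
 * "tc class add dev eth0 parent 1: classid 1:10 ...", tc sends
 * tcm_parent = 0x00010000 (parent 1:0, the root class of qdisc 1:)
 * and tcm_handle = 0x00010010 (class 1:10 - both halves of a handle
 * are parsed as hexadecimal).
 */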
1766
1767 /* Step 1. Determine qdisc handle X:0 */
1768
1769 portid = tcm->tcm_parent;
1770 clid = tcm->tcm_handle;
1771 qid = TC_H_MAJ(clid);
1772
1773 if (portid != TC_H_ROOT) {
1774 u32 qid1 = TC_H_MAJ(portid);
1775
1776 if (qid && qid1) {
1777 /* If both majors are known, they must be identical. */
1778 if (qid != qid1)
1779 return -EINVAL;
1780 } else if (qid1) {
1781 qid = qid1;
1782 } else if (qid == 0)
1783 qid = dev->qdisc->handle;
1784
1785 /* Now qid is a genuine qdisc handle consistent
1786 * with both parent and child.
1787 *
1788 * TC_H_MAJ(portid) may still be unspecified; complete it now.
1789 */
1790 if (portid)
1791 portid = TC_H_MAKE(qid, portid);
1792 } else {
1793 if (qid == 0)
1794 qid = dev->qdisc->handle;
1795 }
1796
1797 /* OK. Locate qdisc */
1798 q = qdisc_lookup(dev, qid);
1799 if (!q)
1800 return -ENOENT;
1801
1802 /* And check that it supports classes */
1803 cops = q->ops->cl_ops;
1804 if (cops == NULL)
1805 return -EINVAL;
1806
1807 /* Now try to get class */
1808 if (clid == 0) {
1809 if (portid == TC_H_ROOT)
1810 clid = qid;
1811 } else
1812 clid = TC_H_MAKE(qid, clid);
1813
1814 if (clid)
1815 cl = cops->find(q, clid);
1816
1817 if (cl == 0) {
1818 err = -ENOENT;
1819 if (n->nlmsg_type != RTM_NEWTCLASS ||
1820 !(n->nlmsg_flags & NLM_F_CREATE))
1821 goto out;
1822 } else {
1823 switch (n->nlmsg_type) {
1824 case RTM_NEWTCLASS:
1825 err = -EEXIST;
1826 if (n->nlmsg_flags & NLM_F_EXCL)
1827 goto out;
1828 break;
1829 case RTM_DELTCLASS:
1830 err = tclass_del_notify(net, cops, skb, n, q, cl);
1831 /* Unbind filters from the deleted class by binding them to 0 */
1832 tc_bind_tclass(q, portid, clid, 0);
1833 goto out;
1834 case RTM_GETTCLASS:
1835 err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
1836 goto out;
1837 default:
1838 err = -EINVAL;
1839 goto out;
1840 }
1841 }
1842
1843 new_cl = cl;
1844 err = -EOPNOTSUPP;
1845 if (cops->change)
1846 err = cops->change(q, clid, portid, tca, &new_cl);
1847 if (err == 0) {
1848 tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
1849 /* We just created a new class; we need to do the reverse binding. */
1850 if (cl != new_cl)
1851 tc_bind_tclass(q, portid, clid, new_cl);
1852 }
1853 out:
1854 return err;
1855 }
1856
1857 struct qdisc_dump_args {
1858 struct qdisc_walker w;
1859 struct sk_buff *skb;
1860 struct netlink_callback *cb;
1861 };
1862
1863 static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
1864 struct qdisc_walker *arg)
1865 {
1866 struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
1867
1868 return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
1869 a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
1870 RTM_NEWTCLASS);
1871 }
1872
1873 static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
1874 struct tcmsg *tcm, struct netlink_callback *cb,
1875 int *t_p, int s_t)
1876 {
1877 struct qdisc_dump_args arg;
1878
1879 if (tc_qdisc_dump_ignore(q, false) ||
1880 *t_p < s_t || !q->ops->cl_ops ||
1881 (tcm->tcm_parent &&
1882 TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
1883 (*t_p)++;
1884 return 0;
1885 }
1886 if (*t_p > s_t)
1887 memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
1888 arg.w.fn = qdisc_class_dump;
1889 arg.skb = skb;
1890 arg.cb = cb;
1891 arg.w.stop = 0;
1892 arg.w.skip = cb->args[1];
1893 arg.w.count = 0;
1894 q->ops->cl_ops->walk(q, &arg.w);
1895 cb->args[1] = arg.w.count;
1896 if (arg.w.stop)
1897 return -1;
1898 (*t_p)++;
1899 return 0;
1900 }
1901
1902 static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
1903 struct tcmsg *tcm, struct netlink_callback *cb,
1904 int *t_p, int s_t)
1905 {
1906 struct Qdisc *q;
1907 int b;
1908
1909 if (!root)
1910 return 0;
1911
1912 if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
1913 return -1;
1914
1915 if (!qdisc_dev(root))
1916 return 0;
1917
1918 if (tcm->tcm_parent) {
1919 q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
1920 if (q && tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
1921 return -1;
1922 return 0;
1923 }
1924 hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
1925 if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
1926 return -1;
1927 }
1928
1929 return 0;
1930 }
1931
1932 static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
1933 {
1934 struct tcmsg *tcm = nlmsg_data(cb->nlh);
1935 struct net *net = sock_net(skb->sk);
1936 struct netdev_queue *dev_queue;
1937 struct net_device *dev;
1938 int t, s_t;
1939
1940 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
1941 return 0;
1942 dev = dev_get_by_index(net, tcm->tcm_ifindex);
1943 if (!dev)
1944 return 0;
1945
1946 s_t = cb->args[0];
1947 t = 0;
1948
1949 if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
1950 goto done;
1951
1952 dev_queue = dev_ingress_queue(dev);
1953 if (dev_queue &&
1954 tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
1955 &t, s_t) < 0)
1956 goto done;
1957
1958 done:
1959 cb->args[0] = t;
1960
1961 dev_put(dev);
1962 return skb->len;
1963 }
1964
1965 #ifdef CONFIG_PROC_FS
1966 static int psched_show(struct seq_file *seq, void *v)
1967 {
1968 seq_printf(seq, "%08x %08x %08x %08x\n",
1969 (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
1970 1000000,
1971 (u32)NSEC_PER_SEC / hrtimer_resolution);
1972
1973 return 0;
1974 }
1975
1976 static int psched_open(struct inode *inode, struct file *file)
1977 {
1978 return single_open(file, psched_show, NULL);
1979 }
1980
1981 static const struct file_operations psched_fops = {
1982 .owner = THIS_MODULE,
1983 .open = psched_open,
1984 .read = seq_read,
1985 .llseek = seq_lseek,
1986 .release = single_release,
1987 };
1988
1989 static int __net_init psched_net_init(struct net *net)
1990 {
1991 struct proc_dir_entry *e;
1992
1993 e = proc_create("psched", 0, net->proc_net, &psched_fops);
1994 if (e == NULL)
1995 return -ENOMEM;
1996
1997 return 0;
1998 }
1999
2000 static void __net_exit psched_net_exit(struct net *net)
2001 {
2002 remove_proc_entry("psched", net->proc_net);
2003 }
2004 #else
2005 static int __net_init psched_net_init(struct net *net)
2006 {
2007 return 0;
2008 }
2009
2010 static void __net_exit psched_net_exit(struct net *net)
2011 {
2012 }
2013 #endif
2014
2015 static struct pernet_operations psched_net_ops = {
2016 .init = psched_net_init,
2017 .exit = psched_net_exit,
2018 };
2019
2020 static int __init pktsched_init(void)
2021 {
2022 int err;
2023
2024 err = register_pernet_subsys(&psched_net_ops);
2025 if (err) {
2026 pr_err("pktsched_init: "
2027 "cannot initialize per netns operations\n");
2028 return err;
2029 }
2030
2031 register_qdisc(&pfifo_fast_ops);
2032 register_qdisc(&pfifo_qdisc_ops);
2033 register_qdisc(&bfifo_qdisc_ops);
2034 register_qdisc(&pfifo_head_drop_qdisc_ops);
2035 register_qdisc(&mq_qdisc_ops);
2036 register_qdisc(&noqueue_qdisc_ops);
2037
2038 rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, 0);
2039 rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, 0);
2040 rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
2041 0);
2042 rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, 0);
2043 rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, 0);
2044 rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
2045 0);
2046
2047 return 0;
2048 }
2049
2050 subsys_initcall(pktsched_init);