1 /*
2 * net/sched/sch_api.c Packet scheduler API.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 *
11 * Fixes:
12 *
13 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
14 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
15 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
16 */
17
18 #include <linux/module.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/string.h>
22 #include <linux/errno.h>
23 #include <linux/skbuff.h>
24 #include <linux/init.h>
25 #include <linux/proc_fs.h>
26 #include <linux/seq_file.h>
27 #include <linux/kmod.h>
28 #include <linux/list.h>
29 #include <linux/hrtimer.h>
30 #include <linux/lockdep.h>
31 #include <linux/slab.h>
32 #include <linux/hashtable.h>
33
34 #include <net/net_namespace.h>
35 #include <net/sock.h>
36 #include <net/netlink.h>
37 #include <net/pkt_sched.h>
38 #include <net/pkt_cls.h>
39
40 /*
41
42 Short review.
43 -------------
44
45 This file consists of two interrelated parts:
46
47 1. queueing disciplines manager frontend.
48 2. traffic classes manager frontend.
49
50 Generally, a queueing discipline ("qdisc") is a black box,
51 which is able to enqueue packets and to dequeue them (when
52 the device is ready to send something) in the order and at the times
53 determined by the algorithm hidden inside it.
54
55 qdiscs are divided into two categories:
56 - "queues", which have no internal structure visible from outside.
57 - "schedulers", which split all the packets into "traffic classes",
58 using "packet classifiers" (see cls_api.c).
59
60 In turn, classes may have child qdiscs (as a rule, queues)
61 attached to them, and so on.
62
63 The goal of the routines in this file is to translate the
64 information supplied by the user in the form of handles
65 into a form more intelligible to the kernel, to perform some sanity
66 checks and the part of the work that is common to all qdiscs,
67 and to provide rtnetlink notifications.
68
69 All the real intelligent work is done inside the qdisc modules.
70
71
72
73 Every discipline has two major routines: enqueue and dequeue.
74
75 ---dequeue
76
77 dequeue usually returns an skb to send. It is allowed to return NULL,
78 but that does not mean the queue is empty; it just means that the
79 discipline does not want to send anything at this time.
80 The queue is really empty only if q->q.qlen == 0.
81 For complicated disciplines with multiple queues, q->q is not the
82 real packet queue, but q->q.qlen must nevertheless be valid.
83
84 ---enqueue
85
86 enqueue returns 0 if the packet was enqueued successfully.
87 If a packet (this one or another one) was dropped, it returns
88 a non-zero error code.
89 NET_XMIT_DROP - this packet was dropped.
90 Expected action: do not back off, but wait until the queue clears.
91 NET_XMIT_CN - this packet was probably enqueued, but another one was dropped.
92 Expected action: back off or ignore.
93
94 Auxiliary routines:
95
96 ---peek
97
98 like dequeue but without removing a packet from the queue
99
100 ---reset
101
102 returns the qdisc to its initial state: purges all buffers, clears all
103 timers and counters (except statistics), etc.
104
105 ---init
106
107 initializes a newly created qdisc.
108
109 ---destroy
110
111 destroys resources allocated by init and during the lifetime of the qdisc.
112
113 ---change
114
115 changes qdisc parameters.
116 */
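
/*
 * A minimal sketch of the contract described above (illustration only;
 * nothing else in this file uses it, and all "example_" names are made
 * up): a bounded FIFO whose dequeue/peek/reset are the stock list-based
 * helpers from <net/sch_generic.h>, as in sch_fifo.
 */
static int example_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	if (likely(sch->q.qlen < sch->limit))
		return qdisc_enqueue_tail(skb, sch);	/* NET_XMIT_SUCCESS */

	return qdisc_drop(skb, sch, to_free);		/* NET_XMIT_DROP */
}

static struct Qdisc_ops example_fifo_qdisc_ops __maybe_unused = {
	.id		= "example_fifo",
	.enqueue	= example_fifo_enqueue,
	.dequeue	= qdisc_dequeue_head,
	.peek		= qdisc_peek_head,
	.reset		= qdisc_reset_queue,
	.owner		= THIS_MODULE,
};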
117
118 /* Protects the list of registered TC modules. It is a pure SMP lock. */
119 static DEFINE_RWLOCK(qdisc_mod_lock);
120
121
122 /************************************************
123 * Queueing disciplines manipulation. *
124 ************************************************/
125
126
127 /* The list of all installed queueing disciplines. */
128
129 static struct Qdisc_ops *qdisc_base;
130
131 /* Register/unregister queueing discipline */
132
133 int register_qdisc(struct Qdisc_ops *qops)
134 {
135 struct Qdisc_ops *q, **qp;
136 int rc = -EEXIST;
137
138 write_lock(&qdisc_mod_lock);
139 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
140 if (!strcmp(qops->id, q->id))
141 goto out;
142
143 if (qops->enqueue == NULL)
144 qops->enqueue = noop_qdisc_ops.enqueue;
145 if (qops->peek == NULL) {
146 if (qops->dequeue == NULL)
147 qops->peek = noop_qdisc_ops.peek;
148 else
149 goto out_einval;
150 }
151 if (qops->dequeue == NULL)
152 qops->dequeue = noop_qdisc_ops.dequeue;
153
154 if (qops->cl_ops) {
155 const struct Qdisc_class_ops *cops = qops->cl_ops;
156
157 if (!(cops->find && cops->walk && cops->leaf))
158 goto out_einval;
159
160 if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf))
161 goto out_einval;
162 }
163
164 qops->next = NULL;
165 *qp = qops;
166 rc = 0;
167 out:
168 write_unlock(&qdisc_mod_lock);
169 return rc;
170
171 out_einval:
172 rc = -EINVAL;
173 goto out;
174 }
175 EXPORT_SYMBOL(register_qdisc);
176
177 int unregister_qdisc(struct Qdisc_ops *qops)
178 {
179 struct Qdisc_ops *q, **qp;
180 int err = -ENOENT;
181
182 write_lock(&qdisc_mod_lock);
183 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
184 if (q == qops)
185 break;
186 if (q) {
187 *qp = q->next;
188 q->next = NULL;
189 err = 0;
190 }
191 write_unlock(&qdisc_mod_lock);
192 return err;
193 }
194 EXPORT_SYMBOL(unregister_qdisc);
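
/*
 * Sketch of how a scheduler module typically drives the register/unregister
 * pair above (illustrative only; example_fifo_qdisc_ops is the made-up ops
 * table sketched earlier, and a real module would wire these up with
 * module_init()/module_exit()).
 */
static int __init example_fifo_module_init(void)
{
	return register_qdisc(&example_fifo_qdisc_ops);
}

static void __exit example_fifo_module_exit(void)
{
	unregister_qdisc(&example_fifo_qdisc_ops);
}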
195
196 /* Get default qdisc if not otherwise specified */
197 void qdisc_get_default(char *name, size_t len)
198 {
199 read_lock(&qdisc_mod_lock);
200 strlcpy(name, default_qdisc_ops->id, len);
201 read_unlock(&qdisc_mod_lock);
202 }
203
204 static struct Qdisc_ops *qdisc_lookup_default(const char *name)
205 {
206 struct Qdisc_ops *q = NULL;
207
208 for (q = qdisc_base; q; q = q->next) {
209 if (!strcmp(name, q->id)) {
210 if (!try_module_get(q->owner))
211 q = NULL;
212 break;
213 }
214 }
215
216 return q;
217 }
218
219 /* Set new default qdisc to use */
220 int qdisc_set_default(const char *name)
221 {
222 const struct Qdisc_ops *ops;
223
224 if (!capable(CAP_NET_ADMIN))
225 return -EPERM;
226
227 write_lock(&qdisc_mod_lock);
228 ops = qdisc_lookup_default(name);
229 if (!ops) {
230 /* Not found, drop lock and try to load module */
231 write_unlock(&qdisc_mod_lock);
232 request_module("sch_%s", name);
233 write_lock(&qdisc_mod_lock);
234
235 ops = qdisc_lookup_default(name);
236 }
237
238 if (ops) {
239 /* Set new default */
240 module_put(default_qdisc_ops->owner);
241 default_qdisc_ops = ops;
242 }
243 write_unlock(&qdisc_mod_lock);
244
245 return ops ? 0 : -ENOENT;
246 }
247
248 #ifdef CONFIG_NET_SCH_DEFAULT
249 /* Set default value from kernel config */
250 static int __init sch_default_qdisc(void)
251 {
252 return qdisc_set_default(CONFIG_DEFAULT_NET_SCH);
253 }
254 late_initcall(sch_default_qdisc);
255 #endif
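
/*
 * Sketch of a caller flipping the default qdisc (roughly what the
 * net.core.default_qdisc sysctl handler does with these two helpers;
 * the function name is illustrative).
 */
static int example_switch_default_qdisc(const char *name)
{
	char cur[IFNAMSIZ];

	qdisc_get_default(cur, sizeof(cur));
	if (!strcmp(cur, name))
		return 0;	/* already the default */

	/* may end up request_module()-ing sch_<name> */
	return qdisc_set_default(name);
}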
256
257 /* We know the handle. Find the qdisc among all qdiscs attached to the device
258 * (the root qdisc, all its children, children of children, etc.)
259 * Note: the caller either holds the rtnl lock or rcu_read_lock()
260 */
261
262 static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
263 {
264 struct Qdisc *q;
265
266 if (!qdisc_dev(root))
267 return (root->handle == handle ? root : NULL);
268
269 if (!(root->flags & TCQ_F_BUILTIN) &&
270 root->handle == handle)
271 return root;
272
273 hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle) {
274 if (q->handle == handle)
275 return q;
276 }
277 return NULL;
278 }
279
280 void qdisc_hash_add(struct Qdisc *q, bool invisible)
281 {
282 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
283 ASSERT_RTNL();
284 hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
285 if (invisible)
286 q->flags |= TCQ_F_INVISIBLE;
287 }
288 }
289 EXPORT_SYMBOL(qdisc_hash_add);
290
291 void qdisc_hash_del(struct Qdisc *q)
292 {
293 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
294 ASSERT_RTNL();
295 hash_del_rcu(&q->hash);
296 }
297 }
298 EXPORT_SYMBOL(qdisc_hash_del);
299
300 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
301 {
302 struct Qdisc *q;
303
304 if (!handle)
305 return NULL;
306 q = qdisc_match_from_root(dev->qdisc, handle);
307 if (q)
308 goto out;
309
310 if (dev_ingress_queue(dev))
311 q = qdisc_match_from_root(
312 dev_ingress_queue(dev)->qdisc_sleeping,
313 handle);
314 out:
315 return q;
316 }
317
318 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
319 {
320 unsigned long cl;
321 struct Qdisc *leaf;
322 const struct Qdisc_class_ops *cops = p->ops->cl_ops;
323
324 if (cops == NULL)
325 return NULL;
326 cl = cops->find(p, classid);
327
328 if (cl == 0)
329 return NULL;
330 leaf = cops->leaf(p, cl);
331 return leaf;
332 }
333
334 /* Find queueing discipline by name */
335
336 static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
337 {
338 struct Qdisc_ops *q = NULL;
339
340 if (kind) {
341 read_lock(&qdisc_mod_lock);
342 for (q = qdisc_base; q; q = q->next) {
343 if (nla_strcmp(kind, q->id) == 0) {
344 if (!try_module_get(q->owner))
345 q = NULL;
346 break;
347 }
348 }
349 read_unlock(&qdisc_mod_lock);
350 }
351 return q;
352 }
353
354 /* The linklayer setting was not transferred from iproute2 in older
355 * versions, and the rate table lookup system has been dropped from
356 * the kernel. To stay backward compatible with older iproute2 tc
357 * utils, we detect the linklayer setting by checking whether the rate
358 * table was modified.
359 *
360 * For linklayer ATM table entries, the rate table will be aligned to
361 * 48 bytes, thus some table entries will contain the same value. The
362 * mpu (min packet unit) is also encoded into the old rate table, thus
363 * starting from the mpu, we find the low and high table entries for
364 * mapping this cell. If these entries contain the same value, then
365 * the rate table has been modified for linklayer ATM.
366 *
367 * This is done by rounding the mpu up to a 48-byte cell boundary,
368 * then rounding up to the next cell, computing the table entry one below,
369 * and comparing the two (a worked example follows the function below).
370 */
371 static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
372 {
373 int low = roundup(r->mpu, 48);
374 int high = roundup(low+1, 48);
375 int cell_low = low >> r->cell_log;
376 int cell_high = (high >> r->cell_log) - 1;
377
378 /* rtab is too inaccurate at rates > 100Mbit/s */
379 if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
380 pr_debug("TC linklayer: Giving up ATM detection\n");
381 return TC_LINKLAYER_ETHERNET;
382 }
383
384 if ((cell_high > cell_low) && (cell_high < 256)
385 && (rtab[cell_low] == rtab[cell_high])) {
386 pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
387 cell_low, cell_high, rtab[cell_high]);
388 return TC_LINKLAYER_ATM;
389 }
390 return TC_LINKLAYER_ETHERNET;
391 }
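
/*
 * Worked example for __detect_linklayer() with made-up numbers (and a rate
 * below the 100 Mbit/s cutoff): say r->mpu = 96 and r->cell_log = 3.  Then
 * low = roundup(96, 48) = 96, high = roundup(97, 48) = 144,
 * cell_low = 96 >> 3 = 12 and cell_high = (144 >> 3) - 1 = 17.  An
 * ATM-aligned table charges every size in the 97..144 range the same three
 * 48-byte cells, so the entries selected by cell_low and cell_high end up
 * equal and we report TC_LINKLAYER_ATM; an unmodified Ethernet table would
 * give the two entries different values.
 */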
392
393 static struct qdisc_rate_table *qdisc_rtab_list;
394
395 struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
396 struct nlattr *tab)
397 {
398 struct qdisc_rate_table *rtab;
399
400 if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
401 nla_len(tab) != TC_RTAB_SIZE)
402 return NULL;
403
404 for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
405 if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
406 !memcmp(&rtab->data, nla_data(tab), 1024)) {
407 rtab->refcnt++;
408 return rtab;
409 }
410 }
411
412 rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
413 if (rtab) {
414 rtab->rate = *r;
415 rtab->refcnt = 1;
416 memcpy(rtab->data, nla_data(tab), 1024);
417 if (r->linklayer == TC_LINKLAYER_UNAWARE)
418 r->linklayer = __detect_linklayer(r, rtab->data);
419 rtab->next = qdisc_rtab_list;
420 qdisc_rtab_list = rtab;
421 }
422 return rtab;
423 }
424 EXPORT_SYMBOL(qdisc_get_rtab);
425
426 void qdisc_put_rtab(struct qdisc_rate_table *tab)
427 {
428 struct qdisc_rate_table *rtab, **rtabp;
429
430 if (!tab || --tab->refcnt)
431 return;
432
433 for (rtabp = &qdisc_rtab_list;
434 (rtab = *rtabp) != NULL;
435 rtabp = &rtab->next) {
436 if (rtab == tab) {
437 *rtabp = rtab->next;
438 kfree(rtab);
439 return;
440 }
441 }
442 }
443 EXPORT_SYMBOL(qdisc_put_rtab);
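
/*
 * Sketch of the usual get/put pairing for rate tables (illustrative names;
 * this loosely mirrors how shaping qdiscs handle a rate-table attribute):
 * take a reference while (re)configuring, drop the old one, and drop the
 * last reference in ->destroy().
 */
static int example_replace_rtab(struct qdisc_rate_table **slot,
				struct tc_ratespec *r, struct nlattr *tab)
{
	struct qdisc_rate_table *rtab = qdisc_get_rtab(r, tab);

	if (!rtab)
		return -EINVAL;

	qdisc_put_rtab(*slot);	/* NULL-safe: drops any previous table */
	*slot = rtab;
	return 0;
}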
444
445 static LIST_HEAD(qdisc_stab_list);
446
447 static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
448 [TCA_STAB_BASE] = { .len = sizeof(struct tc_sizespec) },
449 [TCA_STAB_DATA] = { .type = NLA_BINARY },
450 };
451
452 static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
453 {
454 struct nlattr *tb[TCA_STAB_MAX + 1];
455 struct qdisc_size_table *stab;
456 struct tc_sizespec *s;
457 unsigned int tsize = 0;
458 u16 *tab = NULL;
459 int err;
460
461 err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy, NULL);
462 if (err < 0)
463 return ERR_PTR(err);
464 if (!tb[TCA_STAB_BASE])
465 return ERR_PTR(-EINVAL);
466
467 s = nla_data(tb[TCA_STAB_BASE]);
468
469 if (s->tsize > 0) {
470 if (!tb[TCA_STAB_DATA])
471 return ERR_PTR(-EINVAL);
472 tab = nla_data(tb[TCA_STAB_DATA]);
473 tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
474 }
475
476 if (tsize != s->tsize || (!tab && tsize > 0))
477 return ERR_PTR(-EINVAL);
478
479 list_for_each_entry(stab, &qdisc_stab_list, list) {
480 if (memcmp(&stab->szopts, s, sizeof(*s)))
481 continue;
482 if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
483 continue;
484 stab->refcnt++;
485 return stab;
486 }
487
488 stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
489 if (!stab)
490 return ERR_PTR(-ENOMEM);
491
492 stab->refcnt = 1;
493 stab->szopts = *s;
494 if (tsize > 0)
495 memcpy(stab->data, tab, tsize * sizeof(u16));
496
497 list_add_tail(&stab->list, &qdisc_stab_list);
498
499 return stab;
500 }
501
502 static void stab_kfree_rcu(struct rcu_head *head)
503 {
504 kfree(container_of(head, struct qdisc_size_table, rcu));
505 }
506
507 void qdisc_put_stab(struct qdisc_size_table *tab)
508 {
509 if (!tab)
510 return;
511
512 if (--tab->refcnt == 0) {
513 list_del(&tab->list);
514 call_rcu_bh(&tab->rcu, stab_kfree_rcu);
515 }
516 }
517 EXPORT_SYMBOL(qdisc_put_stab);
518
519 static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
520 {
521 struct nlattr *nest;
522
523 nest = nla_nest_start(skb, TCA_STAB);
524 if (nest == NULL)
525 goto nla_put_failure;
526 if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
527 goto nla_put_failure;
528 nla_nest_end(skb, nest);
529
530 return skb->len;
531
532 nla_put_failure:
533 return -1;
534 }
535
536 void __qdisc_calculate_pkt_len(struct sk_buff *skb,
537 const struct qdisc_size_table *stab)
538 {
539 int pkt_len, slot;
540
541 pkt_len = skb->len + stab->szopts.overhead;
542 if (unlikely(!stab->szopts.tsize))
543 goto out;
544
545 slot = pkt_len + stab->szopts.cell_align;
546 if (unlikely(slot < 0))
547 slot = 0;
548
549 slot >>= stab->szopts.cell_log;
550 if (likely(slot < stab->szopts.tsize))
551 pkt_len = stab->data[slot];
552 else
553 pkt_len = stab->data[stab->szopts.tsize - 1] *
554 (slot / stab->szopts.tsize) +
555 stab->data[slot % stab->szopts.tsize];
556
557 pkt_len <<= stab->szopts.size_log;
558 out:
559 if (unlikely(pkt_len < 1))
560 pkt_len = 1;
561 qdisc_skb_cb(skb)->pkt_len = pkt_len;
562 }
563 EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
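
/*
 * Worked example for __qdisc_calculate_pkt_len() with made-up stab
 * parameters: overhead = 24, cell_align = 0, cell_log = 6, size_log = 0
 * and tsize = 512.  For an skb of 1000 bytes, pkt_len = 1000 + 24 = 1024,
 * slot = 1024 >> 6 = 16, so the qdisc will account stab->data[16] bytes
 * (shifted left by size_log, here a no-op) instead of the raw skb->len.
 */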
564
565 void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
566 {
567 if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
568 pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
569 txt, qdisc->ops->id, qdisc->handle >> 16);
570 qdisc->flags |= TCQ_F_WARN_NONWC;
571 }
572 }
573 EXPORT_SYMBOL(qdisc_warn_nonwc);
574
575 static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
576 {
577 struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
578 timer);
579
580 rcu_read_lock();
581 __netif_schedule(qdisc_root(wd->qdisc));
582 rcu_read_unlock();
583
584 return HRTIMER_NORESTART;
585 }
586
587 void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
588 {
589 hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
590 wd->timer.function = qdisc_watchdog;
591 wd->qdisc = qdisc;
592 }
593 EXPORT_SYMBOL(qdisc_watchdog_init);
594
595 void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
596 {
597 if (test_bit(__QDISC_STATE_DEACTIVATED,
598 &qdisc_root_sleeping(wd->qdisc)->state))
599 return;
600
601 if (wd->last_expires == expires)
602 return;
603
604 wd->last_expires = expires;
605 hrtimer_start(&wd->timer,
606 ns_to_ktime(expires),
607 HRTIMER_MODE_ABS_PINNED);
608 }
609 EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);
610
611 void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
612 {
613 hrtimer_cancel(&wd->timer);
614 }
615 EXPORT_SYMBOL(qdisc_watchdog_cancel);
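
/*
 * Sketch of the usual watchdog pattern (illustrative "example_" names;
 * roughly how shaping qdiscs such as tbf use the helpers above): when
 * ->dequeue() decides the head packet may only leave at some future time,
 * it arms the watchdog and returns NULL, and the timer later makes the
 * device poll the qdisc again.
 */
struct example_shaper_data {
	struct qdisc_watchdog	watchdog;
	u64			next_send_ns;	/* filled in by the shaping logic */
};

static struct sk_buff *example_shaper_dequeue(struct Qdisc *sch)
{
	struct example_shaper_data *q = qdisc_priv(sch);

	if (ktime_get_ns() < q->next_send_ns) {
		qdisc_watchdog_schedule_ns(&q->watchdog, q->next_send_ns);
		return NULL;	/* not empty, just not allowed to send yet */
	}
	return qdisc_dequeue_head(sch);
}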
616
617 static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
618 {
619 struct hlist_head *h;
620 unsigned int i;
621
622 h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL);
623
624 if (h != NULL) {
625 for (i = 0; i < n; i++)
626 INIT_HLIST_HEAD(&h[i]);
627 }
628 return h;
629 }
630
631 void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
632 {
633 struct Qdisc_class_common *cl;
634 struct hlist_node *next;
635 struct hlist_head *nhash, *ohash;
636 unsigned int nsize, nmask, osize;
637 unsigned int i, h;
638
639 /* Rehash when load factor exceeds 0.75 */
640 if (clhash->hashelems * 4 <= clhash->hashsize * 3)
641 return;
642 nsize = clhash->hashsize * 2;
643 nmask = nsize - 1;
644 nhash = qdisc_class_hash_alloc(nsize);
645 if (nhash == NULL)
646 return;
647
648 ohash = clhash->hash;
649 osize = clhash->hashsize;
650
651 sch_tree_lock(sch);
652 for (i = 0; i < osize; i++) {
653 hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
654 h = qdisc_class_hash(cl->classid, nmask);
655 hlist_add_head(&cl->hnode, &nhash[h]);
656 }
657 }
658 clhash->hash = nhash;
659 clhash->hashsize = nsize;
660 clhash->hashmask = nmask;
661 sch_tree_unlock(sch);
662
663 kvfree(ohash);
664 }
665 EXPORT_SYMBOL(qdisc_class_hash_grow);
666
667 int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
668 {
669 unsigned int size = 4;
670
671 clhash->hash = qdisc_class_hash_alloc(size);
672 if (clhash->hash == NULL)
673 return -ENOMEM;
674 clhash->hashsize = size;
675 clhash->hashmask = size - 1;
676 clhash->hashelems = 0;
677 return 0;
678 }
679 EXPORT_SYMBOL(qdisc_class_hash_init);
680
681 void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
682 {
683 kvfree(clhash->hash);
684 }
685 EXPORT_SYMBOL(qdisc_class_hash_destroy);
686
687 void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
688 struct Qdisc_class_common *cl)
689 {
690 unsigned int h;
691
692 INIT_HLIST_NODE(&cl->hnode);
693 h = qdisc_class_hash(cl->classid, clhash->hashmask);
694 hlist_add_head(&cl->hnode, &clhash->hash[h]);
695 clhash->hashelems++;
696 }
697 EXPORT_SYMBOL(qdisc_class_hash_insert);
698
699 void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
700 struct Qdisc_class_common *cl)
701 {
702 hlist_del(&cl->hnode);
703 clhash->hashelems--;
704 }
705 EXPORT_SYMBOL(qdisc_class_hash_remove);
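
/*
 * Sketch of how a classful qdisc typically uses the class-hash helpers
 * above (illustrative names): each class embeds a Qdisc_class_common at
 * its head, keyed by classid.
 */
struct example_class {
	struct Qdisc_class_common common;	/* classid + hash linkage */
	/* scheduler-specific per-class state would follow */
};

static void example_attach_class(struct Qdisc *sch,
				 struct Qdisc_class_hash *clhash,
				 struct example_class *cl, u32 classid)
{
	cl->common.classid = classid;
	qdisc_class_hash_insert(clhash, &cl->common);
	/* opportunistically rehash once the table gets crowded */
	qdisc_class_hash_grow(sch, clhash);
}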
706
707 /* Allocate a unique handle from the space managed by the kernel.
708 * Possible range is [8000-FFFF]:0000 (0x8000 values)
709 */
710 static u32 qdisc_alloc_handle(struct net_device *dev)
711 {
712 int i = 0x8000;
713 static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
714
715 do {
716 autohandle += TC_H_MAKE(0x10000U, 0);
717 if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
718 autohandle = TC_H_MAKE(0x80000000U, 0);
719 if (!qdisc_lookup(dev, autohandle))
720 return autohandle;
721 cond_resched();
722 } while (--i > 0);
723
724 return 0;
725 }
726
727 void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
728 unsigned int len)
729 {
730 const struct Qdisc_class_ops *cops;
731 unsigned long cl;
732 u32 parentid;
733 bool notify;
734 int drops;
735
736 if (n == 0 && len == 0)
737 return;
738 drops = max_t(int, n, 0);
739 rcu_read_lock();
740 while ((parentid = sch->parent)) {
741 if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
742 break;
743
744 if (sch->flags & TCQ_F_NOPARENT)
745 break;
746 /* Notify parent qdisc only if child qdisc becomes empty.
747 *
748 * If child was empty even before update then backlog
749 * counter is screwed and we skip notification because
750 * parent class is already passive.
751 */
752 notify = !sch->q.qlen && !WARN_ON_ONCE(!n);
753 /* TODO: perform the search on a per txq basis */
754 sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
755 if (sch == NULL) {
756 WARN_ON_ONCE(parentid != TC_H_ROOT);
757 break;
758 }
759 cops = sch->ops->cl_ops;
760 if (notify && cops->qlen_notify) {
761 cl = cops->find(sch, parentid);
762 cops->qlen_notify(sch, cl);
763 }
764 sch->q.qlen -= n;
765 sch->qstats.backlog -= len;
766 __qdisc_qstats_drop(sch, drops);
767 }
768 rcu_read_unlock();
769 }
770 EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
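
/*
 * Sketch of a typical caller (illustrative; qdiscs do something like this
 * in ->change() when their limit shrinks, usually with their own dequeue
 * path and rtnl_kfree_skbs()): packets dropped locally are reported
 * upwards so that ancestor qlen/backlog counters stay coherent with this
 * qdisc's own counters, which the dequeue helper already adjusted.
 */
static void example_shrink_to_limit(struct Qdisc *sch, unsigned int limit)
{
	unsigned int dropped = 0, dropped_len = 0;

	while (sch->q.qlen > limit) {
		struct sk_buff *skb = qdisc_dequeue_head(sch);

		if (!skb)
			break;
		dropped++;
		dropped_len += qdisc_pkt_len(skb);
		kfree_skb(skb);
	}
	qdisc_tree_reduce_backlog(sch, dropped, dropped_len);
}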
771
772 static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
773 u32 portid, u32 seq, u16 flags, int event)
774 {
775 struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
776 struct gnet_stats_queue __percpu *cpu_qstats = NULL;
777 struct tcmsg *tcm;
778 struct nlmsghdr *nlh;
779 unsigned char *b = skb_tail_pointer(skb);
780 struct gnet_dump d;
781 struct qdisc_size_table *stab;
782 __u32 qlen;
783
784 cond_resched();
785 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
786 if (!nlh)
787 goto out_nlmsg_trim;
788 tcm = nlmsg_data(nlh);
789 tcm->tcm_family = AF_UNSPEC;
790 tcm->tcm__pad1 = 0;
791 tcm->tcm__pad2 = 0;
792 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
793 tcm->tcm_parent = clid;
794 tcm->tcm_handle = q->handle;
795 tcm->tcm_info = refcount_read(&q->refcnt);
796 if (nla_put_string(skb, TCA_KIND, q->ops->id))
797 goto nla_put_failure;
798 if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
799 goto nla_put_failure;
800 if (q->ops->dump && q->ops->dump(q, skb) < 0)
801 goto nla_put_failure;
802
803 qlen = qdisc_qlen_sum(q);
804
805 stab = rtnl_dereference(q->stab);
806 if (stab && qdisc_dump_stab(skb, stab) < 0)
807 goto nla_put_failure;
808
809 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
810 NULL, &d, TCA_PAD) < 0)
811 goto nla_put_failure;
812
813 if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
814 goto nla_put_failure;
815
816 if (qdisc_is_percpu_stats(q)) {
817 cpu_bstats = q->cpu_bstats;
818 cpu_qstats = q->cpu_qstats;
819 }
820
821 if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q),
822 &d, cpu_bstats, &q->bstats) < 0 ||
823 gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
824 gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
825 goto nla_put_failure;
826
827 if (gnet_stats_finish_copy(&d) < 0)
828 goto nla_put_failure;
829
830 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
831 return skb->len;
832
833 out_nlmsg_trim:
834 nla_put_failure:
835 nlmsg_trim(skb, b);
836 return -1;
837 }
838
839 static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
840 {
841 if (q->flags & TCQ_F_BUILTIN)
842 return true;
843 if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
844 return true;
845
846 return false;
847 }
848
849 static int qdisc_notify(struct net *net, struct sk_buff *oskb,
850 struct nlmsghdr *n, u32 clid,
851 struct Qdisc *old, struct Qdisc *new)
852 {
853 struct sk_buff *skb;
854 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
855
856 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
857 if (!skb)
858 return -ENOBUFS;
859
860 if (old && !tc_qdisc_dump_ignore(old, false)) {
861 if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
862 0, RTM_DELQDISC) < 0)
863 goto err_out;
864 }
865 if (new && !tc_qdisc_dump_ignore(new, false)) {
866 if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
867 old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
868 goto err_out;
869 }
870
871 if (skb->len)
872 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
873 n->nlmsg_flags & NLM_F_ECHO);
874
875 err_out:
876 kfree_skb(skb);
877 return -EINVAL;
878 }
879
880 static void notify_and_destroy(struct net *net, struct sk_buff *skb,
881 struct nlmsghdr *n, u32 clid,
882 struct Qdisc *old, struct Qdisc *new)
883 {
884 if (new || old)
885 qdisc_notify(net, skb, n, clid, old, new);
886
887 if (old)
888 qdisc_destroy(old);
889 }
890
891 /* Graft qdisc "new" to class "classid" of qdisc "parent" or
892 * to device "dev".
893 *
894 * When appropriate, send a netlink notification using 'skb'
895 * and 'n'.
896 *
897 * On success, destroy the old qdisc.
898 */
899
900 static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
901 struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
902 struct Qdisc *new, struct Qdisc *old)
903 {
904 struct Qdisc *q = old;
905 struct net *net = dev_net(dev);
906 int err = 0;
907
908 if (parent == NULL) {
909 unsigned int i, num_q, ingress;
910
911 ingress = 0;
912 num_q = dev->num_tx_queues;
913 if ((q && q->flags & TCQ_F_INGRESS) ||
914 (new && new->flags & TCQ_F_INGRESS)) {
915 num_q = 1;
916 ingress = 1;
917 if (!dev_ingress_queue(dev))
918 return -ENOENT;
919 }
920
921 if (dev->flags & IFF_UP)
922 dev_deactivate(dev);
923
924 if (new && new->ops->attach)
925 goto skip;
926
927 for (i = 0; i < num_q; i++) {
928 struct netdev_queue *dev_queue = dev_ingress_queue(dev);
929
930 if (!ingress)
931 dev_queue = netdev_get_tx_queue(dev, i);
932
933 old = dev_graft_qdisc(dev_queue, new);
934 if (new && i > 0)
935 qdisc_refcount_inc(new);
936
937 if (!ingress)
938 qdisc_destroy(old);
939 }
940
941 skip:
942 if (!ingress) {
943 notify_and_destroy(net, skb, n, classid,
944 dev->qdisc, new);
945 if (new && !new->ops->attach)
946 qdisc_refcount_inc(new);
947 dev->qdisc = new ? : &noop_qdisc;
948
949 if (new && new->ops->attach)
950 new->ops->attach(new);
951 } else {
952 notify_and_destroy(net, skb, n, classid, old, new);
953 }
954
955 if (dev->flags & IFF_UP)
956 dev_activate(dev);
957 } else {
958 const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
959
960 /* Only support running class lockless if parent is lockless */
961 if (new && (new->flags & TCQ_F_NOLOCK) &&
962 parent && !(parent->flags & TCQ_F_NOLOCK))
963 new->flags &= ~TCQ_F_NOLOCK;
964
965 err = -EOPNOTSUPP;
966 if (cops && cops->graft) {
967 unsigned long cl = cops->find(parent, classid);
968
969 if (cl)
970 err = cops->graft(parent, cl, new, &old);
971 else
972 err = -ENOENT;
973 }
974 if (!err)
975 notify_and_destroy(net, skb, n, classid, old, new);
976 }
977 return err;
978 }
979
980 /* lockdep annotation is needed for ingress; egress gets it only for name */
981 static struct lock_class_key qdisc_tx_lock;
982 static struct lock_class_key qdisc_rx_lock;
983
984 /*
985 Allocate and initialize new qdisc.
986
987 Parameters are passed via opt.
988 */
989
990 static struct Qdisc *qdisc_create(struct net_device *dev,
991 struct netdev_queue *dev_queue,
992 struct Qdisc *p, u32 parent, u32 handle,
993 struct nlattr **tca, int *errp)
994 {
995 int err;
996 struct nlattr *kind = tca[TCA_KIND];
997 struct Qdisc *sch;
998 struct Qdisc_ops *ops;
999 struct qdisc_size_table *stab;
1000
1001 ops = qdisc_lookup_ops(kind);
1002 #ifdef CONFIG_MODULES
1003 if (ops == NULL && kind != NULL) {
1004 char name[IFNAMSIZ];
1005 if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
1006 /* We dropped the RTNL semaphore in order to
1007 * perform the module load. So, even if we
1008 * succeeded in loading the module we have to
1009 * tell the caller to replay the request. We
1010 * indicate this using -EAGAIN.
1011 * We replay the request because the device may
1012 * go away in the mean time.
1013 */
1014 rtnl_unlock();
1015 request_module("sch_%s", name);
1016 rtnl_lock();
1017 ops = qdisc_lookup_ops(kind);
1018 if (ops != NULL) {
1019 /* We will try qdisc_lookup_ops again,
1020 * so don't keep a reference.
1021 */
1022 module_put(ops->owner);
1023 err = -EAGAIN;
1024 goto err_out;
1025 }
1026 }
1027 }
1028 #endif
1029
1030 err = -ENOENT;
1031 if (!ops)
1032 goto err_out;
1033
1034 sch = qdisc_alloc(dev_queue, ops);
1035 if (IS_ERR(sch)) {
1036 err = PTR_ERR(sch);
1037 goto err_out2;
1038 }
1039
1040 sch->parent = parent;
1041
1042 if (handle == TC_H_INGRESS) {
1043 sch->flags |= TCQ_F_INGRESS;
1044 handle = TC_H_MAKE(TC_H_INGRESS, 0);
1045 lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
1046 } else {
1047 if (handle == 0) {
1048 handle = qdisc_alloc_handle(dev);
1049 err = -ENOMEM;
1050 if (handle == 0)
1051 goto err_out3;
1052 }
1053 lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
1054 if (!netif_is_multiqueue(dev))
1055 sch->flags |= TCQ_F_ONETXQUEUE;
1056 }
1057
1058 sch->handle = handle;
1059
1060 /* This exists to keep backward compatibility with a userspace
1061 * loophole, which allowed userspace to get the IFF_NO_QUEUE
1062 * facility on older kernels by setting tx_queue_len=0 (prior
1063 * to qdisc init), and then forgetting to reinit tx_queue_len
1064 * before attaching a qdisc again.
1065 */
1066 if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
1067 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
1068 netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
1069 }
1070
1071 if (ops->init) {
1072 err = ops->init(sch, tca[TCA_OPTIONS]);
1073 if (err != 0)
1074 goto err_out5;
1075 }
1076
1077 if (qdisc_is_percpu_stats(sch)) {
1078 sch->cpu_bstats =
1079 netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
1080 if (!sch->cpu_bstats)
1081 goto err_out4;
1082
1083 sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
1084 if (!sch->cpu_qstats)
1085 goto err_out4;
1086 }
1087
1088 if (tca[TCA_STAB]) {
1089 stab = qdisc_get_stab(tca[TCA_STAB]);
1090 if (IS_ERR(stab)) {
1091 err = PTR_ERR(stab);
1092 goto err_out4;
1093 }
1094 rcu_assign_pointer(sch->stab, stab);
1095 }
1096 if (tca[TCA_RATE]) {
1097 seqcount_t *running;
1098
1099 err = -EOPNOTSUPP;
1100 if (sch->flags & TCQ_F_MQROOT)
1101 goto err_out4;
1102
1103 if (sch->parent != TC_H_ROOT &&
1104 !(sch->flags & TCQ_F_INGRESS) &&
1105 (!p || !(p->flags & TCQ_F_MQROOT)))
1106 running = qdisc_root_sleeping_running(sch);
1107 else
1108 running = &sch->running;
1109
1110 err = gen_new_estimator(&sch->bstats,
1111 sch->cpu_bstats,
1112 &sch->rate_est,
1113 NULL,
1114 running,
1115 tca[TCA_RATE]);
1116 if (err)
1117 goto err_out4;
1118 }
1119
1120 qdisc_hash_add(sch, false);
1121
1122 return sch;
1123
1124 err_out5:
1125 /* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
1126 if (ops->destroy)
1127 ops->destroy(sch);
1128 err_out3:
1129 dev_put(dev);
1130 kfree((char *) sch - sch->padded);
1131 err_out2:
1132 module_put(ops->owner);
1133 err_out:
1134 *errp = err;
1135 return NULL;
1136
1137 err_out4:
1138 free_percpu(sch->cpu_bstats);
1139 free_percpu(sch->cpu_qstats);
1140 /*
1141 * Any broken qdiscs that would require a ops->reset() here?
1142 * The qdisc was never in action so it shouldn't be necessary.
1143 */
1144 qdisc_put_stab(rtnl_dereference(sch->stab));
1145 if (ops->destroy)
1146 ops->destroy(sch);
1147 goto err_out3;
1148 }
1149
1150 static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
1151 {
1152 struct qdisc_size_table *ostab, *stab = NULL;
1153 int err = 0;
1154
1155 if (tca[TCA_OPTIONS]) {
1156 if (!sch->ops->change)
1157 return -EINVAL;
1158 err = sch->ops->change(sch, tca[TCA_OPTIONS]);
1159 if (err)
1160 return err;
1161 }
1162
1163 if (tca[TCA_STAB]) {
1164 stab = qdisc_get_stab(tca[TCA_STAB]);
1165 if (IS_ERR(stab))
1166 return PTR_ERR(stab);
1167 }
1168
1169 ostab = rtnl_dereference(sch->stab);
1170 rcu_assign_pointer(sch->stab, stab);
1171 qdisc_put_stab(ostab);
1172
1173 if (tca[TCA_RATE]) {
1174 /* NB: ignores errors from replace_estimator
1175 because change can't be undone. */
1176 if (sch->flags & TCQ_F_MQROOT)
1177 goto out;
1178 gen_replace_estimator(&sch->bstats,
1179 sch->cpu_bstats,
1180 &sch->rate_est,
1181 NULL,
1182 qdisc_root_sleeping_running(sch),
1183 tca[TCA_RATE]);
1184 }
1185 out:
1186 return 0;
1187 }
1188
1189 struct check_loop_arg {
1190 struct qdisc_walker w;
1191 struct Qdisc *p;
1192 int depth;
1193 };
1194
1195 static int check_loop_fn(struct Qdisc *q, unsigned long cl,
1196 struct qdisc_walker *w);
1197
1198 static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
1199 {
1200 struct check_loop_arg arg;
1201
1202 if (q->ops->cl_ops == NULL)
1203 return 0;
1204
1205 arg.w.stop = arg.w.skip = arg.w.count = 0;
1206 arg.w.fn = check_loop_fn;
1207 arg.depth = depth;
1208 arg.p = p;
1209 q->ops->cl_ops->walk(q, &arg.w);
1210 return arg.w.stop ? -ELOOP : 0;
1211 }
1212
1213 static int
1214 check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
1215 {
1216 struct Qdisc *leaf;
1217 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1218 struct check_loop_arg *arg = (struct check_loop_arg *)w;
1219
1220 leaf = cops->leaf(q, cl);
1221 if (leaf) {
1222 if (leaf == arg->p || arg->depth > 7)
1223 return -ELOOP;
1224 return check_loop(leaf, arg->p, arg->depth + 1);
1225 }
1226 return 0;
1227 }
1228
1229 /*
1230 * Delete/get qdisc.
1231 */
1232
1233 static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
1234 struct netlink_ext_ack *extack)
1235 {
1236 struct net *net = sock_net(skb->sk);
1237 struct tcmsg *tcm = nlmsg_data(n);
1238 struct nlattr *tca[TCA_MAX + 1];
1239 struct net_device *dev;
1240 u32 clid;
1241 struct Qdisc *q = NULL;
1242 struct Qdisc *p = NULL;
1243 int err;
1244
1245 if ((n->nlmsg_type != RTM_GETQDISC) &&
1246 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1247 return -EPERM;
1248
1249 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
1250 if (err < 0)
1251 return err;
1252
1253 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1254 if (!dev)
1255 return -ENODEV;
1256
1257 clid = tcm->tcm_parent;
1258 if (clid) {
1259 if (clid != TC_H_ROOT) {
1260 if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
1261 p = qdisc_lookup(dev, TC_H_MAJ(clid));
1262 if (!p)
1263 return -ENOENT;
1264 q = qdisc_leaf(p, clid);
1265 } else if (dev_ingress_queue(dev)) {
1266 q = dev_ingress_queue(dev)->qdisc_sleeping;
1267 }
1268 } else {
1269 q = dev->qdisc;
1270 }
1271 if (!q)
1272 return -ENOENT;
1273
1274 if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
1275 return -EINVAL;
1276 } else {
1277 q = qdisc_lookup(dev, tcm->tcm_handle);
1278 if (!q)
1279 return -ENOENT;
1280 }
1281
1282 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
1283 return -EINVAL;
1284
1285 if (n->nlmsg_type == RTM_DELQDISC) {
1286 if (!clid)
1287 return -EINVAL;
1288 if (q->handle == 0)
1289 return -ENOENT;
1290 err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
1291 if (err != 0)
1292 return err;
1293 } else {
1294 qdisc_notify(net, skb, n, clid, NULL, q);
1295 }
1296 return 0;
1297 }
1298
1299 /*
1300 * Create/change qdisc.
1301 */
1302
1303 static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
1304 struct netlink_ext_ack *extack)
1305 {
1306 struct net *net = sock_net(skb->sk);
1307 struct tcmsg *tcm;
1308 struct nlattr *tca[TCA_MAX + 1];
1309 struct net_device *dev;
1310 u32 clid;
1311 struct Qdisc *q, *p;
1312 int err;
1313
1314 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1315 return -EPERM;
1316
1317 replay:
1318 /* Reinit, just in case something touches this. */
1319 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
1320 if (err < 0)
1321 return err;
1322
1323 tcm = nlmsg_data(n);
1324 clid = tcm->tcm_parent;
1325 q = p = NULL;
1326
1327 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1328 if (!dev)
1329 return -ENODEV;
1330
1331
1332 if (clid) {
1333 if (clid != TC_H_ROOT) {
1334 if (clid != TC_H_INGRESS) {
1335 p = qdisc_lookup(dev, TC_H_MAJ(clid));
1336 if (!p)
1337 return -ENOENT;
1338 q = qdisc_leaf(p, clid);
1339 } else if (dev_ingress_queue_create(dev)) {
1340 q = dev_ingress_queue(dev)->qdisc_sleeping;
1341 }
1342 } else {
1343 q = dev->qdisc;
1344 }
1345
1346 /* It may be the default qdisc; ignore it */
1347 if (q && q->handle == 0)
1348 q = NULL;
1349
1350 if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
1351 if (tcm->tcm_handle) {
1352 if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
1353 return -EEXIST;
1354 if (TC_H_MIN(tcm->tcm_handle))
1355 return -EINVAL;
1356 q = qdisc_lookup(dev, tcm->tcm_handle);
1357 if (!q)
1358 goto create_n_graft;
1359 if (n->nlmsg_flags & NLM_F_EXCL)
1360 return -EEXIST;
1361 if (tca[TCA_KIND] &&
1362 nla_strcmp(tca[TCA_KIND], q->ops->id))
1363 return -EINVAL;
1364 if (q == p ||
1365 (p && check_loop(q, p, 0)))
1366 return -ELOOP;
1367 qdisc_refcount_inc(q);
1368 goto graft;
1369 } else {
1370 if (!q)
1371 goto create_n_graft;
1372
1373 /* This magic test requires explanation.
1374 *
1375 * We know that some child q is already
1376 * attached to this parent and we have a choice:
1377 * either to change it or to create/graft a new one.
1378 *
1379 * 1. We are allowed to create/graft only
1380 * if both the CREATE and REPLACE flags are set.
1381 *
1382 * 2. If EXCL is set, the requestor meant that
1383 * the qdisc tcm_handle is not expected
1384 * to exist, so we choose create/graft too.
1385 *
1386 * 3. The last case is when no flags are set.
1387 * Alas, it is a sort of hole in the API; we
1388 * cannot decide what to do unambiguously.
1389 * For now we select create/graft if the
1390 * user gave a KIND which does not match the existing one.
1391 */
1392 if ((n->nlmsg_flags & NLM_F_CREATE) &&
1393 (n->nlmsg_flags & NLM_F_REPLACE) &&
1394 ((n->nlmsg_flags & NLM_F_EXCL) ||
1395 (tca[TCA_KIND] &&
1396 nla_strcmp(tca[TCA_KIND], q->ops->id))))
1397 goto create_n_graft;
1398 }
1399 }
1400 } else {
1401 if (!tcm->tcm_handle)
1402 return -EINVAL;
1403 q = qdisc_lookup(dev, tcm->tcm_handle);
1404 }
1405
1406 /* Change qdisc parameters */
1407 if (!q)
1408 return -ENOENT;
1409 if (n->nlmsg_flags & NLM_F_EXCL)
1410 return -EEXIST;
1411 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
1412 return -EINVAL;
1413 err = qdisc_change(q, tca);
1414 if (err == 0)
1415 qdisc_notify(net, skb, n, clid, NULL, q);
1416 return err;
1417
1418 create_n_graft:
1419 if (!(n->nlmsg_flags & NLM_F_CREATE))
1420 return -ENOENT;
1421 if (clid == TC_H_INGRESS) {
1422 if (dev_ingress_queue(dev))
1423 q = qdisc_create(dev, dev_ingress_queue(dev), p,
1424 tcm->tcm_parent, tcm->tcm_parent,
1425 tca, &err);
1426 else
1427 err = -ENOENT;
1428 } else {
1429 struct netdev_queue *dev_queue;
1430
1431 if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
1432 dev_queue = p->ops->cl_ops->select_queue(p, tcm);
1433 else if (p)
1434 dev_queue = p->dev_queue;
1435 else
1436 dev_queue = netdev_get_tx_queue(dev, 0);
1437
1438 q = qdisc_create(dev, dev_queue, p,
1439 tcm->tcm_parent, tcm->tcm_handle,
1440 tca, &err);
1441 }
1442 if (q == NULL) {
1443 if (err == -EAGAIN)
1444 goto replay;
1445 return err;
1446 }
1447
1448 graft:
1449 err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
1450 if (err) {
1451 if (q)
1452 qdisc_destroy(q);
1453 return err;
1454 }
1455
1456 return 0;
1457 }
1458
1459 static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
1460 struct netlink_callback *cb,
1461 int *q_idx_p, int s_q_idx, bool recur,
1462 bool dump_invisible)
1463 {
1464 int ret = 0, q_idx = *q_idx_p;
1465 struct Qdisc *q;
1466 int b;
1467
1468 if (!root)
1469 return 0;
1470
1471 q = root;
1472 if (q_idx < s_q_idx) {
1473 q_idx++;
1474 } else {
1475 if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
1476 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1477 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1478 RTM_NEWQDISC) <= 0)
1479 goto done;
1480 q_idx++;
1481 }
1482
1483 /* If dumping singletons, there is no qdisc_dev(root) and the singleton
1484 * itself has already been dumped.
1485 *
1486 * If we've already dumped the top-level (ingress) qdisc above and the global
1487 * qdisc hashtable, we don't want to hit it again
1488 */
1489 if (!qdisc_dev(root) || !recur)
1490 goto out;
1491
1492 hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
1493 if (q_idx < s_q_idx) {
1494 q_idx++;
1495 continue;
1496 }
1497 if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
1498 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1499 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1500 RTM_NEWQDISC) <= 0)
1501 goto done;
1502 q_idx++;
1503 }
1504
1505 out:
1506 *q_idx_p = q_idx;
1507 return ret;
1508 done:
1509 ret = -1;
1510 goto out;
1511 }
1512
1513 static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
1514 {
1515 struct net *net = sock_net(skb->sk);
1516 int idx, q_idx;
1517 int s_idx, s_q_idx;
1518 struct net_device *dev;
1519 const struct nlmsghdr *nlh = cb->nlh;
1520 struct nlattr *tca[TCA_MAX + 1];
1521 int err;
1522
1523 s_idx = cb->args[0];
1524 s_q_idx = q_idx = cb->args[1];
1525
1526 idx = 0;
1527 ASSERT_RTNL();
1528
1529 err = nlmsg_parse(nlh, sizeof(struct tcmsg), tca, TCA_MAX, NULL, NULL);
1530 if (err < 0)
1531 return err;
1532
1533 for_each_netdev(net, dev) {
1534 struct netdev_queue *dev_queue;
1535
1536 if (idx < s_idx)
1537 goto cont;
1538 if (idx > s_idx)
1539 s_q_idx = 0;
1540 q_idx = 0;
1541
1542 if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
1543 true, tca[TCA_DUMP_INVISIBLE]) < 0)
1544 goto done;
1545
1546 dev_queue = dev_ingress_queue(dev);
1547 if (dev_queue &&
1548 tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
1549 &q_idx, s_q_idx, false,
1550 tca[TCA_DUMP_INVISIBLE]) < 0)
1551 goto done;
1552
1553 cont:
1554 idx++;
1555 }
1556
1557 done:
1558 cb->args[0] = idx;
1559 cb->args[1] = q_idx;
1560
1561 return skb->len;
1562 }
1563
1564
1565
1566 /************************************************
1567 * Traffic classes manipulation. *
1568 ************************************************/
1569
1570 static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1571 unsigned long cl,
1572 u32 portid, u32 seq, u16 flags, int event)
1573 {
1574 struct tcmsg *tcm;
1575 struct nlmsghdr *nlh;
1576 unsigned char *b = skb_tail_pointer(skb);
1577 struct gnet_dump d;
1578 const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
1579
1580 cond_resched();
1581 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1582 if (!nlh)
1583 goto out_nlmsg_trim;
1584 tcm = nlmsg_data(nlh);
1585 tcm->tcm_family = AF_UNSPEC;
1586 tcm->tcm__pad1 = 0;
1587 tcm->tcm__pad2 = 0;
1588 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1589 tcm->tcm_parent = q->handle;
1590 tcm->tcm_handle = q->handle;
1591 tcm->tcm_info = 0;
1592 if (nla_put_string(skb, TCA_KIND, q->ops->id))
1593 goto nla_put_failure;
1594 if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
1595 goto nla_put_failure;
1596
1597 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1598 NULL, &d, TCA_PAD) < 0)
1599 goto nla_put_failure;
1600
1601 if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
1602 goto nla_put_failure;
1603
1604 if (gnet_stats_finish_copy(&d) < 0)
1605 goto nla_put_failure;
1606
1607 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1608 return skb->len;
1609
1610 out_nlmsg_trim:
1611 nla_put_failure:
1612 nlmsg_trim(skb, b);
1613 return -1;
1614 }
1615
1616 static int tclass_notify(struct net *net, struct sk_buff *oskb,
1617 struct nlmsghdr *n, struct Qdisc *q,
1618 unsigned long cl, int event)
1619 {
1620 struct sk_buff *skb;
1621 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1622
1623 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1624 if (!skb)
1625 return -ENOBUFS;
1626
1627 if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
1628 kfree_skb(skb);
1629 return -EINVAL;
1630 }
1631
1632 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1633 n->nlmsg_flags & NLM_F_ECHO);
1634 }
1635
1636 static int tclass_del_notify(struct net *net,
1637 const struct Qdisc_class_ops *cops,
1638 struct sk_buff *oskb, struct nlmsghdr *n,
1639 struct Qdisc *q, unsigned long cl)
1640 {
1641 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1642 struct sk_buff *skb;
1643 int err = 0;
1644
1645 if (!cops->delete)
1646 return -EOPNOTSUPP;
1647
1648 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1649 if (!skb)
1650 return -ENOBUFS;
1651
1652 if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
1653 RTM_DELTCLASS) < 0) {
1654 kfree_skb(skb);
1655 return -EINVAL;
1656 }
1657
1658 err = cops->delete(q, cl);
1659 if (err) {
1660 kfree_skb(skb);
1661 return err;
1662 }
1663
1664 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1665 n->nlmsg_flags & NLM_F_ECHO);
1666 }
1667
1668 #ifdef CONFIG_NET_CLS
1669
1670 struct tcf_bind_args {
1671 struct tcf_walker w;
1672 u32 classid;
1673 unsigned long cl;
1674 };
1675
1676 static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
1677 {
1678 struct tcf_bind_args *a = (void *)arg;
1679
1680 if (tp->ops->bind_class) {
1681 struct Qdisc *q = tcf_block_q(tp->chain->block);
1682
1683 sch_tree_lock(q);
1684 tp->ops->bind_class(n, a->classid, a->cl);
1685 sch_tree_unlock(q);
1686 }
1687 return 0;
1688 }
1689
1690 static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
1691 unsigned long new_cl)
1692 {
1693 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1694 struct tcf_block *block;
1695 struct tcf_chain *chain;
1696 unsigned long cl;
1697
1698 cl = cops->find(q, portid);
1699 if (!cl)
1700 return;
1701 block = cops->tcf_block(q, cl);
1702 if (!block)
1703 return;
1704 list_for_each_entry(chain, &block->chain_list, list) {
1705 struct tcf_proto *tp;
1706
1707 for (tp = rtnl_dereference(chain->filter_chain);
1708 tp; tp = rtnl_dereference(tp->next)) {
1709 struct tcf_bind_args arg = {};
1710
1711 arg.w.fn = tcf_node_bind;
1712 arg.classid = clid;
1713 arg.cl = new_cl;
1714 tp->ops->walk(tp, &arg.w);
1715 }
1716 }
1717 }
1718
1719 #else
1720
1721 static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
1722 unsigned long new_cl)
1723 {
1724 }
1725
1726 #endif
1727
1728 static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
1729 struct netlink_ext_ack *extack)
1730 {
1731 struct net *net = sock_net(skb->sk);
1732 struct tcmsg *tcm = nlmsg_data(n);
1733 struct nlattr *tca[TCA_MAX + 1];
1734 struct net_device *dev;
1735 struct Qdisc *q = NULL;
1736 const struct Qdisc_class_ops *cops;
1737 unsigned long cl = 0;
1738 unsigned long new_cl;
1739 u32 portid;
1740 u32 clid;
1741 u32 qid;
1742 int err;
1743
1744 if ((n->nlmsg_type != RTM_GETTCLASS) &&
1745 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1746 return -EPERM;
1747
1748 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
1749 if (err < 0)
1750 return err;
1751
1752 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1753 if (!dev)
1754 return -ENODEV;
1755
1756 /*
1757 parent == TC_H_UNSPEC - unspecified parent.
1758 parent == TC_H_ROOT - class is root, which has no parent.
1759 parent == X:0 - parent is root class.
1760 parent == X:Y - parent is a node in hierarchy.
1761 parent == 0:Y - parent is X:Y, where X:0 is qdisc.
1762
1763 handle == 0:0 - generate handle from kernel pool.
1764 handle == 0:Y - class is X:Y, where X:0 is qdisc.
1765 handle == X:Y - class is exactly X:Y.
1766 handle == X:0 - root class.
1767 */
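
	/* Worked example with made-up numbers: for something like
	 * "tc class add dev eth0 parent 1: classid 1:10", userspace sends
	 * tcm_parent = 0x00010000 ("1:", so not TC_H_ROOT) and
	 * tcm_handle = 0x00010010 ("1:10", minor 0x10).  Below this gives
	 * portid = 0x00010000, clid = 0x00010010 and qid = TC_H_MAJ(clid)
	 * = 0x00010000, the handle of the qdisc that owns the class.
	 */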
1768
1769 /* Step 1. Determine qdisc handle X:0 */
1770
1771 portid = tcm->tcm_parent;
1772 clid = tcm->tcm_handle;
1773 qid = TC_H_MAJ(clid);
1774
1775 if (portid != TC_H_ROOT) {
1776 u32 qid1 = TC_H_MAJ(portid);
1777
1778 if (qid && qid1) {
1779 /* If both majors are known, they must be identical. */
1780 if (qid != qid1)
1781 return -EINVAL;
1782 } else if (qid1) {
1783 qid = qid1;
1784 } else if (qid == 0)
1785 qid = dev->qdisc->handle;
1786
1787 /* Now qid is a genuine qdisc handle consistent
1788 * with both parent and child.
1789 *
1790 * TC_H_MAJ(portid) may still be unspecified; complete it now.
1791 */
1792 if (portid)
1793 portid = TC_H_MAKE(qid, portid);
1794 } else {
1795 if (qid == 0)
1796 qid = dev->qdisc->handle;
1797 }
1798
1799 /* OK. Locate qdisc */
1800 q = qdisc_lookup(dev, qid);
1801 if (!q)
1802 return -ENOENT;
1803
1804 /* And check that it supports classes */
1805 cops = q->ops->cl_ops;
1806 if (cops == NULL)
1807 return -EINVAL;
1808
1809 /* Now try to get class */
1810 if (clid == 0) {
1811 if (portid == TC_H_ROOT)
1812 clid = qid;
1813 } else
1814 clid = TC_H_MAKE(qid, clid);
1815
1816 if (clid)
1817 cl = cops->find(q, clid);
1818
1819 if (cl == 0) {
1820 err = -ENOENT;
1821 if (n->nlmsg_type != RTM_NEWTCLASS ||
1822 !(n->nlmsg_flags & NLM_F_CREATE))
1823 goto out;
1824 } else {
1825 switch (n->nlmsg_type) {
1826 case RTM_NEWTCLASS:
1827 err = -EEXIST;
1828 if (n->nlmsg_flags & NLM_F_EXCL)
1829 goto out;
1830 break;
1831 case RTM_DELTCLASS:
1832 err = tclass_del_notify(net, cops, skb, n, q, cl);
1833 /* Unbind filters from the deleted class by rebinding them to 0 */
1834 tc_bind_tclass(q, portid, clid, 0);
1835 goto out;
1836 case RTM_GETTCLASS:
1837 err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
1838 goto out;
1839 default:
1840 err = -EINVAL;
1841 goto out;
1842 }
1843 }
1844
1845 new_cl = cl;
1846 err = -EOPNOTSUPP;
1847 if (cops->change)
1848 err = cops->change(q, clid, portid, tca, &new_cl);
1849 if (err == 0) {
1850 tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
1851 /* We just created a new class; we need to do the reverse binding. */
1852 if (cl != new_cl)
1853 tc_bind_tclass(q, portid, clid, new_cl);
1854 }
1855 out:
1856 return err;
1857 }
1858
1859 struct qdisc_dump_args {
1860 struct qdisc_walker w;
1861 struct sk_buff *skb;
1862 struct netlink_callback *cb;
1863 };
1864
1865 static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
1866 struct qdisc_walker *arg)
1867 {
1868 struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
1869
1870 return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
1871 a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
1872 RTM_NEWTCLASS);
1873 }
1874
1875 static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
1876 struct tcmsg *tcm, struct netlink_callback *cb,
1877 int *t_p, int s_t)
1878 {
1879 struct qdisc_dump_args arg;
1880
1881 if (tc_qdisc_dump_ignore(q, false) ||
1882 *t_p < s_t || !q->ops->cl_ops ||
1883 (tcm->tcm_parent &&
1884 TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
1885 (*t_p)++;
1886 return 0;
1887 }
1888 if (*t_p > s_t)
1889 memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
1890 arg.w.fn = qdisc_class_dump;
1891 arg.skb = skb;
1892 arg.cb = cb;
1893 arg.w.stop = 0;
1894 arg.w.skip = cb->args[1];
1895 arg.w.count = 0;
1896 q->ops->cl_ops->walk(q, &arg.w);
1897 cb->args[1] = arg.w.count;
1898 if (arg.w.stop)
1899 return -1;
1900 (*t_p)++;
1901 return 0;
1902 }
1903
1904 static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
1905 struct tcmsg *tcm, struct netlink_callback *cb,
1906 int *t_p, int s_t)
1907 {
1908 struct Qdisc *q;
1909 int b;
1910
1911 if (!root)
1912 return 0;
1913
1914 if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
1915 return -1;
1916
1917 if (!qdisc_dev(root))
1918 return 0;
1919
1920 if (tcm->tcm_parent) {
1921 q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
1922 if (q && tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
1923 return -1;
1924 return 0;
1925 }
1926 hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
1927 if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
1928 return -1;
1929 }
1930
1931 return 0;
1932 }
1933
1934 static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
1935 {
1936 struct tcmsg *tcm = nlmsg_data(cb->nlh);
1937 struct net *net = sock_net(skb->sk);
1938 struct netdev_queue *dev_queue;
1939 struct net_device *dev;
1940 int t, s_t;
1941
1942 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
1943 return 0;
1944 dev = dev_get_by_index(net, tcm->tcm_ifindex);
1945 if (!dev)
1946 return 0;
1947
1948 s_t = cb->args[0];
1949 t = 0;
1950
1951 if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
1952 goto done;
1953
1954 dev_queue = dev_ingress_queue(dev);
1955 if (dev_queue &&
1956 tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
1957 &t, s_t) < 0)
1958 goto done;
1959
1960 done:
1961 cb->args[0] = t;
1962
1963 dev_put(dev);
1964 return skb->len;
1965 }
1966
1967 #ifdef CONFIG_PROC_FS
1968 static int psched_show(struct seq_file *seq, void *v)
1969 {
1970 seq_printf(seq, "%08x %08x %08x %08x\n",
1971 (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
1972 1000000,
1973 (u32)NSEC_PER_SEC / hrtimer_resolution);
1974
1975 return 0;
1976 }
1977
1978 static int psched_open(struct inode *inode, struct file *file)
1979 {
1980 return single_open(file, psched_show, NULL);
1981 }
1982
1983 static const struct file_operations psched_fops = {
1984 .owner = THIS_MODULE,
1985 .open = psched_open,
1986 .read = seq_read,
1987 .llseek = seq_lseek,
1988 .release = single_release,
1989 };
1990
1991 static int __net_init psched_net_init(struct net *net)
1992 {
1993 struct proc_dir_entry *e;
1994
1995 e = proc_create("psched", 0, net->proc_net, &psched_fops);
1996 if (e == NULL)
1997 return -ENOMEM;
1998
1999 return 0;
2000 }
2001
2002 static void __net_exit psched_net_exit(struct net *net)
2003 {
2004 remove_proc_entry("psched", net->proc_net);
2005 }
2006 #else
2007 static int __net_init psched_net_init(struct net *net)
2008 {
2009 return 0;
2010 }
2011
2012 static void __net_exit psched_net_exit(struct net *net)
2013 {
2014 }
2015 #endif
2016
2017 static struct pernet_operations psched_net_ops = {
2018 .init = psched_net_init,
2019 .exit = psched_net_exit,
2020 };
2021
2022 static int __init pktsched_init(void)
2023 {
2024 int err;
2025
2026 err = register_pernet_subsys(&psched_net_ops);
2027 if (err) {
2028 pr_err("pktsched_init: "
2029 "cannot initialize per netns operations\n");
2030 return err;
2031 }
2032
2033 register_qdisc(&pfifo_fast_ops);
2034 register_qdisc(&pfifo_qdisc_ops);
2035 register_qdisc(&bfifo_qdisc_ops);
2036 register_qdisc(&pfifo_head_drop_qdisc_ops);
2037 register_qdisc(&mq_qdisc_ops);
2038 register_qdisc(&noqueue_qdisc_ops);
2039
2040 rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, 0);
2041 rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, 0);
2042 rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
2043 0);
2044 rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, 0);
2045 rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, 0);
2046 rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
2047 0);
2048
2049 return 0;
2050 }
2051
2052 subsys_initcall(pktsched_init);