/*
 * net/sched/sch_api.c	Packet scheduler API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/hashtable.h>

#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
/*

   Short review.
   -------------

   This file consists of two interrelated parts:

   1. queueing disciplines manager frontend.
   2. traffic classes manager frontend.

   Generally, a queueing discipline ("qdisc") is a black box
   that can enqueue packets and dequeue them (when the device
   is ready to send something) in an order and at times
   determined by the algorithm hidden inside it.

   qdiscs are divided into two categories:
   - "queues", which have no internal structure visible from outside.
   - "schedulers", which split all packets into "traffic classes",
     using "packet classifiers" (see cls_api.c).

   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them, and so on recursively.

   The goal of the routines in this file is to translate
   the information supplied by the user in the form of handles
   into a form more intelligible to the kernel, to perform some
   sanity checks and the part of the work common to all qdiscs,
   and to provide rtnetlink notifications.

   All the real intelligent work is done inside the qdisc modules.

   Every discipline has two major routines: enqueue and dequeue.

   ---dequeue

   dequeue usually returns an skb to send. It is allowed to return NULL,
   but that does not mean the queue is empty; it only means that the
   discipline does not want to send anything right now.
   The queue is really empty only if q->q.qlen == 0.
   For complicated disciplines with multiple queues, q->q is not the
   real packet queue, but q->q.qlen must still be valid.

   ---enqueue

   enqueue returns 0 if the packet was enqueued successfully.
   If a packet (this one or another one) was dropped, it returns
   a non-zero error code:
   NET_XMIT_DROP	- this packet was dropped.
     Expected action: do not back off, but wait until the queue clears.
   NET_XMIT_CN		- probably this packet was enqueued, but another one was dropped.
     Expected action: back off or ignore.

   Auxiliary routines:

   ---peek

   like dequeue, but without removing a packet from the queue.

   ---reset

   returns the qdisc to its initial state: purge all buffers, clear all
   timers, counters (except for statistics), etc.

   ---init

   initializes a newly created qdisc.

   ---destroy

   destroys resources allocated by init and during the lifetime of the qdisc.

   ---change

   changes qdisc parameters.
 */
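/* Illustrative sketch (not part of this file): how the enqueue/dequeue
 * contract above looks from inside a hypothetical, minimal qdisc. The
 * "toy_fifo_*" names are made up for illustration; qdisc_enqueue_tail(),
 * qdisc_drop() and qdisc_dequeue_head() are the generic helpers from
 * <net/sch_generic.h>.
 *
 *	static int toy_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *				    struct sk_buff **to_free)
 *	{
 *		if (likely(sch->q.qlen < sch->limit))
 *			return qdisc_enqueue_tail(skb, sch);	returns 0
 *		return qdisc_drop(skb, sch, to_free);		returns NET_XMIT_DROP
 *	}
 *
 *	static struct sk_buff *toy_fifo_dequeue(struct Qdisc *sch)
 *	{
 *		return qdisc_dequeue_head(sch);	NULL here does not imply "empty"
 *	}
 */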
/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);

/************************************************
 *	Queueing disciplines manipulation.	*
 ************************************************/

/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;
/* Register/unregister queueing discipline */

int register_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int rc = -EEXIST;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (!strcmp(qops->id, q->id))
			goto out;

	if (qops->enqueue == NULL)
		qops->enqueue = noop_qdisc_ops.enqueue;
	if (qops->peek == NULL) {
		if (qops->dequeue == NULL)
			qops->peek = noop_qdisc_ops.peek;
		else
			goto out_einval;
	}
	if (qops->dequeue == NULL)
		qops->dequeue = noop_qdisc_ops.dequeue;

	if (qops->cl_ops) {
		const struct Qdisc_class_ops *cops = qops->cl_ops;

		if (!(cops->find && cops->walk && cops->leaf))
			goto out_einval;

		if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf))
			goto out_einval;
	}

	qops->next = NULL;
	*qp = qops;
	rc = 0;
out:
	write_unlock(&qdisc_mod_lock);
	return rc;

out_einval:
	rc = -EINVAL;
	goto out;
}
EXPORT_SYMBOL(register_qdisc);

int unregister_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int err = -ENOENT;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (q == qops)
			break;
	if (q) {
		*qp = q->next;
		q->next = NULL;
		err = 0;
	}
	write_unlock(&qdisc_mod_lock);
	return err;
}
EXPORT_SYMBOL(unregister_qdisc);
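/* Usage sketch (illustrative, not code in this file): a scheduler module
 * normally registers its Qdisc_ops from module init and unregisters it on
 * exit. Continuing the hypothetical toy_fifo example from above:
 *
 *	static struct Qdisc_ops toy_fifo_qdisc_ops __read_mostly = {
 *		.id		= "toy_fifo",
 *		.enqueue	= toy_fifo_enqueue,
 *		.dequeue	= toy_fifo_dequeue,
 *		.peek		= qdisc_peek_head,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init toy_fifo_module_init(void)
 *	{
 *		return register_qdisc(&toy_fifo_qdisc_ops);
 *	}
 *
 *	static void __exit toy_fifo_module_exit(void)
 *	{
 *		unregister_qdisc(&toy_fifo_qdisc_ops);
 *	}
 *
 *	module_init(toy_fifo_module_init);
 *	module_exit(toy_fifo_module_exit);
 *
 * See sch_fifo.c and the other sch_* modules for real registrations.
 */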
/* Get default qdisc if not otherwise specified */
void qdisc_get_default(char *name, size_t len)
{
	read_lock(&qdisc_mod_lock);
	strlcpy(name, default_qdisc_ops->id, len);
	read_unlock(&qdisc_mod_lock);
}

static struct Qdisc_ops *qdisc_lookup_default(const char *name)
{
	struct Qdisc_ops *q = NULL;

	for (q = qdisc_base; q; q = q->next) {
		if (!strcmp(name, q->id)) {
			if (!try_module_get(q->owner))
				q = NULL;
			break;
		}
	}

	return q;
}

/* Set new default qdisc to use */
int qdisc_set_default(const char *name)
{
	const struct Qdisc_ops *ops;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	write_lock(&qdisc_mod_lock);
	ops = qdisc_lookup_default(name);
	if (!ops) {
		/* Not found, drop lock and try to load module */
		write_unlock(&qdisc_mod_lock);
		request_module("sch_%s", name);
		write_lock(&qdisc_mod_lock);

		ops = qdisc_lookup_default(name);
	}

	if (ops) {
		/* Set new default */
		module_put(default_qdisc_ops->owner);
		default_qdisc_ops = ops;
	}
	write_unlock(&qdisc_mod_lock);

	return ops ? 0 : -ENOENT;
}

#ifdef CONFIG_NET_SCH_DEFAULT
/* Set default value from kernel config */
static int __init sch_default_qdisc(void)
{
	return qdisc_set_default(CONFIG_DEFAULT_NET_SCH);
}
late_initcall(sch_default_qdisc);
#endif
/* We know handle. Find qdisc among all qdisc's attached to device
 * (root qdisc, all its children, children of children etc.)
 * Note: caller either uses rtnl or rcu_read_lock()
 */

static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
{
	struct Qdisc *q;

	if (!qdisc_dev(root))
		return (root->handle == handle ? root : NULL);

	if (!(root->flags & TCQ_F_BUILTIN) &&
	    root->handle == handle)
		return root;

	hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle) {
		if (q->handle == handle)
			return q;
	}
	return NULL;
}

void qdisc_hash_add(struct Qdisc *q, bool invisible)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
		if (invisible)
			q->flags |= TCQ_F_INVISIBLE;
	}
}
EXPORT_SYMBOL(qdisc_hash_add);

void qdisc_hash_del(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		hash_del_rcu(&q->hash);
	}
}
EXPORT_SYMBOL(qdisc_hash_del);

struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
	struct Qdisc *q;

	q = qdisc_match_from_root(dev->qdisc, handle);
	if (q)
		goto out;

	if (dev_ingress_queue(dev))
		q = qdisc_match_from_root(
			dev_ingress_queue(dev)->qdisc_sleeping,
			handle);
out:
	return q;
}
static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
	unsigned long cl;
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = p->ops->cl_ops;

	if (cops == NULL)
		return NULL;
	cl = cops->find(p, classid);

	if (cl == 0)
		return NULL;
	leaf = cops->leaf(p, cl);
	return leaf;
}
/* Find queueing discipline by name */

static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
{
	struct Qdisc_ops *q = NULL;

	if (kind) {
		read_lock(&qdisc_mod_lock);
		for (q = qdisc_base; q; q = q->next) {
			if (nla_strcmp(kind, q->id) == 0) {
				if (!try_module_get(q->owner))
					q = NULL;
				break;
			}
		}
		read_unlock(&qdisc_mod_lock);
	}
	return q;
}
/* In older iproute2 versions the linklayer setting was not transferred
 * to the kernel, and the rate table lookup code has since been dropped
 * from the kernel. To stay backward compatible with older iproute2 tc
 * utilities, we detect the linklayer setting by checking whether the
 * rate table was modified.
 *
 * For linklayer ATM, the rate table entries are aligned to 48-byte
 * cells, so some table entries will contain the same value. The mpu
 * (minimum packet unit) is also encoded into the old rate table, so
 * starting from the mpu we find the low and high table entries that
 * map this cell. If these entries contain the same value, then the
 * rate table has been modified for linklayer ATM.
 *
 * This is done by rounding the mpu up to the nearest 48-byte cell/entry,
 * then rounding up to the next cell, computing the table entry one below,
 * and comparing the two.
 */
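/* Worked example of the arithmetic above, with illustrative (assumed)
 * numbers: for cell_log == 3 (8-byte table cells) and mpu == 0,
 * low = roundup(0, 48) = 0 and high = roundup(1, 48) = 48, giving
 * cell_low = 0 >> 3 = 0 and cell_high = (48 >> 3) - 1 = 5. If
 * rtab[0] == rtab[5], every table slot covering packet sizes below 48
 * bytes carries the same transmission cost, which is the signature of a
 * table built for 48-byte ATM cells.
 */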
static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
{
	int low       = roundup(r->mpu, 48);
	int high      = roundup(low+1, 48);
	int cell_low  = low >> r->cell_log;
	int cell_high = (high >> r->cell_log) - 1;

	/* rtab is too inaccurate at rates > 100Mbit/s */
	if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
		pr_debug("TC linklayer: Giving up ATM detection\n");
		return TC_LINKLAYER_ETHERNET;
	}

	if ((cell_high > cell_low) && (cell_high < 256)
	    && (rtab[cell_low] == rtab[cell_high])) {
		pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
			 cell_low, cell_high, rtab[cell_high]);
		return TC_LINKLAYER_ATM;
	}
	return TC_LINKLAYER_ETHERNET;
}
static struct qdisc_rate_table *qdisc_rtab_list;

struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
					struct nlattr *tab)
{
	struct qdisc_rate_table *rtab;

	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
	    nla_len(tab) != TC_RTAB_SIZE)
		return NULL;

	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
		if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
		    !memcmp(&rtab->data, nla_data(tab), 1024)) {
			rtab->refcnt++;
			return rtab;
		}
	}

	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
	if (rtab) {
		rtab->rate = *r;
		rtab->refcnt = 1;
		memcpy(rtab->data, nla_data(tab), 1024);
		if (r->linklayer == TC_LINKLAYER_UNAWARE)
			r->linklayer = __detect_linklayer(r, rtab->data);
		rtab->next = qdisc_rtab_list;
		qdisc_rtab_list = rtab;
	}
	return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);

void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
	struct qdisc_rate_table *rtab, **rtabp;

	if (!tab || --tab->refcnt)
		return;

	for (rtabp = &qdisc_rtab_list;
	     (rtab = *rtabp) != NULL;
	     rtabp = &rtab->next) {
		if (rtab == tab) {
			*rtabp = rtab->next;
			kfree(rtab);
			return;
		}
	}
}
EXPORT_SYMBOL(qdisc_put_rtab);
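/* Usage sketch (illustrative, not code in this file): a rate-limiting
 * qdisc that still accepts the legacy rate-table attribute typically grabs
 * the table while parsing its options and drops it on teardown:
 *
 *	struct qdisc_rate_table *rtab;
 *
 *	rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_FOO_RTAB]);
 *	if (!rtab)
 *		return -EINVAL;
 *	...
 *	qdisc_put_rtab(rtab);
 *
 * "qopt" and "TCA_FOO_RTAB" are placeholders; sch_tbf.c and sch_htb.c are
 * real users of qdisc_get_rtab()/qdisc_put_rtab().
 */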
static LIST_HEAD(qdisc_stab_list);

static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
	[TCA_STAB_DATA] = { .type = NLA_BINARY },
};

static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
{
	struct nlattr *tb[TCA_STAB_MAX + 1];
	struct qdisc_size_table *stab;
	struct tc_sizespec *s;
	unsigned int tsize = 0;
	u16 *tab = NULL;
	int err;

	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy, NULL);
	if (err < 0)
		return ERR_PTR(err);
	if (!tb[TCA_STAB_BASE])
		return ERR_PTR(-EINVAL);

	s = nla_data(tb[TCA_STAB_BASE]);

	if (s->tsize > 0) {
		if (!tb[TCA_STAB_DATA])
			return ERR_PTR(-EINVAL);
		tab = nla_data(tb[TCA_STAB_DATA]);
		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
	}

	if (tsize != s->tsize || (!tab && tsize > 0))
		return ERR_PTR(-EINVAL);

	list_for_each_entry(stab, &qdisc_stab_list, list) {
		if (memcmp(&stab->szopts, s, sizeof(*s)))
			continue;
		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
			continue;
		stab->refcnt++;
		return stab;
	}

	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	stab->refcnt = 1;
	stab->szopts = *s;
	if (tsize > 0)
		memcpy(stab->data, tab, tsize * sizeof(u16));

	list_add_tail(&stab->list, &qdisc_stab_list);

	return stab;
}

static void stab_kfree_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct qdisc_size_table, rcu));
}

void qdisc_put_stab(struct qdisc_size_table *tab)
{
	if (!tab)
		return;

	if (--tab->refcnt == 0) {
		list_del(&tab->list);
		call_rcu_bh(&tab->rcu, stab_kfree_rcu);
	}
}
EXPORT_SYMBOL(qdisc_put_stab);
static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_STAB);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	return -1;
}

void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab)
{
	int pkt_len, slot;

	pkt_len = skb->len + stab->szopts.overhead;
	if (unlikely(!stab->szopts.tsize))
		goto out;

	slot = pkt_len + stab->szopts.cell_align;
	if (unlikely(slot < 0))
		slot = 0;

	slot >>= stab->szopts.cell_log;
	if (likely(slot < stab->szopts.tsize))
		pkt_len = stab->data[slot];
	else
		pkt_len = stab->data[stab->szopts.tsize - 1] *
				(slot / stab->szopts.tsize) +
				stab->data[slot % stab->szopts.tsize];

	pkt_len <<= stab->szopts.size_log;
out:
	if (unlikely(pkt_len < 1))
		pkt_len = 1;
	qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
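/* Worked example of the size-table scaling above, with illustrative
 * (assumed) parameters: overhead == 24, cell_align == -1, cell_log == 6,
 * size_log == 6 and tsize == 512. A 1000-byte skb gives
 * pkt_len = 1000 + 24 = 1024, slot = (1024 - 1) >> 6 = 15, and therefore
 * pkt_len = stab->data[15] << 6. If the table stores data[15] == 16, the
 * accounted length becomes 1024, i.e. the packet is billed in 64-byte
 * cells.
 */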
void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
{
	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
		pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
			txt, qdisc->ops->id, qdisc->handle >> 16);
		qdisc->flags |= TCQ_F_WARN_NONWC;
	}
}
EXPORT_SYMBOL(qdisc_warn_nonwc);

static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						 timer);

	__netif_schedule(qdisc_root(wd->qdisc));

	return HRTIMER_NORESTART;
}

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
	hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	wd->timer.function = qdisc_watchdog;
	wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init);

void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
{
	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc_root_sleeping(wd->qdisc)->state))
		return;

	if (wd->last_expires == expires)
		return;

	wd->last_expires = expires;
	hrtimer_start(&wd->timer,
		      ns_to_ktime(expires),
		      HRTIMER_MODE_ABS_PINNED);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
	hrtimer_cancel(&wd->timer);
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);
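/* Usage sketch (illustrative, not code in this file): a shaping qdisc
 * typically pairs the watchdog with its ->dequeue() so the device gets
 * rescheduled once the head packet becomes eligible:
 *
 *	qdisc_watchdog_init(&q->watchdog, sch);			in ->init()
 *	qdisc_watchdog_schedule_ns(&q->watchdog, t_next);	in ->dequeue(), when
 *								the head packet may
 *								not be sent yet
 *	qdisc_watchdog_cancel(&q->watchdog);			in ->reset()/->destroy()
 *
 * "q->watchdog" and "t_next" are placeholders for per-qdisc state;
 * sch_tbf.c and sch_fq.c use this pattern.
 */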
static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
{
	struct hlist_head *h;
	unsigned int i;

	h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL);

	if (h != NULL) {
		for (i = 0; i < n; i++)
			INIT_HLIST_HEAD(&h[i]);
	}
	return h;
}

void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *next;
	struct hlist_head *nhash, *ohash;
	unsigned int nsize, nmask, osize;
	unsigned int i, h;

	/* Rehash when load factor exceeds 0.75 */
	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
		return;
	nsize = clhash->hashsize * 2;
	nmask = nsize - 1;
	nhash = qdisc_class_hash_alloc(nsize);
	if (nhash == NULL)
		return;

	ohash = clhash->hash;
	osize = clhash->hashsize;

	sch_tree_lock(sch);
	for (i = 0; i < osize; i++) {
		hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
			h = qdisc_class_hash(cl->classid, nmask);
			hlist_add_head(&cl->hnode, &nhash[h]);
		}
	}
	clhash->hash     = nhash;
	clhash->hashsize = nsize;
	clhash->hashmask = nmask;
	sch_tree_unlock(sch);

	kvfree(ohash);
}
EXPORT_SYMBOL(qdisc_class_hash_grow);

int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
{
	unsigned int size = 4;

	clhash->hash = qdisc_class_hash_alloc(size);
	if (clhash->hash == NULL)
		return -ENOMEM;
	clhash->hashsize  = size;
	clhash->hashmask  = size - 1;
	clhash->hashelems = 0;
	return 0;
}
EXPORT_SYMBOL(qdisc_class_hash_init);

void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
	kvfree(clhash->hash);
}
EXPORT_SYMBOL(qdisc_class_hash_destroy);

void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	unsigned int h;

	INIT_HLIST_NODE(&cl->hnode);
	h = qdisc_class_hash(cl->classid, clhash->hashmask);
	hlist_add_head(&cl->hnode, &clhash->hash[h]);
	clhash->hashelems++;
}
EXPORT_SYMBOL(qdisc_class_hash_insert);

void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	hlist_del(&cl->hnode);
	clhash->hashelems--;
}
EXPORT_SYMBOL(qdisc_class_hash_remove);
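/* Usage sketch (illustrative, not code in this file): a classful qdisc
 * keeps its classes in a Qdisc_class_hash embedded in its private data and
 * drives it roughly as follows:
 *
 *	qdisc_class_hash_init(&q->clhash);			in ->init()
 *	qdisc_class_hash_insert(&q->clhash, &cl->common);	when a class is created
 *	qdisc_class_hash_grow(sch, &q->clhash);			after inserting
 *	qdisc_class_hash_remove(&q->clhash, &cl->common);	when a class is deleted
 *	qdisc_class_hash_destroy(&q->clhash);			in ->destroy()
 *
 * "q->clhash" and "cl->common" are placeholders for the qdisc's private
 * state; sch_htb.c and sch_hfsc.c follow this pattern.
 */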
/* Allocate a unique handle from the space managed by the kernel.
 * Possible range is [8000-FFFF]:0000 (0x8000 values)
 */
static u32 qdisc_alloc_handle(struct net_device *dev)
{
	int i = 0x8000;
	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

	do {
		autohandle += TC_H_MAKE(0x10000U, 0);
		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
			autohandle = TC_H_MAKE(0x80000000U, 0);
		if (!qdisc_lookup(dev, autohandle))
			return autohandle;
		cond_resched();
	} while	(--i > 0);

	return 0;
}
void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
			       unsigned int len)
{
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	u32 parentid;
	bool notify;
	int drops;

	if (n == 0 && len == 0)
		return;
	drops = max_t(int, n, 0);
	while ((parentid = sch->parent)) {
		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
			return;

		if (sch->flags & TCQ_F_NOPARENT)
			break;
		/* Notify parent qdisc only if child qdisc becomes empty.
		 *
		 * If child was empty even before update then backlog
		 * counter is screwed and we skip notification because
		 * parent class is already passive.
		 */
		notify = !sch->q.qlen && !WARN_ON_ONCE(!n);
		/* TODO: perform the search on a per txq basis */
		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
		if (sch == NULL) {
			WARN_ON_ONCE(parentid != TC_H_ROOT);
			break;
		}
		cops = sch->ops->cl_ops;
		if (notify && cops->qlen_notify) {
			cl = cops->find(sch, parentid);
			cops->qlen_notify(sch, cl);
		}
		sch->q.qlen -= n;
		sch->qstats.backlog -= len;
		__qdisc_qstats_drop(sch, drops);
	}
}
EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
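/* Usage sketch (illustrative, not code in this file): a qdisc that throws
 * away already-queued packets outside the normal dequeue path, e.g. while
 * shrinking its limit in ->change(), reports the removal upwards so that
 * the ancestors' qlen/backlog counters stay consistent:
 *
 *	unsigned int dropped = 0, dropped_bytes = 0;
 *
 *	while (sch->q.qlen > new_limit) {
 *		struct sk_buff *skb = qdisc_dequeue_head(sch);
 *
 *		dropped_bytes += qdisc_pkt_len(skb);
 *		dropped++;
 *		rtnl_kfree_skbs(skb, skb);
 *	}
 *	qdisc_tree_reduce_backlog(sch, dropped, dropped_bytes);
 *
 * "new_limit" is a placeholder; sch_fifo.c and sch_fq_codel.c contain
 * real-world versions of this pattern.
 */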
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			 u32 portid, u32 seq, u16 flags, int event)
{
	struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
	struct gnet_stats_queue __percpu *cpu_qstats = NULL;
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	struct qdisc_size_table *stab;
	__u32 qlen;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = refcount_read(&q->refcnt);
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
		goto nla_put_failure;
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
		goto nla_put_failure;

	qlen = qdisc_qlen_sum(q);

	stab = rtnl_dereference(q->stab);
	if (stab && qdisc_dump_stab(skb, stab) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 NULL, &d, TCA_PAD) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto nla_put_failure;

	if (qdisc_is_percpu_stats(q)) {
		cpu_bstats = q->cpu_bstats;
		cpu_qstats = q->cpu_qstats;
	}

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q),
				  &d, cpu_bstats, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
{
	if (q->flags & TCQ_F_BUILTIN)
		return true;
	if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
		return true;

	return false;
}

static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (old && !tc_qdisc_dump_ignore(old, false)) {
		if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
				  0, RTM_DELQDISC) < 0)
			goto err_out;
	}
	if (new && !tc_qdisc_dump_ignore(new, false)) {
		if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
			goto err_out;
	}

	if (skb->len)
		return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				      n->nlmsg_flags & NLM_F_ECHO);

err_out:
	kfree_skb(skb);
	return -EINVAL;
}

static void notify_and_destroy(struct net *net, struct sk_buff *skb,
			       struct nlmsghdr *n, u32 clid,
			       struct Qdisc *old, struct Qdisc *new)
{
	if (new || old)
		qdisc_notify(net, skb, n, clid, old, new);

	if (old)
		qdisc_destroy(old);
}
/* Graft qdisc "new" to class "classid" of qdisc "parent" or
 * to device "dev".
 *
 * When appropriate send a netlink notification using 'skb'
 * and "n".
 *
 * On success, destroy old qdisc.
 */

static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
		       struct Qdisc *new, struct Qdisc *old)
{
	struct Qdisc *q = old;
	struct net *net = dev_net(dev);
	int err = 0;

	if (parent == NULL) {
		unsigned int i, num_q, ingress;

		ingress = 0;
		num_q = dev->num_tx_queues;
		if ((q && q->flags & TCQ_F_INGRESS) ||
		    (new && new->flags & TCQ_F_INGRESS)) {
			num_q = 1;
			ingress = 1;
			if (!dev_ingress_queue(dev))
				return -ENOENT;
		}

		if (dev->flags & IFF_UP)
			dev_deactivate(dev);

		if (new && new->ops->attach)
			goto skip;

		for (i = 0; i < num_q; i++) {
			struct netdev_queue *dev_queue = dev_ingress_queue(dev);

			if (!ingress)
				dev_queue = netdev_get_tx_queue(dev, i);

			old = dev_graft_qdisc(dev_queue, new);
			if (new && i > 0)
				qdisc_refcount_inc(new);

			if (!ingress)
				qdisc_destroy(old);
		}

skip:
		if (!ingress) {
			notify_and_destroy(net, skb, n, classid,
					   dev->qdisc, new);
			if (new && !new->ops->attach)
				qdisc_refcount_inc(new);
			dev->qdisc = new ? : &noop_qdisc;

			if (new && new->ops->attach)
				new->ops->attach(new);
		} else {
			notify_and_destroy(net, skb, n, classid, old, new);
		}

		if (dev->flags & IFF_UP)
			dev_activate(dev);
	} else {
		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;

		/* Only support running class lockless if parent is lockless */
		if (new && (new->flags & TCQ_F_NOLOCK) &&
		    parent && !(parent->flags & TCQ_F_NOLOCK))
			new->flags &= ~TCQ_F_NOLOCK;

		err = -EOPNOTSUPP;
		if (cops && cops->graft) {
			unsigned long cl = cops->find(parent, classid);

			if (cl)
				err = cops->graft(parent, cl, new, &old);
			else
				err = -ENOENT;
		}
		if (!err)
			notify_and_destroy(net, skb, n, classid, old, new);
	}
	return err;
}

/* lockdep annotation is needed for ingress; egress gets it only for name */
static struct lock_class_key qdisc_tx_lock;
static struct lock_class_key qdisc_rx_lock;
/*
   Allocate and initialize new qdisc.

   Parameters are passed via opt.
 */

static struct Qdisc *qdisc_create(struct net_device *dev,
				  struct netdev_queue *dev_queue,
				  struct Qdisc *p, u32 parent, u32 handle,
				  struct nlattr **tca, int *errp)
{
	int err;
	struct nlattr *kind = tca[TCA_KIND];
	struct Qdisc *sch;
	struct Qdisc_ops *ops;
	struct qdisc_size_table *stab;

	ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_MODULES
	if (ops == NULL && kind != NULL) {
		char name[IFNAMSIZ];
		if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
			/* We dropped the RTNL semaphore in order to
			 * perform the module load. So, even if we
			 * succeeded in loading the module we have to
			 * tell the caller to replay the request. We
			 * indicate this using -EAGAIN.
			 * We replay the request because the device may
			 * go away in the mean time.
			 */
			rtnl_unlock();
			request_module("sch_%s", name);
			rtnl_lock();
			ops = qdisc_lookup_ops(kind);
			if (ops != NULL) {
				/* We will try again qdisc_lookup_ops,
				 * so don't keep a reference.
				 */
				module_put(ops->owner);
				err = -EAGAIN;
				goto err_out;
			}
		}
	}
#endif

	err = -ENOENT;
	if (ops == NULL)
		goto err_out;

	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch)) {
		err = PTR_ERR(sch);
		goto err_out2;
	}

	sch->parent = parent;

	if (handle == TC_H_INGRESS) {
		sch->flags |= TCQ_F_INGRESS;
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
		lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
	} else {
		if (handle == 0) {
			handle = qdisc_alloc_handle(dev);
			err = -ENOMEM;
			if (handle == 0)
				goto err_out3;
		}
		lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
		if (!netif_is_multiqueue(dev))
			sch->flags |= TCQ_F_ONETXQUEUE;
	}

	sch->handle = handle;

	/* This exist to keep backward compatible with a userspace
	 * loophole, what allowed userspace to get IFF_NO_QUEUE
	 * facility on older kernels by setting tx_queue_len=0 (prior
	 * to qdisc init), and then forgot to reinit tx_queue_len
	 * before again attaching a qdisc.
	 */
	if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
		netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
	}

	if (ops->init) {
		err = ops->init(sch, tca[TCA_OPTIONS]);
		if (err != 0)
			goto err_out5;
	}

	if (qdisc_is_percpu_stats(sch)) {
		sch->cpu_bstats =
			netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!sch->cpu_bstats)
			goto err_out4;

		sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!sch->cpu_qstats)
			goto err_out4;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB]);
		if (IS_ERR(stab)) {
			err = PTR_ERR(stab);
			goto err_out4;
		}
		rcu_assign_pointer(sch->stab, stab);
	}
	if (tca[TCA_RATE]) {
		seqcount_t *running;

		err = -EOPNOTSUPP;
		if (sch->flags & TCQ_F_MQROOT)
			goto err_out4;

		if (sch->parent != TC_H_ROOT &&
		    !(sch->flags & TCQ_F_INGRESS) &&
		    (!p || !(p->flags & TCQ_F_MQROOT)))
			running = qdisc_root_sleeping_running(sch);
		else
			running = &sch->running;

		err = gen_new_estimator(&sch->bstats,
					sch->cpu_bstats,
					&sch->rate_est,
					NULL,
					running,
					tca[TCA_RATE]);
		if (err)
			goto err_out4;
	}

	qdisc_hash_add(sch, false);

	return sch;

err_out5:
	/* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
	if (ops->destroy)
		ops->destroy(sch);
err_out3:
	dev_put(dev);
	kfree((char *) sch - sch->padded);
err_out2:
	module_put(ops->owner);
err_out:
	*errp = err;
	return NULL;

err_out4:
	free_percpu(sch->cpu_bstats);
	free_percpu(sch->cpu_qstats);
	/*
	 * Any broken qdiscs that would require a ops->reset() here?
	 * The qdisc was never in action so it shouldn't be necessary.
	 */
	qdisc_put_stab(rtnl_dereference(sch->stab));
	if (ops->destroy)
		ops->destroy(sch);
	goto err_out3;
}
static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
{
	struct qdisc_size_table *ostab, *stab = NULL;
	int err = 0;

	if (tca[TCA_OPTIONS]) {
		if (!sch->ops->change)
			return -EINVAL;
		err = sch->ops->change(sch, tca[TCA_OPTIONS]);
		if (err)
			return err;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB]);
		if (IS_ERR(stab))
			return PTR_ERR(stab);
	}

	ostab = rtnl_dereference(sch->stab);
	rcu_assign_pointer(sch->stab, stab);
	qdisc_put_stab(ostab);

	if (tca[TCA_RATE]) {
		/* NB: ignores errors from replace_estimator
		   because change can't be undone. */
		if (sch->flags & TCQ_F_MQROOT)
			goto out;
		gen_replace_estimator(&sch->bstats,
				      sch->cpu_bstats,
				      &sch->rate_est,
				      NULL,
				      qdisc_root_sleeping_running(sch),
				      tca[TCA_RATE]);
	}
out:
	return 0;
}

struct check_loop_arg {
	struct qdisc_walker	w;
	struct Qdisc		*p;
	int			depth;
};

static int check_loop_fn(struct Qdisc *q, unsigned long cl,
			 struct qdisc_walker *w);

static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
{
	struct check_loop_arg	arg;

	if (q->ops->cl_ops == NULL)
		return 0;

	arg.w.stop = arg.w.skip = arg.w.count = 0;
	arg.w.fn = check_loop_fn;
	arg.depth = depth;
	arg.p = p;
	q->ops->cl_ops->walk(q, &arg.w);
	return arg.w.stop ? -ELOOP : 0;
}

static int
check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
{
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct check_loop_arg *arg = (struct check_loop_arg *)w;

	leaf = cops->leaf(q, cl);
	if (leaf) {
		if (leaf == arg->p || arg->depth > 7)
			return -ELOOP;
		return check_loop(leaf, arg->p, arg->depth + 1);
	}
	return 0;
}
static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;
	int err;

	if ((n->nlmsg_type != RTM_GETQDISC) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	clid = tcm->tcm_parent;
	if (clid) {
		if (clid != TC_H_ROOT) {
			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}
		if (!q)
			return -ENOENT;

		if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
			return -EINVAL;
	} else {
		q = qdisc_lookup(dev, tcm->tcm_handle);
		if (!q)
			return -ENOENT;
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;

	if (n->nlmsg_type == RTM_DELQDISC) {
		if (!clid)
			return -EINVAL;
		if (q->handle == 0)
			return -ENOENT;
		err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
		if (err != 0)
			return err;
	} else {
		qdisc_notify(net, skb, n, clid, NULL, q);
	}
	return 0;
}
/*
 * Create/change qdisc.
 */

static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm;
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q, *p;
	int err;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	/* Reinit, just in case something touches this. */
	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
	if (err < 0)
		return err;

	tcm = nlmsg_data(n);
	clid = tcm->tcm_parent;
	q = p = NULL;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	if (clid) {
		if (clid != TC_H_ROOT) {
			if (clid != TC_H_INGRESS) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue_create(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}

		/* It may be default qdisc, ignore it */
		if (q && q->handle == 0)
			q = NULL;

		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
			if (tcm->tcm_handle) {
				if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
					return -EEXIST;
				if (TC_H_MIN(tcm->tcm_handle))
					return -EINVAL;
				q = qdisc_lookup(dev, tcm->tcm_handle);
				if (!q)
					goto create_n_graft;
				if (n->nlmsg_flags & NLM_F_EXCL)
					return -EEXIST;
				if (tca[TCA_KIND] &&
				    nla_strcmp(tca[TCA_KIND], q->ops->id))
					return -EINVAL;
				if (q == p ||
				    (p && check_loop(q, p, 0)))
					return -ELOOP;
				qdisc_refcount_inc(q);
				goto graft;
			} else {
				if (!q)
					goto create_n_graft;

				/* This magic test requires explanation.
				 *
				 *   We know, that some child q is already
				 *   attached to this parent and have choice:
				 *   either to change it or to create/graft new one.
				 *
				 *   1. We are allowed to create/graft only
				 *   if CREATE and REPLACE flags are set.
				 *
				 *   2. If EXCL is set, requestor wanted to say,
				 *   that qdisc tcm_handle is not expected
				 *   to exist, so that we choose create/graft too.
				 *
				 *   3. The last case is when no flags are set.
				 *   Alas, it is sort of hole in API, we
				 *   cannot decide what to do unambiguously.
				 *   For now we select create/graft, if
				 *   user gave KIND, which does not match existing.
				 */
				if ((n->nlmsg_flags & NLM_F_CREATE) &&
				    (n->nlmsg_flags & NLM_F_REPLACE) &&
				    ((n->nlmsg_flags & NLM_F_EXCL) ||
				     (tca[TCA_KIND] &&
				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
					goto create_n_graft;
			}
		}
	} else {
		if (!tcm->tcm_handle)
			return -EINVAL;
		q = qdisc_lookup(dev, tcm->tcm_handle);
	}

	/* Change qdisc parameters */
	if (!q)
		return -ENOENT;
	if (n->nlmsg_flags & NLM_F_EXCL)
		return -EEXIST;
	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;
	err = qdisc_change(q, tca);
	if (err == 0)
		qdisc_notify(net, skb, n, clid, NULL, q);
	return err;

create_n_graft:
	if (!(n->nlmsg_flags & NLM_F_CREATE))
		return -ENOENT;
	if (clid == TC_H_INGRESS) {
		if (dev_ingress_queue(dev))
			q = qdisc_create(dev, dev_ingress_queue(dev), p,
					 tcm->tcm_parent, tcm->tcm_parent,
					 tca, &err);
		else
			err = -ENOENT;
	} else {
		struct netdev_queue *dev_queue;

		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
		else if (p)
			dev_queue = p->dev_queue;
		else
			dev_queue = netdev_get_tx_queue(dev, 0);

		q = qdisc_create(dev, dev_queue, p,
				 tcm->tcm_parent, tcm->tcm_handle,
				 tca, &err);
	}
	if (q == NULL) {
		if (err == -EAGAIN)
			goto replay;
		return err;
	}

graft:
	err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
	if (err) {
		if (q)
			qdisc_destroy(q);
		return err;
	}

	return 0;
}
static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
			      struct netlink_callback *cb,
			      int *q_idx_p, int s_q_idx, bool recur,
			      bool dump_invisible)
{
	int ret = 0, q_idx = *q_idx_p;
	struct Qdisc *q;
	int b;

	if (!root)
		return 0;

	q = root;
	if (q_idx < s_q_idx) {
		q_idx++;
	} else {
		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

	/* If dumping singletons, there is no qdisc_dev(root) and the singleton
	 * itself has already been dumped.
	 *
	 * If we've already dumped the top-level (ingress) qdisc above and the
	 * global qdisc hashtable, we don't want to hit it again.
	 */
	if (!qdisc_dev(root) || !recur)
		goto out;

	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
		if (q_idx < s_q_idx) {
			q_idx++;
			continue;
		}
		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

out:
	*q_idx_p = q_idx;
	return ret;

done:
	ret = -1;
	goto out;
}

static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int idx, q_idx;
	int s_idx, s_q_idx;
	struct net_device *dev;
	const struct nlmsghdr *nlh = cb->nlh;
	struct nlattr *tca[TCA_MAX + 1];
	int err;

	s_idx = cb->args[0];
	s_q_idx = q_idx = cb->args[1];

	idx = 0;

	err = nlmsg_parse(nlh, sizeof(struct tcmsg), tca, TCA_MAX, NULL, NULL);
	if (err < 0)
		return err;

	for_each_netdev(net, dev) {
		struct netdev_queue *dev_queue;

		if (idx < s_idx)
			goto cont;
		if (idx > s_idx)
			s_q_idx = 0;
		q_idx = 0;

		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
				       true, tca[TCA_DUMP_INVISIBLE]) < 0)
			goto done;

		dev_queue = dev_ingress_queue(dev);
		if (dev_queue &&
		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
				       &q_idx, s_q_idx, false,
				       tca[TCA_DUMP_INVISIBLE]) < 0)
			goto done;

cont:
		idx++;
	}

done:
	cb->args[0] = idx;
	cb->args[1] = q_idx;

	return skb->len;
}
/************************************************
 *	Traffic classes manipulation.		*
 ************************************************/

static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
			  unsigned long cl,
			  u32 portid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = q->handle;
	tcm->tcm_handle = q->handle;
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 NULL, &d, TCA_PAD) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

static int tclass_del_notify(struct net *net,
			     const struct Qdisc_class_ops *cops,
			     struct sk_buff *oskb, struct nlmsghdr *n,
			     struct Qdisc *q, unsigned long cl)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct sk_buff *skb;
	int err;

	if (!cops->delete)
		return -EOPNOTSUPP;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
			   RTM_DELTCLASS) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = cops->delete(q, cl);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}
#ifdef CONFIG_NET_CLS

struct tcf_bind_args {
	struct tcf_walker w;
	u32 classid;
	unsigned long cl;
};

static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_bind_args *a = (void *)arg;

	if (tp->ops->bind_class) {
		struct Qdisc *q = tcf_block_q(tp->chain->block);

		tp->ops->bind_class(n, a->classid, a->cl);
	}
	return 0;
}

static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
			   unsigned long new_cl)
{
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct tcf_block *block;
	struct tcf_chain *chain;
	unsigned long cl;

	cl = cops->find(q, portid);
	if (!cl)
		return;
	block = cops->tcf_block(q, cl);
	if (!block)
		return;
	list_for_each_entry(chain, &block->chain_list, list) {
		struct tcf_proto *tp;

		for (tp = rtnl_dereference(chain->filter_chain);
		     tp; tp = rtnl_dereference(tp->next)) {
			struct tcf_bind_args arg = {};

			arg.w.fn = tcf_node_bind;
			arg.classid = clid;
			arg.cl = new_cl;
			tp->ops->walk(tp, &arg.w);
		}
	}
}

#else

static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
			   unsigned long new_cl)
{
}

#endif
static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q = NULL;
	const struct Qdisc_class_ops *cops;
	unsigned long cl = 0;
	unsigned long new_cl;
	u32 portid;
	u32 clid;
	u32 qid;
	int err;

	if ((n->nlmsg_type != RTM_GETTCLASS) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	/*
	   parent == TC_H_UNSPEC - unspecified parent.
	   parent == TC_H_ROOT   - class is root, which has no parent.
	   parent == X:0	 - parent is root class.
	   parent == X:Y	 - parent is a node in hierarchy.
	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.

	   handle == 0:0	 - generate handle from kernel pool.
	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
	   handle == X:Y	 - clear.
	   handle == X:0	 - root class.
	 */
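	/* Illustrative handle arithmetic for the table above (assumed
	 * numbers): with parent == 0x00010000 ("1:0") and
	 * handle == 0x00010002 ("1:2"), TC_H_MAJ(handle) == 0x00010000
	 * selects qdisc 1:0, TC_H_MIN(handle) == 0x2 names the class inside
	 * it, and TC_H_MAKE(0x00010000, 3) == 0x00010003 denotes class 1:3
	 * of the same qdisc.
	 */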
	/* Step 1. Determine qdisc handle X:0 */

	portid = tcm->tcm_parent;
	clid = tcm->tcm_handle;
	qid = TC_H_MAJ(clid);

	if (portid != TC_H_ROOT) {
		u32 qid1 = TC_H_MAJ(portid);

		if (qid && qid1) {
			/* If both majors are known, they must be identical. */
			if (qid != qid1)
				return -EINVAL;
		} else if (qid1) {
			qid = qid1;
		} else if (qid == 0)
			qid = dev->qdisc->handle;

		/* Now qid is genuine qdisc handle consistent
		 * both with parent and child.
		 *
		 * TC_H_MAJ(portid) still may be unspecified, complete it now.
		 */
		if (portid)
			portid = TC_H_MAKE(qid, portid);
	} else {
		if (qid == 0)
			qid = dev->qdisc->handle;
	}

	/* OK. Locate qdisc */
	q = qdisc_lookup(dev, qid);
	if (!q)
		return -ENOENT;

	/* And check that it supports classes */
	cops = q->ops->cl_ops;
	if (cops == NULL)
		return -EINVAL;

	/* Now try to get class */
	if (clid == 0) {
		if (portid == TC_H_ROOT)
			clid = qid;
	} else
		clid = TC_H_MAKE(qid, clid);

	if (clid)
		cl = cops->find(q, clid);

	if (cl == 0) {
		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTCLASS ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			goto out;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTCLASS:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL)
				goto out;
			break;
		case RTM_DELTCLASS:
			err = tclass_del_notify(net, cops, skb, n, q, cl);
			/* Unbind the class from filters with 0 */
			tc_bind_tclass(q, portid, clid, 0);
			goto out;
		case RTM_GETTCLASS:
			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
			goto out;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	new_cl = cl;
	err = -EOPNOTSUPP;
	if (cops->change)
		err = cops->change(q, clid, portid, tca, &new_cl);
	if (err == 0) {
		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
		/* We just created a new class, need to do reverse binding. */
		if (cl != new_cl)
			tc_bind_tclass(q, portid, clid, new_cl);
	}
out:
	return err;
}
struct qdisc_dump_args {
	struct qdisc_walker	w;
	struct sk_buff		*skb;
	struct netlink_callback	*cb;
};

static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
			    struct qdisc_walker *arg)
{
	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			      RTM_NEWTCLASS);
}

static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
				struct tcmsg *tcm, struct netlink_callback *cb,
				int *t_p, int s_t)
{
	struct qdisc_dump_args arg;

	if (tc_qdisc_dump_ignore(q, false) ||
	    *t_p < s_t || !q->ops->cl_ops ||
	    (tcm->tcm_parent &&
	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
		(*t_p)++;
		return 0;
	}
	if (*t_p > s_t)
		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
	arg.w.fn = qdisc_class_dump;
	arg.skb = skb;
	arg.cb = cb;
	arg.w.stop = 0;
	arg.w.skip = cb->args[1];
	arg.w.count = 0;
	q->ops->cl_ops->walk(q, &arg.w);
	cb->args[1] = arg.w.count;
	if (arg.w.stop)
		return -1;
	(*t_p)++;
	return 0;
}
static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
			       struct tcmsg *tcm, struct netlink_callback *cb,
			       int *t_p, int s_t)
{
	struct Qdisc *q;
	int b;

	if (!root)
		return 0;

	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
		return -1;

	if (!qdisc_dev(root))
		return 0;

	if (tcm->tcm_parent) {
		q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
		if (q && tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
		return 0;
	}
	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
	}

	return 0;
}

static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct net_device *dev;
	int t, s_t;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return 0;
	dev = dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return 0;

	s_t = cb->args[0];
	t = 0;

	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
		goto done;

	dev_queue = dev_ingress_queue(dev);
	if (dev_queue &&
	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
				&t, s_t) < 0)
		goto done;

done:
	cb->args[0] = t;

	dev_put(dev);
	return skb->len;
}
#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "%08x %08x %08x %08x\n",
		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
		   1000000,
		   (u32)NSEC_PER_SEC / hrtimer_resolution);

	return 0;
}

static int psched_open(struct inode *inode, struct file *file)
{
	return single_open(file, psched_show, NULL);
}

static const struct file_operations psched_fops = {
	.owner   = THIS_MODULE,
	.open    = psched_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __net_init psched_net_init(struct net *net)
{
	struct proc_dir_entry *e;

	e = proc_create("psched", 0, net->proc_net, &psched_fops);
	if (e == NULL)
		return -ENOMEM;

	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
	remove_proc_entry("psched", net->proc_net);
}
#else
static int __net_init psched_net_init(struct net *net)
{
	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
}
#endif

static struct pernet_operations psched_net_ops = {
	.init = psched_net_init,
	.exit = psched_net_exit,
};
static int __init pktsched_init(void)
{
	int err;

	err = register_pernet_subsys(&psched_net_ops);
	if (err) {
		pr_err("pktsched_init: "
		       "cannot initialize per netns operations\n");
		return err;
	}

	register_qdisc(&pfifo_fast_ops);
	register_qdisc(&pfifo_qdisc_ops);
	register_qdisc(&bfifo_qdisc_ops);
	register_qdisc(&pfifo_head_drop_qdisc_ops);
	register_qdisc(&mq_qdisc_ops);
	register_qdisc(&noqueue_qdisc_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
		      0);
	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
		      0);

	return 0;
}

subsys_initcall(pktsched_init);