/*
 * net/sched/sch_api.c	Packet scheduler API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/hashtable.h>

#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
/*

   Short review.
   -------------

   This file consists of two interrelated parts:

   1. queueing disciplines manager frontend.
   2. traffic classes manager frontend.

   Generally, a queueing discipline ("qdisc") is a black box,
   which is able to enqueue packets and to dequeue them (when
   the device is ready to send something) in the order and at the times
   determined by the algorithm hidden in it.

   qdiscs are divided into two categories:
   - "queues", which have no internal structure visible from outside.
   - "schedulers", which split all the packets into "traffic classes",
     using "packet classifiers" (see cls_api.c)

   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them etc. etc. etc.

   The goal of the routines in this file is to translate
   the information supplied by the user in the form of handles
   into a form more intelligible to the kernel, to make some sanity
   checks and do the part of the work that is common to all qdiscs,
   and to provide rtnetlink notifications.

   All real intelligent work is done inside qdisc modules.



   Every discipline has two major routines: enqueue and dequeue.

   ---dequeue

   dequeue usually returns a skb to send. It is allowed to return NULL,
   but it does not mean that the queue is empty, it just means that
   the discipline does not want to send anything this time.
   The queue is really empty if q->q.qlen == 0.
   For complicated disciplines with multiple queues q->q is not a
   real packet queue, but q->q.qlen must nevertheless be valid.

   ---enqueue

   enqueue returns 0 if the packet was enqueued successfully.
   If the packet (this one or another one) was dropped, it returns
   not zero and one of:

   NET_XMIT_DROP	- this packet is dropped.
     Expected action: do not back off, but wait until the queue will clear.
   NET_XMIT_CN		- probably this packet was enqueued, but another one was dropped.
     Expected action: back off or ignore.

   Auxiliary routines:

   ---peek

   like dequeue but without removing a packet from the queue

   ---reset

   returns qdisc to initial state: purge all buffers, clear all
   timers, counters (except for statistics) etc.

   ---init

   initializes a newly created qdisc.

   ---destroy

   destroys resources allocated by init and during the lifetime of the qdisc.

   ---change

   changes qdisc parameters.
 */
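/*
 * Illustration (added; not part of the original file): a minimal sketch of
 * how a module fills in the routines described above for a trivial
 * FIFO-like discipline and registers it with register_qdisc().  The
 * "example_*" names are hypothetical; qdisc_enqueue_tail(),
 * qdisc_dequeue_head() and qdisc_peek_head() are the generic helpers
 * from <net/sch_generic.h>.
 */
#if 0
static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			   struct sk_buff **to_free)
{
	/* accept the packet into sch->q; a real qdisc would check its
	 * limit and call qdisc_drop(skb, sch, to_free) when full */
	return qdisc_enqueue_tail(skb, sch);
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	/* hand the oldest queued packet back to the device, or NULL */
	return qdisc_dequeue_head(sch);
}

static struct Qdisc_ops example_qdisc_ops __read_mostly = {
	.id		= "example",
	.priv_size	= 0,
	.enqueue	= example_enqueue,
	.dequeue	= example_dequeue,
	.peek		= qdisc_peek_head,
	.owner		= THIS_MODULE,
};

/* module init would then do: return register_qdisc(&example_qdisc_ops); */
#endif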
/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);
/************************************************
 *	Queueing disciplines manipulation.	*
 ************************************************/


/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;

/* Register/unregister queueing discipline */

int register_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int rc = -EEXIST;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (!strcmp(qops->id, q->id))
			goto out;

	if (qops->enqueue == NULL)
		qops->enqueue = noop_qdisc_ops.enqueue;
	if (qops->peek == NULL) {
		if (qops->dequeue == NULL)
			qops->peek = noop_qdisc_ops.peek;
		else
			goto out_einval;
	}
	if (qops->dequeue == NULL)
		qops->dequeue = noop_qdisc_ops.dequeue;

	if (qops->cl_ops) {
		const struct Qdisc_class_ops *cops = qops->cl_ops;

		if (!(cops->find && cops->walk && cops->leaf))
			goto out_einval;

		if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf))
			goto out_einval;
	}

	qops->next = NULL;
	*qp = qops;
	rc = 0;
out:
	write_unlock(&qdisc_mod_lock);
	return rc;

out_einval:
	rc = -EINVAL;
	goto out;
}
EXPORT_SYMBOL(register_qdisc);

int unregister_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int err = -ENOENT;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (q == qops)
			break;
	if (q) {
		*qp = q->next;
		q->next = NULL;
		err = 0;
	}
	write_unlock(&qdisc_mod_lock);
	return err;
}
EXPORT_SYMBOL(unregister_qdisc);

/* Get default qdisc if not otherwise specified */
void qdisc_get_default(char *name, size_t len)
{
	read_lock(&qdisc_mod_lock);
	strlcpy(name, default_qdisc_ops->id, len);
	read_unlock(&qdisc_mod_lock);
}

static struct Qdisc_ops *qdisc_lookup_default(const char *name)
{
	struct Qdisc_ops *q = NULL;

	for (q = qdisc_base; q; q = q->next) {
		if (!strcmp(name, q->id)) {
			if (!try_module_get(q->owner))
				q = NULL;
			break;
		}
	}

	return q;
}

/* Set new default qdisc to use */
int qdisc_set_default(const char *name)
{
	const struct Qdisc_ops *ops;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	write_lock(&qdisc_mod_lock);
	ops = qdisc_lookup_default(name);
	if (!ops) {
		/* Not found, drop lock and try to load module */
		write_unlock(&qdisc_mod_lock);
		request_module("sch_%s", name);
		write_lock(&qdisc_mod_lock);

		ops = qdisc_lookup_default(name);
	}

	if (ops) {
		/* Set new default */
		module_put(default_qdisc_ops->owner);
		default_qdisc_ops = ops;
	}
	write_unlock(&qdisc_mod_lock);

	return ops ? 0 : -ENOENT;
}

#ifdef CONFIG_NET_SCH_DEFAULT
/* Set default value from kernel config */
static int __init sch_default_qdisc(void)
{
	return qdisc_set_default(CONFIG_DEFAULT_NET_SCH);
}
late_initcall(sch_default_qdisc);
#endif

/* We know handle. Find qdisc among all qdisc's attached to device
 * (root qdisc, all its children, children of children etc.)
 * Note: caller either uses rtnl or rcu_read_lock()
 */
static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
{
	struct Qdisc *q;

	if (!qdisc_dev(root))
		return (root->handle == handle ? root : NULL);

	if (!(root->flags & TCQ_F_BUILTIN) &&
	    root->handle == handle)
		return root;

	hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle) {
		if (q->handle == handle)
			return q;
	}
	return NULL;
}

void qdisc_hash_add(struct Qdisc *q, bool invisible)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		ASSERT_RTNL();
		hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
		if (invisible)
			q->flags |= TCQ_F_INVISIBLE;
	}
}
EXPORT_SYMBOL(qdisc_hash_add);

void qdisc_hash_del(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		ASSERT_RTNL();
		hash_del_rcu(&q->hash);
	}
}
EXPORT_SYMBOL(qdisc_hash_del);

struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
	struct Qdisc *q;

	q = qdisc_match_from_root(dev->qdisc, handle);
	if (q)
		goto out;

	if (dev_ingress_queue(dev))
		q = qdisc_match_from_root(
			dev_ingress_queue(dev)->qdisc_sleeping,
			handle);
out:
	return q;
}

static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
	unsigned long cl;
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = p->ops->cl_ops;

	if (cops == NULL)
		return NULL;
	cl = cops->find(p, classid);

	if (cl == 0)
		return NULL;
	leaf = cops->leaf(p, cl);
	return leaf;
}

/* Find queueing discipline by name */

static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
{
	struct Qdisc_ops *q = NULL;

	if (kind) {
		read_lock(&qdisc_mod_lock);
		for (q = qdisc_base; q; q = q->next) {
			if (nla_strcmp(kind, q->id) == 0) {
				if (!try_module_get(q->owner))
					q = NULL;
				break;
			}
		}
		read_unlock(&qdisc_mod_lock);
	}
	return q;
}

/* The linklayer setting was not transferred from iproute2, in older
 * versions, and the rate table lookup system has been dropped from
 * the kernel. To keep backward compatibility with older iproute2 tc
 * utils, we detect the linklayer setting by detecting whether the rate
 * table was modified.
 *
 * For linklayer ATM table entries, the rate table will be aligned to
 * 48 bytes, thus some table entries will contain the same value. The
 * mpu (min packet unit) is also encoded into the old rate table, thus
 * starting from the mpu, we find the low and high table entries for
 * mapping this cell. If these entries contain the same value, then
 * the rate table has been modified for linklayer ATM.
 *
 * This is done by rounding mpu to the nearest 48-byte cell/entry,
 * then rounding up to the next cell, calculating the table entry one
 * below, and comparing.
 */
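/*
 * Worked example (illustration only, not from the original comment): with
 * mpu = 200 and cell_log = 3, low = roundup(200, 48) = 240 and
 * high = roundup(241, 48) = 288, so cell_low = 240 >> 3 = 30 and
 * cell_high = (288 >> 3) - 1 = 35.  If rtab[30] == rtab[35], the table
 * was built for a 48-byte-aligned (ATM) linklayer.
 */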
static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
{
	int low       = roundup(r->mpu, 48);
	int high      = roundup(low+1, 48);
	int cell_low  = low >> r->cell_log;
	int cell_high = (high >> r->cell_log) - 1;

	/* rtab is too inaccurate at rates > 100Mbit/s */
	if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
		pr_debug("TC linklayer: Giving up ATM detection\n");
		return TC_LINKLAYER_ETHERNET;
	}

	if ((cell_high > cell_low) && (cell_high < 256)
	    && (rtab[cell_low] == rtab[cell_high])) {
		pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
			 cell_low, cell_high, rtab[cell_high]);
		return TC_LINKLAYER_ATM;
	}
	return TC_LINKLAYER_ETHERNET;
}

static struct qdisc_rate_table *qdisc_rtab_list;

struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
					struct nlattr *tab)
{
	struct qdisc_rate_table *rtab;

	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
	    nla_len(tab) != TC_RTAB_SIZE)
		return NULL;

	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
		if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
		    !memcmp(&rtab->data, nla_data(tab), 1024)) {
			rtab->refcnt++;
			return rtab;
		}
	}

	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
	if (rtab) {
		rtab->rate = *r;
		rtab->refcnt = 1;
		memcpy(rtab->data, nla_data(tab), 1024);
		if (r->linklayer == TC_LINKLAYER_UNAWARE)
			r->linklayer = __detect_linklayer(r, rtab->data);
		rtab->next = qdisc_rtab_list;
		qdisc_rtab_list = rtab;
	}
	return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);

void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
	struct qdisc_rate_table *rtab, **rtabp;

	if (!tab || --tab->refcnt)
		return;

	for (rtabp = &qdisc_rtab_list;
	     (rtab = *rtabp) != NULL;
	     rtabp = &rtab->next) {
		if (rtab == tab) {
			*rtabp = rtab->next;
			kfree(rtab);
			return;
		}
	}
}
EXPORT_SYMBOL(qdisc_put_rtab);

static LIST_HEAD(qdisc_stab_list);

static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
	[TCA_STAB_DATA] = { .type = NLA_BINARY },
};

static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
{
	struct nlattr *tb[TCA_STAB_MAX + 1];
	struct qdisc_size_table *stab;
	struct tc_sizespec *s;
	unsigned int tsize = 0;
	u16 *tab = NULL;
	int err;

	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy, NULL);
	if (err < 0)
		return ERR_PTR(err);
	if (!tb[TCA_STAB_BASE])
		return ERR_PTR(-EINVAL);

	s = nla_data(tb[TCA_STAB_BASE]);

	if (s->tsize > 0) {
		if (!tb[TCA_STAB_DATA])
			return ERR_PTR(-EINVAL);
		tab = nla_data(tb[TCA_STAB_DATA]);
		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
	}

	if (tsize != s->tsize || (!tab && tsize > 0))
		return ERR_PTR(-EINVAL);

	list_for_each_entry(stab, &qdisc_stab_list, list) {
		if (memcmp(&stab->szopts, s, sizeof(*s)))
			continue;
		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
			continue;
		stab->refcnt++;
		return stab;
	}

	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	stab->refcnt = 1;
	stab->szopts = *s;
	if (tsize > 0)
		memcpy(stab->data, tab, tsize * sizeof(u16));

	list_add_tail(&stab->list, &qdisc_stab_list);

	return stab;
}

static void stab_kfree_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct qdisc_size_table, rcu));
}

void qdisc_put_stab(struct qdisc_size_table *tab)
{
	if (!tab)
		return;

	if (--tab->refcnt == 0) {
		list_del(&tab->list);
		call_rcu_bh(&tab->rcu, stab_kfree_rcu);
	}
}
EXPORT_SYMBOL(qdisc_put_stab);

static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_STAB);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	return -1;
}

void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab)
{
	int pkt_len, slot;

	pkt_len = skb->len + stab->szopts.overhead;
	if (unlikely(!stab->szopts.tsize))
		goto out;

	slot = pkt_len + stab->szopts.cell_align;
	if (unlikely(slot < 0))
		slot = 0;

	slot >>= stab->szopts.cell_log;
	if (likely(slot < stab->szopts.tsize))
		pkt_len = stab->data[slot];
	else
		pkt_len = stab->data[stab->szopts.tsize - 1] *
				(slot / stab->szopts.tsize) +
				stab->data[slot % stab->szopts.tsize];

	pkt_len <<= stab->szopts.size_log;
out:
	if (unlikely(pkt_len < 1))
		pkt_len = 1;
	qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
EXPORT_SYMBOL(__qdisc_calculate_pkt_len);

void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
{
	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
		pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
			txt, qdisc->ops->id, qdisc->handle >> 16);
		qdisc->flags |= TCQ_F_WARN_NONWC;
	}
}
EXPORT_SYMBOL(qdisc_warn_nonwc);

static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						 timer);

	rcu_read_lock();
	__netif_schedule(qdisc_root(wd->qdisc));
	rcu_read_unlock();

	return HRTIMER_NORESTART;
}

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
	hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	wd->timer.function = qdisc_watchdog;
	wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init);

void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
{
	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc_root_sleeping(wd->qdisc)->state))
		return;

	if (wd->last_expires == expires)
		return;

	wd->last_expires = expires;
	hrtimer_start(&wd->timer,
		      ns_to_ktime(expires),
		      HRTIMER_MODE_ABS_PINNED);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
	hrtimer_cancel(&wd->timer);
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);

static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
{
	struct hlist_head *h;
	unsigned int i;

	h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL);

	if (h != NULL) {
		for (i = 0; i < n; i++)
			INIT_HLIST_HEAD(&h[i]);
	}
	return h;
}

void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *next;
	struct hlist_head *nhash, *ohash;
	unsigned int nsize, nmask, osize;
	unsigned int i, h;

	/* Rehash when load factor exceeds 0.75 */
	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
		return;
	nsize = clhash->hashsize * 2;
	nmask = nsize - 1;
	nhash = qdisc_class_hash_alloc(nsize);
	if (nhash == NULL)
		return;

	ohash = clhash->hash;
	osize = clhash->hashsize;

	sch_tree_lock(sch);
	for (i = 0; i < osize; i++) {
		hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
			h = qdisc_class_hash(cl->classid, nmask);
			hlist_add_head(&cl->hnode, &nhash[h]);
		}
	}
	clhash->hash     = nhash;
	clhash->hashsize = nsize;
	clhash->hashmask = nmask;
	sch_tree_unlock(sch);

	kvfree(ohash);
}
EXPORT_SYMBOL(qdisc_class_hash_grow);

int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
{
	unsigned int size = 4;

	clhash->hash = qdisc_class_hash_alloc(size);
	if (clhash->hash == NULL)
		return -ENOMEM;
	clhash->hashsize  = size;
	clhash->hashmask  = size - 1;
	clhash->hashelems = 0;
	return 0;
}
EXPORT_SYMBOL(qdisc_class_hash_init);

void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
	kvfree(clhash->hash);
}
EXPORT_SYMBOL(qdisc_class_hash_destroy);

void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	unsigned int h;

	INIT_HLIST_NODE(&cl->hnode);
	h = qdisc_class_hash(cl->classid, clhash->hashmask);
	hlist_add_head(&cl->hnode, &clhash->hash[h]);
	clhash->hashelems++;
}
EXPORT_SYMBOL(qdisc_class_hash_insert);

void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	hlist_del(&cl->hnode);
	clhash->hashelems--;
}
EXPORT_SYMBOL(qdisc_class_hash_remove);

/* Allocate an unique handle from space managed by kernel
 * Possible range is [8000-FFFF]:0000 (0x8000 values)
 */
static u32 qdisc_alloc_handle(struct net_device *dev)
{
	int i = 0x8000;
	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

	do {
		autohandle += TC_H_MAKE(0x10000U, 0);
		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
			autohandle = TC_H_MAKE(0x80000000U, 0);
		if (!qdisc_lookup(dev, autohandle))
			return autohandle;
		cond_resched();
	} while	(--i > 0);

	return 0;
}

void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
			       unsigned int len)
{
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	u32 parentid;
	bool notify;
	int drops;

	if (n == 0 && len == 0)
		return;
	drops = max_t(int, n, 0);
	rcu_read_lock();
	while ((parentid = sch->parent)) {
		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
			break;

		if (sch->flags & TCQ_F_NOPARENT)
			break;
		/* Notify parent qdisc only if child qdisc becomes empty.
		 *
		 * If child was empty even before update then backlog
		 * counter is screwed and we skip notification because
		 * parent class is already passive.
		 */
		notify = !sch->q.qlen && !WARN_ON_ONCE(!n);
		/* TODO: perform the search on a per txq basis */
		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
		if (sch == NULL) {
			WARN_ON_ONCE(parentid != TC_H_ROOT);
			break;
		}
		cops = sch->ops->cl_ops;
		if (notify && cops->qlen_notify) {
			cl = cops->find(sch, parentid);
			cops->qlen_notify(sch, cl);
		}
		sch->q.qlen -= n;
		sch->qstats.backlog -= len;
		__qdisc_qstats_drop(sch, drops);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(qdisc_tree_reduce_backlog);

static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			 u32 portid, u32 seq, u16 flags, int event)
{
	struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
	struct gnet_stats_queue __percpu *cpu_qstats = NULL;
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	struct qdisc_size_table *stab;
	__u32 qlen;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = refcount_read(&q->refcnt);
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
		goto nla_put_failure;
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
		goto nla_put_failure;
	qlen = q->q.qlen;

	stab = rtnl_dereference(q->stab);
	if (stab && qdisc_dump_stab(skb, stab) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 NULL, &d, TCA_PAD) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto nla_put_failure;

	if (qdisc_is_percpu_stats(q)) {
		cpu_bstats = q->cpu_bstats;
		cpu_qstats = q->cpu_qstats;
	}

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q),
				  &d, cpu_bstats, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
{
	if (q->flags & TCQ_F_BUILTIN)
		return true;
	if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
		return true;

	return false;
}

static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (old && !tc_qdisc_dump_ignore(old, false)) {
		if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
				  0, RTM_DELQDISC) < 0)
			goto err_out;
	}
	if (new && !tc_qdisc_dump_ignore(new, false)) {
		if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
			goto err_out;
	}

	if (skb->len)
		return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				      n->nlmsg_flags & NLM_F_ECHO);

err_out:
	kfree_skb(skb);
	return -EINVAL;
}

static void notify_and_destroy(struct net *net, struct sk_buff *skb,
			       struct nlmsghdr *n, u32 clid,
			       struct Qdisc *old, struct Qdisc *new)
{
	if (new || old)
		qdisc_notify(net, skb, n, clid, old, new);

	if (old)
		qdisc_destroy(old);
}

/* Graft qdisc "new" to class "classid" of qdisc "parent" or
 * to device "dev".
 *
 * When appropriate send a netlink notification using 'skb'
 * and "n".
 *
 * On success, destroy old qdisc.
 */
static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
		       struct Qdisc *new, struct Qdisc *old)
{
	struct Qdisc *q = old;
	struct net *net = dev_net(dev);

	if (parent == NULL) {
		unsigned int i, num_q, ingress;

		ingress = 0;
		num_q = dev->num_tx_queues;
		if ((q && q->flags & TCQ_F_INGRESS) ||
		    (new && new->flags & TCQ_F_INGRESS)) {
			num_q = 1;
			ingress = 1;
			if (!dev_ingress_queue(dev))
				return -ENOENT;
		}

		if (dev->flags & IFF_UP)
			dev_deactivate(dev);

		if (new && new->ops->attach)
			goto skip;

		for (i = 0; i < num_q; i++) {
			struct netdev_queue *dev_queue = dev_ingress_queue(dev);

			if (!ingress)
				dev_queue = netdev_get_tx_queue(dev, i);

			old = dev_graft_qdisc(dev_queue, new);
			if (new && i > 0)
				qdisc_refcount_inc(new);

			if (!ingress)
				qdisc_destroy(old);
		}

skip:
		if (!ingress) {
			notify_and_destroy(net, skb, n, classid,
					   dev->qdisc, new);
			if (new && !new->ops->attach)
				qdisc_refcount_inc(new);
			dev->qdisc = new ? : &noop_qdisc;

			if (new && new->ops->attach)
				new->ops->attach(new);
		} else {
			notify_and_destroy(net, skb, n, classid, old, new);
		}

		if (dev->flags & IFF_UP)
			dev_activate(dev);
	} else {
		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
		int err = -EOPNOTSUPP;

		if (cops && cops->graft) {
			unsigned long cl = cops->find(parent, classid);

			if (cl)
				err = cops->graft(parent, cl, new, &old);
			else
				err = -ENOENT;
		}
		if (!err)
			notify_and_destroy(net, skb, n, classid, old, new);
		return err;
	}
	return 0;
}

/* lockdep annotation is needed for ingress; egress gets it only for name */
static struct lock_class_key qdisc_tx_lock;
static struct lock_class_key qdisc_rx_lock;

/*
   Allocate and initialize new qdisc.

   Parameters are passed via opt.
 */
static struct Qdisc *qdisc_create(struct net_device *dev,
				  struct netdev_queue *dev_queue,
				  struct Qdisc *p, u32 parent, u32 handle,
				  struct nlattr **tca, int *errp)
{
	int err;
	struct nlattr *kind = tca[TCA_KIND];
	struct Qdisc *sch;
	struct Qdisc_ops *ops;
	struct qdisc_size_table *stab;

	ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_MODULES
	if (ops == NULL && kind != NULL) {
		char name[IFNAMSIZ];

		if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
			/* We dropped the RTNL semaphore in order to
			 * perform the module load.  So, even if we
			 * succeeded in loading the module we have to
			 * tell the caller to replay the request.  We
			 * indicate this using -EAGAIN.
			 * We replay the request because the device may
			 * go away in the mean time.
			 */
			rtnl_unlock();
			request_module("sch_%s", name);
			rtnl_lock();
			ops = qdisc_lookup_ops(kind);
			if (ops != NULL) {
				/* We will try again qdisc_lookup_ops,
				 * so don't keep a reference.
				 */
				module_put(ops->owner);
			}
		}
	}
#endif

	sch = qdisc_alloc(dev_queue, ops);

	sch->parent = parent;

	if (handle == TC_H_INGRESS) {
		sch->flags |= TCQ_F_INGRESS;
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
		lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
	} else {
		if (handle == 0)
			handle = qdisc_alloc_handle(dev);
		lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
		if (!netif_is_multiqueue(dev))
			sch->flags |= TCQ_F_ONETXQUEUE;
	}

	sch->handle = handle;

	/* This exists to keep backward compatibility with a userspace
	 * loophole, which allowed userspace to get the IFF_NO_QUEUE
	 * facility on older kernels by setting tx_queue_len=0 (prior
	 * to qdisc init), and then forgetting to reinit tx_queue_len
	 * before again attaching a qdisc.
	 */
	if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
		netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
	}

	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
		if (tca[TCA_STAB]) {
			stab = qdisc_get_stab(tca[TCA_STAB]);
			if (IS_ERR(stab)) {
				err = PTR_ERR(stab);
			}
			rcu_assign_pointer(sch->stab, stab);
		}
		if (tca[TCA_RATE]) {
			seqcount_t *running;

			if (sch->flags & TCQ_F_MQROOT)
				err = -EOPNOTSUPP;

			if ((sch->parent != TC_H_ROOT) &&
			    !(sch->flags & TCQ_F_INGRESS) &&
			    (!p || !(p->flags & TCQ_F_MQROOT)))
				running = qdisc_root_sleeping_running(sch);
			else
				running = &sch->running;

			err = gen_new_estimator(&sch->bstats,
						sch->cpu_bstats,
						&sch->rate_est,
						NULL,
						running,
						tca[TCA_RATE]);
		}

		qdisc_hash_add(sch, false);

		return sch;
	}

	/* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
	module_put(ops->owner);

	/*
	 * Any broken qdiscs that would require a ops->reset() here?
	 * The qdisc was never in action so it shouldn't be necessary.
	 */
	qdisc_put_stab(rtnl_dereference(sch->stab));
	*errp = err;
	return NULL;
}

static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
{
	struct qdisc_size_table *ostab, *stab = NULL;
	int err = 0;

	if (tca[TCA_OPTIONS]) {
		if (sch->ops->change == NULL)
			return -EINVAL;
		err = sch->ops->change(sch, tca[TCA_OPTIONS]);
		if (err)
			return err;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB]);
		if (IS_ERR(stab))
			return PTR_ERR(stab);
	}

	ostab = rtnl_dereference(sch->stab);
	rcu_assign_pointer(sch->stab, stab);
	qdisc_put_stab(ostab);

	if (tca[TCA_RATE]) {
		/* NB: ignores errors from replace_estimator
		   because change can't be undone. */
		if (sch->flags & TCQ_F_MQROOT)
			goto out;
		gen_replace_estimator(&sch->bstats,
				      sch->cpu_bstats,
				      &sch->rate_est,
				      NULL,
				      qdisc_root_sleeping_running(sch),
				      tca[TCA_RATE]);
	}
out:
	return 0;
}

struct check_loop_arg {
	struct qdisc_walker	w;
	struct Qdisc		*p;
	int			depth;
};

static int check_loop_fn(struct Qdisc *q, unsigned long cl,
			 struct qdisc_walker *w);

static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
{
	struct check_loop_arg	arg;

	if (q->ops->cl_ops == NULL)
		return 0;

	arg.w.stop = arg.w.skip = arg.w.count = 0;
	arg.w.fn = check_loop_fn;
	arg.depth = depth;
	arg.p = p;
	q->ops->cl_ops->walk(q, &arg.w);
	return arg.w.stop ? -ELOOP : 0;
}

static int
check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
{
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct check_loop_arg *arg = (struct check_loop_arg *)w;

	leaf = cops->leaf(q, cl);
	if (leaf) {
		if (leaf == arg->p || arg->depth > 7)
			return -ELOOP;
		return check_loop(leaf, arg->p, arg->depth + 1);
	}
	return 0;
}

const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
	[TCA_KIND]		= { .type = NLA_NUL_STRING,
				    .len = IFNAMSIZ - 1 },
	[TCA_RATE]		= { .type = NLA_BINARY,
				    .len = sizeof(struct tc_estimator) },
	[TCA_STAB]		= { .type = NLA_NESTED },
	[TCA_DUMP_INVISIBLE]	= { .type = NLA_FLAG },
	[TCA_CHAIN]		= { .type = NLA_U32 },
};

static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;
	int err;

	if ((n->nlmsg_type != RTM_GETQDISC) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
			  extack);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	clid = tcm->tcm_parent;
	if (clid) {
		if (clid != TC_H_ROOT) {
			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}
		if (!q)
			return -ENOENT;

		if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
			return -EINVAL;
	} else {
		q = qdisc_lookup(dev, tcm->tcm_handle);
		if (!q)
			return -ENOENT;
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;

	if (n->nlmsg_type == RTM_DELQDISC) {
		if (!clid)
			return -EINVAL;
		if (q->handle == 0)
			return -ENOENT;
		err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
		if (err != 0)
			return err;
	} else {
		qdisc_notify(net, skb, n, clid, NULL, q);
	}
	return 0;
}

/*
 * Create/change qdisc.
 */
static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm;
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q, *p;
	int err;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	/* Reinit, just in case something touches this. */
	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
			  extack);
	if (err < 0)
		return err;

	tcm = nlmsg_data(n);
	clid = tcm->tcm_parent;
	q = p = NULL;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;


	if (clid) {
		if (clid != TC_H_ROOT) {
			if (clid != TC_H_INGRESS) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue_create(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}

		/* It may be default qdisc, ignore it */
		if (q && q->handle == 0)
			q = NULL;

		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
			if (tcm->tcm_handle) {
				if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
					return -EEXIST;
				if (TC_H_MIN(tcm->tcm_handle))
					return -EINVAL;
				q = qdisc_lookup(dev, tcm->tcm_handle);
				if (!q)
					goto create_n_graft;
				if (n->nlmsg_flags & NLM_F_EXCL)
					return -EEXIST;
				if (tca[TCA_KIND] &&
				    nla_strcmp(tca[TCA_KIND], q->ops->id))
					return -EINVAL;
				if (q == p ||
				    (p && check_loop(q, p, 0)))
					return -ELOOP;
				qdisc_refcount_inc(q);
				goto graft;
			} else {
				if (!q)
					goto create_n_graft;

				/* This magic test requires explanation.
				 *
				 *   We know, that some child q is already
				 *   attached to this parent and have choice:
				 *   either to change it or to create/graft new one.
				 *
				 *   1. We are allowed to create/graft only
				 *   if CREATE and REPLACE flags are set.
				 *
				 *   2. If EXCL is set, requestor wanted to say,
				 *   that qdisc tcm_handle is not expected
				 *   to exist, so that we choose create/graft too.
				 *
				 *   3. The last case is when no flags are set.
				 *   Alas, it is sort of hole in API, we
				 *   cannot decide what to do unambiguously.
				 *   For now we select create/graft, if
				 *   user gave KIND, which does not match existing.
				 */
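				/* Summary of the three cases above (added
				 * for clarity, not part of the original
				 * comment); "KIND differs" means TCA_KIND was
				 * given and does not match the kind of the
				 * already-attached child qdisc:
				 *
				 *   CREATE+REPLACE+EXCL           -> create/graft a new qdisc
				 *   CREATE+REPLACE, KIND differs  -> create/graft a new qdisc
				 *   anything else                 -> fall through and change
				 *                                    the existing qdisc
				 */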
				if ((n->nlmsg_flags & NLM_F_CREATE) &&
				    (n->nlmsg_flags & NLM_F_REPLACE) &&
				    ((n->nlmsg_flags & NLM_F_EXCL) ||
				     (tca[TCA_KIND] &&
				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
					goto create_n_graft;
			}
		}
	} else {
		if (!tcm->tcm_handle)
			return -EINVAL;
		q = qdisc_lookup(dev, tcm->tcm_handle);
	}

	/* Change qdisc parameters */
	if (!q)
		return -ENOENT;
	if (n->nlmsg_flags & NLM_F_EXCL)
		return -EEXIST;
	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;
	err = qdisc_change(q, tca);
	if (err == 0)
		qdisc_notify(net, skb, n, clid, NULL, q);
	return err;

create_n_graft:
	if (!(n->nlmsg_flags & NLM_F_CREATE))
		return -ENOENT;
	if (clid == TC_H_INGRESS) {
		if (dev_ingress_queue(dev))
			q = qdisc_create(dev, dev_ingress_queue(dev), p,
					 tcm->tcm_parent, tcm->tcm_parent,
					 tca, &err);
		else
			err = -ENOENT;
	} else {
		struct netdev_queue *dev_queue;

		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
		else if (p)
			dev_queue = p->dev_queue;
		else
			dev_queue = netdev_get_tx_queue(dev, 0);

		q = qdisc_create(dev, dev_queue, p,
				 tcm->tcm_parent, tcm->tcm_handle,
				 tca, &err);
	}
	if (q == NULL) {
		if (err == -EAGAIN)
			goto replay;
		return err;
	}

graft:
	err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
	if (err) {
		if (q)
			qdisc_destroy(q);
		return err;
	}

	return 0;
}

static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
			      struct netlink_callback *cb,
			      int *q_idx_p, int s_q_idx, bool recur,
			      bool dump_invisible)
{
	int ret = 0, q_idx = *q_idx_p;
	struct Qdisc *q;
	int b;

	if (!root)
		return 0;

	q = root;
	if (q_idx < s_q_idx) {
		q_idx++;
	} else {
		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

	/* If dumping singletons, there is no qdisc_dev(root) and the singleton
	 * itself has already been dumped.
	 *
	 * If we've already dumped the top-level (ingress) qdisc above and the global
	 * qdisc hashtable, we don't want to hit it again
	 */
	if (!qdisc_dev(root) || !recur)
		goto out;

	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
		if (q_idx < s_q_idx) {
			q_idx++;
			continue;
		}
		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

out:
	*q_idx_p = q_idx;
	return ret;
done:
	ret = -1;
	goto out;
}

static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int idx, q_idx;
	int s_idx, s_q_idx;
	struct net_device *dev;
	const struct nlmsghdr *nlh = cb->nlh;
	struct nlattr *tca[TCA_MAX + 1];
	int err;

	s_idx = cb->args[0];
	s_q_idx = q_idx = cb->args[1];

	idx = 0;
	ASSERT_RTNL();

	err = nlmsg_parse(nlh, sizeof(struct tcmsg), tca, TCA_MAX,
			  rtm_tca_policy, NULL);
	if (err < 0)
		return err;

	for_each_netdev(net, dev) {
		struct netdev_queue *dev_queue;

		if (idx < s_idx)
			goto cont;
		if (idx > s_idx)
			s_q_idx = 0;
		q_idx = 0;

		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
				       true, tca[TCA_DUMP_INVISIBLE]) < 0)
			goto done;

		dev_queue = dev_ingress_queue(dev);
		if (dev_queue &&
		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
				       &q_idx, s_q_idx, false,
				       tca[TCA_DUMP_INVISIBLE]) < 0)
			goto done;

cont:
		idx++;
	}

done:
	cb->args[0] = idx;
	cb->args[1] = q_idx;

	return skb->len;
}



/************************************************
 *	Traffic classes manipulation.		*
 ************************************************/

static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
			  unsigned long cl,
			  u32 portid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = q->handle;
	tcm->tcm_handle = q->handle;

	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 NULL, &d, TCA_PAD) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

static int tclass_del_notify(struct net *net,
			     const struct Qdisc_class_ops *cops,
			     struct sk_buff *oskb, struct nlmsghdr *n,
			     struct Qdisc *q, unsigned long cl)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct sk_buff *skb;
	int err = 0;

	if (!cops->delete)
		return -EOPNOTSUPP;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
			   RTM_DELTCLASS) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = cops->delete(q, cl);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

#ifdef CONFIG_NET_CLS

struct tcf_bind_args {
	struct tcf_walker w;
	u32 classid;
	unsigned long cl;
	unsigned long base;
};

static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_bind_args *a = (void *)arg;

	if (tp->ops->bind_class) {
		struct Qdisc *q = tcf_block_q(tp->chain->block);

		tp->ops->bind_class(n, a->classid, a->cl, q, a->base);
	}
	return 0;
}

static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
			   unsigned long new_cl)
{
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct tcf_block *block;
	struct tcf_chain *chain;
	unsigned long cl;

	cl = cops->find(q, portid);
	if (!cl)
		return;
	if (!cops->tcf_block)
		return;
	block = cops->tcf_block(q, cl);
	if (!block)
		return;
	list_for_each_entry(chain, &block->chain_list, list) {
		struct tcf_proto *tp;

		for (tp = rtnl_dereference(chain->filter_chain);
		     tp; tp = rtnl_dereference(tp->next)) {
			struct tcf_bind_args arg = {};

			arg.w.fn = tcf_node_bind;
			arg.classid = clid;
			arg.base = cl;
			arg.cl = new_cl;
			tp->ops->walk(tp, &arg.w);
		}
	}
}

#else

static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
			   unsigned long new_cl)
{
}

#endif

static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q = NULL;
	const struct Qdisc_class_ops *cops;
	unsigned long cl = 0;
	unsigned long new_cl;
	u32 portid;
	u32 clid;
	u32 qid;
	int err;

	if ((n->nlmsg_type != RTM_GETTCLASS) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
			  extack);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	/*
	   parent == TC_H_UNSPEC - unspecified parent.
	   parent == TC_H_ROOT   - class is root, which has no parent.
	   parent == X:0	 - parent is root class.
	   parent == X:Y	 - parent is a node in hierarchy.
	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.

	   handle == 0:0	 - generate handle from kernel pool.
	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
	   handle == X:Y	 - clear.
	   handle == X:0	 - root class.
	 */
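	/*
	 * Illustration (added, not part of the original comment): a class or
	 * qdisc identifier is a 32-bit value with the major number in the
	 * upper 16 bits and the minor in the lower 16.  For example, for the
	 * value 0x00010014, TC_H_MAJ() yields 0x00010000 and TC_H_MIN()
	 * yields 0x14, and TC_H_MAKE(0x00010000, 0x14) rebuilds the same
	 * handle.
	 */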
	/* Step 1. Determine qdisc handle X:0 */

	portid = tcm->tcm_parent;
	clid = tcm->tcm_handle;
	qid = TC_H_MAJ(clid);

	if (portid != TC_H_ROOT) {
		u32 qid1 = TC_H_MAJ(portid);

		if (qid && qid1) {
			/* If both majors are known, they must be identical. */
			if (qid != qid1)
				return -EINVAL;
		} else if (qid1) {
			qid = qid1;
		} else if (qid == 0)
			qid = dev->qdisc->handle;

		/* Now qid is genuine qdisc handle consistent
		 * both with parent and child.
		 *
		 * TC_H_MAJ(portid) still may be unspecified, complete it now.
		 */
		if (portid)
			portid = TC_H_MAKE(qid, portid);
	} else {
		if (qid == 0)
			qid = dev->qdisc->handle;
	}

	/* OK. Locate qdisc */
	q = qdisc_lookup(dev, qid);
	if (!q)
		return -ENOENT;

	/* And check that it supports classes */
	cops = q->ops->cl_ops;
	if (cops == NULL)
		return -EINVAL;

	/* Now try to get class */
	if (clid == 0) {
		if (portid == TC_H_ROOT)
			clid = qid;
	} else
		clid = TC_H_MAKE(qid, clid);

	if (clid)
		cl = cops->find(q, clid);

	if (cl == 0) {
		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTCLASS ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			goto out;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTCLASS:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL)
				goto out;
			break;
		case RTM_DELTCLASS:
			err = tclass_del_notify(net, cops, skb, n, q, cl);
			/* Unbind the class with filters with 0 */
			tc_bind_tclass(q, portid, clid, 0);
			goto out;
		case RTM_GETTCLASS:
			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
			goto out;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	new_cl = cl;
	err = -EOPNOTSUPP;
	if (cops->change)
		err = cops->change(q, clid, portid, tca, &new_cl);
	if (err == 0) {
		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
		/* We just created a new class, need to do reverse binding. */
		if (cl != new_cl)
			tc_bind_tclass(q, portid, clid, new_cl);
	}
out:
	return err;
}

struct qdisc_dump_args {
	struct qdisc_walker	w;
	struct sk_buff		*skb;
	struct netlink_callback	*cb;
};

static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
			    struct qdisc_walker *arg)
{
	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			      RTM_NEWTCLASS);
}

static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
				struct tcmsg *tcm, struct netlink_callback *cb,
				int *t_p, int s_t)
{
	struct qdisc_dump_args arg;

	if (tc_qdisc_dump_ignore(q, false) ||
	    *t_p < s_t || !q->ops->cl_ops ||
	    (tcm->tcm_parent &&
	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
		(*t_p)++;
		return 0;
	}
	if (*t_p > s_t)
		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
	arg.w.fn = qdisc_class_dump;
	arg.skb = skb;
	arg.cb = cb;
	arg.w.stop  = 0;
	arg.w.skip = cb->args[1];
	arg.w.count = 0;
	q->ops->cl_ops->walk(q, &arg.w);
	cb->args[1] = arg.w.count;
	if (arg.w.stop)
		return -1;
	(*t_p)++;
	return 0;
}

static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
			       struct tcmsg *tcm, struct netlink_callback *cb,
			       int *t_p, int s_t)
{
	struct Qdisc *q;
	int b;

	if (!root)
		return 0;

	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
		return -1;

	if (!qdisc_dev(root))
		return 0;

	if (tcm->tcm_parent) {
		q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
		if (q && q != root &&
		    tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
		return 0;
	}
	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
	}

	return 0;
}

static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct net_device *dev;
	int t, s_t;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return 0;
	dev = dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return 0;

	s_t = cb->args[0];
	t = 0;

	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
		goto done;

	dev_queue = dev_ingress_queue(dev);
	if (dev_queue &&
	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
				&t, s_t) < 0)
		goto done;

done:
	cb->args[0] = t;

	dev_put(dev);
	return skb->len;
}

#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "%08x %08x %08x %08x\n",
		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
		   1000000,
		   (u32)NSEC_PER_SEC / hrtimer_resolution);

	return 0;
}

static int psched_open(struct inode *inode, struct file *file)
{
	return single_open(file, psched_show, NULL);
}

static const struct file_operations psched_fops = {
	.owner		= THIS_MODULE,
	.open		= psched_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __net_init psched_net_init(struct net *net)
{
	struct proc_dir_entry *e;

	e = proc_create("psched", 0, net->proc_net, &psched_fops);
	if (e == NULL)
		return -ENOMEM;

	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
	remove_proc_entry("psched", net->proc_net);
}
#else
static int __net_init psched_net_init(struct net *net)
{
	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
}
#endif

static struct pernet_operations psched_net_ops = {
	.init = psched_net_init,
	.exit = psched_net_exit,
};

static int __init pktsched_init(void)
{
	int err;

	err = register_pernet_subsys(&psched_net_ops);
	if (err) {
		pr_err("pktsched_init: "
		       "cannot initialize per netns operations\n");
		return err;
	}

	register_qdisc(&pfifo_fast_ops);
	register_qdisc(&pfifo_qdisc_ops);
	register_qdisc(&bfifo_qdisc_ops);
	register_qdisc(&pfifo_head_drop_qdisc_ops);
	register_qdisc(&mq_qdisc_ops);
	register_qdisc(&noqueue_qdisc_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
		      0);
	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
		      0);

	return 0;
}

subsys_initcall(pktsched_init);