/*
 * net/sched/sch_drr.c         Deficit Round Robin scheduler
 *
 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
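
/*
 * Per-class state: each class keeps its configured quantum, its current
 * deficit counter, the child qdisc holding its packets, and a node on the
 * scheduler's list of active (backlogged) classes.
 */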
struct drr_class {
        struct Qdisc_class_common       common;
        unsigned int                    filter_cnt;

        struct gnet_stats_basic_packed  bstats;
        struct gnet_stats_queue         qstats;
        struct net_rate_estimator __rcu *rate_est;
        struct list_head                alist;
        struct Qdisc                    *qdisc;

        u32                             quantum;
        u32                             deficit;
};

struct drr_sched {
        struct list_head                active;
        struct tcf_proto __rcu          *filter_list;
        struct tcf_block                *block;
        struct Qdisc_class_hash         clhash;
};
static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
{
        struct drr_sched *q = qdisc_priv(sch);
        struct Qdisc_class_common *clc;

        clc = qdisc_class_find(&q->clhash, classid);
        if (clc == NULL)
                return NULL;
        return container_of(clc, struct drr_class, common);
}
static void drr_purge_queue(struct drr_class *cl)
{
        unsigned int len = cl->qdisc->q.qlen;
        unsigned int backlog = cl->qdisc->qstats.backlog;

        qdisc_reset(cl->qdisc);
        qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
}
static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
        [TCA_DRR_QUANTUM]       = { .type = NLA_U32 },
};
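
/*
 * Create a new class or update an existing one. The quantum defaults to
 * the device MTU when TCA_DRR_QUANTUM is not supplied; a quantum of zero
 * is rejected.
 */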
static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                            struct nlattr **tca, unsigned long *arg)
{
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl = (struct drr_class *)*arg;
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_DRR_MAX + 1];
        u32 quantum;
        int err;

        if (!opt)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy, NULL);
        if (err < 0)
                return err;

        if (tb[TCA_DRR_QUANTUM]) {
                quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
                if (quantum == 0)
                        return -EINVAL;
        } else
                quantum = psched_mtu(qdisc_dev(sch));

        if (cl != NULL) {
                if (tca[TCA_RATE]) {
                        err = gen_replace_estimator(&cl->bstats, NULL,
                                                    &cl->rate_est,
                                                    NULL,
                                                    qdisc_root_sleeping_running(sch),
                                                    tca[TCA_RATE]);
                        if (err)
                                return err;
                }

                sch_tree_lock(sch);
                if (tb[TCA_DRR_QUANTUM])
                        cl->quantum = quantum;
                sch_tree_unlock(sch);

                return 0;
        }

        cl = kzalloc(sizeof(struct drr_class), GFP_KERNEL);
        if (cl == NULL)
                return -ENOBUFS;

        cl->common.classid = classid;
        cl->quantum        = quantum;
        cl->qdisc          = qdisc_create_dflt(sch->dev_queue,
                                               &pfifo_qdisc_ops, classid);
        if (cl->qdisc == NULL)
                cl->qdisc = &noop_qdisc;
        else
                qdisc_hash_add(cl->qdisc, true);

        if (tca[TCA_RATE]) {
                err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
                                            NULL,
                                            qdisc_root_sleeping_running(sch),
                                            tca[TCA_RATE]);
                if (err) {
                        qdisc_destroy(cl->qdisc);
                        kfree(cl);
                        return err;
                }
        }

        sch_tree_lock(sch);
        qdisc_class_hash_insert(&q->clhash, &cl->common);
        sch_tree_unlock(sch);

        qdisc_class_hash_grow(sch, &q->clhash);

        *arg = (unsigned long)cl;
        return 0;
}
static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
{
        gen_kill_estimator(&cl->rate_est);
        qdisc_destroy(cl->qdisc);
        kfree(cl);
}
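
/*
 * A class may only be deleted while no tc filters reference it;
 * filter_cnt is maintained by drr_bind_tcf()/drr_unbind_tcf().
 */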
static int drr_delete_class(struct Qdisc *sch, unsigned long arg)
{
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl = (struct drr_class *)arg;

        if (cl->filter_cnt > 0)
                return -EBUSY;

        sch_tree_lock(sch);

        drr_purge_queue(cl);
        qdisc_class_hash_remove(&q->clhash, &cl->common);

        sch_tree_unlock(sch);

        drr_destroy_class(sch, cl);
        return 0;
}
static unsigned long drr_search_class(struct Qdisc *sch, u32 classid)
{
        return (unsigned long)drr_find_class(sch, classid);
}
static struct tcf_block *drr_tcf_block(struct Qdisc *sch, unsigned long cl)
{
        struct drr_sched *q = qdisc_priv(sch);

        return q->block;
}
static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent,
                                  u32 classid)
{
        struct drr_class *cl = drr_find_class(sch, classid);

        if (cl != NULL)
                cl->filter_cnt++;

        return (unsigned long)cl;
}
static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
        struct drr_class *cl = (struct drr_class *)arg;

        cl->filter_cnt--;
}
static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
                           struct Qdisc *new, struct Qdisc **old)
{
        struct drr_class *cl = (struct drr_class *)arg;

        if (new == NULL) {
                new = qdisc_create_dflt(sch->dev_queue,
                                        &pfifo_qdisc_ops, cl->common.classid);
                if (new == NULL)
                        new = &noop_qdisc;
        }

        *old = qdisc_replace(sch, new, &cl->qdisc);
        return 0;
}
static struct Qdisc *drr_class_leaf(struct Qdisc *sch, unsigned long arg)
{
        struct drr_class *cl = (struct drr_class *)arg;

        return cl->qdisc;
}
static void drr_qlen_notify(struct Qdisc *csh, unsigned long arg)
{
        struct drr_class *cl = (struct drr_class *)arg;

        list_del(&cl->alist);
}
static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
                          struct sk_buff *skb, struct tcmsg *tcm)
{
        struct drr_class *cl = (struct drr_class *)arg;
        struct nlattr *nest;

        tcm->tcm_parent = TC_H_ROOT;
        tcm->tcm_handle = cl->common.classid;
        tcm->tcm_info   = cl->qdisc->handle;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;
        if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum))
                goto nla_put_failure;
        return nla_nest_end(skb, nest);

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -EMSGSIZE;
}
static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
                                struct gnet_dump *d)
{
        struct drr_class *cl = (struct drr_class *)arg;
        __u32 qlen = cl->qdisc->q.qlen;
        struct tc_drr_stats xstats;

        memset(&xstats, 0, sizeof(xstats));
        if (qlen)
                xstats.deficit = cl->deficit;

        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                  d, NULL, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
            gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0)
                return -1;

        return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}
static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl;
        unsigned int i;

        if (arg->stop)
                return;

        for (i = 0; i < q->clhash.hashsize; i++) {
                hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
                        if (arg->count < arg->skip) {
                                arg->count++;
                                continue;
                        }
                        if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
                                arg->stop = 1;
                                return;
                        }
                        arg->count++;
                }
        }
}
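
/*
 * Select the class for a packet: a direct match of skb->priority against
 * this qdisc's handle takes precedence, otherwise the attached tc filters
 * are consulted.
 */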
static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
                                      int *qerr)
{
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl;
        struct tcf_result res;
        struct tcf_proto *fl;
        int result;

        if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
                cl = drr_find_class(sch, skb->priority);
                if (cl != NULL)
                        return cl;
        }

        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        fl = rcu_dereference_bh(q->filter_list);
        result = tcf_classify(skb, fl, &res, false);
        if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
                case TC_ACT_TRAP:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                case TC_ACT_SHOT:
                        return NULL;
                }
#endif
                cl = (struct drr_class *)res.class;
                if (cl == NULL)
                        cl = drr_find_class(sch, res.classid);
                return cl;
        }
        return NULL;
}
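
/*
 * Enqueue into the class's child qdisc. When the class becomes backlogged
 * (its queue length goes from 0 to 1) it is appended to the active list
 * and its deficit is reset to one full quantum.
 */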
static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                       struct sk_buff **to_free)
{
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl;
        int err = 0;

        cl = drr_classify(skb, sch, &err);
        if (cl == NULL) {
                if (err & __NET_XMIT_BYPASS)
                        qdisc_qstats_drop(sch);
                __qdisc_drop(skb, to_free);
                return err;
        }

        err = qdisc_enqueue(skb, cl->qdisc, to_free);
        if (unlikely(err != NET_XMIT_SUCCESS)) {
                if (net_xmit_drop_count(err)) {
                        cl->qstats.drops++;
                        qdisc_qstats_drop(sch);
                }
                return err;
        }

        if (cl->qdisc->q.qlen == 1) {
                list_add_tail(&cl->alist, &q->active);
                cl->deficit = cl->quantum;
        }

        qdisc_qstats_backlog_inc(sch, skb);
        sch->q.qlen++;
        return err;
}
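
/*
 * Deficit Round Robin dequeue: look at the head of the active list and
 * send the head packet if it fits within the class's deficit; otherwise
 * add one quantum to the deficit and rotate the class to the tail.
 *
 * Example (illustrative numbers): with quantum = 1500 and a 2000 byte
 * packet at the head, the first visit leaves deficit = 1500 < 2000, so
 * the class is rotated; on the next visit deficit = 3000 >= 2000, the
 * packet is sent and the deficit drops to 1000.
 */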
static struct sk_buff *drr_dequeue(struct Qdisc *sch)
{
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl;
        struct sk_buff *skb;
        unsigned int len;

        if (list_empty(&q->active))
                goto out;
        while (1) {
                cl = list_first_entry(&q->active, struct drr_class, alist);
                skb = cl->qdisc->ops->peek(cl->qdisc);
                if (skb == NULL) {
                        qdisc_warn_nonwc(__func__, cl->qdisc);
                        goto out;
                }

                len = qdisc_pkt_len(skb);
                if (len <= cl->deficit) {
                        cl->deficit -= len;
                        skb = qdisc_dequeue_peeked(cl->qdisc);
                        if (unlikely(skb == NULL))
                                goto out;
                        if (cl->qdisc->q.qlen == 0)
                                list_del(&cl->alist);

                        bstats_update(&cl->bstats, skb);
                        qdisc_bstats_update(sch, skb);
                        qdisc_qstats_backlog_dec(sch, skb);
                        sch->q.qlen--;
                        return skb;
                }

                cl->deficit += cl->quantum;
                list_move_tail(&cl->alist, &q->active);
        }
out:
        return NULL;
}
static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
        struct drr_sched *q = qdisc_priv(sch);
        int err;

        err = tcf_block_get(&q->block, &q->filter_list, sch);
        if (err)
                return err;
        err = qdisc_class_hash_init(&q->clhash);
        if (err < 0)
                return err;
        INIT_LIST_HEAD(&q->active);
        return 0;
}
static void drr_reset_qdisc(struct Qdisc *sch)
{
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl;
        unsigned int i;

        for (i = 0; i < q->clhash.hashsize; i++) {
                hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
                        if (cl->qdisc->q.qlen)
                                list_del(&cl->alist);
                        qdisc_reset(cl->qdisc);
                }
        }
        sch->qstats.backlog = 0;
        sch->q.qlen = 0;
}
static void drr_destroy_qdisc(struct Qdisc *sch)
{
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl;
        struct hlist_node *next;
        unsigned int i;

        tcf_block_put(q->block);

        for (i = 0; i < q->clhash.hashsize; i++) {
                hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
                                          common.hnode)
                        drr_destroy_class(sch, cl);
        }
        qdisc_class_hash_destroy(&q->clhash);
}
static const struct Qdisc_class_ops drr_class_ops = {
        .change         = drr_change_class,
        .delete         = drr_delete_class,
        .find           = drr_search_class,
        .tcf_block      = drr_tcf_block,
        .bind_tcf       = drr_bind_tcf,
        .unbind_tcf     = drr_unbind_tcf,
        .graft          = drr_graft_class,
        .leaf           = drr_class_leaf,
        .qlen_notify    = drr_qlen_notify,
        .dump           = drr_dump_class,
        .dump_stats     = drr_dump_class_stats,
        .walk           = drr_walk,
};
static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
        .cl_ops         = &drr_class_ops,
        .id             = "drr",
        .priv_size      = sizeof(struct drr_sched),
        .enqueue        = drr_enqueue,
        .dequeue        = drr_dequeue,
        .peek           = qdisc_peek_dequeued,
        .init           = drr_init_qdisc,
        .reset          = drr_reset_qdisc,
        .destroy        = drr_destroy_qdisc,
        .owner          = THIS_MODULE,
};
static int __init drr_init(void)
{
        return register_qdisc(&drr_qdisc_ops);
}
static void __exit drr_exit(void)
{
        unregister_qdisc(&drr_qdisc_ops);
}
module_init(drr_init);
module_exit(drr_exit);
MODULE_LICENSE("GPL");