/*
 * net/sched/sch_drr.c		Deficit Round Robin scheduler
 *
 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
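
/*
 * Deficit Round Robin (Shreedhar & Varghese, SIGCOMM '95): every class
 * owns a quantum -- the number of bytes it may send per round -- and a
 * deficit counter.  Backlogged classes sit on a circular "active" list;
 * the head class may dequeue packets while they fit into its deficit,
 * after which it is credited another quantum and rotated to the tail.
 * With a quantum of at least one MTU this yields O(1) fair queueing.
 */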

struct drr_class {
	struct Qdisc_class_common	common;
	unsigned int			refcnt;
	unsigned int			filter_cnt;

	struct gnet_stats_basic_packed	bstats;
	struct gnet_stats_queue		qstats;
	struct net_rate_estimator __rcu	*rate_est;
	struct list_head		alist;		/* entry in drr_sched.active */
	struct Qdisc			*qdisc;		/* child qdisc holding the packets */

	u32				quantum;	/* bytes credited per round */
	u32				deficit;	/* bytes still sendable this round */
};

struct drr_sched {
	struct list_head		active;		/* round-robin list of backlogged classes */
	struct tcf_proto __rcu		*filter_list;
	struct Qdisc_class_hash		clhash;
};

static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct drr_class, common);
}

static void drr_purge_queue(struct drr_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;
	unsigned int backlog = cl->qdisc->qstats.backlog;

	qdisc_reset(cl->qdisc);
	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
}

static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
	[TCA_DRR_QUANTUM]	= { .type = NLA_U32 },
};
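
/*
 * Create a new class or reconfigure an existing one (*arg != 0).  When
 * no TCA_DRR_QUANTUM attribute is given, the quantum defaults to the
 * device MTU via psched_mtu(), i.e. one full-sized packet per round.
 */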
static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_DRR_MAX + 1];
	u32 quantum;
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_DRR_QUANTUM]) {
		quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
		if (quantum == 0)
			return -EINVAL;
	} else
		quantum = psched_mtu(qdisc_dev(sch));

	if (cl != NULL) {
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    qdisc_root_sleeping_running(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}

		sch_tree_lock(sch);
		if (tb[TCA_DRR_QUANTUM])
			cl->quantum = quantum;
		sch_tree_unlock(sch);

		return 0;
	}

	cl = kzalloc(sizeof(struct drr_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	cl->refcnt = 1;
	cl->common.classid = classid;
	cl->quantum = quantum;
	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	else
		qdisc_hash_add(cl->qdisc, true);

	if (tca[TCA_RATE]) {
		err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
					    NULL,
					    qdisc_root_sleeping_running(sch),
					    tca[TCA_RATE]);
		if (err) {
			qdisc_destroy(cl->qdisc);
			kfree(cl);
			return err;
		}
	}

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->common);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}

static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
{
	gen_kill_estimator(&cl->rate_est);
	qdisc_destroy(cl->qdisc);
	kfree(cl);
}

static int drr_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)arg;

	if (cl->filter_cnt > 0)
		return -EBUSY;

	sch_tree_lock(sch);

	drr_purge_queue(cl);
	qdisc_class_hash_remove(&q->clhash, &cl->common);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	sch_tree_unlock(sch);
	return 0;
}

static unsigned long drr_get_class(struct Qdisc *sch, u32 classid)
{
	struct drr_class *cl = drr_find_class(sch, classid);

	if (cl != NULL)
		cl->refcnt++;

	return (unsigned long)cl;
}

static void drr_put_class(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (--cl->refcnt == 0)
		drr_destroy_class(sch, cl);
}

static struct tcf_proto __rcu **drr_tcf_chain(struct Qdisc *sch,
					      unsigned long cl)
{
	struct drr_sched *q = qdisc_priv(sch);

	if (cl)
		return NULL;

	return &q->filter_list;
}

static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent,
				  u32 classid)
{
	struct drr_class *cl = drr_find_class(sch, classid);

	if (cl != NULL)
		cl->filter_cnt++;

	return (unsigned long)cl;
}

static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	cl->filter_cnt--;
}

static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue,
					&pfifo_qdisc_ops, cl->common.classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	return 0;
}

static struct Qdisc *drr_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	return cl->qdisc;
}
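
/*
 * Called when a class's child qdisc is drained behind our back (e.g. via
 * qdisc_tree_reduce_backlog()); an emptied class must leave the active
 * list so that dequeue never spins on it.
 */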
static void drr_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (cl->qdisc->q.qlen == 0)
		list_del(&cl->alist);
}

static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct drr_class *cl = (struct drr_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent	= TC_H_ROOT;
	tcm->tcm_handle	= cl->common.classid;
	tcm->tcm_info	= cl->qdisc->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
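
/*
 * Export byte/packet counters, the rate estimate and queue stats; the
 * current deficit is reported as DRR-specific xstats while the class
 * is backlogged.
 */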
static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct drr_class *cl = (struct drr_class *)arg;
	__u32 qlen = cl->qdisc->q.qlen;
	struct tc_drr_stats xstats;

	memset(&xstats, 0, sizeof(xstats));
	if (qlen)
		xstats.deficit = cl->deficit;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}
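
/*
 * Classify a packet: an skb->priority naming one of our classids wins
 * outright; otherwise the attached tc filters decide.  Returning NULL
 * makes the caller drop the packet.
 */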
static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
		cl = drr_find_class(sch, skb->priority);
		if (cl != NULL)
			return cl;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	fl = rcu_dereference_bh(q->filter_list);
	result = tcf_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct drr_class *)res.class;
		if (cl == NULL)
			cl = drr_find_class(sch, res.classid);
		return cl;
	}
	return NULL;
}
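
/*
 * Enqueue into the class's child qdisc.  When the class becomes
 * backlogged (qlen 0 -> 1) it joins the tail of the active list with a
 * fresh deficit of one quantum.
 */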
static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	int err = 0;

	cl = drr_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}

	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	if (cl->qdisc->q.qlen == 1) {
		list_add_tail(&cl->alist, &q->active);
		cl->deficit = cl->quantum;
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;
	return err;
}
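
/*
 * The core DRR loop: peek at the head class; if the head packet fits
 * into the deficit, charge its length and send it, otherwise credit one
 * quantum and rotate the class to the tail.  A class that runs empty is
 * removed from the active list.
 */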
static struct sk_buff *drr_dequeue(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct sk_buff *skb;
	unsigned int len;

	if (list_empty(&q->active))
		goto out;
	while (1) {
		cl = list_first_entry(&q->active, struct drr_class, alist);
		skb = cl->qdisc->ops->peek(cl->qdisc);
		if (skb == NULL) {
			qdisc_warn_nonwc(__func__, cl->qdisc);
			goto out;
		}

		len = qdisc_pkt_len(skb);
		if (len <= cl->deficit) {
			cl->deficit -= len;
			skb = qdisc_dequeue_peeked(cl->qdisc);
			if (unlikely(skb == NULL))
				goto out;
			if (cl->qdisc->q.qlen == 0)
				list_del(&cl->alist);

			bstats_update(&cl->bstats, skb);
			qdisc_bstats_update(sch, skb);
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			return skb;
		}

		cl->deficit += cl->quantum;
		list_move_tail(&cl->alist, &q->active);
	}
out:
	return NULL;
}

static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct drr_sched *q = qdisc_priv(sch);
	int err;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	INIT_LIST_HEAD(&q->active);
	return 0;
}
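
/* Drop all queued packets and empty the active list. */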
static void drr_reset_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->qdisc->q.qlen)
				list_del(&cl->alist);
			qdisc_reset(cl->qdisc);
		}
	}
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}

static void drr_destroy_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *next;
	unsigned int i;

	tcf_destroy_chain(&q->filter_list);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  common.hnode)
			drr_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}

static const struct Qdisc_class_ops drr_class_ops = {
	.change		= drr_change_class,
	.delete		= drr_delete_class,
	.get		= drr_get_class,
	.put		= drr_put_class,
	.tcf_chain	= drr_tcf_chain,
	.bind_tcf	= drr_bind_tcf,
	.unbind_tcf	= drr_unbind_tcf,
	.graft		= drr_graft_class,
	.leaf		= drr_class_leaf,
	.qlen_notify	= drr_qlen_notify,
	.dump		= drr_dump_class,
	.dump_stats	= drr_dump_class_stats,
	.walk		= drr_walk,
};

static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
	.cl_ops		= &drr_class_ops,
	.id		= "drr",
	.priv_size	= sizeof(struct drr_sched),
	.enqueue	= drr_enqueue,
	.dequeue	= drr_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= drr_init_qdisc,
	.reset		= drr_reset_qdisc,
	.destroy	= drr_destroy_qdisc,
	.owner		= THIS_MODULE,
};

static int __init drr_init(void)
{
	return register_qdisc(&drr_qdisc_ops);
}

static void __exit drr_exit(void)
{
	unregister_qdisc(&drr_qdisc_ops);
}

module_init(drr_init);
module_exit(drr_exit);
MODULE_LICENSE("GPL");
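
/*
 * Example setup with iproute2 (a sketch -- device name, handles and
 * quanta are illustrative):
 *
 *	tc qdisc add dev eth0 root handle 1: drr
 *	tc class add dev eth0 parent 1: classid 1:1 drr quantum 1500
 *	tc class add dev eth0 parent 1: classid 1:2 drr quantum 3000
 *	tc filter add dev eth0 parent 1: protocol ip u32 \
 *		match ip dport 80 0xffff flowid 1:1
 *
 * Packets that match no class are dropped, so a catch-all filter or a
 * default class is usually wanted.
 */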