/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:	ECN support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>

/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen > qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than ram size).
	Really, this limit will never be reached
	if RED works correctly.
 */

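/* Illustrative example only: with iproute2, a configuration respecting
 * the constraint above might look like
 *
 *	tc qdisc add dev eth0 root red limit 400000 min 30000 max 90000 \
 *		avpkt 1000 burst 55 ecn adaptive bandwidth 10Mbit
 *
 * i.e. limit (400000 bytes) sits well above qth_max plus the burst
 * allowance, so the hard limit only matters if RED misbehaves.
 */
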
struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;
	struct timer_list	adapt_timer;
	struct red_parms	parms;
	struct red_vars		vars;
	struct red_stats	stats;
	struct Qdisc		*qdisc;
};

static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

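/* Enqueue path: refresh the average queue length (qavg), let
 * red_action() judge it against the configured thresholds, and then
 * mark, drop, or hand the packet to the child FIFO. Conceptually the
 * average is the classic RED EWMA,
 *
 *	qavg = (1 - W) * qavg + W * backlog,	W = 2^(-Wlog)
 *
 * with the exact fixed-point arithmetic living in red_calc_qavg()
 * in include/net/red.h.
 */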
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     child->qstats.backlog);

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

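	/* red_action() yields one of three verdicts: RED_DONT_MARK
	 * (average below qth_min: accept the packet), RED_PROB_MARK
	 * (between the thresholds: probabilistically mark or drop) or
	 * RED_HARD_MARK (above qth_max: always mark or drop). When
	 * TC_RED_ECN is set, ECN-capable packets are marked rather
	 * than dropped, unless harddrop mode forces a drop in the
	 * RED_HARD_MARK case.
	 */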
	switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (red_use_harddrop(q) || !red_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		q->stats.forced_mark++;
		break;
	}

	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		qdisc_qstats_backlog_inc(sch, skb);
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		q->stats.pdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}

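/* Dequeue pulls from the child qdisc; once the child runs empty, an
 * idle period is started so that the average can keep decaying while
 * the link is idle (compare the 990814 entry in the changelog above).
 */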
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
	}
	return skb;
}

static struct sk_buff *red_peek(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	red_restart(&q->vars);
}

static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	del_timer_sync(&q->adapt_timer);
	qdisc_destroy(q->qdisc);
}

static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_RED_MAX_P] = { .type = NLA_U32 },
};

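/* (Re)configuration via netlink: validate the TCA_RED_* attributes,
 * build a replacement bfifo child sized to the new limit, then swap
 * in the new parameters and child under the qdisc tree lock so the
 * fast path never observes a half-updated state.
 */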
static int red_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	struct tc_red_qopt *ctl;
	struct Qdisc *child = NULL;
	int err;
	u32 max_P;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_RED_MAX, opt, red_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_RED_PARMS]);

	if (ctl->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
		if (IS_ERR(child))
			return PTR_ERR(child);
	}

	if (child)
		qdisc_hash_add(child, true);
	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
					  q->qdisc->qstats.backlog);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_RED_STAB]),
		      max_P);
	red_set_vars(&q->vars);

	del_timer(&q->adapt_timer);
	if (ctl->flags & TC_RED_ADAPTATIVE)
		mod_timer(&q->adapt_timer, jiffies + HZ/2);

	if (!q->qdisc->q.qlen)
		red_start_of_idle_period(&q->vars);

	sch_tree_unlock(sch);
	return 0;
}

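/* Adaptive RED: while TC_RED_ADAPTATIVE is set, this timer fires
 * every 500 ms and lets red_adaptative_algo() adjust max_P so the
 * average queue length settles between the two thresholds, following
 * Floyd et al.'s "Adaptive RED" scheme.
 */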
static inline void red_adaptative_timer(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct red_sched_data *q = qdisc_priv(sch);
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	red_adaptative_algo(&q->parms, &q->vars);
	mod_timer(&q->adapt_timer, jiffies + HZ/2);
	spin_unlock(root_lock);
}

static int red_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	setup_timer(&q->adapt_timer, red_adaptative_timer, (unsigned long)sch);
	return red_change(sch, opt);
}

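/* Dump converts back to user-visible units: red_set_parms() keeps
 * qth_min/qth_max left-shifted by Wlog, so the shift is undone here
 * before reporting the thresholds to userspace.
 */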
static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct tc_red_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

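/* RED is classful only in a minimal sense: it exposes a single
 * pseudo-class (minor 1) so userspace can graft, inspect, and walk
 * the one child qdisc that actually holds the packets.
 */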
static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}

static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long red_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops red_class_ops = {
	.graft		=	red_graft,
	.leaf		=	red_leaf,
	.find		=	red_find,
	.walk		=	red_walk,
	.dump		=	red_dump_class,
};

static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		=	"red",
	.priv_size	=	sizeof(struct red_sched_data),
	.cl_ops		=	&red_class_ops,
	.enqueue	=	red_enqueue,
	.dequeue	=	red_dequeue,
	.peek		=	red_peek,
	.init		=	red_init,
	.reset		=	red_reset,
	.destroy	=	red_destroy,
	.change		=	red_change,
	.dump		=	red_dump,
	.dump_stats	=	red_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");