/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:  ECN support
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>
#include <net/red.h>
/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen >qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than ram size)
	Really, this limit will never be reached
	if RED works correctly.
 */
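/*
 * Illustrative configuration from userspace (a sketch; the device name
 * and all numbers below are example values, not recommendations):
 *
 *	tc qdisc add dev eth0 root red limit 400000 min 30000 max 90000 \
 *		avpkt 1000 burst 55 ecn adaptive bandwidth 10Mbit
 *
 * iproute2 translates "min"/"max" into qth_min/qth_max, and "ecn" and
 * "adaptive" into the TC_RED_ECN and TC_RED_ADAPTATIVE flag bits that
 * red_change() below receives via TCA_RED_PARMS.
 */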
struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;
	struct timer_list	adapt_timer;
	struct Qdisc		*sch;
	struct red_parms	parms;
	struct red_vars		vars;
	struct red_stats	stats;
	struct Qdisc		*qdisc;
};
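/* Flag helpers: ECN marking and hard-drop behaviour are selected by
 * userspace through the flags word in tc_red_qopt. */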
static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}
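/* Enqueue: update the EWMA average queue size from the child's backlog,
 * then let red_action() decide whether to pass the packet through,
 * probabilistically mark (or drop) it, or hard mark (or drop) it. */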
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     child->qstats.backlog);

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (red_use_harddrop(q) || !red_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		q->stats.forced_mark++;
		break;
	}

	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		qdisc_qstats_backlog_inc(sch, skb);
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		q->stats.pdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}
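/* Dequeue from the child qdisc; when the queue drains empty, start the
 * idle period so qavg decays correctly while the link is idle. */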
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
	}
	return skb;
}
static struct sk_buff *red_peek(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}
static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	red_restart(&q->vars);
}
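/* Program (or remove) the RED parameters in hardware via ndo_setup_tc.
 * qth_min/qth_max are stored scaled up by Wlog, so shift them back down
 * before handing them to the driver. */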
static int red_offload(struct Qdisc *sch, bool enable)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_qopt_offload opt = {
		.handle = sch->handle,
		.parent = sch->parent,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	if (enable) {
		opt.command = TC_RED_REPLACE;
		opt.set.min = q->parms.qth_min >> q->parms.Wlog;
		opt.set.max = q->parms.qth_max >> q->parms.Wlog;
		opt.set.probability = q->parms.max_P;
		opt.set.is_ecn = red_use_ecn(q);
	} else {
		opt.command = TC_RED_DESTROY;
	}

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
}
static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	del_timer_sync(&q->adapt_timer);
	red_offload(sch, false);
	qdisc_destroy(q->qdisc);
}
static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_RED_MAX_P]	= { .type = NLA_U32 },
};
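/* Parse netlink configuration, validate the thresholds, (re)create the
 * bfifo child that enforces the hard byte limit, and install the new RED
 * parameters under the qdisc tree lock. */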
static int red_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	struct tc_red_qopt *ctl;
	struct Qdisc *child = NULL;
	int err;
	u32 max_P;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_RED_MAX, opt, red_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_RED_PARMS]);
	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
		return -EINVAL;

	if (ctl->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
		if (IS_ERR(child))
			return PTR_ERR(child);

		/* child is fifo, never noop_qdisc, so hashing is safe */
		qdisc_hash_add(child, true);
	}

	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
					  q->qdisc->qstats.backlog);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_RED_STAB]),
		      max_P);
	red_set_vars(&q->vars);

	del_timer(&q->adapt_timer);
	if (ctl->flags & TC_RED_ADAPTATIVE)
		mod_timer(&q->adapt_timer, jiffies + HZ / 2);

	if (!q->qdisc->q.qlen)
		red_start_of_idle_period(&q->vars);

	sch_tree_unlock(sch);
	red_offload(sch, true);
	return 0;
}
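/* Periodic timer for "adaptive RED": re-tune max_P every 500ms so the
 * marking probability tracks the observed average queue length. */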
static inline void red_adaptative_timer(struct timer_list *t)
{
	struct red_sched_data *q = from_timer(q, t, adapt_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	red_adaptative_algo(&q->parms, &q->vars);
	mod_timer(&q->adapt_timer, jiffies + HZ / 2);
	spin_unlock(root_lock);
}
static int red_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	q->sch = sch;
	timer_setup(&q->adapt_timer, red_adaptative_timer, 0);
	return red_change(sch, opt);
}
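/* Ask the driver to refresh this qdisc's bstats/qstats from hardware and
 * set or clear TCQ_F_OFFLOADED accordingly; -EOPNOTSUPP from the driver
 * is not an error, it just means software stats are authoritative. */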
static int red_dump_offload_stats(struct Qdisc *sch, struct tc_red_qopt *opt)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_qopt_offload hw_stats = {
		.command = TC_RED_STATS,
		.handle = sch->handle,
		.parent = sch->parent,
		{
			.stats.bstats = &sch->bstats,
			.stats.qstats = &sch->qstats,
		},
	};
	int err;

	sch->flags &= ~TCQ_F_OFFLOADED;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return 0;

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
					    &hw_stats);
	if (err == -EOPNOTSUPP)
		return 0;

	if (!err)
		sch->flags |= TCQ_F_OFFLOADED;

	return err;
}
static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};
	int err;

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	err = red_dump_offload_stats(sch, &opt);
	if (err)
		goto nla_put_failure;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
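/* Fold the hardware drop/mark counters into the software totals when the
 * qdisc is offloaded, then export the combined tc_red_xstats. */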
static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
	};

	if (sch->flags & TCQ_F_OFFLOADED) {
		struct red_stats hw_stats = {0};
		struct tc_red_qopt_offload hw_stats_request = {
			.command = TC_RED_XSTATS,
			.handle = sch->handle,
			.parent = sch->parent,
			{
				.xstats = &hw_stats,
			},
		};
		if (!dev->netdev_ops->ndo_setup_tc(dev,
						   TC_SETUP_QDISC_RED,
						   &hw_stats_request)) {
			st.early += hw_stats.prob_drop + hw_stats.forced_drop;
			st.pdrop += hw_stats.pdrop;
			st.other += hw_stats.other;
			st.marked += hw_stats.prob_mark + hw_stats.forced_mark;
		}
	}

	return gnet_stats_copy_app(d, &st, sizeof(st));
}
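/* RED is classful only in the minimal sense: it exposes its single child
 * (the bfifo enforcing the hard limit) as pseudo-class 1. */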
static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}
static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}
static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}
static unsigned long red_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}
static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}
static const struct Qdisc_class_ops red_class_ops = {
	.graft		=	red_graft,
	.leaf		=	red_leaf,
	.find		=	red_find,
	.walk		=	red_walk,
	.dump		=	red_dump_class,
};
static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		=	"red",
	.priv_size	=	sizeof(struct red_sched_data),
	.cl_ops		=	&red_class_ops,
	.enqueue	=	red_enqueue,
	.dequeue	=	red_dequeue,
	.peek		=	red_peek,
	.init		=	red_init,
	.reset		=	red_reset,
	.destroy	=	red_destroy,
	.change		=	red_change,
	.dump		=	red_dump,
	.dump_stats	=	red_dump_stats,
	.owner		=	THIS_MODULE,
};
static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");