/*
 * net/sched/sch_sfb.c	  Stochastic Fair Blue
 *
 * Copyright (c) 2008-2011 Juliusz Chroboczek <jch@pps.jussieu.fr>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * W. Feng, D. Kandlur, D. Saha, K. Shin. Blue:
 * A New Class of Active Queue Management Algorithms.
 * U. Michigan CSE-TR-387-99, April 1999.
 *
 * http://www.thefengs.com/wuchang/blue/CSE-TR-387-99.pdf
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <net/ip.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>

/*
 * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level)
 * This implementation uses L = 8 and N = 16
 * This permits us to split one 32-bit hash (provided per packet by rxhash or
 * an external classifier) into 8 subhashes of 4 bits each.
 */
#define SFB_BUCKET_SHIFT 4
#define SFB_NUMBUCKETS	(1 << SFB_BUCKET_SHIFT) /* N bins per level */
#define SFB_BUCKET_MASK (SFB_NUMBUCKETS - 1)
#define SFB_LEVELS	(32 / SFB_BUCKET_SHIFT) /* L */
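
/* Worked example of the hash split (illustrative values): with hash
 * 0xDEADBEEF, the per-level bucket indexes are taken 4 bits at a time
 * from the low end:
 *   level 0: 0xDEADBEEF & 0xF = 0xF
 *   level 1: (0xDEADBEEF >> 4) & 0xF = 0xE
 *   ...
 *   level 7: (0xDEADBEEF >> 28) & 0xF = 0xD
 * so each packet touches exactly one bucket in each of the 8 levels.
 */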

/* SFB algo uses a virtual queue, named "bin" */
struct sfb_bucket {
	u16		qlen; /* length of virtual queue */
	u16		p_mark; /* marking probability */
};

/* We use double buffering right before a hash change
 * (Section 4.4 of SFB reference : moving hash functions)
 */
struct sfb_bins {
	u32		  perturbation; /* jhash perturbation */
	struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
};
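
/* A sketch of the moving-hash cycle as implemented below: during the last
 * warmup_time before each scheduled rehash, sfb_enqueue() computes a hash
 * in both slots and updates both sets of bins, so that when sfb_swap_slot()
 * makes the spare slot active, its buckets already carry warm state instead
 * of starting cold.
 */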

struct sfb_sched_data {
	struct Qdisc	*qdisc;
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	unsigned long	rehash_interval;
	unsigned long	warmup_time;	/* double buffering warmup time in jiffies */
	u32		max;
	u32		bin_size;	/* maximum queue length per bin */
	u32		increment;	/* d1 */
	u32		decrement;	/* d2 */
	u32		limit;		/* HARD maximal queue length */
	u32		penalty_rate;
	u32		penalty_burst;
	u32		tokens_avail;
	unsigned long	rehash_time;
	unsigned long	token_time;

	u8		slot;		/* current active bins (0 or 1) */
	bool		double_buffering;
	struct sfb_bins bins[2];

	struct {
		u32	earlydrop;
		u32	penaltydrop;
		u32	bucketdrop;
		u32	queuedrop;
		u32	childdrop;	/* drops in child qdisc */
		u32	marked;		/* ECN mark */
	} stats;
};

/*
 * Each queued skb might be hashed on one or two bins
 * We store in skb_cb the two hash values.
 * (A zero value means double buffering was not used)
 */
struct sfb_skb_cb {
	u32 hashes[2];
};

static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
	return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
}

/*
 * If using the 'internal' SFB flow classifier, the hash comes from skb rxhash.
 * If using an external classifier, the hash comes from the classid.
 */
static u32 sfb_hash(const struct sk_buff *skb, u32 slot)
{
	return sfb_skb_cb(skb)->hashes[slot];
}

/* Probabilities are coded as Q0.16 fixed-point values,
 * with 0xFFFF representing 65535/65536 (almost 1.0)
 * Addition and subtraction are saturating in [0, 65535]
 */
static u32 prob_plus(u32 p1, u32 p2)
{
	u32 res = p1 + p2;

	return min_t(u32, res, SFB_MAX_PROB);
}

static u32 prob_minus(u32 p1, u32 p2)
{
	return p1 > p2 ? p1 - p2 : 0;
}

static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen < 0xFFFF)
			b[hash].qlen++;
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		increment_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		increment_one_qlen(sfbhash, 1, q);
}

static void decrement_one_qlen(u32 sfbhash, u32 slot,
			       struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen > 0)
			b[hash].qlen--;
		b += SFB_NUMBUCKETS; /* next level */
	}
}

static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 1, q);
}

static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_minus(b->p_mark, q->decrement);
}

static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_plus(b->p_mark, q->increment);
}

static void sfb_zero_all_buckets(struct sfb_sched_data *q)
{
	memset(&q->bins, 0, sizeof(q->bins));
}

/*
 * compute max qlen, max p_mark, and avg p_mark
 */
static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)
{
	int i;
	u32 qlen = 0, prob = 0, totalpm = 0;
	const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS * SFB_NUMBUCKETS; i++) {
		if (qlen < b->qlen)
			qlen = b->qlen;
		totalpm += b->p_mark;
		if (prob < b->p_mark)
			prob = b->p_mark;
		b++;
	}
	*prob_r = prob;
	*avgpm_r = totalpm / (SFB_LEVELS * SFB_NUMBUCKETS);
	return qlen;
}

static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
{
	q->bins[slot].perturbation = prandom_u32();
}

static void sfb_swap_slot(struct sfb_sched_data *q)
{
	sfb_init_perturbation(q->slot, q);
	q->slot ^= 1;
	q->double_buffering = false;
}

/* Non-elastic flows are allowed to use part of the bandwidth, expressed
 * in "penalty_rate" packets per second, with "penalty_burst" burst
 */
static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
{
	if (q->penalty_rate == 0 || q->penalty_burst == 0)
		return true;

	if (q->tokens_avail < 1) {
		unsigned long age = min(10UL * HZ, jiffies - q->token_time);

		q->tokens_avail = (age * q->penalty_rate) / HZ;
		if (q->tokens_avail > q->penalty_burst)
			q->tokens_avail = q->penalty_burst;
		q->token_time = jiffies;
		if (q->tokens_avail < 1)
			return true;
	}

	q->tokens_avail--;
	return false;
}

static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl,
			 int *qerr, u32 *salt)
{
	struct tcf_result res;
	int result;

	result = tcf_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return false;
		}
#endif
		*salt = TC_H_MIN(res.classid);
		return true;
	}
	return false;
}

static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	struct tcf_proto *fl;
	int i;
	u32 p_min = ~0;
	u32 minqlen = ~0;
	u32 r, sfbhash;
	u32 slot = q->slot;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	if (unlikely(sch->q.qlen >= q->limit)) {
		qdisc_qstats_overlimit(sch);
		q->stats.queuedrop++;
		goto drop;
	}

	if (q->rehash_interval > 0) {
		unsigned long limit = q->rehash_time + q->rehash_interval;

		if (unlikely(time_after(jiffies, limit))) {
			sfb_swap_slot(q);
			q->rehash_time = jiffies;
		} else if (unlikely(!q->double_buffering && q->warmup_time > 0 &&
				    time_after(jiffies, limit - q->warmup_time))) {
			q->double_buffering = true;
		}
	}

	fl = rcu_dereference_bh(q->filter_list);
	if (fl) {
		u32 salt;

		/* If using external classifiers, get result and record it. */
		if (!sfb_classify(skb, fl, &ret, &salt))
			goto other_drop;
		sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
	} else {
		sfbhash = skb_get_hash_perturb(skb, q->bins[slot].perturbation);
	}

	if (!sfbhash)
		sfbhash = 1;
	sfb_skb_cb(skb)->hashes[slot] = sfbhash;

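	/* Blue per-bucket update: walk the packet's bucket in each level.
	 * An empty virtual queue decays p_mark by d2; an over-threshold
	 * one grows it by d1. Track the minimum qlen and p_mark across
	 * levels, since a flow is judged by its least-loaded bucket.
	 */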
	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;
		struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b->qlen == 0)
			decrement_prob(b, q);
		else if (b->qlen >= q->bin_size)
			increment_prob(b, q);
		if (minqlen > b->qlen)
			minqlen = b->qlen;
		if (p_min > b->p_mark)
			p_min = b->p_mark;
	}

	slot ^= 1;
	sfb_skb_cb(skb)->hashes[slot] = 0;

	if (unlikely(minqlen >= q->max)) {
		qdisc_qstats_overlimit(sch);
		q->stats.bucketdrop++;
		goto drop;
	}

	if (unlikely(p_min >= SFB_MAX_PROB)) {
		/* Inelastic flow */
		if (q->double_buffering) {
			sfbhash = skb_get_hash_perturb(skb,
						       q->bins[slot].perturbation);
			if (!sfbhash)
				sfbhash = 1;
			sfb_skb_cb(skb)->hashes[slot] = sfbhash;

			for (i = 0; i < SFB_LEVELS; i++) {
				u32 hash = sfbhash & SFB_BUCKET_MASK;
				struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

				sfbhash >>= SFB_BUCKET_SHIFT;
				if (b->qlen == 0)
					decrement_prob(b, q);
				else if (b->qlen >= q->bin_size)
					increment_prob(b, q);
			}
		}
		if (sfb_rate_limit(skb, q)) {
			qdisc_qstats_overlimit(sch);
			q->stats.penaltydrop++;
			goto drop;
		}
		goto enqueue;
	}

	r = prandom_u32() & SFB_MAX_PROB;

	if (unlikely(r < p_min)) {
		if (unlikely(p_min > SFB_MAX_PROB / 2)) {
			/* If we're marking that many packets, then either
			 * this flow is unresponsive, or we're badly congested.
			 * In either case, we want to start dropping packets.
			 */
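			/* The drop probability ramps linearly from 0 at
			 * p_min = 1/2 to 1 at p_min = 1. For example
			 * (illustrative value), p_min = 0xC000 (~0.75)
			 * gives a drop threshold of
			 * (0xC000 - 0x7FFF) * 2 = 0x8002, so about half
			 * of the would-be-marked packets are dropped.
			 */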
			if (r < (p_min - SFB_MAX_PROB / 2) * 2) {
				q->stats.earlydrop++;
				goto drop;
			}
		}
		if (INET_ECN_set_ce(skb)) {
			q->stats.marked++;
		} else {
			q->stats.earlydrop++;
			goto drop;
		}
	}

enqueue:
	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		qdisc_qstats_backlog_inc(sch, skb);
		sch->q.qlen++;
		increment_qlen(skb, q);
	} else if (net_xmit_drop_count(ret)) {
		q->stats.childdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
other_drop:
	if (ret & __NET_XMIT_BYPASS)
		qdisc_qstats_drop(sch);
	kfree_skb(skb);
	return ret;
}

static struct sk_buff *sfb_dequeue(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	struct sk_buff *skb;

	skb = child->dequeue(q->qdisc);

	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
		decrement_qlen(skb, q);
	}

	return skb;
}

static struct sk_buff *sfb_peek(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

/* No sfb_drop -- impossible since the child doesn't return the dropped skb. */

static void sfb_reset(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
}

static void sfb_destroy(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	qdisc_destroy(q->qdisc);
}

static const struct nla_policy sfb_policy[TCA_SFB_MAX + 1] = {
	[TCA_SFB_PARMS]	= { .len = sizeof(struct tc_sfb_qopt) },
};

static const struct tc_sfb_qopt sfb_default_ops = {
	.rehash_interval = 600 * MSEC_PER_SEC,
	.warmup_time = 60 * MSEC_PER_SEC,
	.limit = 0,
	.max = 25,
	.bin_size = 20,
	.increment = (SFB_MAX_PROB + 500) / 1000, /* 0.1 % */
	.decrement = (SFB_MAX_PROB + 3000) / 6000,
	.penalty_rate = 10,
	.penalty_burst = 20,
};
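
/* Checking the defaults against the Q0.16 scale (arithmetic sketch):
 * increment = (65535 + 500) / 1000 = 66, and 66/65536 is about 0.1%;
 * decrement = (65535 + 3000) / 6000 = 11, about 0.017%; so p_mark rises
 * roughly six times faster than it decays.
 */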

static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child;
	struct nlattr *tb[TCA_SFB_MAX + 1];
	const struct tc_sfb_qopt *ctl = &sfb_default_ops;
	u32 limit;
	int err;

	if (opt) {
		err = nla_parse_nested(tb, TCA_SFB_MAX, opt, sfb_policy, NULL);
		if (err < 0)
			return -EINVAL;

		if (tb[TCA_SFB_PARMS] == NULL)
			return -EINVAL;

		ctl = nla_data(tb[TCA_SFB_PARMS]);
	}

	limit = ctl->limit;
	if (limit == 0)
		limit = qdisc_dev(sch)->tx_queue_len;

	child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit);
	if (IS_ERR(child))
		return PTR_ERR(child);

	if (child != &noop_qdisc)
		qdisc_hash_add(child, true);
	sch_tree_lock(sch);

	qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
				  q->qdisc->qstats.backlog);
	qdisc_destroy(q->qdisc);
	q->qdisc = child;

	q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval);
	q->warmup_time = msecs_to_jiffies(ctl->warmup_time);
	q->rehash_time = jiffies;
	q->limit = limit;
	q->increment = ctl->increment;
	q->decrement = ctl->decrement;
	q->max = ctl->max;
	q->bin_size = ctl->bin_size;
	q->penalty_rate = ctl->penalty_rate;
	q->penalty_burst = ctl->penalty_burst;
	q->tokens_avail = ctl->penalty_burst;
	q->token_time = jiffies;

	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
	sfb_init_perturbation(1, q);

	sch_tree_unlock(sch);

	return 0;
}

static int sfb_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	int err;

	err = tcf_block_get(&q->block, &q->filter_list, sch);
	if (err)
		return err;

	q->qdisc = &noop_qdisc;
	return sfb_change(sch, opt);
}

static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;
	struct tc_sfb_qopt opt = {
		.rehash_interval = jiffies_to_msecs(q->rehash_interval),
		.warmup_time = jiffies_to_msecs(q->warmup_time),
		.limit = q->limit,
		.max = q->max,
		.bin_size = q->bin_size,
		.increment = q->increment,
		.decrement = q->decrement,
		.penalty_rate = q->penalty_rate,
		.penalty_burst = q->penalty_burst,
	};

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int sfb_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct tc_sfb_xstats st = {
		.earlydrop = q->stats.earlydrop,
		.penaltydrop = q->stats.penaltydrop,
		.bucketdrop = q->stats.bucketdrop,
		.queuedrop = q->stats.queuedrop,
		.childdrop = q->stats.childdrop,
		.marked = q->stats.marked,
	};

	st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	return -ENOSYS;
}

static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *sfb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long sfb_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void sfb_unbind(struct Qdisc *sch, unsigned long arg)
{
}

static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int sfb_delete(struct Qdisc *sch, unsigned long cl)
{
	return -ENOSYS;
}

static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_block *sfb_tcf_block(struct Qdisc *sch, unsigned long cl)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	return 0;
}


static const struct Qdisc_class_ops sfb_class_ops = {
	.graft		= sfb_graft,
	.leaf		= sfb_leaf,
	.find		= sfb_find,
	.change		= sfb_change_class,
	.delete		= sfb_delete,
	.walk		= sfb_walk,
	.tcf_block	= sfb_tcf_block,
	.bind_tcf	= sfb_bind,
	.unbind_tcf	= sfb_unbind,
	.dump		= sfb_dump_class,
};

static struct Qdisc_ops sfb_qdisc_ops __read_mostly = {
	.id		= "sfb",
	.priv_size	= sizeof(struct sfb_sched_data),
	.cl_ops		= &sfb_class_ops,
	.enqueue	= sfb_enqueue,
	.dequeue	= sfb_dequeue,
	.peek		= sfb_peek,
	.init		= sfb_init,
	.reset		= sfb_reset,
	.destroy	= sfb_destroy,
	.change		= sfb_change,
	.dump		= sfb_dump,
	.dump_stats	= sfb_dump_stats,
	.owner		= THIS_MODULE,
};
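
/* Illustrative userspace hook-up (a sketch; exact option names belong to
 * the tc-sfb(8) front end and are not checked here):
 *
 *   tc qdisc add dev eth0 root sfb
 *
 * tc packs its options into a TCA_SFB_PARMS netlink attribute carrying a
 * struct tc_sfb_qopt, which sfb_change() above parses.
 */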

static int __init sfb_module_init(void)
{
	return register_qdisc(&sfb_qdisc_ops);
}

static void __exit sfb_module_exit(void)
{
	unregister_qdisc(&sfb_qdisc_ops);
}

module_init(sfb_module_init)
module_exit(sfb_module_exit)

MODULE_DESCRIPTION("Stochastic Fair Blue queue discipline");
MODULE_AUTHOR("Juliusz Chroboczek");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");