/*
 * Copyright (c) 2003 Patrick McHardy, <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * 2003-10-17 - Ported from altq
 */
/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve.  the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <asm/div64.h>
/*
 * kernel internal service curve representation:
 *   coordinates are given by 64 bit unsigned integers.
 *   x-axis: unit is clock count.
 *   y-axis: unit is byte.
 *
 *   The service curve parameters are converted to the internal
 *   representation. The slope values are scaled to avoid overflow.
 *   the inverse slope values as well as the y-projection of the 1st
 *   segment are kept in order to avoid 64-bit divide operations
 *   that are expensive on 32-bit architectures.
 */

struct internal_sc {
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};
/* runtime service curve */
struct runtime_sc {
	u64	x;	/* current starting position on x-axis */
	u64	y;	/* current starting position on y-axis */
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};
enum hfsc_class_flags {
	HFSC_RSC = 0x1,
	HFSC_FSC = 0x2,
	HFSC_USC = 0x4
};
struct hfsc_class {
	struct Qdisc_class_common cl_common;
	unsigned int	refcnt;		/* usage count */

	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct net_rate_estimator __rcu *rate_est;
	struct tcf_proto __rcu *filter_list; /* filter list */
	struct tcf_block *block;
	unsigned int	filter_cnt;	/* filter count */
	unsigned int	level;		/* class level in hierarchy */

	struct hfsc_sched *sched;	/* scheduler data */
	struct hfsc_class *cl_parent;	/* parent class */
	struct list_head siblings;	/* sibling classes */
	struct list_head children;	/* child classes */
	struct Qdisc	*qdisc;		/* leaf qdisc */

	struct rb_node el_node;		/* qdisc's eligible tree member */
	struct rb_root vt_tree;		/* active children sorted by cl_vt */
	struct rb_node vt_node;		/* parent's vt_tree member */
	struct rb_root cf_tree;		/* active children sorted by cl_f */
	struct rb_node cf_node;		/* parent's cf_heap member */

	u64	cl_total;		/* total work in bytes */
	u64	cl_cumul;		/* cumulative work in bytes done by
					   real-time criteria */

	u64	cl_d;			/* deadline*/
	u64	cl_e;			/* eligible time */
	u64	cl_vt;			/* virtual time */
	u64	cl_f;			/* time when this class will fit for
					   link-sharing, max(myf, cfmin) */
	u64	cl_myf;			/* my fit-time (calculated from this
					   class's own upperlimit curve) */
	u64	cl_cfmin;		/* earliest children's fit-time (used
					   with cl_myf to obtain cl_f) */
	u64	cl_cvtmin;		/* minimal virtual time among the
					   children fit for link-sharing
					   (monotonic within a period) */
	u64	cl_vtadj;		/* intra-period cumulative vt
					   adjustment */
	u64	cl_cvtoff;		/* largest virtual time seen among
					   the children */

	struct internal_sc cl_rsc;	/* internal real-time service curve */
	struct internal_sc cl_fsc;	/* internal fair service curve */
	struct internal_sc cl_usc;	/* internal upperlimit service curve */
	struct runtime_sc cl_deadline;	/* deadline curve */
	struct runtime_sc cl_eligible;	/* eligible curve */
	struct runtime_sc cl_virtual;	/* virtual curve */
	struct runtime_sc cl_ulimit;	/* upperlimit curve */

	u8		cl_flags;	/* which curves are valid */
	u32		cl_vtperiod;	/* vt period sequence number */
	u32		cl_parentperiod;/* parent's vt period sequence number*/
	u32		cl_nactive;	/* number of active children */
};
struct hfsc_sched {
	u16	defcls;				/* default class id */
	struct hfsc_class root;			/* root class */
	struct Qdisc_class_hash clhash;		/* class hash */
	struct rb_root eligible;		/* eligible tree */
	struct qdisc_watchdog watchdog;		/* watchdog timer */
};
#define	HT_INFINITY	0xffffffffffffffffULL	/* infinite time value */
/*
 * eligible tree holds backlogged classes being sorted by their eligible times.
 * there is one eligible tree per hfsc instance.
 */

static void
eltree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->sched->eligible.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, el_node);
		if (cl->cl_e >= cl1->cl_e)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->el_node, parent, p);
	rb_insert_color(&cl->el_node, &cl->sched->eligible);
}
static inline void
eltree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_update(struct hfsc_class *cl)
{
	eltree_remove(cl);
	eltree_insert(cl);
}
/* find the class with the minimum deadline among the eligible classes */
static inline struct hfsc_class *
eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
{
	struct hfsc_class *p, *cl = NULL;
	struct rb_node *n;

	for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, el_node);
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return cl;
}
/* find the class with minimum eligible time among the eligible classes */
static inline struct hfsc_class *
eltree_get_minel(struct hfsc_sched *q)
{
	struct rb_node *n;

	n = rb_first(&q->eligible);
	if (n == NULL)
		return NULL;
	return rb_entry(n, struct hfsc_class, el_node);
}
/*
 * vttree holds backlogged child classes being sorted by their virtual
 * time. each intermediate class has one vttree.
 */
static void
vttree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, vt_node);
		if (cl->cl_vt >= cl1->cl_vt)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->vt_node, parent, p);
	rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);
}
static void
vttree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static void
vttree_update(struct hfsc_class *cl)
{
	vttree_remove(cl);
	vttree_insert(cl);
}
static inline struct hfsc_class *
vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
{
	struct hfsc_class *p;
	struct rb_node *n;

	for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, vt_node);
		if (p->cl_f <= cur_time)
			return p;
	}
	return NULL;
}
/*
 * get the leaf class with the minimum vt in the hierarchy
 */
static struct hfsc_class *
vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
{
	/* if root-class's cfmin is bigger than cur_time nothing to do */
	if (cl->cl_cfmin > cur_time)
		return NULL;

	while (cl->level > 0) {
		cl = vttree_firstfit(cl, cur_time);
		if (cl == NULL)
			return NULL;
		/*
		 * update parent's cl_cvtmin.
		 */
		if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
			cl->cl_parent->cl_cvtmin = cl->cl_vt;
	}
	return cl;
}
static void
cftree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, cf_node);
		if (cl->cl_f >= cl1->cl_f)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->cf_node, parent, p);
	rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static void
cftree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static void
cftree_update(struct hfsc_class *cl)
{
	cftree_remove(cl);
	cftree_insert(cl);
}
/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bps
 *	d: us
 *  internal service curve parameters
 *	sm: (bytes/psched_us) << SM_SHIFT
 *	ism: (psched_us/byte) << ISM_SHIFT
 *	dx: psched_us
 *
 * The clock source resolution with ktime and PSCHED_SHIFT 10 is 1.024us.
 *
 * sm and ism are scaled in order to keep effective digits.
 * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
 * digits in decimal using the following table.
 *
 *  bits/sec      100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
 *  ------------+-------------------------------------------------------
 *  bytes/1.024us 12.8e-3    128e-3    1280e-3    12800e-3   128000e-3
 *
 *  1.024us/byte  78.125     7.8125    0.78125    0.078125   0.0078125
 *
 * So, for PSCHED_SHIFT 10 we need: SM_SHIFT 20, ISM_SHIFT 18.
 */
#define	SM_SHIFT	(30 - PSCHED_SHIFT)
#define	ISM_SHIFT	(8 + PSCHED_SHIFT)

#define	SM_MASK		((1ULL << SM_SHIFT) - 1)
#define	ISM_MASK	((1ULL << ISM_SHIFT) - 1)
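
/*
 * Worked example (illustrative note, not from the original source):
 * with PSCHED_SHIFT 10, SM_SHIFT is 20.  A 10 Mbit/s rate arrives here
 * as m = 1250000 bytes/s, i.e. about 1.28 bytes per 1.024us tick, which
 * plain integer math would truncate to 1.  m2sm() below instead keeps
 * the slope in fixed point:
 *
 *	sm = (m << SM_SHIFT + PSCHED_TICKS_PER_SEC - 1) / PSCHED_TICKS_PER_SEC
 *	   ~= 1.28 * 2^20 ~= 1342177
 *
 * preserving roughly six significant decimal digits of the true slope.
 */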
static inline u64
seg_x2y(u64 x, u64 sm)
{
	u64 y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but divide it for the upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return y;
}
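
/*
 * Why the split works (added explanation, not in the original source):
 * writing x = xh * 2^SM_SHIFT + xl, with xl = x & SM_MASK, gives
 *
 *	(x * sm) >> SM_SHIFT == xh * sm + ((xl * sm) >> SM_SHIFT)
 *
 * exactly, because xh * 2^SM_SHIFT * sm is already a multiple of
 * 2^SM_SHIFT.  Each partial product stays within 64 bits for any
 * realistic sm and x, whereas the naive x * sm could overflow u64.
 */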
static inline u64
seg_y2x(u64 y, u64 ism)
{
	u64 x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else {
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
	}
	return x;
}
/* Convert m (bps) into sm (bytes/psched us) */
static u64
m2sm(u32 m)
{
	u64 sm;

	sm = ((u64)m << SM_SHIFT);
	sm += PSCHED_TICKS_PER_SEC - 1;
	do_div(sm, PSCHED_TICKS_PER_SEC);
	return sm;
}
/* convert m (bps) into ism (psched us/byte) */
static u64
m2ism(u32 m)
{
	u64 ism;

	if (m == 0)
		ism = HT_INFINITY;
	else {
		ism = ((u64)PSCHED_TICKS_PER_SEC << ISM_SHIFT);
		ism += m - 1;
		do_div(ism, m);
	}
	return ism;
}
/* convert d (us) into dx (psched us) */
static u64
d2dx(u32 d)
{
	u64 dx;

	dx = ((u64)d * PSCHED_TICKS_PER_SEC);
	dx += USEC_PER_SEC - 1;
	do_div(dx, USEC_PER_SEC);
	return dx;
}
/* convert sm (bytes/psched us) into m (bps) */
static u32
sm2m(u64 sm)
{
	u64 m;

	m = (sm * PSCHED_TICKS_PER_SEC) >> SM_SHIFT;
	return (u32)m;
}
/* convert dx (psched us) into d (us) */
static u32
dx2d(u64 dx)
{
	u64 d;

	d = dx * USEC_PER_SEC;
	do_div(d, PSCHED_TICKS_PER_SEC);
	return (u32)d;
}
static void
sc2isc(struct tc_service_curve *sc, struct internal_sc *isc)
{
	isc->sm1  = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx   = d2dx(sc->d);
	isc->dy   = seg_x2y(isc->dx, isc->sm1);
	isc->sm2  = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}
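
/*
 * Illustration (added note, not part of the original source): a service
 * curve (m1, d, m2) is the two-piece linear function
 *
 *	y(t) = m1 * t			for t <= d
 *	y(t) = m1 * d + m2 * (t - d)	for t >  d
 *
 * e.g. a high m1 with a short d describes a class that may burst at m1
 * for d microseconds before settling at the steady rate m2.  sc2isc()
 * stores the first segment as its endpoint offsets (dx, dy) plus both
 * slopes and their inverses in scaled fixed point.
 */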
/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	rtsc->x	   = x;
	rtsc->y	   = y;
	rtsc->sm1  = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx   = isc->dx;
	rtsc->dy   = isc->dy;
	rtsc->sm2  = isc->sm2;
	rtsc->ism2 = isc->ism2;
}
/*
 * calculate the x-projection (time) of the runtime service curve
 * for the given y-projection (amount of service) value
 */
static u64
rtsc_y2x(struct runtime_sc *rtsc, u64 y)
{
	u64 x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return x;
}
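
/*
 * In formula form (added note): with the curve anchored at (x0, y0) and
 * first segment extent (dx, dy),
 *
 *	x(y) = x0 + (y - y0) / sm1		for y0 < y <= y0 + dy
 *	x(y) = x0 + dx + (y - y0 - dy) / sm2	for y > y0 + dy
 *
 * where the divisions are carried out as multiplications by the
 * precomputed inverse slopes via seg_y2x().  The scheduler uses this to
 * turn bytes of service (cl_cumul) into times (cl_e, cl_d).
 */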
static u64
rtsc_x2y(struct runtime_sc *rtsc, u64 x)
{
	u64 y;

	if (x < rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return y;
}
/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	u64 y1, y2, dx, dy;
	u32 dsm;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
	dx = (y1 - y) << SM_SHIFT;
	dsm = isc->sm1 - isc->sm2;
	do_div(dx, dsm);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
}
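
/*
 * Derivation of the intersection offset (added note): on the first
 * segment both curves grow linearly, so the crossing point dx ticks
 * past x satisfies
 *
 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
 *	(dx * sm1) >> SM_SHIFT == ((dx * sm2) >> SM_SHIFT) + (y1 - y)
 *	dx == ((y1 - y) << SM_SHIFT) / (sm1 - sm2)
 *
 * which is exactly the dx/dsm computation above, followed by a shift of
 * the origin when (x, y1) still lies on the old curve's first segment.
 */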
static void
init_ed(struct hfsc_class *cl, unsigned int next_len)
{
	u64 cur_time = psched_get_time();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_insert(cl);
}
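
/*
 * Added note: for a convex real-time curve (sm1 <= sm2), zeroing
 * (dx, dy) above leaves cl_eligible as the straight line of slope sm2
 * through the anchor point, so a class's eligible time never arrives
 * later than the deadline computed from the full two-piece curve.
 */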
static void
update_ed(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_update(cl);
}
static inline void
update_d(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}
static inline void
update_cfmin(struct hfsc_class *cl)
{
	struct rb_node *n = rb_first(&cl->cf_tree);
	struct hfsc_class *p;

	if (n == NULL) {
		cl->cl_cfmin = 0;
		return;
	}
	p = rb_entry(n, struct hfsc_class, cf_node);
	cl->cl_cfmin = p->cl_f;
}
static void
init_vf(struct hfsc_class *cl, unsigned int len)
{
	struct hfsc_class *max_cl;
	struct rb_node *n;
	u64 vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			n = rb_last(&cl->cl_parent->vt_tree);
			if (n != NULL) {
				max_cl = rb_entry(n, struct hfsc_class, vt_node);
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * initialize cl_vt to the highest value seen
				 * among the siblings.  this is analogous to
				 * what cur_time would provide in realtime case.
				 */
				cl->cl_vt = cl->cl_parent->cl_cvtoff;
				cl->cl_parent->cl_cvtmin = 0;
			}

			/* update the virtual curve */
			rtsc_min(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			vttree_insert(cl);
			cftree_insert(cl);

			if (cl->cl_flags & HFSC_USC) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = psched_get_time();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
					 cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
						      cl->cl_total);
			}
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
		}
		update_cfmin(cl->cl_parent);
	}
}
static void
update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
{
	u64 f; /* , myf_bound, delta; */
	int go_passive = 0;

	if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
		go_passive = 1;

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		/* update vt */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total) + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtoff of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtoff)
				cl->cl_parent->cl_cvtoff = cl->cl_vt;

			/* remove this class from the vt tree */
			vttree_remove(cl);

			cftree_remove(cl);
			update_cfmin(cl->cl_parent);

			continue;
		}

		/* update the vt tree */
		vttree_update(cl);

		/* update f */
		if (cl->cl_flags & HFSC_USC) {
			cl->cl_myf = rtsc_y2x(&cl->cl_ulimit, cl->cl_total);
#if 0
			cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
							      cl->cl_total);
			/*
			 * This code causes classes to stay way under their
			 * limit when multiple classes are used at gigabit
			 * speed. needs investigation. -kaber
			 */

			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - PSCHED_JIFFIE2US(1);
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
#endif
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}
static void
set_active(struct hfsc_class *cl, unsigned int len)
{
	if (cl->cl_flags & HFSC_RSC)
		init_ed(cl, len);
	if (cl->cl_flags & HFSC_FSC)
		init_vf(cl, len);
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_flags & HFSC_RSC)
		eltree_remove(cl);

	/*
	 * vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from vttree.
	 */
}
static unsigned int
qdisc_peek_len(struct Qdisc *sch)
{
	struct sk_buff *skb;
	unsigned int len;

	skb = sch->ops->peek(sch);
	if (unlikely(skb == NULL)) {
		qdisc_warn_nonwc("qdisc_peek_len", sch);
		return 0;
	}
	len = qdisc_pkt_len(skb);

	return len;
}
static void
hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;
	unsigned int backlog = cl->qdisc->qstats.backlog;

	qdisc_reset(cl->qdisc);
	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
}
static void
hfsc_adjust_levels(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	unsigned int level;

	do {
		level = 0;
		list_for_each_entry(p, &cl->children, siblings) {
			if (p->level >= level)
				level = p->level + 1;
		}
		cl->level = level;
	} while ((cl = cl->cl_parent) != NULL);
}
static inline struct hfsc_class *
hfsc_find_class(u32 classid, struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct hfsc_class, cl_common);
}
static void
hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
		u64 cur_time)
{
	sc2isc(rsc, &cl->cl_rsc);
	rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}
	cl->cl_flags |= HFSC_RSC;
}
static void
hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
{
	sc2isc(fsc, &cl->cl_fsc);
	rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
	cl->cl_flags |= HFSC_FSC;
}
static void
hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
		u64 cur_time)
{
	sc2isc(usc, &cl->cl_usc);
	rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
	cl->cl_flags |= HFSC_USC;
}
static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
	[TCA_HFSC_RSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_FSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_USC]	= { .len = sizeof(struct tc_service_curve) },
};
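
/*
 * Userspace example (illustrative only, not part of the original
 * source): tc fills TCA_HFSC_RSC/FSC/USC with tc_service_curve
 * triples.  A typical configuration might look like
 *
 *	tc qdisc add dev eth0 root handle 1: hfsc default 10
 *	tc class add dev eth0 parent 1: classid 1:10 hfsc \
 *		rt m1 10mbit d 20ms m2 5mbit ls m2 5mbit
 *
 * where "rt" supplies the real-time curve (HFSC_RSC), "ls" the
 * link-sharing curve (HFSC_FSC), and "ul" would supply the upper limit
 * curve (HFSC_USC).  Curves with m1 == 0 and m2 == 0 are treated as
 * absent by hfsc_change_class() below.
 */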
static int
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
		  struct nlattr **tca, unsigned long *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)*arg;
	struct hfsc_class *parent = NULL;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_HFSC_MAX + 1];
	struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
	u64 cur_time;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_HFSC_MAX, opt, hfsc_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_HFSC_RSC]) {
		rsc = nla_data(tb[TCA_HFSC_RSC]);
		if (rsc->m1 == 0 && rsc->m2 == 0)
			rsc = NULL;
	}

	if (tb[TCA_HFSC_FSC]) {
		fsc = nla_data(tb[TCA_HFSC_FSC]);
		if (fsc->m1 == 0 && fsc->m2 == 0)
			fsc = NULL;
	}

	if (tb[TCA_HFSC_USC]) {
		usc = nla_data(tb[TCA_HFSC_USC]);
		if (usc->m1 == 0 && usc->m2 == 0)
			usc = NULL;
	}

	if (cl != NULL) {
		if (parentid) {
			if (cl->cl_parent &&
			    cl->cl_parent->cl_common.classid != parentid)
				return -EINVAL;
			if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
				return -EINVAL;
		}
		cur_time = psched_get_time();

		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    qdisc_root_sleeping_running(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}

		sch_tree_lock(sch);
		if (rsc != NULL)
			hfsc_change_rsc(cl, rsc, cur_time);
		if (fsc != NULL)
			hfsc_change_fsc(cl, fsc);
		if (usc != NULL)
			hfsc_change_usc(cl, usc, cur_time);

		if (cl->qdisc->q.qlen != 0) {
			if (cl->cl_flags & HFSC_RSC)
				update_ed(cl, qdisc_peek_len(cl->qdisc));
			if (cl->cl_flags & HFSC_FSC)
				update_vf(cl, 0, cur_time);
		}
		sch_tree_unlock(sch);

		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EEXIST;

	parent = &q->root;
	if (parentid) {
		parent = hfsc_find_class(parentid, sch);
		if (parent == NULL)
			return -ENOENT;
	}

	if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
		return -EINVAL;
	if (hfsc_find_class(classid, sch))
		return -EEXIST;

	if (rsc == NULL && fsc == NULL)
		return -EINVAL;

	cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	err = tcf_block_get(&cl->block, &cl->filter_list);
	if (err) {
		kfree(cl);
		return err;
	}

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
					NULL,
					qdisc_root_sleeping_running(sch),
					tca[TCA_RATE]);
		if (err) {
			tcf_block_put(cl->block);
			kfree(cl);
			return err;
		}
	}

	if (rsc != NULL)
		hfsc_change_rsc(cl, rsc, 0);
	if (fsc != NULL)
		hfsc_change_fsc(cl, fsc);
	if (usc != NULL)
		hfsc_change_usc(cl, usc, 0);

	cl->cl_common.classid = classid;
	cl->refcnt    = 1;
	cl->sched     = q;
	cl->cl_parent = parent;
	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	else
		qdisc_hash_add(cl->qdisc, true);
	INIT_LIST_HEAD(&cl->children);
	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
	list_add_tail(&cl->siblings, &parent->children);
	if (parent->level == 0)
		hfsc_purge_queue(sch, parent);
	hfsc_adjust_levels(parent);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}
static void
hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
	struct hfsc_sched *q = qdisc_priv(sch);

	tcf_block_put(cl->block);
	qdisc_destroy(cl->qdisc);
	gen_kill_estimator(&cl->rate_est);
	if (cl != &q->root)
		kfree(cl);
}
static int
hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
		return -EBUSY;

	sch_tree_lock(sch);

	list_del(&cl->siblings);
	hfsc_adjust_levels(cl->cl_parent);

	hfsc_purge_queue(sch, cl);
	qdisc_class_hash_remove(&q->clhash, &cl->cl_common);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	sch_tree_unlock(sch);
	return 0;
}
static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *head, *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
	    (cl = hfsc_find_class(skb->priority, sch)) != NULL)
		if (cl->level == 0)
			return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	head = &q->root;
	tcf = rcu_dereference_bh(q->root.filter_list);
	while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct hfsc_class *)res.class;
		if (!cl) {
			cl = hfsc_find_class(res.classid, sch);
			if (!cl)
				break; /* filter selected invalid classid */
			if (cl->level >= head->level)
				break; /* filter may only point downwards */
		}

		if (cl->level == 0)
			return cl; /* hit leaf class */

		/* apply inner filter chain */
		tcf = rcu_dereference_bh(cl->filter_list);
		head = cl;
	}

	/* classification failed, try default class */
	cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (cl == NULL || cl->level > 0)
		return NULL;

	return cl;
}
static int
hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		 struct Qdisc **old)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0)
		return -EINVAL;
	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					cl->cl_common.classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	return 0;
}
static struct Qdisc *
hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level == 0)
		return cl->qdisc;

	return NULL;
}
static void
hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->qdisc->q.qlen == 0) {
		update_vf(cl, 0, 0);
		set_passive(cl);
	}
}
static unsigned long
hfsc_get_class(struct Qdisc *sch, u32 classid)
{
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL)
		cl->refcnt++;

	return (unsigned long)cl;
}
static void
hfsc_put_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (--cl->refcnt == 0)
		hfsc_destroy_class(sch, cl);
}
static unsigned long
hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	struct hfsc_class *p = (struct hfsc_class *)parent;
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL) {
		if (p != NULL && p->level <= cl->level)
			return 0;
		cl->filter_cnt++;
	}

	return (unsigned long)cl;
}
static void
hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	cl->filter_cnt--;
}
static struct tcf_block *hfsc_tcf_block(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl == NULL)
		cl = &q->root;

	return cl->block;
}
static int
hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
{
	struct tc_service_curve tsc;

	tsc.m1 = sm2m(sc->sm1);
	tsc.d  = dx2d(sc->dx);
	tsc.m2 = sm2m(sc->sm2);
	if (nla_put(skb, attr, sizeof(tsc), &tsc))
		goto nla_put_failure;

	return skb->len;

 nla_put_failure:
	return -1;
}
static int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{
	if ((cl->cl_flags & HFSC_RSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_FSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_USC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
		goto nla_put_failure;

	return skb->len;

 nla_put_failure:
	return -1;
}
static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
		struct tcmsg *tcm)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->cl_common.classid :
					  TC_H_ROOT;
	tcm->tcm_handle = cl->cl_common.classid;
	if (cl->level == 0)
		tcm->tcm_info = cl->qdisc->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (hfsc_dump_curves(skb, cl) < 0)
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

 nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
static int
hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		      struct gnet_dump *d)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct tc_hfsc_stats xstats;

	cl->qstats.backlog = cl->qdisc->qstats.backlog;
	xstats.level   = cl->level;
	xstats.period  = cl->cl_vtperiod;
	xstats.work    = cl->cl_total;
	xstats.rtwork  = cl->cl_cumul;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qdisc->q.qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}
static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i],
				     cl_common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}
static void
hfsc_schedule_watchdog(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	u64 next_time = 0;

	cl = eltree_get_minel(q);
	if (cl)
		next_time = cl->cl_e;
	if (q->root.cl_cfmin != 0) {
		if (next_time == 0 || next_time > q->root.cl_cfmin)
			next_time = q->root.cl_cfmin;
	}
	WARN_ON(next_time == 0);
	qdisc_watchdog_schedule(&q->watchdog, next_time);
}
static int
hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;
	int err;

	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	q->defcls = qopt->defcls;
	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	q->eligible = RB_ROOT;

	q->root.cl_common.classid = sch->handle;
	q->root.refcnt = 1;
	q->root.sched = q;
	q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					  sch->handle);
	if (q->root.qdisc == NULL)
		q->root.qdisc = &noop_qdisc;
	else
		qdisc_hash_add(q->root.qdisc, true);
	INIT_LIST_HEAD(&q->root.children);
	q->root.vt_tree = RB_ROOT;
	q->root.cf_tree = RB_ROOT;

	qdisc_class_hash_insert(&q->clhash, &q->root.cl_common);
	qdisc_class_hash_grow(sch, &q->clhash);

	qdisc_watchdog_init(&q->watchdog, sch);

	return 0;
}
static int
hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;

	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	sch_tree_lock(sch);
	q->defcls = qopt->defcls;
	sch_tree_unlock(sch);

	return 0;
}
static void
hfsc_reset_class(struct hfsc_class *cl)
{
	cl->cl_total        = 0;
	cl->cl_cumul        = 0;
	cl->cl_d            = 0;
	cl->cl_e            = 0;
	cl->cl_vt           = 0;
	cl->cl_vtadj        = 0;
	cl->cl_cvtmin       = 0;
	cl->cl_cvtoff       = 0;
	cl->cl_vtperiod     = 0;
	cl->cl_parentperiod = 0;
	cl->cl_f            = 0;
	cl->cl_myf          = 0;
	cl->cl_cfmin        = 0;
	cl->cl_nactive      = 0;

	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;
	qdisc_reset(cl->qdisc);

	if (cl->cl_flags & HFSC_RSC)
		rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
	if (cl->cl_flags & HFSC_FSC)
		rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
	if (cl->cl_flags & HFSC_USC)
		rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
}
static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
			hfsc_reset_class(cl);
	}
	q->eligible = RB_ROOT;
	qdisc_watchdog_cancel(&q->watchdog);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}
static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hlist_node *next;
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
			tcf_block_put(cl->block);
	}
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  cl_common.hnode)
			hfsc_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
	qdisc_watchdog_cancel(&q->watchdog);
}
static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_hfsc_qopt qopt;

	qopt.defcls = q->defcls;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;
	return skb->len;

 nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
	struct hfsc_class *cl;
	int uninitialized_var(err);

	cl = hfsc_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}

	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	if (cl->qdisc->q.qlen == 1) {
		set_active(cl, qdisc_pkt_len(skb));
		/*
		 * If this is the first packet, isolate the head so an eventual
		 * head drop before the first dequeue operation has no chance
		 * to invalidate the deadline.
		 */
		if (cl->cl_flags & HFSC_RSC)
			cl->qdisc->ops->peek(cl->qdisc);
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}
static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct sk_buff *skb;
	u64 cur_time;
	unsigned int next_len;
	int realtime = 0;

	if (sch->q.qlen == 0)
		return NULL;

	cur_time = psched_get_time();

	/*
	 * if there are eligible classes, use real-time criteria.
	 * find the class with the minimum deadline among
	 * the eligible classes.
	 */
	cl = eltree_get_mindl(q, cur_time);
	if (cl) {
		realtime = 1;
	} else {
		/*
		 * use link-sharing criteria
		 * get the class with the minimum vt in the hierarchy
		 */
		cl = vttree_get_minvt(&q->root, cur_time);
		if (cl == NULL) {
			qdisc_qstats_overlimit(sch);
			hfsc_schedule_watchdog(sch);
			return NULL;
		}
	}

	skb = qdisc_dequeue_peeked(cl->qdisc);
	if (skb == NULL) {
		qdisc_warn_nonwc("HFSC", cl->qdisc);
		return NULL;
	}

	bstats_update(&cl->bstats, skb);
	update_vf(cl, qdisc_pkt_len(skb), cur_time);
	if (realtime)
		cl->cl_cumul += qdisc_pkt_len(skb);

	if (cl->qdisc->q.qlen != 0) {
		if (cl->cl_flags & HFSC_RSC) {
			/* update ed */
			next_len = qdisc_peek_len(cl->qdisc);
			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

	qdisc_bstats_update(sch, skb);
	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;

	return skb;
}
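
/*
 * Added note: cl_cumul is only charged when the packet was sent under
 * the real-time criterion.  Work done on behalf of link-sharing does
 * not advance the real-time curves, which is what lets a class later
 * catch up to its guaranteed service.
 */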
static const struct Qdisc_class_ops hfsc_class_ops = {
	.change		= hfsc_change_class,
	.delete		= hfsc_delete_class,
	.graft		= hfsc_graft_class,
	.leaf		= hfsc_class_leaf,
	.qlen_notify	= hfsc_qlen_notify,
	.get		= hfsc_get_class,
	.put		= hfsc_put_class,
	.bind_tcf	= hfsc_bind_tcf,
	.unbind_tcf	= hfsc_unbind_tcf,
	.tcf_block	= hfsc_tcf_block,
	.dump		= hfsc_dump_class,
	.dump_stats	= hfsc_dump_class_stats,
	.walk		= hfsc_walk
};
static struct Qdisc_ops hfsc_qdisc_ops __read_mostly = {
	.id		= "hfsc",
	.init		= hfsc_init_qdisc,
	.change		= hfsc_change_qdisc,
	.reset		= hfsc_reset_qdisc,
	.destroy	= hfsc_destroy_qdisc,
	.dump		= hfsc_dump_qdisc,
	.enqueue	= hfsc_enqueue,
	.dequeue	= hfsc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.cl_ops		= &hfsc_class_ops,
	.priv_size	= sizeof(struct hfsc_sched),
	.owner		= THIS_MODULE
};
static int __init
hfsc_init(void)
{
	return register_qdisc(&hfsc_qdisc_ops);
}

static void __exit
hfsc_cleanup(void)
{
	unregister_qdisc(&hfsc_qdisc_ops);
}

MODULE_LICENSE("GPL");
module_init(hfsc_init);
module_exit(hfsc_cleanup);