/*
 * Copyright (c) 2003 Patrick McHardy, <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * 2003-10-17 - Ported from altq
 */
/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve. the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */
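
/*
 * Illustrative userspace configuration (an editorial sketch, not part
 * of the original file; see the tc-hfsc man pages for the authoritative
 * syntax).  A two-segment real-time curve plus link-sharing and an
 * upper limit might be set up roughly like this:
 *
 *	tc qdisc add dev eth0 root handle 1: hfsc default 10
 *	tc class add dev eth0 parent 1: classid 1:1 hfsc \
 *		sc rate 100mbit ul rate 100mbit
 *	tc class add dev eth0 parent 1:1 classid 1:10 hfsc \
 *		rt m1 20mbit d 10ms m2 10mbit ls m2 20mbit
 *
 * The (m1, d, m2) triples correspond to struct tc_service_curve and are
 * converted to the internal representation below.
 */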

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <asm/div64.h>

/*
 * kernel internal service curve representation:
 * coordinates are given by 64-bit unsigned integers.
 * x-axis: unit is clock count.
 * y-axis: unit is byte.
 *
 * The service curve parameters are converted to the internal
 * representation. The slope values are scaled to avoid overflow.
 * The inverse slope values as well as the y-projection of the 1st
 * segment are kept in order to avoid 64-bit divide operations
 * that are expensive on 32-bit architectures.
 */
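
/*
 * Geometry of the internal curve (a clarifying note derived from
 * sc2isc() below): the curve rises with slope sm1 for dx clock ticks,
 * gaining dy bytes (dy == seg_x2y(dx, sm1)), and continues with slope
 * sm2 afterwards.  sm1 > sm2 gives a concave "burst then sustain"
 * curve; sm1 < sm2 gives a convex one.
 */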

struct internal_sc {
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};

/* runtime service curve */
struct runtime_sc {
	u64	x;	/* current starting position on x-axis */
	u64	y;	/* current starting position on y-axis */
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};

enum hfsc_class_flags {
	HFSC_RSC = 0x1,
	HFSC_FSC = 0x2,
	HFSC_USC = 0x4
};

struct hfsc_class {
	struct Qdisc_class_common cl_common;
	unsigned int	refcnt;		/* usage count */

	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct net_rate_estimator __rcu *rate_est;
	struct tcf_proto __rcu *filter_list; /* filter list */
	struct tcf_block *block;
	unsigned int	filter_cnt;	/* filter count */
	unsigned int	level;		/* class level in hierarchy */

	struct hfsc_sched *sched;	/* scheduler data */
	struct hfsc_class *cl_parent;	/* parent class */
	struct list_head siblings;	/* sibling classes */
	struct list_head children;	/* child classes */
	struct Qdisc	*qdisc;		/* leaf qdisc */

	struct rb_node el_node;		/* qdisc's eligible tree member */
	struct rb_root vt_tree;		/* active children sorted by cl_vt */
	struct rb_node vt_node;		/* parent's vt_tree member */
	struct rb_root cf_tree;		/* active children sorted by cl_f */
	struct rb_node cf_node;		/* parent's cf_heap member */

	u64	cl_total;		/* total work in bytes */
	u64	cl_cumul;		/* cumulative work in bytes done by
					   real-time criteria */

	u64	cl_d;			/* deadline */
	u64	cl_e;			/* eligible time */
	u64	cl_vt;			/* virtual time */
	u64	cl_f;			/* time when this class will fit for
					   link-sharing, max(myf, cfmin) */
	u64	cl_myf;			/* my fit-time (calculated from this
					   class's own upperlimit curve) */
	u64	cl_cfmin;		/* earliest children's fit-time (used
					   with cl_myf to obtain cl_f) */
	u64	cl_cvtmin;		/* minimal virtual time among the
					   children fit for link-sharing
					   (monotonic within a period) */
	u64	cl_vtadj;		/* intra-period cumulative vt
					   adjustment */
	u64	cl_cvtoff;		/* largest virtual time seen among
					   the children */

	struct internal_sc cl_rsc;	/* internal real-time service curve */
	struct internal_sc cl_fsc;	/* internal fair service curve */
	struct internal_sc cl_usc;	/* internal upperlimit service curve */
	struct runtime_sc cl_deadline;	/* deadline curve */
	struct runtime_sc cl_eligible;	/* eligible curve */
	struct runtime_sc cl_virtual;	/* virtual curve */
	struct runtime_sc cl_ulimit;	/* upperlimit curve */

	u8		cl_flags;	/* which curves are valid */
	u32		cl_vtperiod;	/* vt period sequence number */
	u32		cl_parentperiod;/* parent's vt period sequence number*/
	u32		cl_nactive;	/* number of active children */
};

struct hfsc_sched {
	u16	defcls;				/* default class id */
	struct hfsc_class root;			/* root class */
	struct Qdisc_class_hash clhash;		/* class hash */
	struct rb_root eligible;		/* eligible tree */
	struct qdisc_watchdog watchdog;		/* watchdog timer */
};

#define	HT_INFINITY	0xffffffffffffffffULL	/* infinite time value */

/*
 * eligible tree holds backlogged classes sorted by their eligible times.
 * there is one eligible tree per hfsc instance.
 */

static void
eltree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->sched->eligible.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, el_node);
		if (cl->cl_e >= cl1->cl_e)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->el_node, parent, p);
	rb_insert_color(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_update(struct hfsc_class *cl)
{
	eltree_remove(cl);
	eltree_insert(cl);
}

/* find the class with the minimum deadline among the eligible classes */
static inline struct hfsc_class *
eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
{
	struct hfsc_class *p, *cl = NULL;
	struct rb_node *n;

	for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, el_node);
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return cl;
}

/* find the class with minimum eligible time among the eligible classes */
static inline struct hfsc_class *
eltree_get_minel(struct hfsc_sched *q)
{
	struct rb_node *n;

	n = rb_first(&q->eligible);
	if (n == NULL)
		return NULL;
	return rb_entry(n, struct hfsc_class, el_node);
}

/*
 * vttree holds backlogged child classes sorted by their virtual time.
 * each intermediate class has one vttree.
 */
static void
vttree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, vt_node);
		if (cl->cl_vt >= cl1->cl_vt)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->vt_node, parent, p);
	rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_update(struct hfsc_class *cl)
{
	vttree_remove(cl);
	vttree_insert(cl);
}

static inline struct hfsc_class *
vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
{
	struct hfsc_class *p;
	struct rb_node *n;

	for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, vt_node);
		if (p->cl_f <= cur_time)
			return p;
	}
	return NULL;
}

/*
 * get the leaf class with the minimum vt in the hierarchy
 */
static struct hfsc_class *
vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
{
	/* if root-class's cfmin is bigger than cur_time nothing to do */
	if (cl->cl_cfmin > cur_time)
		return NULL;

	while (cl->level > 0) {
		cl = vttree_firstfit(cl, cur_time);
		if (cl == NULL)
			return NULL;
		/*
		 * update parent's cl_cvtmin.
		 */
		if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
			cl->cl_parent->cl_cvtmin = cl->cl_vt;
	}
	return cl;
}

static void
cftree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, cf_node);
		if (cl->cl_f >= cl1->cl_f)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->cf_node, parent, p);
	rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_update(struct hfsc_class *cl)
{
	cftree_remove(cl);
	cftree_insert(cl);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bps
 *	d: us
 *  internal service curve parameters
 *	sm: (bytes/psched_us) << SM_SHIFT
 *	ism: (psched_us/byte) << ISM_SHIFT
 *	dx: psched_us
 *
 * The clock source resolution with ktime and PSCHED_SHIFT 10 is 1.024us.
 *
 * sm and ism are scaled in order to keep effective digits.
 * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
 * digits in decimal using the following table.
 *
 *  bits/sec      100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
 *  ------------+-------------------------------------------------------
 *  bytes/1.024us 12.8e-3    128e-3    1280e-3    12800e-3   128000e-3
 *
 *  1.024us/byte  78.125      7.8125    0.78125    0.078125   0.0078125
 *
 * So, for PSCHED_SHIFT 10 we need: SM_SHIFT 20, ISM_SHIFT 18.
 */
#define	SM_SHIFT	(30 - PSCHED_SHIFT)
#define	ISM_SHIFT	(8 + PSCHED_SHIFT)

#define	SM_MASK		((1ULL << SM_SHIFT) - 1)
#define	ISM_MASK	((1ULL << ISM_SHIFT) - 1)
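
/*
 * Worked example (an editorial note, assuming PSCHED_SHIFT == 10 as in
 * the table above, i.e. 1.024us ticks and SM_SHIFT == 20): a rate of
 * 1 Mbit/s is passed down as m = 125000 bytes/sec.  That is roughly
 * 0.128 bytes per tick, so m2sm() below yields about 0.128 << 20
 * ~= 134218, and m2ism() yields about 7.8125 << 18 ~= 2048000 ticks
 * per byte.  Both keep more than 4 significant decimal digits after
 * scaling, as the comment above requires.
 */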

static inline u64
seg_x2y(u64 x, u64 sm)
{
	u64 y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but divide it for the upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return y;
}
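
/*
 * Why the split above (editorial note): forming the full product
 * x * sm in a single u64 multiplication could overflow even when the
 * final result y fits in 64 bits.  Splitting x into its upper bits
 * (x >> SM_SHIFT) and lower bits (x & SM_MASK) keeps each partial
 * product within u64 range for the time deltas and scaled rates used
 * here, at the cost of rounding the low part down.
 */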

static inline u64
seg_y2x(u64 y, u64 ism)
{
	u64 x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else {
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
	}
	return x;
}

/* convert m (bps) into sm (bytes/psched us) */
static u64
m2sm(u32 m)
{
	u64 sm;

	sm = ((u64)m << SM_SHIFT);
	sm += PSCHED_TICKS_PER_SEC - 1;
	do_div(sm, PSCHED_TICKS_PER_SEC);
	return sm;
}

/* convert m (bps) into ism (psched us/byte) */
static u64
m2ism(u32 m)
{
	u64 ism;

	if (m == 0)
		ism = HT_INFINITY;
	else {
		ism = ((u64)PSCHED_TICKS_PER_SEC << ISM_SHIFT);
		ism += m - 1;
		do_div(ism, m);
	}
	return ism;
}

/* convert d (us) into dx (psched us) */
static u64
d2dx(u32 d)
{
	u64 dx;

	dx = ((u64)d * PSCHED_TICKS_PER_SEC);
	dx += USEC_PER_SEC - 1;
	do_div(dx, USEC_PER_SEC);
	return dx;
}

/* convert sm (bytes/psched us) into m (bps) */
static u32
sm2m(u64 sm)
{
	u64 m;

	m = (sm * PSCHED_TICKS_PER_SEC) >> SM_SHIFT;
	return (u32)m;
}

/* convert dx (psched us) into d (us) */
static u32
dx2d(u64 dx)
{
	u64 d;

	d = dx * USEC_PER_SEC;
	do_div(d, PSCHED_TICKS_PER_SEC);
	return (u32)d;
}

static void
sc2isc(struct tc_service_curve *sc, struct internal_sc *isc)
{
	isc->sm1  = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx   = d2dx(sc->d);
	isc->dy   = seg_x2y(isc->dx, isc->sm1);
	isc->sm2  = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	rtsc->x    = x;
	rtsc->y    = y;
	rtsc->sm1  = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx   = isc->dx;
	rtsc->dy   = isc->dy;
	rtsc->sm2  = isc->sm2;
	rtsc->ism2 = isc->ism2;
}

/*
 * calculate the x-projection of the runtime service curve by the
 * given y-projection value
 */
static u64
rtsc_y2x(struct runtime_sc *rtsc, u64 y)
{
	u64 x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return x;
}

static u64
rtsc_x2y(struct runtime_sc *rtsc, u64 x)
{
	u64 y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return y;
}

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	u64 y1, y2, dx, dy;
	u32 dsm;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
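	/*
	 * Solving that equality for dx (editorial derivation, ignoring
	 * the rounding inside seg_x2y()):
	 *	dx * sm1 >> SM_SHIFT == (dx * sm2 >> SM_SHIFT) + (y1 - y)
	 *	dx * (sm1 - sm2)     == (y1 - y) << SM_SHIFT
	 *	dx                   == ((y1 - y) << SM_SHIFT) / (sm1 - sm2)
	 * which is exactly what the next three statements compute.
	 */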
	dx = (y1 - y) << SM_SHIFT;
	dsm = isc->sm1 - isc->sm2;
	do_div(dx, dsm);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
}

static void
init_ed(struct hfsc_class *cl, unsigned int next_len)
{
	u64 cur_time = psched_get_time();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
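	/*
	 * (Clarifying note: zeroing dx and dy below removes the first
	 * segment, so a convex curve's eligible curve degenerates to
	 * the line of slope m2 through (cur_time, cl_cumul), while a
	 * concave curve keeps the full deadline curve.)
	 */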
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_update(cl);
}

static inline void
update_d(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static inline void
update_cfmin(struct hfsc_class *cl)
{
	struct rb_node *n = rb_first(&cl->cf_tree);
	struct hfsc_class *p;

	if (n == NULL) {
		cl->cl_cfmin = 0;
		return;
	}
	p = rb_entry(n, struct hfsc_class, cf_node);
	cl->cl_cfmin = p->cl_f;
}

static void
init_vf(struct hfsc_class *cl, unsigned int len)
{
	struct hfsc_class *max_cl;
	struct rb_node *n;
	u64 vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			n = rb_last(&cl->cl_parent->vt_tree);
			if (n != NULL) {
				max_cl = rb_entry(n, struct hfsc_class, vt_node);
				/*
				 * set vt to the average of the min and max
				 * classes. if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
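				/*
				 * (For example: with cl_cvtmin == 100 and a
				 * maximum sibling vt of 300, a newly active
				 * class starts at vt == 200, neither claiming
				 * credit for past idleness nor being starved.)
				 */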
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * initialize cl_vt to the highest value seen
				 * among the siblings. this is analogous to
				 * what cur_time would provide in realtime case.
				 */
				cl->cl_vt = cl->cl_parent->cl_cvtoff;
				cl->cl_parent->cl_cvtmin = 0;
			}

			/* update the virtual curve */
			rtsc_min(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			vttree_insert(cl);
			cftree_insert(cl);

			if (cl->cl_flags & HFSC_USC) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = psched_get_time();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
					 cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
						      cl->cl_total);
			}
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
		}
		update_cfmin(cl->cl_parent);
	}
}

static void
update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
{
	u64 f; /* , myf_bound, delta; */
	int go_passive = 0;

	if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
		go_passive = 1;

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		/* update vt */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total) + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
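		/*
		 * (That is, cl_vtadj accumulates the gap so that the
		 * class's vt never falls behind the parent's cvtmin
		 * within a backlog period.)
		 */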
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtoff of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtoff)
				cl->cl_parent->cl_cvtoff = cl->cl_vt;

			/* remove this class from the vt tree */
			vttree_remove(cl);

			cftree_remove(cl);
			update_cfmin(cl->cl_parent);

			continue;
		}

		/* update the vt tree */
		vttree_update(cl);

		/* update f */
		if (cl->cl_flags & HFSC_USC) {
			cl->cl_myf = rtsc_y2x(&cl->cl_ulimit, cl->cl_total);
#if 0
			cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
							      cl->cl_total);
			/*
			 * This code causes classes to stay way under their
			 * limit when multiple classes are used at gigabit
			 * speed. needs investigation. -kaber
			 */
			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - PSCHED_JIFFIE2US(1);
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
#endif
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
set_active(struct hfsc_class *cl, unsigned int len)
{
	if (cl->cl_flags & HFSC_RSC)
		init_ed(cl, len);
	if (cl->cl_flags & HFSC_FSC)
		init_vf(cl, len);
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_flags & HFSC_RSC)
		eltree_remove(cl);

	/*
	 * vttree removal is now handled in update_vf(), so update_vf(cl, 0, 0)
	 * must be called explicitly to remove a class from the vttree.
	 */
}

static unsigned int
qdisc_peek_len(struct Qdisc *sch)
{
	struct sk_buff *skb;
	unsigned int len;

	skb = sch->ops->peek(sch);
	if (unlikely(skb == NULL)) {
		qdisc_warn_nonwc("qdisc_peek_len", sch);
		return 0;
	}
	len = qdisc_pkt_len(skb);

	return len;
}

static void
hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;
	unsigned int backlog = cl->qdisc->qstats.backlog;

	qdisc_reset(cl->qdisc);
	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
}

static void
hfsc_adjust_levels(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	unsigned int level;

	do {
		level = 0;
		list_for_each_entry(p, &cl->children, siblings) {
			if (p->level >= level)
				level = p->level + 1;
		}
		cl->level = level;
	} while ((cl = cl->cl_parent) != NULL);
}

static inline struct hfsc_class *
hfsc_find_class(u32 classid, struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct hfsc_class, cl_common);
}

static void
hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
		u64 cur_time)
{
	sc2isc(rsc, &cl->cl_rsc);
	rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}
	cl->cl_flags |= HFSC_RSC;
}

static void
hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
{
	sc2isc(fsc, &cl->cl_fsc);
	rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
	cl->cl_flags |= HFSC_FSC;
}

static void
hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
		u64 cur_time)
{
	sc2isc(usc, &cl->cl_usc);
	rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
	cl->cl_flags |= HFSC_USC;
}

static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
	[TCA_HFSC_RSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_FSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_USC]	= { .len = sizeof(struct tc_service_curve) },
};

static int
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
		  struct nlattr **tca, unsigned long *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)*arg;
	struct hfsc_class *parent = NULL;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_HFSC_MAX + 1];
	struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
	u64 cur_time;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_HFSC_MAX, opt, hfsc_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_HFSC_RSC]) {
		rsc = nla_data(tb[TCA_HFSC_RSC]);
		if (rsc->m1 == 0 && rsc->m2 == 0)
			rsc = NULL;
	}

	if (tb[TCA_HFSC_FSC]) {
		fsc = nla_data(tb[TCA_HFSC_FSC]);
		if (fsc->m1 == 0 && fsc->m2 == 0)
			fsc = NULL;
	}

	if (tb[TCA_HFSC_USC]) {
		usc = nla_data(tb[TCA_HFSC_USC]);
		if (usc->m1 == 0 && usc->m2 == 0)
			usc = NULL;
	}

	if (cl != NULL) {
		if (parentid) {
			if (cl->cl_parent &&
			    cl->cl_parent->cl_common.classid != parentid)
				return -EINVAL;
			if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
				return -EINVAL;
		}
		cur_time = psched_get_time();

		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    qdisc_root_sleeping_running(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}

		sch_tree_lock(sch);
		if (rsc != NULL)
			hfsc_change_rsc(cl, rsc, cur_time);
		if (fsc != NULL)
			hfsc_change_fsc(cl, fsc);
		if (usc != NULL)
			hfsc_change_usc(cl, usc, cur_time);

		if (cl->qdisc->q.qlen != 0) {
			if (cl->cl_flags & HFSC_RSC)
				update_ed(cl, qdisc_peek_len(cl->qdisc));
			if (cl->cl_flags & HFSC_FSC)
				update_vf(cl, 0, cur_time);
		}
		sch_tree_unlock(sch);

		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EEXIST;

	parent = &q->root;
	if (parentid) {
		parent = hfsc_find_class(parentid, sch);
		if (parent == NULL)
			return -ENOENT;
	}

	if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
		return -EINVAL;
	if (hfsc_find_class(classid, sch))
		return -EEXIST;

	if (rsc == NULL && fsc == NULL)
		return -EINVAL;

	cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	err = tcf_block_get(&cl->block, &cl->filter_list);
	if (err) {
		kfree(cl);
		return err;
	}

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
					NULL,
					qdisc_root_sleeping_running(sch),
					tca[TCA_RATE]);
		if (err) {
			tcf_block_put(cl->block);
			kfree(cl);
			return err;
		}
	}

	if (rsc != NULL)
		hfsc_change_rsc(cl, rsc, 0);
	if (fsc != NULL)
		hfsc_change_fsc(cl, fsc);
	if (usc != NULL)
		hfsc_change_usc(cl, usc, 0);

	cl->cl_common.classid = classid;
	cl->refcnt    = 1;
	cl->sched     = q;
	cl->cl_parent = parent;
	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	else
		qdisc_hash_add(cl->qdisc, true);
	INIT_LIST_HEAD(&cl->children);
	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
	list_add_tail(&cl->siblings, &parent->children);
	if (parent->level == 0)
		hfsc_purge_queue(sch, parent);
	hfsc_adjust_levels(parent);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}

static void
hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
	struct hfsc_sched *q = qdisc_priv(sch);

	tcf_block_put(cl->block);
	qdisc_destroy(cl->qdisc);
	gen_kill_estimator(&cl->rate_est);
	if (cl != &q->root)
		kfree(cl);
}

static int
hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
		return -EBUSY;

	sch_tree_lock(sch);

	list_del(&cl->siblings);
	hfsc_adjust_levels(cl->cl_parent);

	hfsc_purge_queue(sch, cl);
	qdisc_class_hash_remove(&q->clhash, &cl->cl_common);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	sch_tree_unlock(sch);
	return 0;
}

static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *head, *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
	    (cl = hfsc_find_class(skb->priority, sch)) != NULL)
		if (cl->level == 0)
			return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	head = &q->root;
	tcf = rcu_dereference_bh(q->root.filter_list);
	while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct hfsc_class *)res.class;
		if (!cl) {
			cl = hfsc_find_class(res.classid, sch);
			if (!cl)
				break; /* filter selected invalid classid */
			if (cl->level >= head->level)
				break; /* filter may only point downwards */
		}

		if (cl->level == 0)
			return cl; /* hit leaf class */

		/* apply inner filter chain */
		tcf = rcu_dereference_bh(cl->filter_list);
		head = cl;
	}

	/* classification failed, try default class */
	cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (cl == NULL || cl->level > 0)
		return NULL;

	return cl;
}

static int
hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		 struct Qdisc **old)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0)
		return -EINVAL;
	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					cl->cl_common.classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	return 0;
}

static struct Qdisc *
hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level == 0)
		return cl->qdisc;

	return NULL;
}

static void
hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->qdisc->q.qlen == 0) {
		update_vf(cl, 0, 0);
		set_passive(cl);
	}
}

static unsigned long
hfsc_get_class(struct Qdisc *sch, u32 classid)
{
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL)
		cl->refcnt++;

	return (unsigned long)cl;
}

static void
hfsc_put_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (--cl->refcnt == 0)
		hfsc_destroy_class(sch, cl);
}

static unsigned long
hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	struct hfsc_class *p = (struct hfsc_class *)parent;
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL) {
		if (p != NULL && p->level <= cl->level)
			return 0;
		cl->filter_cnt++;
	}

	return (unsigned long)cl;
}

static void
hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	cl->filter_cnt--;
}

static struct tcf_block *hfsc_tcf_block(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl == NULL)
		cl = &q->root;

	return cl->block;
}

static int
hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
{
	struct tc_service_curve tsc;

	tsc.m1 = sm2m(sc->sm1);
	tsc.d  = dx2d(sc->dx);
	tsc.m2 = sm2m(sc->sm2);
	if (nla_put(skb, attr, sizeof(tsc), &tsc))
		goto nla_put_failure;

	return skb->len;

 nla_put_failure:
	return -1;
}

static int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{
	if ((cl->cl_flags & HFSC_RSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_FSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_USC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
		goto nla_put_failure;

	return skb->len;

 nla_put_failure:
	return -1;
}

static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
		struct tcmsg *tcm)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->cl_common.classid :
					  TC_H_ROOT;
	tcm->tcm_handle = cl->cl_common.classid;
	if (cl->level == 0)
		tcm->tcm_info = cl->qdisc->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (hfsc_dump_curves(skb, cl) < 0)
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

 nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int
hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		      struct gnet_dump *d)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct tc_hfsc_stats xstats;

	cl->qstats.backlog = cl->qdisc->qstats.backlog;
	xstats.level  = cl->level;
	xstats.period = cl->cl_vtperiod;
	xstats.work   = cl->cl_total;
	xstats.rtwork = cl->cl_cumul;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qdisc->q.qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i],
				     cl_common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static void
hfsc_schedule_watchdog(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	u64 next_time = 0;

	cl = eltree_get_minel(q);
	if (cl)
		next_time = cl->cl_e;
	if (q->root.cl_cfmin != 0) {
		if (next_time == 0 || next_time > q->root.cl_cfmin)
			next_time = q->root.cl_cfmin;
	}
	WARN_ON(next_time == 0);
	qdisc_watchdog_schedule(&q->watchdog, next_time);
}

static int
hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;
	int err;

	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	q->defcls = qopt->defcls;
	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	q->eligible = RB_ROOT;

	q->root.cl_common.classid = sch->handle;
	q->root.refcnt = 1;
	q->root.sched  = q;
	q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					  sch->handle);
	if (q->root.qdisc == NULL)
		q->root.qdisc = &noop_qdisc;
	else
		qdisc_hash_add(q->root.qdisc, true);
	INIT_LIST_HEAD(&q->root.children);
	q->root.vt_tree = RB_ROOT;
	q->root.cf_tree = RB_ROOT;

	qdisc_class_hash_insert(&q->clhash, &q->root.cl_common);
	qdisc_class_hash_grow(sch, &q->clhash);

	qdisc_watchdog_init(&q->watchdog, sch);

	return 0;
}

static int
hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;

	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	sch_tree_lock(sch);
	q->defcls = qopt->defcls;
	sch_tree_unlock(sch);

	return 0;
}

static void
hfsc_reset_class(struct hfsc_class *cl)
{
	cl->cl_total        = 0;
	cl->cl_cumul        = 0;
	cl->cl_d            = 0;
	cl->cl_e            = 0;
	cl->cl_vt           = 0;
	cl->cl_vtadj        = 0;
	cl->cl_cvtmin       = 0;
	cl->cl_cvtoff       = 0;
	cl->cl_vtperiod     = 0;
	cl->cl_parentperiod = 0;
	cl->cl_f            = 0;
	cl->cl_myf          = 0;
	cl->cl_cfmin        = 0;
	cl->cl_nactive      = 0;

	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;
	qdisc_reset(cl->qdisc);

	if (cl->cl_flags & HFSC_RSC)
		rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
	if (cl->cl_flags & HFSC_FSC)
		rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
	if (cl->cl_flags & HFSC_USC)
		rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
}

static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
			hfsc_reset_class(cl);
	}
	q->eligible = RB_ROOT;
	qdisc_watchdog_cancel(&q->watchdog);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}

static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hlist_node *next;
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
			tcf_block_put(cl->block);
	}
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  cl_common.hnode)
			hfsc_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
	qdisc_watchdog_cancel(&q->watchdog);
}

static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_hfsc_qopt qopt;

	qopt.defcls = q->defcls;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;
	return skb->len;

 nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
	struct hfsc_class *cl;
	int uninitialized_var(err);

	cl = hfsc_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}

	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	if (cl->qdisc->q.qlen == 1) {
		set_active(cl, qdisc_pkt_len(skb));
		/*
		 * If this is the first packet, isolate the head so an eventual
		 * head drop before the first dequeue operation has no chance
		 * to invalidate the deadline.
		 */
		if (cl->cl_flags & HFSC_RSC)
			cl->qdisc->ops->peek(cl->qdisc);
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}

static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct sk_buff *skb;
	u64 cur_time;
	unsigned int next_len;
	int realtime = 0;

	if (sch->q.qlen == 0)
		return NULL;

	cur_time = psched_get_time();

	/*
	 * if there are eligible classes, use real-time criteria.
	 * find the class with the minimum deadline among
	 * the eligible classes.
	 */
	cl = eltree_get_mindl(q, cur_time);
	if (cl) {
		realtime = 1;
	} else {
		/*
		 * use link-sharing criteria
		 * get the class with the minimum vt in the hierarchy
		 */
		cl = vttree_get_minvt(&q->root, cur_time);
		if (cl == NULL) {
			qdisc_qstats_overlimit(sch);
			hfsc_schedule_watchdog(sch);
			return NULL;
		}
	}

	skb = qdisc_dequeue_peeked(cl->qdisc);
	if (skb == NULL) {
		qdisc_warn_nonwc("HFSC", cl->qdisc);
		return NULL;
	}

	bstats_update(&cl->bstats, skb);
	update_vf(cl, qdisc_pkt_len(skb), cur_time);
	if (realtime)
		cl->cl_cumul += qdisc_pkt_len(skb);

	if (cl->qdisc->q.qlen != 0) {
		if (cl->cl_flags & HFSC_RSC) {
			/* update ed */
			next_len = qdisc_peek_len(cl->qdisc);
			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

	qdisc_bstats_update(sch, skb);
	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;

	return skb;
}

static const struct Qdisc_class_ops hfsc_class_ops = {
	.change		= hfsc_change_class,
	.delete		= hfsc_delete_class,
	.graft		= hfsc_graft_class,
	.leaf		= hfsc_class_leaf,
	.qlen_notify	= hfsc_qlen_notify,
	.get		= hfsc_get_class,
	.put		= hfsc_put_class,
	.bind_tcf	= hfsc_bind_tcf,
	.unbind_tcf	= hfsc_unbind_tcf,
	.tcf_block	= hfsc_tcf_block,
	.dump		= hfsc_dump_class,
	.dump_stats	= hfsc_dump_class_stats,
	.walk		= hfsc_walk
};

static struct Qdisc_ops hfsc_qdisc_ops __read_mostly = {
	.id		= "hfsc",
	.init		= hfsc_init_qdisc,
	.change		= hfsc_change_qdisc,
	.reset		= hfsc_reset_qdisc,
	.destroy	= hfsc_destroy_qdisc,
	.dump		= hfsc_dump_qdisc,
	.enqueue	= hfsc_enqueue,
	.dequeue	= hfsc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.cl_ops		= &hfsc_class_ops,
	.priv_size	= sizeof(struct hfsc_sched),
	.owner		= THIS_MODULE
};

static int __init
hfsc_init(void)
{
	return register_qdisc(&hfsc_qdisc_ops);
}

static void __exit
hfsc_cleanup(void)
{
	unregister_qdisc(&hfsc_qdisc_ops);
}

MODULE_LICENSE("GPL");
module_init(hfsc_init);
module_exit(hfsc_cleanup);