/*
 * Copyright (c) 2003 Patrick McHardy, <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * 2003-10-17 - Ported from altq
 */
/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve.  the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */
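/*
 * Illustrative configuration (not part of the original file): with iproute2,
 * a hierarchy of the kind handled below is typically set up roughly as
 *
 *	tc qdisc add dev eth0 root handle 1: hfsc default 10
 *	tc class add dev eth0 parent 1: classid 1:1 hfsc sc rate 100mbit ul rate 100mbit
 *	tc class add dev eth0 parent 1:1 classid 1:10 hfsc rt m1 20mbit d 10ms m2 10mbit ls rate 50mbit
 *
 * where "rt", "ls" and "ul" correspond to the HFSC_RSC, HFSC_FSC and
 * HFSC_USC curves below; the exact option spelling depends on the tc version.
 */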

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <asm/div64.h>

/*
 * kernel internal service curve representation:
 *   coordinates are given by 64 bit unsigned integers.
 *   x-axis: unit is clock count.
 *   y-axis: unit is byte.
 *
 *   The service curve parameters are converted to the internal
 *   representation. The slope values are scaled to avoid overflow.
 *   the inverse slope values as well as the y-projection of the 1st
 *   segment are kept in order to avoid 64-bit divide operations
 *   that are expensive on 32-bit architectures.
 */
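/*
 * Worked example (for illustration only, not part of the original code):
 * a two-segment curve of 20Mbit/s for 10ms followed by 10Mbit/s arrives as
 * (m1, d, m2) and is converted by sc2isc() below into roughly
 * dx = d2dx(10000) ~= 9766 ticks and dy = seg_x2y(dx, sm1) ~= 25000 bytes,
 * with sm1/sm2 the scaled slopes and ism1/ism2 their scaled inverses.
 */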

struct internal_sc
{
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};

/* runtime service curve */
struct runtime_sc
{
	u64	x;	/* current starting position on x-axis */
	u64	y;	/* current starting position on y-axis */
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};

enum hfsc_class_flags
{
	HFSC_RSC = 0x1,
	HFSC_FSC = 0x2,
	HFSC_USC = 0x4
};

struct hfsc_class
{
	struct Qdisc_class_common cl_common;
	unsigned int	refcnt;		/* usage count */

	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	unsigned int	level;		/* class level in hierarchy */
	struct tcf_proto *filter_list;	/* filter list */
	unsigned int	filter_cnt;	/* filter count */

	struct hfsc_sched *sched;	/* scheduler data */
	struct hfsc_class *cl_parent;	/* parent class */
	struct list_head siblings;	/* sibling classes */
	struct list_head children;	/* child classes */
	struct Qdisc	*qdisc;		/* leaf qdisc */

	struct rb_node el_node;		/* qdisc's eligible tree member */
	struct rb_root vt_tree;		/* active children sorted by cl_vt */
	struct rb_node vt_node;		/* parent's vt_tree member */
	struct rb_root cf_tree;		/* active children sorted by cl_f */
	struct rb_node cf_node;		/* parent's cf_heap member */
	struct list_head dlist;		/* drop list member */

	u64	cl_total;		/* total work in bytes */
	u64	cl_cumul;		/* cumulative work in bytes done by
					   real-time criteria */

	u64	cl_d;			/* deadline */
	u64	cl_e;			/* eligible time */
	u64	cl_vt;			/* virtual time */
	u64	cl_f;			/* time when this class will fit for
					   link-sharing, max(myf, cfmin) */
	u64	cl_myf;			/* my fit-time (calculated from this
					   class's own upperlimit curve) */
	u64	cl_myfadj;		/* my fit-time adjustment (to cancel
					   history dependence) */
	u64	cl_cfmin;		/* earliest children's fit-time (used
					   with cl_myf to obtain cl_f) */
	u64	cl_cvtmin;		/* minimal virtual time among the
					   children fit for link-sharing
					   (monotonic within a period) */
	u64	cl_vtadj;		/* intra-period cumulative vt
					   adjustment */
	u64	cl_vtoff;		/* inter-period cumulative vt offset */
	u64	cl_cvtmax;		/* max child's vt in the last period */
	u64	cl_cvtoff;		/* cumulative cvtmax of all periods */
	u64	cl_pcvtoff;		/* parent's cvtoff at initialization
					   time */

	struct internal_sc cl_rsc;	/* internal real-time service curve */
	struct internal_sc cl_fsc;	/* internal fair service curve */
	struct internal_sc cl_usc;	/* internal upperlimit service curve */
	struct runtime_sc cl_deadline;	/* deadline curve */
	struct runtime_sc cl_eligible;	/* eligible curve */
	struct runtime_sc cl_virtual;	/* virtual curve */
	struct runtime_sc cl_ulimit;	/* upperlimit curve */

	unsigned long	cl_flags;	/* which curves are valid */
	unsigned long	cl_vtperiod;	/* vt period sequence number */
	unsigned long	cl_parentperiod;/* parent's vt period sequence number */
	unsigned long	cl_nactive;	/* number of active children */
};

struct hfsc_sched
{
	u16	defcls;				/* default class id */
	struct hfsc_class root;			/* root class */
	struct Qdisc_class_hash clhash;		/* class hash */
	struct rb_root eligible;		/* eligible tree */
	struct list_head droplist;		/* active leaf class list (for
						   dropping) */
	struct qdisc_watchdog watchdog;		/* watchdog timer */
};

#define	HT_INFINITY	0xffffffffffffffffULL	/* infinite time value */


/*
 * eligible tree holds backlogged classes being sorted by their eligible times.
 * there is one eligible tree per hfsc instance.
 */

static void
eltree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->sched->eligible.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, el_node);
		if (cl->cl_e >= cl1->cl_e)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->el_node, parent, p);
	rb_insert_color(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_update(struct hfsc_class *cl)
{
	eltree_remove(cl);
	eltree_insert(cl);
}

/* find the class with the minimum deadline among the eligible classes */
static inline struct hfsc_class *
eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
{
	struct hfsc_class *p, *cl = NULL;
	struct rb_node *n;

	for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, el_node);
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return cl;
}

/* find the class with minimum eligible time among the eligible classes */
static inline struct hfsc_class *
eltree_get_minel(struct hfsc_sched *q)
{
	struct rb_node *n;

	n = rb_first(&q->eligible);
	if (n == NULL)
		return NULL;
	return rb_entry(n, struct hfsc_class, el_node);
}
/*
 * vttree holds backlogged child classes being sorted by their virtual
 * time. each intermediate class has one vttree.
 */
static void
vttree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, vt_node);
		if (cl->cl_vt >= cl1->cl_vt)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->vt_node, parent, p);
	rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_update(struct hfsc_class *cl)
{
	vttree_remove(cl);
	vttree_insert(cl);
}

static inline struct hfsc_class *
vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
{
	struct hfsc_class *p;
	struct rb_node *n;

	for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, vt_node);
		if (p->cl_f <= cur_time)
			return p;
	}
	return NULL;
}

/*
 * get the leaf class with the minimum vt in the hierarchy
 */
static struct hfsc_class *
vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
{
	/* if root-class's cfmin is bigger than cur_time nothing to do */
	if (cl->cl_cfmin > cur_time)
		return NULL;

	while (cl->level > 0) {
		cl = vttree_firstfit(cl, cur_time);
		if (cl == NULL)
			return NULL;
		/*
		 * update parent's cl_cvtmin.
		 */
		if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
			cl->cl_parent->cl_cvtmin = cl->cl_vt;
	}
	return cl;
}

static void
cftree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, cf_node);
		if (cl->cl_f >= cl1->cl_f)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->cf_node, parent, p);
	rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_update(struct hfsc_class *cl)
{
	cftree_remove(cl);
	cftree_insert(cl);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bps
 *	d: us
 *  internal service curve parameters
 *	sm: (bytes/psched_us) << SM_SHIFT
 *	ism: (psched_us/byte) << ISM_SHIFT
 *	dx: psched_us
 *
 * The clock source resolution with ktime and PSCHED_SHIFT 10 is 1.024us.
 *
 *  sm and ism are scaled in order to keep effective digits.
 *  SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
 *  digits in decimal using the following table.
 *
 *  bits/sec      100Kbps     1Mbps     10Mbps     100Mbps     1Gbps
 *  ------------+-------------------------------------------------------
 *  bytes/1.024us 12.8e-3    128e-3    1280e-3    12800e-3    128000e-3
 *
 *  1.024us/byte  78.125     7.8125    0.78125    0.078125    0.0078125
 *
 * So, for PSCHED_SHIFT 10 we need: SM_SHIFT 20, ISM_SHIFT 18.
 */
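/*
 * Worked example (added for illustration, not part of the original code):
 * with PSCHED_SHIFT 10 one tick is 1.024us, so a 1Mbit/s slope
 * (m = 125000 bytes/s) is 0.128 bytes/tick and 7.8125 ticks/byte;
 * m2sm(125000) ~= 0.128 << 20 ~= 134218 and m2ism(125000) ~= 7.8125 << 18
 * ~= 2048000, i.e. both keep more than the 4 significant digits above.
 */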
#define	SM_SHIFT	(30 - PSCHED_SHIFT)
#define	ISM_SHIFT	(8 + PSCHED_SHIFT)

#define	SM_MASK		((1ULL << SM_SHIFT) - 1)
#define	ISM_MASK	((1ULL << ISM_SHIFT) - 1)

static inline u64
seg_x2y(u64 x, u64 sm)
{
	u64 y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but divide it for the upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return y;
}
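/*
 * Note added for illustration: writing x = q * 2^SM_SHIFT + r with
 * r < 2^SM_SHIFT, the sum above equals (x * sm) >> SM_SHIFT exactly, while
 * neither partial product needs the wider intermediate that a direct x * sm
 * could require for large x.
 */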

static inline u64
seg_y2x(u64 y, u64 ism)
{
	u64 x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else {
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
	}
	return x;
}

/* Convert m (bps) into sm (bytes/psched us) */
static u64
m2sm(u32 m)
{
	u64 sm;

	sm = ((u64)m << SM_SHIFT);
	sm += PSCHED_TICKS_PER_SEC - 1;
	do_div(sm, PSCHED_TICKS_PER_SEC);
	return sm;
}

/* convert m (bps) into ism (psched us/byte) */
static u64
m2ism(u32 m)
{
	u64 ism;

	if (m == 0)
		ism = HT_INFINITY;
	else {
		ism = ((u64)PSCHED_TICKS_PER_SEC << ISM_SHIFT);
		ism += m - 1;
		do_div(ism, m);
	}
	return ism;
}

/* convert d (us) into dx (psched us) */
static u64
d2dx(u32 d)
{
	u64 dx;

	dx = ((u64)d * PSCHED_TICKS_PER_SEC);
	dx += USEC_PER_SEC - 1;
	do_div(dx, USEC_PER_SEC);
	return dx;
}

/* convert sm (bytes/psched us) into m (bps) */
static u32
sm2m(u64 sm)
{
	u64 m;

	m = (sm * PSCHED_TICKS_PER_SEC) >> SM_SHIFT;
	return (u32)m;
}

/* convert dx (psched us) into d (us) */
static u32
dx2d(u64 dx)
{
	u64 d;

	d = dx * USEC_PER_SEC;
	do_div(d, PSCHED_TICKS_PER_SEC);
	return (u32)d;
}

static void
sc2isc(struct tc_service_curve *sc, struct internal_sc *isc)
{
	isc->sm1 = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx = d2dx(sc->d);
	isc->dy = seg_x2y(isc->dx, isc->sm1);
	isc->sm2 = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	rtsc->x = x;
	rtsc->y = y;
	rtsc->sm1 = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx = isc->dx;
	rtsc->dy = isc->dy;
	rtsc->sm2 = isc->sm2;
	rtsc->ism2 = isc->ism2;
}
/*
 * calculate the x-projection (time) of the runtime service curve for the
 * given y-projection (amount of service) value
 */
static u64
rtsc_y2x(struct runtime_sc *rtsc, u64 y)
{
	u64 x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return x;
}

static u64
rtsc_x2y(struct runtime_sc *rtsc, u64 x)
{
	u64 y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return y;
}

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	u64 y1, y2, dx, dy;
	u32 dsm;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
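	/*
	 * (note added for clarity) solving that relation for dx, with
	 * seg_x2y(dx, sm) ~= dx * sm >> SM_SHIFT, gives
	 *	dx ~= ((y1 - y) << SM_SHIFT) / (sm1 - sm2),
	 * which is what the shift and do_div() below compute.
	 */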
	dx = (y1 - y) << SM_SHIFT;
	dsm = isc->sm1 - isc->sm2;
	do_div(dx, dsm);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
}

static void
init_ed(struct hfsc_class *cl, unsigned int next_len)
{
	u64 cur_time = psched_get_time();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_update(cl);
}

static inline void
update_d(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static inline void
update_cfmin(struct hfsc_class *cl)
{
	struct rb_node *n = rb_first(&cl->cf_tree);
	struct hfsc_class *p;

	if (n == NULL) {
		cl->cl_cfmin = 0;
		return;
	}
	p = rb_entry(n, struct hfsc_class, cf_node);
	cl->cl_cfmin = p->cl_f;
}

static void
init_vf(struct hfsc_class *cl, unsigned int len)
{
	struct hfsc_class *max_cl;
	struct rb_node *n;
	u64 vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			n = rb_last(&cl->cl_parent->vt_tree);
			if (n != NULL) {
				max_cl = rb_entry(n, struct hfsc_class, vt_node);
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to cvtoff to make a new
				 * vt (vtoff + vt) larger than the vt in the
				 * last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				cl->cl_parent->cl_cvtoff += vt;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
				cl->cl_vt = 0;
			}

			cl->cl_vtoff = cl->cl_parent->cl_cvtoff -
							cl->cl_pcvtoff;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, &cl->cl_fsc, vt,
						      cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			vttree_insert(cl);
			cftree_insert(cl);

			if (cl->cl_flags & HFSC_USC) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = psched_get_time();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
					 cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
						      cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
		}
		update_cfmin(cl->cl_parent);
	}
}

static void
update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
{
	u64 f; /* , myf_bound, delta; */
	int go_passive = 0;

	if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
		go_passive = 1;

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt tree */
			vttree_remove(cl);

			cftree_remove(cl);
			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
			    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt tree */
		vttree_update(cl);

		if (cl->cl_flags & HFSC_USC) {
			cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
							      cl->cl_total);
#if 0
			/*
			 * This code causes classes to stay way under their
			 * limit when multiple classes are used at gigabit
			 * speed. needs investigation. -kaber
			 */
			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - PSCHED_JIFFIE2US(1);
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
#endif
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
set_active(struct hfsc_class *cl, unsigned int len)
{
	if (cl->cl_flags & HFSC_RSC)
		init_ed(cl, len);
	if (cl->cl_flags & HFSC_FSC)
		init_vf(cl, len);

	list_add_tail(&cl->dlist, &cl->sched->droplist);
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_flags & HFSC_RSC)
		eltree_remove(cl);

	list_del(&cl->dlist);

	/*
	 * vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from vttree.
	 */
}

static unsigned int
qdisc_peek_len(struct Qdisc *sch)
{
	struct sk_buff *skb;
	unsigned int len;

	skb = sch->ops->peek(sch);
	if (skb == NULL) {
		qdisc_warn_nonwc("qdisc_peek_len", sch);
		return 0;
	}
	len = qdisc_pkt_len(skb);

	return len;
}
static void
hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;

	qdisc_reset(cl->qdisc);
	qdisc_tree_decrease_qlen(cl->qdisc, len);
}

static void
hfsc_adjust_levels(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	unsigned int level;

	do {
		level = 0;
		list_for_each_entry(p, &cl->children, siblings) {
			if (p->level >= level)
				level = p->level + 1;
		}
		cl->level = level;
	} while ((cl = cl->cl_parent) != NULL);
}

static inline struct hfsc_class *
hfsc_find_class(u32 classid, struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct hfsc_class, cl_common);
}

static void
hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
		u64 cur_time)
{
	sc2isc(rsc, &cl->cl_rsc);
	rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}
	cl->cl_flags |= HFSC_RSC;
}

static void
hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
{
	sc2isc(fsc, &cl->cl_fsc);
	rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
	cl->cl_flags |= HFSC_FSC;
}

static void
hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
		u64 cur_time)
{
	sc2isc(usc, &cl->cl_usc);
	rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
	cl->cl_flags |= HFSC_USC;
}

static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
	[TCA_HFSC_RSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_FSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_USC]	= { .len = sizeof(struct tc_service_curve) },
};

static int
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
		  struct nlattr **tca, unsigned long *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)*arg;
	struct hfsc_class *parent = NULL;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_HFSC_MAX + 1];
	struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
	u64 cur_time;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_HFSC_MAX, opt, hfsc_policy);
	if (err < 0)
		return err;

	if (tb[TCA_HFSC_RSC]) {
		rsc = nla_data(tb[TCA_HFSC_RSC]);
		if (rsc->m1 == 0 && rsc->m2 == 0)
			rsc = NULL;
	}

	if (tb[TCA_HFSC_FSC]) {
		fsc = nla_data(tb[TCA_HFSC_FSC]);
		if (fsc->m1 == 0 && fsc->m2 == 0)
			fsc = NULL;
	}

	if (tb[TCA_HFSC_USC]) {
		usc = nla_data(tb[TCA_HFSC_USC]);
		if (usc->m1 == 0 && usc->m2 == 0)
			usc = NULL;
	}

	if (cl != NULL) {
		if (parentid) {
			if (cl->cl_parent &&
			    cl->cl_parent->cl_common.classid != parentid)
				return -EINVAL;
			if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
				return -EINVAL;
		}
		cur_time = psched_get_time();

		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
						    qdisc_root_sleeping_lock(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}

		sch_tree_lock(sch);
		if (rsc != NULL)
			hfsc_change_rsc(cl, rsc, cur_time);
		if (fsc != NULL)
			hfsc_change_fsc(cl, fsc);
		if (usc != NULL)
			hfsc_change_usc(cl, usc, cur_time);

		if (cl->qdisc->q.qlen != 0) {
			if (cl->cl_flags & HFSC_RSC)
				update_ed(cl, qdisc_peek_len(cl->qdisc));
			if (cl->cl_flags & HFSC_FSC)
				update_vf(cl, 0, cur_time);
		}
		sch_tree_unlock(sch);

		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EEXIST;

	parent = &q->root;
	if (parentid) {
		parent = hfsc_find_class(parentid, sch);
		if (parent == NULL)
			return -ENOENT;
	}

	if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
		return -EINVAL;
	if (hfsc_find_class(classid, sch))
		return -EEXIST;

	if (rsc == NULL && fsc == NULL)
		return -EINVAL;

	cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, &cl->rate_est,
					qdisc_root_sleeping_lock(sch),
					tca[TCA_RATE]);
		if (err) {
			kfree(cl);
			return err;
		}
	}

	if (rsc != NULL)
		hfsc_change_rsc(cl, rsc, 0);
	if (fsc != NULL)
		hfsc_change_fsc(cl, fsc);
	if (usc != NULL)
		hfsc_change_usc(cl, usc, 0);

	cl->cl_common.classid = classid;
	cl->refcnt    = 1;
	cl->sched     = q;
	cl->cl_parent = parent;
	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	INIT_LIST_HEAD(&cl->children);
	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
	list_add_tail(&cl->siblings, &parent->children);
	if (parent->level == 0)
		hfsc_purge_queue(sch, parent);
	hfsc_adjust_levels(parent);
	cl->cl_pcvtoff = parent->cl_cvtoff;
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}

static void
hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
	struct hfsc_sched *q = qdisc_priv(sch);

	tcf_destroy_chain(&cl->filter_list);
	qdisc_destroy(cl->qdisc);
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	if (cl != &q->root)
		kfree(cl);
}

static int
hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
		return -EBUSY;

	sch_tree_lock(sch);

	list_del(&cl->siblings);
	hfsc_adjust_levels(cl->cl_parent);

	hfsc_purge_queue(sch, cl);
	qdisc_class_hash_remove(&q->clhash, &cl->cl_common);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	sch_tree_unlock(sch);
	return 0;
}

static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *head, *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
	    (cl = hfsc_find_class(skb->priority, sch)) != NULL)
		if (cl->level == 0)
			return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	head = &q->root;
	tcf = q->root.filter_list;
	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		if ((cl = (struct hfsc_class *)res.class) == NULL) {
			if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
				break; /* filter selected invalid classid */
			if (cl->level >= head->level)
				break; /* filter may only point downwards */
		}

		if (cl->level == 0)
			return cl; /* hit leaf class */

		/* apply inner filter chain */
		tcf = cl->filter_list;
		head = cl;
	}

	/* classification failed, try default class */
	cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (cl == NULL || cl->level > 0)
		return NULL;

	return cl;
}
static int
hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		 struct Qdisc **old)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0)
		return -EINVAL;
	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					cl->cl_common.classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	sch_tree_lock(sch);
	hfsc_purge_queue(sch, cl);
	*old = cl->qdisc;
	cl->qdisc = new;
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *
hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level == 0)
		return cl->qdisc;

	return NULL;
}

static void
hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->qdisc->q.qlen == 0) {
		update_vf(cl, 0, 0);
		set_passive(cl);
	}
}

static unsigned long
hfsc_get_class(struct Qdisc *sch, u32 classid)
{
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL)
		cl->refcnt++;

	return (unsigned long)cl;
}

static void
hfsc_put_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (--cl->refcnt == 0)
		hfsc_destroy_class(sch, cl);
}

static unsigned long
hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	struct hfsc_class *p = (struct hfsc_class *)parent;
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL) {
		if (p != NULL && p->level <= cl->level)
			return 0;
		cl->filter_cnt++;
	}

	return (unsigned long)cl;
}

static void
hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	cl->filter_cnt--;
}

static struct tcf_proto **
hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl == NULL)
		cl = &q->root;

	return &cl->filter_list;
}

static int
hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
{
	struct tc_service_curve tsc;

	tsc.m1 = sm2m(sc->sm1);
	tsc.d  = dx2d(sc->dx);
	tsc.m2 = sm2m(sc->sm2);
	NLA_PUT(skb, attr, sizeof(tsc), &tsc);

	return skb->len;

 nla_put_failure:
	return -1;
}

static inline int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{
	if ((cl->cl_flags & HFSC_RSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_FSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_USC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
		goto nla_put_failure;

	return skb->len;

 nla_put_failure:
	return -1;
}

static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
		struct tcmsg *tcm)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->cl_common.classid :
					  TC_H_ROOT;
	tcm->tcm_handle = cl->cl_common.classid;
	if (cl->level == 0)
		tcm->tcm_info = cl->qdisc->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (hfsc_dump_curves(skb, cl) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest);
	return skb->len;

 nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int
hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		      struct gnet_dump *d)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct tc_hfsc_stats xstats;

	cl->qstats.qlen = cl->qdisc->q.qlen;
	xstats.level   = cl->level;
	xstats.period  = cl->cl_vtperiod;
	xstats.work    = cl->cl_total;
	xstats.rtwork  = cl->cl_cumul;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}



static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hlist_node *n;
	struct hfsc_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i],
				     cl_common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static void
hfsc_schedule_watchdog(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	u64 next_time = 0;

	if ((cl = eltree_get_minel(q)) != NULL)
		next_time = cl->cl_e;
	if (q->root.cl_cfmin != 0) {
		if (next_time == 0 || next_time > q->root.cl_cfmin)
			next_time = q->root.cl_cfmin;
	}
	WARN_ON(next_time == 0);
	qdisc_watchdog_schedule(&q->watchdog, next_time);
}

static int
hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;
	int err;

	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	q->defcls = qopt->defcls;
	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);

	q->root.cl_common.classid = sch->handle;
	q->root.refcnt  = 1;
	q->root.sched   = q;
	q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					  sch->handle);
	if (q->root.qdisc == NULL)
		q->root.qdisc = &noop_qdisc;
	INIT_LIST_HEAD(&q->root.children);
	q->root.vt_tree = RB_ROOT;
	q->root.cf_tree = RB_ROOT;

	qdisc_class_hash_insert(&q->clhash, &q->root.cl_common);
	qdisc_class_hash_grow(sch, &q->clhash);

	qdisc_watchdog_init(&q->watchdog, sch);

	return 0;
}

static int
hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;

	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	sch_tree_lock(sch);
	q->defcls = qopt->defcls;
	sch_tree_unlock(sch);

	return 0;
}
static void
hfsc_reset_class(struct hfsc_class *cl)
{
	cl->cl_total        = 0;
	cl->cl_cumul        = 0;
	cl->cl_d            = 0;
	cl->cl_e            = 0;
	cl->cl_vt           = 0;
	cl->cl_vtadj        = 0;
	cl->cl_vtoff        = 0;
	cl->cl_cvtmin       = 0;
	cl->cl_cvtmax       = 0;
	cl->cl_cvtoff       = 0;
	cl->cl_pcvtoff      = 0;
	cl->cl_vtperiod     = 0;
	cl->cl_parentperiod = 0;
	cl->cl_f            = 0;
	cl->cl_myf          = 0;
	cl->cl_myfadj       = 0;
	cl->cl_cfmin        = 0;
	cl->cl_nactive      = 0;

	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;
	qdisc_reset(cl->qdisc);

	if (cl->cl_flags & HFSC_RSC)
		rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
	if (cl->cl_flags & HFSC_FSC)
		rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
	if (cl->cl_flags & HFSC_USC)
		rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
}

static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct hlist_node *n;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
			hfsc_reset_class(cl);
	}
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);
	qdisc_watchdog_cancel(&q->watchdog);
	sch->q.qlen = 0;
}

static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hlist_node *n, *next;
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
			tcf_destroy_chain(&cl->filter_list);
	}
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
					  cl_common.hnode)
			hfsc_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
	qdisc_watchdog_cancel(&q->watchdog);
}

static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_hfsc_qopt qopt;

	qopt.defcls = q->defcls;
	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
	return skb->len;

 nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hfsc_class *cl;
	int uninitialized_var(err);

	cl = hfsc_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return err;
	}

	err = qdisc_enqueue(skb, cl->qdisc);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			sch->qstats.drops++;
		}
		return err;
	}

	if (cl->qdisc->q.qlen == 1)
		set_active(cl, qdisc_pkt_len(skb));

	bstats_update(&cl->bstats, skb);
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}

static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct sk_buff *skb;
	u64 cur_time;
	unsigned int next_len;
	int realtime = 0;

	if (sch->q.qlen == 0)
		return NULL;

	cur_time = psched_get_time();

	/*
	 * if there are eligible classes, use real-time criteria.
	 * find the class with the minimum deadline among
	 * the eligible classes.
	 */
	if ((cl = eltree_get_mindl(q, cur_time)) != NULL) {
		realtime = 1;
	} else {
		/*
		 * use link-sharing criteria
		 * get the class with the minimum vt in the hierarchy
		 */
		cl = vttree_get_minvt(&q->root, cur_time);
		if (cl == NULL) {
			sch->qstats.overlimits++;
			hfsc_schedule_watchdog(sch);
			return NULL;
		}
	}

	skb = qdisc_dequeue_peeked(cl->qdisc);
	if (skb == NULL) {
		qdisc_warn_nonwc("HFSC", cl->qdisc);
		return NULL;
	}

	update_vf(cl, qdisc_pkt_len(skb), cur_time);
	if (realtime)
		cl->cl_cumul += qdisc_pkt_len(skb);

	if (cl->qdisc->q.qlen != 0) {
		if (cl->cl_flags & HFSC_RSC) {
			/* update ed */
			next_len = qdisc_peek_len(cl->qdisc);
			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

	sch->flags &= ~TCQ_F_THROTTLED;
	qdisc_bstats_update(sch, skb);
	sch->q.qlen--;

	return skb;
}

static unsigned int
hfsc_drop(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int len;

	list_for_each_entry(cl, &q->droplist, dlist) {
		if (cl->qdisc->ops->drop != NULL &&
		    (len = cl->qdisc->ops->drop(cl->qdisc)) > 0) {
			if (cl->qdisc->q.qlen == 0) {
				update_vf(cl, 0, 0);
				set_passive(cl);
			} else {
				list_move_tail(&cl->dlist, &q->droplist);
			}
			cl->qstats.drops++;
			sch->qstats.drops++;
			sch->q.qlen--;
			return len;
		}
	}
	return 0;
}

static const struct Qdisc_class_ops hfsc_class_ops = {
	.change		= hfsc_change_class,
	.delete		= hfsc_delete_class,
	.graft		= hfsc_graft_class,
	.leaf		= hfsc_class_leaf,
	.qlen_notify	= hfsc_qlen_notify,
	.get		= hfsc_get_class,
	.put		= hfsc_put_class,
	.bind_tcf	= hfsc_bind_tcf,
	.unbind_tcf	= hfsc_unbind_tcf,
	.tcf_chain	= hfsc_tcf_chain,
	.dump		= hfsc_dump_class,
	.dump_stats	= hfsc_dump_class_stats,
	.walk		= hfsc_walk
};

static struct Qdisc_ops hfsc_qdisc_ops __read_mostly = {
	.id		= "hfsc",
	.init		= hfsc_init_qdisc,
	.change		= hfsc_change_qdisc,
	.reset		= hfsc_reset_qdisc,
	.destroy	= hfsc_destroy_qdisc,
	.dump		= hfsc_dump_qdisc,
	.enqueue	= hfsc_enqueue,
	.dequeue	= hfsc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.drop		= hfsc_drop,
	.cl_ops		= &hfsc_class_ops,
	.priv_size	= sizeof(struct hfsc_sched),
	.owner		= THIS_MODULE
};

static int __init
hfsc_init(void)
{
	return register_qdisc(&hfsc_qdisc_ops);
}

static void __exit
hfsc_cleanup(void)
{
	unregister_qdisc(&hfsc_qdisc_ops);
}

MODULE_LICENSE("GPL");
module_init(hfsc_init);
module_exit(hfsc_cleanup);