/*
 * net/sched/sch_cbq.c	Class-Based Queueing discipline.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>


/*	Class-Based Queueing (CBQ) algorithm.
	=======================================

	Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
		 Management Models for Packet Networks",
		 IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995

		 [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995

		 [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
		 Parameters", 1996

		 [4] Sally Floyd and Michael Speer, "Experimental Results
		 for Class-Based Queueing", 1998, not published.

	-----------------------------------------------------------------------

	The algorithm skeleton was taken from the NS simulator cbq.cc.
	If someone wants to check this code against the LBL version,
	he should take into account that ONLY the skeleton was borrowed;
	the implementation is different. Particularly:

	--- The WRR algorithm is different. Our version looks more
	reasonable (I hope) and works when quanta are allowed to be
	less than MTU, which is always the case when real time classes
	have small rates. Note that the statement of [3] is
	incomplete; delay may actually be estimated even if class
	per-round allotment is less than MTU. Namely, if per-round
	allotment is W*r_i, and r_1+...+r_k = r < 1

	delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B

	In the worst case we have the IntServ estimate with D = W*r+k*MTU
	and C = MTU*r. The proof (if correct at all) is trivial.


	--- It seems that cbq-2.0 is not very accurate. At least, I cannot
	interpret some places, which look like wrong translations
	from NS. Anyone is advised to find these differences
	and explain to me, why I am wrong 8).

	--- Linux has no EOI event, so that we cannot estimate true class
	idle time. The workaround is to consider the next dequeue event
	as a sign that the previous packet is finished. This is wrong because of
	internal device queueing, but on a permanently loaded link it is true.
	Moreover, combined with the clock integrator, this scheme looks
	very close to an ideal solution.  */

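/* A hedged worked instance of the bound above (illustrative only, not
   from the original text): take MTU = 1500 bytes, link rate B = 10 Mbit/s,
   k = 2 classes with r_1 = r_2 = 0.25 (so r = 0.5) and a round of
   W = 4000 bytes. Class 1's per-round allotment W*r_1 = 1000 bytes is
   below one MTU, yet

	delay_1 <= (ceil(1500/1000)*2000 + 2000 + 2*1500) bytes / B
		 = 9000 bytes / 10 Mbit/s ~= 7.2 ms,

   so the delay stays bounded even with sub-MTU quanta, as claimed. */
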
struct cbq_sched_data;


struct cbq_class {
	struct Qdisc_class_common common;
	struct cbq_class	*next_alive;	/* next class with backlog in this priority band */

/* Parameters */
	unsigned char		priority;	/* class priority */
	unsigned char		priority2;	/* priority to be used after overlimit */
	unsigned char		ewma_log;	/* time constant for idle time calculation */
	unsigned char		ovl_strategy;
#ifdef CONFIG_NET_CLS_ACT
	unsigned char		police;
#endif

	u32			defmap;

	/* Link-sharing scheduler parameters */
	long			maxidle;	/* Class parameters: see below. */
	long			offtime;
	long			minidle;
	u32			avpkt;
	struct qdisc_rate_table	*R_tab;

	/* Overlimit strategy parameters */
	void			(*overlimit)(struct cbq_class *cl);
	psched_tdiff_t		penalty;

	/* General scheduler (WRR) parameters */
	long			allot;
	long			quantum;	/* Allotment per WRR round */
	long			weight;		/* Relative allotment: see below */

	struct Qdisc		*qdisc;		/* Ptr to CBQ discipline */
	struct cbq_class	*split;		/* Ptr to split node */
	struct cbq_class	*share;		/* Ptr to LS parent in the class tree */
	struct cbq_class	*tparent;	/* Ptr to tree parent in the class tree */
	struct cbq_class	*borrow;	/* NULL if class is bandwidth limited;
						   parent otherwise */
	struct cbq_class	*sibling;	/* Sibling chain */
	struct cbq_class	*children;	/* Pointer to children chain */

	struct Qdisc		*q;		/* Elementary queueing discipline */


/* Variables */
	unsigned char		cpriority;	/* Effective priority */
	unsigned char		delayed;
	unsigned char		level;		/* level of the class in hierarchy:
						   0 for leaf classes, and maximal
						   level of children + 1 for nodes.
						 */

	psched_time_t		last;		/* Last end of service */
	psched_time_t		undertime;
	long			avgidle;
	long			deficit;	/* Saved deficit for WRR */
	psched_time_t		penalized;
	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est64 rate_est;
	struct tc_cbq_xstats	xstats;

	struct tcf_proto	*filter_list;

	int			refcnt;
	int			filters;

	struct cbq_class	*defaults[TC_PRIO_MAX + 1];
};

struct cbq_sched_data {
	struct Qdisc_class_hash	clhash;			/* Hash table of all classes */
	int			nclasses[TC_CBQ_MAXPRIO + 1];
	unsigned int		quanta[TC_CBQ_MAXPRIO + 1];

	struct cbq_class	link;

	unsigned int		activemask;
	struct cbq_class	*active[TC_CBQ_MAXPRIO + 1];	/* List of all classes
								   with backlog */

#ifdef CONFIG_NET_CLS_ACT
	struct cbq_class	*rx_class;
#endif
	struct cbq_class	*tx_class;
	struct cbq_class	*tx_borrowed;
	int			tx_len;
	psched_time_t		now;		/* Cached timestamp */
	psched_time_t		now_rt;		/* Cached real time */
	unsigned int		pmask;

	struct hrtimer		delay_timer;
	struct qdisc_watchdog	watchdog;	/* Watchdog timer,
						   started when CBQ has
						   backlog, but cannot
						   transmit just now */
	psched_tdiff_t		wd_expires;
	int			toplevel;
	u32			hgenerator;
};


#define L2T(cl, len)	qdisc_l2t((cl)->R_tab, len)

static inline struct cbq_class *
cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
{
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct cbq_class, common);
}

#ifdef CONFIG_NET_CLS_ACT

static struct cbq_class *
cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
{
	struct cbq_class *cl;

	for (cl = this->tparent; cl; cl = cl->tparent) {
		struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];

		if (new != NULL && new != this)
			return new;
	}
	return NULL;
}

#endif

/* Classify packet. The procedure is pretty complicated, but
 * it allows us to combine link sharing and priority scheduling
 * transparently.
 *
 * Namely, you can put link sharing rules (e.g. route based) at the root of CBQ,
 * so that it resolves to split nodes. Then packets are classified
 * by logical priority, or a more specific classifier may be attached
 * to the split node.
 */

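/* A hedged walk-through of the steps below (hypothetical handles, for
   illustration only): suppose the qdisc handle is 1: and skb->priority
   is 1:10 -- step 1 returns class 1:10 directly. If instead priority is
   unset, the root filter list runs; a filter returning classid 1:2
   (a split node, level > 0) sends us around the loop with head = 1:2,
   where that node's own filters or its defaults[] map -- indexed by
   res.classid & TC_PRIO_MAX -- pick the leaf, falling back to the
   TC_PRIO_BESTEFFORT default when nothing matches. */
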
static struct cbq_class *
cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *head = &q->link;
	struct cbq_class **defmap;
	struct cbq_class *cl = NULL;
	u32 prio = skb->priority;
	struct tcf_result res;

	/*
	 *  Step 1. If skb->priority points to one of our classes, use it.
	 */
	if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
	    (cl = cbq_class_lookup(q, prio)) != NULL)
		return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	for (;;) {
		int result = 0;
		defmap = head->defaults;

		/*
		 * Step 2+n. Apply classifier.
		 */
		if (!head->filter_list ||
		    (result = tc_classify_compat(skb, head->filter_list, &res)) < 0)
			goto fallback;

		cl = (void *)res.class;
		if (!cl) {
			if (TC_H_MAJ(res.classid))
				cl = cbq_class_lookup(q, res.classid);
			else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
				cl = defmap[TC_PRIO_BESTEFFORT];

			if (cl == NULL)
				goto fallback;
		}
		if (cl->level >= head->level)
			goto fallback;
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return NULL;
		case TC_ACT_RECLASSIFY:
			return cbq_reclassify(skb, cl);
		}
#endif
		if (cl->level == 0)
			return cl;

		/*
		 * Step 3+n. If classifier selected a link sharing class,
		 * apply the agency-specific classifier.
		 * Repeat this procedure until we hit a leaf node.
		 */
		head = cl;
	}

fallback:
	cl = head;

	/*
	 * Step 4. No success...
	 */
	if (TC_H_MAJ(prio) == 0 &&
	    !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
	    !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
		return head;

	return cl;
}

/*
 * A packet has just been enqueued on an empty class.
 * cbq_activate_class adds it to the tail of the active class list
 * of its priority band.
 */

static inline void cbq_activate_class(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	int prio = cl->cpriority;
	struct cbq_class *cl_tail;

	cl_tail = q->active[prio];
	q->active[prio] = cl;

	if (cl_tail != NULL) {
		cl->next_alive = cl_tail->next_alive;
		cl_tail->next_alive = cl;
	} else {
		cl->next_alive = cl;
		q->activemask |= (1<<prio);
	}
}

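/* A note on the list shape above (an explanatory aside, not from the
   original source): q->active[prio] points at the TAIL of a circular,
   singly linked ring threaded through next_alive, so inserting after the
   old tail and then making the new class the tail is O(1), and a
   one-element ring is simply a class linked to itself. */
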
/*
 * Unlink class from active chain.
 * Note that this same procedure is done directly in cbq_dequeue*
 * during round-robin procedure.
 */

static void cbq_deactivate_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	int prio = this->cpriority;
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];

	do {
		cl = cl_prev->next_alive;
		if (cl == this) {
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;

			if (cl == q->active[prio]) {
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					q->active[prio] = NULL;
					q->activemask &= ~(1<<prio);
					return;
				}
			}
			return;
		}
	} while ((cl_prev = cl) != q->active[prio]);
}

static void
cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
{
	int toplevel = q->toplevel;

	if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) {
		psched_time_t now;
		psched_tdiff_t incr;

		now = psched_get_time();
		incr = now - q->now_rt;
		now = q->now + incr;

		do {
			if (cl->undertime < now) {
				q->toplevel = cl->level;
				return;
			}
		} while ((cl = cl->borrow) != NULL && toplevel > cl->level);
	}
}

static int
cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	int uninitialized_var(ret);
	struct cbq_class *cl = cbq_classify(skb, sch, &ret);

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = cl;
#endif
	if (cl == NULL) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}

#ifdef CONFIG_NET_CLS_ACT
	cl->q->__parent = sch;
#endif
	ret = qdisc_enqueue(skb, cl->q);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		cbq_mark_toplevel(q, cl);
		if (!cl->next_alive)
			cbq_activate_class(cl);
		return ret;
	}

	if (net_xmit_drop_count(ret)) {
		sch->qstats.drops++;
		cbq_mark_toplevel(q, cl);
		cl->qstats.drops++;
	}
	return ret;
}

/* Overlimit actions */

/* TC_CBQ_OVL_CLASSIC: (default) penalize leaf class by adding offtime */

static void cbq_ovl_classic(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	psched_tdiff_t delay = cl->undertime - q->now;

	if (!cl->delayed) {
		delay += cl->offtime;

		/*
		 * Class goes to sleep, so that it will have no
		 * chance to work avgidle. Let's forgive it 8)
		 *
		 * BTW cbq-2.0 has a bug in this
		 * place, apparently they forgot to shift it by cl->ewma_log.
		 */
		if (cl->avgidle < 0)
			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
		if (cl->avgidle < cl->minidle)
			cl->avgidle = cl->minidle;
		if (delay <= 0)
			delay = 1;
		cl->undertime = q->now + delay;

		cl->xstats.overactions++;
		cl->delayed = 1;
	}
	if (q->wd_expires == 0 || q->wd_expires > delay)
		q->wd_expires = delay;

	/* Dirty work! We must schedule wakeups based on
	 * real available rate, rather than leaf rate,
	 * which may be tiny (even zero).
	 */
	if (q->toplevel == TC_CBQ_MAXLEVEL) {
		struct cbq_class *b;
		psched_tdiff_t base_delay = q->wd_expires;

		for (b = cl->borrow; b; b = b->borrow) {
			delay = b->undertime - q->now;
			if (delay < base_delay) {
				if (delay <= 0)
					delay = 1;
				base_delay = delay;
			}
		}

		q->wd_expires = base_delay;
	}
}

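/* A hedged reading of the sleep-time math above (illustrative, not from
   the original source): with W = 2^-ewma_log, the line

	delay -= (-avgidle) - ((-avgidle) >> ewma_log);

   subtracts (1 - W) * (-avgidle) from the delay, i.e. roughly the time
   the negative EWMA would need to decay back to zero -- the "forgiveness"
   the comment mentions, since a sleeping class cannot earn idle credit. */
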
/* TC_CBQ_OVL_RCLASSIC: penalize classes in the hierarchy by offtime
 * when they go overlimit
 */

static void cbq_ovl_rclassic(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *this = cl;

	do {
		if (cl->level > q->toplevel) {
			cl = NULL;
			break;
		}
	} while ((cl = cl->borrow) != NULL);

	if (cl == NULL)
		cl = this;
	cbq_ovl_classic(cl);
}

/* TC_CBQ_OVL_DELAY: delay until the class goes underlimit */

static void cbq_ovl_delay(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	psched_tdiff_t delay = cl->undertime - q->now;

	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc_root_sleeping(cl->qdisc)->state))
		return;

	if (!cl->delayed) {
		psched_time_t sched = q->now;
		ktime_t expires;

		delay += cl->offtime;
		if (cl->avgidle < 0)
			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
		if (cl->avgidle < cl->minidle)
			cl->avgidle = cl->minidle;
		cl->undertime = q->now + delay;

		if (delay > 0) {
			sched += delay + cl->penalty;
			cl->penalized = sched;
			cl->cpriority = TC_CBQ_MAXPRIO;
			q->pmask |= (1<<TC_CBQ_MAXPRIO);

			expires = ns_to_ktime(PSCHED_TICKS2NS(sched));
			if (hrtimer_try_to_cancel(&q->delay_timer) &&
			    ktime_to_ns(ktime_sub(
					hrtimer_get_expires(&q->delay_timer),
					expires)) > 0)
				hrtimer_set_expires(&q->delay_timer, expires);
			hrtimer_restart(&q->delay_timer);
			cl->delayed = 1;
			cl->xstats.overactions++;
			return;
		}
		delay = 1;
	}
	if (q->wd_expires == 0 || q->wd_expires > delay)
		q->wd_expires = delay;
}

/* TC_CBQ_OVL_LOWPRIO: penalize class by lowering its priority band */

static void cbq_ovl_lowprio(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);

	cl->penalized = q->now + cl->penalty;

	if (cl->cpriority != cl->priority2) {
		cl->cpriority = cl->priority2;
		q->pmask |= (1<<cl->cpriority);
		cl->xstats.overactions++;
	}
	cbq_ovl_classic(cl);
}

/* TC_CBQ_OVL_DROP: penalize class by dropping */

static void cbq_ovl_drop(struct cbq_class *cl)
{
	if (cl->q->ops->drop)
		if (cl->q->ops->drop(cl->q))
			cl->qdisc->q.qlen--;
	cl->xstats.overactions++;
	cbq_ovl_classic(cl);
}

static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
				       psched_time_t now)
{
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];
	psched_time_t sched = now;

	if (cl_prev == NULL)
		return 0;

	do {
		cl = cl_prev->next_alive;
		if (now - cl->penalized > 0) {
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;
			cl->cpriority = cl->priority;
			cl->delayed = 0;
			cbq_activate_class(cl);

			if (cl == q->active[prio]) {
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					q->active[prio] = NULL;
					return 0;
				}
			}

			cl = cl_prev->next_alive;
		} else if (sched - cl->penalized > 0)
			sched = cl->penalized;
	} while ((cl_prev = cl) != q->active[prio]);

	return sched - now;
}

static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
{
	struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
						delay_timer);
	struct Qdisc *sch = q->watchdog.qdisc;
	psched_time_t now;
	psched_tdiff_t delay = 0;
	unsigned int pmask;

	now = psched_get_time();

	pmask = q->pmask;
	q->pmask = 0;

	while (pmask) {
		int prio = ffz(~pmask);
		psched_tdiff_t tmp;

		pmask &= ~(1<<prio);

		tmp = cbq_undelay_prio(q, prio, now);
		if (tmp > 0) {
			q->pmask |= 1<<prio;
			if (tmp < delay || delay == 0)
				delay = tmp;
		}
	}

	if (delay) {
		ktime_t time;

		time = ktime_set(0, 0);
		time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
		hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS);
	}

	qdisc_unthrottled(sch);
	__netif_schedule(qdisc_root(sch));
	return HRTIMER_NORESTART;
}

#ifdef CONFIG_NET_CLS_ACT
static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
{
	struct Qdisc *sch = child->__parent;
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = q->rx_class;

	q->rx_class = NULL;

	if (cl && (cl = cbq_reclassify(skb, cl)) != NULL) {
		int ret;

		cbq_mark_toplevel(q, cl);

		q->rx_class = cl;
		cl->q->__parent = sch;

		ret = qdisc_enqueue(skb, cl->q);
		if (ret == NET_XMIT_SUCCESS) {
			sch->q.qlen++;
			if (!cl->next_alive)
				cbq_activate_class(cl);
			return 0;
		}
		if (net_xmit_drop_count(ret))
			sch->qstats.drops++;
		return 0;
	}

	sch->qstats.drops++;
	return -1;
}
#endif

/*
 * This is a mission-critical procedure.
 *
 * We "regenerate" the toplevel cutoff if the transmitting class
 * has backlog and is not regulated. It is not part of the
 * original CBQ description, but looks more reasonable.
 * Probably, it is wrong. This question needs further investigation.
 */

static inline void
cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
		    struct cbq_class *borrowed)
{
	if (cl && q->toplevel >= borrowed->level) {
		if (cl->q->q.qlen > 1) {
			do {
				if (borrowed->undertime == PSCHED_PASTPERFECT) {
					q->toplevel = borrowed->level;
					return;
				}
			} while ((borrowed = borrowed->borrow) != NULL);
		}
#if 0
	/* It is not necessary now. Uncommenting it
	   will save CPU cycles, but decrease fairness.
	 */
		q->toplevel = TC_CBQ_MAXLEVEL;
#endif
	}
}

static void
cbq_update(struct cbq_sched_data *q)
{
	struct cbq_class *this = q->tx_class;
	struct cbq_class *cl = this;
	int len = q->tx_len;

	q->tx_class = NULL;

	for ( ; cl; cl = cl->share) {
		long avgidle = cl->avgidle;
		long idle;

		cl->bstats.packets++;
		cl->bstats.bytes += len;

		/*
		 * (now - last) is total time between packet right edges.
		 * (last_pktlen/rate) is "virtual" busy time, so that
		 *
		 * idle = (now - last) - last_pktlen/rate
		 */

		idle = q->now - cl->last;
		if ((unsigned long)idle > 128*1024*1024) {
			avgidle = cl->maxidle;
		} else {
			idle -= L2T(cl, len);

		/* true_avgidle := (1-W)*true_avgidle + W*idle,
		 * where W=2^{-ewma_log}. But cl->avgidle is scaled:
		 * cl->avgidle == true_avgidle/W,
		 * hence:
		 */
			avgidle += idle - (avgidle>>cl->ewma_log);
		}

		if (avgidle <= 0) {
			/* Overlimit or at-limit */

			if (avgidle < cl->minidle)
				avgidle = cl->minidle;

			cl->avgidle = avgidle;

			/* Calculate expected time, when this class
			 * will be allowed to send.
			 * It will occur, when:
			 * (1-W)*true_avgidle + W*delay = 0, i.e.
			 * idle = (1/W - 1)*(-true_avgidle)
			 * or
			 * idle = (1 - W)*(-cl->avgidle);
			 */
			idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);

			/*
			 * That is not all.
			 * To maintain the rate allocated to the class,
			 * we add to undertime virtual clock,
			 * necessary to complete transmitted packet.
			 * (len/phys_bandwidth has been already passed
			 * to the moment of cbq_update)
			 */

			idle -= L2T(&q->link, len);
			idle += L2T(cl, len);

			cl->undertime = q->now + idle;
		} else {
			/* Underlimit */

			cl->undertime = PSCHED_PASTPERFECT;
			if (avgidle > cl->maxidle)
				cl->avgidle = cl->maxidle;
			else
				cl->avgidle = avgidle;
		}
		cl->last = q->now;
	}

	cbq_update_toplevel(q, this, q->tx_borrowed);
}

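/* A hedged numeric check of the scaled EWMA above (illustrative only):
   with ewma_log = 2, W = 1/4, and cl->avgidle storing true_avgidle/W,
   one update

	avgidle += idle - (avgidle >> ewma_log);

   from avgidle = 40 (true_avgidle = 10) with idle = 2 yields
   40 + 2 - 10 = 32, i.e. true_avgidle = 8, which equals
   (1 - 1/4)*10 + (1/4)*2 -- exactly the EWMA recurrence, with no
   divisions on the fast path. */
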
static inline struct cbq_class *
cbq_under_limit(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *this_cl = cl;

	if (cl->tparent == NULL)
		return cl;

	if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
		cl->delayed = 0;
		return cl;
	}

	do {
		/* This is a very suspicious place. Now the overlimit
		 * action is generated for not bounded classes
		 * only if the link is completely congested.
		 * Though it agrees with the ancestor-only paradigm,
		 * it looks very stupid. Particularly,
		 * it means that this chunk of code will either
		 * never be called or result in strong amplification
		 * of burstiness. Dangerous, silly, and, however,
		 * no other solution exists.
		 */
		cl = cl->borrow;
		if (!cl) {
			this_cl->qstats.overlimits++;
			this_cl->overlimit(this_cl);
			return NULL;
		}
		if (cl->level > q->toplevel)
			return NULL;
	} while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);

	cl->delayed = 0;
	return cl;
}

static inline struct sk_buff *
cbq_dequeue_prio(struct Qdisc *sch, int prio)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl_tail, *cl_prev, *cl;
	struct sk_buff *skb;
	int deficit;

	cl_tail = cl_prev = q->active[prio];
	cl = cl_prev->next_alive;

	do {
		deficit = 0;

		/* Start round */
		do {
			struct cbq_class *borrow = cl;

			if (cl->q->q.qlen &&
			    (borrow = cbq_under_limit(cl)) == NULL)
				goto skip_class;

			if (cl->deficit <= 0) {
				/* Class exhausted its allotment per
				 * this round. Switch to the next one.
				 */
				deficit = 1;
				cl->deficit += cl->quantum;
				goto next_class;
			}

			skb = cl->q->dequeue(cl->q);

			/* Class did not give us any skb :-(
			 * It could occur even if cl->q->q.qlen != 0,
			 * e.g. if cl->q == "tbf"
			 */
			if (skb == NULL)
				goto skip_class;

			cl->deficit -= qdisc_pkt_len(skb);
			q->tx_class = cl;
			q->tx_borrowed = borrow;
			if (borrow != cl) {
#ifndef CBQ_XSTATS_BORROWS_BYTES
				borrow->xstats.borrows++;
				cl->xstats.borrows++;
#else
				borrow->xstats.borrows += qdisc_pkt_len(skb);
				cl->xstats.borrows += qdisc_pkt_len(skb);
#endif
			}
			q->tx_len = qdisc_pkt_len(skb);

			if (cl->deficit <= 0) {
				q->active[prio] = cl;
				cl = cl->next_alive;
				cl->deficit += cl->quantum;
			}
			return skb;

skip_class:
			if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
				/* Class is empty or penalized.
				 * Unlink it from active chain.
				 */
				cl_prev->next_alive = cl->next_alive;
				cl->next_alive = NULL;

				/* Did cl_tail point to it? */
				if (cl == cl_tail) {
					/* Repair it! */
					cl_tail = cl_prev;

					/* Was it the last class in this band? */
					if (cl == cl_tail) {
						/* Kill the band! */
						q->active[prio] = NULL;
						q->activemask &= ~(1<<prio);
						if (cl->q->q.qlen)
							cbq_activate_class(cl);
						return NULL;
					}

					q->active[prio] = cl_tail;
				}
				if (cl->q->q.qlen)
					cbq_activate_class(cl);

				cl = cl_prev;
			}

next_class:
			cl_prev = cl;
			cl = cl->next_alive;
		} while (cl_prev != cl_tail);
	} while (deficit);

	q->active[prio] = cl_prev;

	return NULL;
}

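/* A hedged trace of the deficit accounting above (hypothetical numbers):
   a class with quantum = 1000 and deficit = 1000 can send a 600-byte
   packet (deficit -> 400) and then a 700-byte one (deficit -> -300);
   once deficit is <= 0 the class is passed over and topped up by its
   quantum (-300 -> 700) for a later round, so quanta smaller than the
   MTU still make progress -- they just spread one packet's cost over
   several rounds. */
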
static inline struct sk_buff *
cbq_dequeue_1(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int activemask;

	activemask = q->activemask & 0xFF;
	while (activemask) {
		int prio = ffz(~activemask);
		activemask &= ~(1<<prio);
		skb = cbq_dequeue_prio(sch, prio);
		if (skb)
			return skb;
	}
	return NULL;
}

static struct sk_buff *
cbq_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct cbq_sched_data *q = qdisc_priv(sch);
	psched_time_t now;
	psched_tdiff_t incr;

	now = psched_get_time();
	incr = now - q->now_rt;

	if (q->tx_class) {
		psched_tdiff_t incr2;
		/* Time integrator. We calculate EOS time
		 * by adding expected packet transmission time.
		 * If real time is greater, we warp artificial clock,
		 * so that:
		 *
		 * cbq_time = max(real_time, work);
		 */
		incr2 = L2T(&q->link, q->tx_len);
		q->now += incr2;
		cbq_update(q);
		if ((incr -= incr2) < 0)
			incr = 0;
		q->now += incr;
	} else {
		if (now > q->now)
			q->now = now;
	}
	q->now_rt = now;

	for (;;) {
		q->wd_expires = 0;

		skb = cbq_dequeue_1(sch);
		if (skb) {
			qdisc_bstats_update(sch, skb);
			sch->q.qlen--;
			qdisc_unthrottled(sch);
			return skb;
		}

		/* All the classes are overlimit.
		 *
		 * It is possible, if:
		 *
		 * 1. Scheduler is empty.
		 * 2. Toplevel cutoff inhibited borrowing.
		 * 3. Root class is overlimit.
		 *
		 * Reset the 2nd and 3rd conditions and retry.
		 *
		 * Note that NS and cbq-2.0 are buggy: peeking
		 * an arbitrary class is appropriate for ancestor-only
		 * sharing, but not for the toplevel algorithm.
		 *
		 * Our version is better but slower, because it requires
		 * two passes; that is unavoidable with top-level sharing.
		 */

		if (q->toplevel == TC_CBQ_MAXLEVEL &&
		    q->link.undertime == PSCHED_PASTPERFECT)
			break;

		q->toplevel = TC_CBQ_MAXLEVEL;
		q->link.undertime = PSCHED_PASTPERFECT;
	}

	/* No packets in scheduler or nobody wants to give them to us :-(
	 * Sigh... start watchdog timer in the last case.
	 */

	if (sch->q.qlen) {
		sch->qstats.overlimits++;
		if (q->wd_expires)
			qdisc_watchdog_schedule(&q->watchdog,
						now + q->wd_expires);
	}
	return NULL;
}

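/* A hedged example of the clock integrator above (illustrative only):
   if the previous packet was 1500 bytes and L2T(&q->link, 1500) comes
   to 1.2 ms of virtual transmission time, q->now first jumps forward by
   that amount; if the wall clock has meanwhile advanced 2.0 ms, the
   remaining 0.8 ms is added too, so q->now tracks
   max(real time, accumulated work), matching the comment in
   cbq_dequeue. */
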
/* CBQ class maintenance routines */

static void cbq_adjust_levels(struct cbq_class *this)
{
	if (this == NULL)
		return;

	do {
		int level = 0;
		struct cbq_class *cl;

		cl = this->children;
		if (cl) {
			do {
				if (cl->level > level)
					level = cl->level;
			} while ((cl = cl->sibling) != this->children);
		}
		this->level = level + 1;
	} while ((this = this->tparent) != NULL);
}

static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
{
	struct cbq_class *cl;
	unsigned int h;

	if (q->quanta[prio] == 0)
		return;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			/* BUGGGG... Beware! This expression suffers from
			 * arithmetic overflows!
			 */
			if (cl->priority == prio) {
				cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
					q->quanta[prio];
			}
			if (cl->quantum <= 0 ||
			    cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) {
				pr_warning("CBQ: class %08x has bad quantum==%ld, repaired.\n",
					   cl->common.classid, cl->quantum);
				cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
			}
		}
	}
}

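/* A hedged overflow example for the warning above (illustrative only):
   weight defaults to the class rate in bytes/sec, so for a 100 Mbit/s
   class weight ~= 12500000; with allot = 1514 and two classes in the
   band, the product 12500000 * 1514 * 2 ~= 3.8e10 overflows a 32-bit
   long before the division -- hence the "BUGGGG" note and the quantum
   repair that follows it. */
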
static void cbq_sync_defmap(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *split = cl->split;
	unsigned int h;
	int i;

	if (split == NULL)
		return;

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
			split->defaults[i] = NULL;
	}

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		int level = split->level;

		if (split->defaults[i])
			continue;

		for (h = 0; h < q->clhash.hashsize; h++) {
			struct cbq_class *c;

			hlist_for_each_entry(c, &q->clhash.hash[h],
					     common.hnode) {
				if (c->split == split && c->level < level &&
				    c->defmap & (1<<i)) {
					split->defaults[i] = c;
					level = c->level;
				}
			}
		}
	}
}

static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask)
{
	struct cbq_class *split = NULL;

	if (splitid == 0) {
		split = cl->split;
		if (!split)
			return;
		splitid = split->common.classid;
	}

	if (split == NULL || split->common.classid != splitid) {
		for (split = cl->tparent; split; split = split->tparent)
			if (split->common.classid == splitid)
				break;
	}

	if (split == NULL)
		return;

	if (cl->split != split) {
		cl->defmap = 0;
		cbq_sync_defmap(cl);
		cl->split = split;
		cl->defmap = def & mask;
	} else
		cl->defmap = (cl->defmap & ~mask) | (def & mask);

	cbq_sync_defmap(cl);
}

static void cbq_unlink_class(struct cbq_class *this)
{
	struct cbq_class *cl, **clp;
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);

	qdisc_class_hash_remove(&q->clhash, &this->common);

	if (this->tparent) {
		clp = &this->sibling;
		cl = *clp;
		do {
			if (cl == this) {
				*clp = cl->sibling;
				break;
			}
			clp = &cl->sibling;
		} while ((cl = *clp) != this->sibling);

		if (this->tparent->children == this) {
			this->tparent->children = this->sibling;
			if (this->sibling == this)
				this->tparent->children = NULL;
		}
	} else {
		WARN_ON(this->sibling != this);
	}
}

static void cbq_link_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	struct cbq_class *parent = this->tparent;

	this->sibling = this;
	qdisc_class_hash_insert(&q->clhash, &this->common);

	if (parent == NULL)
		return;

	if (parent->children == NULL) {
		parent->children = this;
	} else {
		this->sibling = parent->children->sibling;
		parent->children->sibling = this;
	}
}

static unsigned int cbq_drop(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl, *cl_head;
	int prio;
	unsigned int len;

	for (prio = TC_CBQ_MAXPRIO; prio >= 0; prio--) {
		cl_head = q->active[prio];
		if (!cl_head)
			continue;

		cl = cl_head;
		do {
			if (cl->q->ops->drop && (len = cl->q->ops->drop(cl->q))) {
				sch->q.qlen--;
				if (!cl->q->q.qlen)
					cbq_deactivate_class(cl);
				return len;
			}
		} while ((cl = cl->next_alive) != cl_head);
	}
	return 0;
}

static void
cbq_reset(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	int prio;
	unsigned int h;

	q->activemask = 0;
	q->pmask = 0;
	q->tx_class = NULL;
	q->tx_borrowed = NULL;
	qdisc_watchdog_cancel(&q->watchdog);
	hrtimer_cancel(&q->delay_timer);
	q->toplevel = TC_CBQ_MAXLEVEL;
	q->now = psched_get_time();
	q->now_rt = q->now;

	for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
		q->active[prio] = NULL;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			qdisc_reset(cl->q);

			cl->next_alive = NULL;
			cl->undertime = PSCHED_PASTPERFECT;
			cl->avgidle = cl->maxidle;
			cl->deficit = cl->quantum;
			cl->cpriority = cl->priority;
		}
	}
	sch->q.qlen = 0;
}


static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
{
	if (lss->change & TCF_CBQ_LSS_FLAGS) {
		cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
		cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
	}
	if (lss->change & TCF_CBQ_LSS_EWMA)
		cl->ewma_log = lss->ewma_log;
	if (lss->change & TCF_CBQ_LSS_AVPKT)
		cl->avpkt = lss->avpkt;
	if (lss->change & TCF_CBQ_LSS_MINIDLE)
		cl->minidle = -(long)lss->minidle;
	if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
		cl->maxidle = lss->maxidle;
		cl->avgidle = lss->maxidle;
	}
	if (lss->change & TCF_CBQ_LSS_OFFTIME)
		cl->offtime = lss->offtime;
	return 0;
}

static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
	q->nclasses[cl->priority]--;
	q->quanta[cl->priority] -= cl->weight;
	cbq_normalize_quanta(q, cl->priority);
}

static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
	q->nclasses[cl->priority]++;
	q->quanta[cl->priority] += cl->weight;
	cbq_normalize_quanta(q, cl->priority);
}

static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);

	if (wrr->allot)
		cl->allot = wrr->allot;
	if (wrr->weight)
		cl->weight = wrr->weight;
	if (wrr->priority) {
		cl->priority = wrr->priority - 1;
		cl->cpriority = cl->priority;
		if (cl->priority >= cl->priority2)
			cl->priority2 = TC_CBQ_MAXPRIO - 1;
	}

	cbq_addprio(q, cl);
	return 0;
}

static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
{
	switch (ovl->strategy) {
	case TC_CBQ_OVL_CLASSIC:
		cl->overlimit = cbq_ovl_classic;
		break;
	case TC_CBQ_OVL_DELAY:
		cl->overlimit = cbq_ovl_delay;
		break;
	case TC_CBQ_OVL_LOWPRIO:
		if (ovl->priority2 - 1 >= TC_CBQ_MAXPRIO ||
		    ovl->priority2 - 1 <= cl->priority)
			return -EINVAL;
		cl->priority2 = ovl->priority2 - 1;
		cl->overlimit = cbq_ovl_lowprio;
		break;
	case TC_CBQ_OVL_DROP:
		cl->overlimit = cbq_ovl_drop;
		break;
	case TC_CBQ_OVL_RCLASSIC:
		cl->overlimit = cbq_ovl_rclassic;
		break;
	default:
		return -EINVAL;
	}
	cl->penalty = ovl->penalty;
	return 0;
}

#ifdef CONFIG_NET_CLS_ACT
static int cbq_set_police(struct cbq_class *cl, struct tc_cbq_police *p)
{
	cl->police = p->police;

	if (cl->q->handle) {
		if (p->police == TC_POLICE_RECLASSIFY)
			cl->q->reshape_fail = cbq_reshape_fail;
		else
			cl->q->reshape_fail = NULL;
	}
	return 0;
}
#endif

static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
{
	cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
	return 0;
}

static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
	[TCA_CBQ_LSSOPT]	= { .len = sizeof(struct tc_cbq_lssopt) },
	[TCA_CBQ_WRROPT]	= { .len = sizeof(struct tc_cbq_wrropt) },
	[TCA_CBQ_FOPT]		= { .len = sizeof(struct tc_cbq_fopt) },
	[TCA_CBQ_OVL_STRATEGY]	= { .len = sizeof(struct tc_cbq_ovl) },
	[TCA_CBQ_RATE]		= { .len = sizeof(struct tc_ratespec) },
	[TCA_CBQ_RTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_CBQ_POLICE]	= { .len = sizeof(struct tc_cbq_police) },
};

static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CBQ_MAX + 1];
	struct tc_ratespec *r;
	int err;

	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
	if (err < 0)
		return err;

	if (tb[TCA_CBQ_RTAB] == NULL || tb[TCA_CBQ_RATE] == NULL)
		return -EINVAL;

	r = nla_data(tb[TCA_CBQ_RATE]);

	if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
		return -EINVAL;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		goto put_rtab;

	q->link.refcnt = 1;
	q->link.sibling = &q->link;
	q->link.common.classid = sch->handle;
	q->link.qdisc = sch;
	q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				      sch->handle);
	if (!q->link.q)
		q->link.q = &noop_qdisc;

	q->link.priority = TC_CBQ_MAXPRIO - 1;
	q->link.priority2 = TC_CBQ_MAXPRIO - 1;
	q->link.cpriority = TC_CBQ_MAXPRIO - 1;
	q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC;
	q->link.overlimit = cbq_ovl_classic;
	q->link.allot = psched_mtu(qdisc_dev(sch));
	q->link.quantum = q->link.allot;
	q->link.weight = q->link.R_tab->rate.rate;

	q->link.ewma_log = TC_CBQ_DEF_EWMA;
	q->link.avpkt = q->link.allot/2;
	q->link.minidle = -0x7FFFFFFF;

	qdisc_watchdog_init(&q->watchdog, sch);
	hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	q->delay_timer.function = cbq_undelay;
	q->toplevel = TC_CBQ_MAXLEVEL;
	q->now = psched_get_time();
	q->now_rt = q->now;

	cbq_link_class(&q->link);

	if (tb[TCA_CBQ_LSSOPT])
		cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT]));

	cbq_addprio(q, &q->link);
	return 0;

put_rtab:
	qdisc_put_rtab(q->link.R_tab);
	return err;
}

static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);

	if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_lssopt opt;

	opt.flags = 0;
	if (cl->borrow == NULL)
		opt.flags |= TCF_CBQ_LSS_BOUNDED;
	if (cl->share == NULL)
		opt.flags |= TCF_CBQ_LSS_ISOLATED;
	opt.ewma_log = cl->ewma_log;
	opt.level = cl->level;
	opt.avpkt = cl->avpkt;
	opt.maxidle = cl->maxidle;
	opt.minidle = (u32)(-cl->minidle);
	opt.offtime = cl->offtime;
	opt.change = ~0;
	if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_wrropt opt;

	memset(&opt, 0, sizeof(opt));
	opt.flags = 0;
	opt.allot = cl->allot;
	opt.priority = cl->priority + 1;
	opt.cpriority = cl->cpriority + 1;
	opt.weight = cl->weight;
	if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_ovl opt;

	opt.strategy = cl->ovl_strategy;
	opt.priority2 = cl->priority2 + 1;
	opt.pad = 0;
	opt.penalty = cl->penalty;
	if (nla_put(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_fopt opt;

	if (cl->split || cl->defmap) {
		opt.split = cl->split ? cl->split->common.classid : 0;
		opt.defmap = cl->defmap;
		opt.defchange = ~0;
		if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
			goto nla_put_failure;
	}
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

#ifdef CONFIG_NET_CLS_ACT
static int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_police opt;

	if (cl->police) {
		opt.police = cl->police;
		opt.__res1 = 0;
		opt.__res2 = 0;
		if (nla_put(skb, TCA_CBQ_POLICE, sizeof(opt), &opt))
			goto nla_put_failure;
	}
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
#endif

static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
{
	if (cbq_dump_lss(skb, cl) < 0 ||
	    cbq_dump_rate(skb, cl) < 0 ||
	    cbq_dump_wrr(skb, cl) < 0 ||
	    cbq_dump_ovl(skb, cl) < 0 ||
#ifdef CONFIG_NET_CLS_ACT
	    cbq_dump_police(skb, cl) < 0 ||
#endif
	    cbq_dump_fopt(skb, cl) < 0)
		return -1;
	return 0;
}

static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (cbq_dump_attr(skb, &q->link) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest);
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int
cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	q->link.xstats.avgidle = q->link.avgidle;
	return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
}

1583 | static int | |
1584 | cbq_dump_class(struct Qdisc *sch, unsigned long arg, | |
1585 | struct sk_buff *skb, struct tcmsg *tcm) | |
1586 | { | |
cc7ec456 | 1587 | struct cbq_class *cl = (struct cbq_class *)arg; |
4b3550ef | 1588 | struct nlattr *nest; |
1da177e4 LT |
1589 | |
1590 | if (cl->tparent) | |
d77fea2e | 1591 | tcm->tcm_parent = cl->tparent->common.classid; |
1da177e4 LT |
1592 | else |
1593 | tcm->tcm_parent = TC_H_ROOT; | |
d77fea2e | 1594 | tcm->tcm_handle = cl->common.classid; |
1da177e4 LT |
1595 | tcm->tcm_info = cl->q->handle; |
1596 | ||
4b3550ef PM |
1597 | nest = nla_nest_start(skb, TCA_OPTIONS); |
1598 | if (nest == NULL) | |
1599 | goto nla_put_failure; | |
1da177e4 | 1600 | if (cbq_dump_attr(skb, cl) < 0) |
1e90474c | 1601 | goto nla_put_failure; |
4b3550ef | 1602 | nla_nest_end(skb, nest); |
1da177e4 LT |
1603 | return skb->len; |
1604 | ||
1e90474c | 1605 | nla_put_failure: |
4b3550ef | 1606 | nla_nest_cancel(skb, nest); |
1da177e4 LT |
1607 | return -1; |
1608 | } | |

static int
cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		     struct gnet_dump *d)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;

	cl->qstats.qlen = cl->q->q.qlen;
	cl->xstats.avgidle = cl->avgidle;
	cl->xstats.undertime = 0;

	if (cl->undertime != PSCHED_PASTPERFECT)
		cl->xstats.undertime = cl->undertime - q->now;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}
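
/*
 * The copies above feed the tc(8) statistics dump, e.g. (illustrative
 * command; the device name is a placeholder):
 *
 *	tc -s class show dev eth0
 *
 * which prints the basic byte/packet counters, the rate estimator
 * output and the queue stats copied here, plus avgidle/undertime from
 * xstats.  undertime is exported relative to q->now because an
 * absolute psched timestamp is meaningless outside the scheduler
 * clock.
 */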

static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue,
					&pfifo_qdisc_ops, cl->common.classid);
		if (new == NULL)
			return -ENOBUFS;
	} else {
#ifdef CONFIG_NET_CLS_ACT
		if (cl->police == TC_POLICE_RECLASSIFY)
			new->reshape_fail = cbq_reshape_fail;
#endif
	}
	sch_tree_lock(sch);
	*old = cl->q;
	cl->q = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}
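
/*
 * Grafting replaces a class's child qdisc under the tree lock: the old
 * qdisc is reset and its queue length propagated up before it is handed
 * back to the caller for destruction.  Illustrative userspace trigger
 * (device, handles and the chosen qdisc are placeholders):
 *
 *	tc qdisc replace dev eth0 parent 1:10 handle 20: sfq
 *
 * When no replacement is supplied, a bare pfifo is created instead, so
 * a class is never left without a leaf qdisc.
 */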

static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	return cl->q;
}

static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl->q->q.qlen == 0)
		cbq_deactivate_class(cl);
}

static unsigned long cbq_get(struct Qdisc *sch, u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = cbq_class_lookup(q, classid);

	if (cl) {
		cl->refcnt++;
		return (unsigned long)cl;
	}
	return 0;
}

static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	WARN_ON(cl->filters);

	tcf_destroy_chain(&cl->filter_list);
	qdisc_destroy(cl->q);
	qdisc_put_rtab(cl->R_tab);
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	if (cl != &q->link)
		kfree(cl);
}

static void cbq_destroy(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct hlist_node *next;
	struct cbq_class *cl;
	unsigned int h;

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = NULL;
#endif
	/*
	 * Filters must be destroyed first because we don't destroy the
	 * classes from root to leaves, which means that filters can still
	 * be bound to classes which have been destroyed already. --TGR '04
	 */
	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode)
			tcf_destroy_chain(&cl->filter_list);
	}
	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
					  common.hnode)
			cbq_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}

static void cbq_put(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (--cl->refcnt == 0) {
#ifdef CONFIG_NET_CLS_ACT
		spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
		struct cbq_sched_data *q = qdisc_priv(sch);

		spin_lock_bh(root_lock);
		if (q->rx_class == cl)
			q->rx_class = NULL;
		spin_unlock_bh(root_lock);
#endif

		cbq_destroy_class(sch, cl);
	}
}
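
/*
 * Class lifetime follows the cops->get()/cops->put() protocol: get()
 * takes a reference on lookup, put() drops it, and the class is only
 * destroyed when the last reference goes away.  Dropping the final
 * reference must also clear q->rx_class under the root lock, because
 * the enqueue path may still hold a cached pointer to the dying class.
 */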

static int
cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
		 unsigned long *arg)
{
	int err;
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_CBQ_MAX + 1];
	struct cbq_class *parent;
	struct qdisc_rate_table *rtab = NULL;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
	if (err < 0)
		return err;

	if (cl) {
		/* Check parent */
		if (parentid) {
			if (cl->tparent &&
			    cl->tparent->common.classid != parentid)
				return -EINVAL;
			if (!cl->tparent && parentid != TC_H_ROOT)
				return -EINVAL;
		}

		if (tb[TCA_CBQ_RATE]) {
			rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
					      tb[TCA_CBQ_RTAB]);
			if (rtab == NULL)
				return -EINVAL;
		}

		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
						    qdisc_root_sleeping_lock(sch),
						    tca[TCA_RATE]);
			if (err) {
				if (rtab)
					qdisc_put_rtab(rtab);
				return err;
			}
		}

		/* Change class parameters */
		sch_tree_lock(sch);

		if (cl->next_alive != NULL)
			cbq_deactivate_class(cl);

		if (rtab) {
			qdisc_put_rtab(cl->R_tab);
			cl->R_tab = rtab;
		}

		if (tb[TCA_CBQ_LSSOPT])
			cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));

		if (tb[TCA_CBQ_WRROPT]) {
			cbq_rmprio(q, cl);
			cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
		}

		if (tb[TCA_CBQ_OVL_STRATEGY])
			cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY]));

#ifdef CONFIG_NET_CLS_ACT
		if (tb[TCA_CBQ_POLICE])
			cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE]));
#endif

		if (tb[TCA_CBQ_FOPT])
			cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));

		if (cl->q->q.qlen)
			cbq_activate_class(cl);

		sch_tree_unlock(sch);

		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EINVAL;

	if (tb[TCA_CBQ_WRROPT] == NULL || tb[TCA_CBQ_RATE] == NULL ||
	    tb[TCA_CBQ_LSSOPT] == NULL)
		return -EINVAL;

	rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]);
	if (rtab == NULL)
		return -EINVAL;

	if (classid) {
		err = -EINVAL;
		if (TC_H_MAJ(classid ^ sch->handle) ||
		    cbq_class_lookup(q, classid))
			goto failure;
	} else {
		int i;
		classid = TC_H_MAKE(sch->handle, 0x8000);

		/*
		 * No classid given: scan the upper half of the minor-ID
		 * space (0x8001..0xFFFF) for a free handle, resuming from
		 * where the previous search left off.
		 */
		for (i = 0; i < 0x8000; i++) {
			if (++q->hgenerator >= 0x8000)
				q->hgenerator = 1;
			if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
				break;
		}
		err = -ENOSR;
		if (i >= 0x8000)
			goto failure;
		classid = classid|q->hgenerator;
	}

	parent = &q->link;
	if (parentid) {
		parent = cbq_class_lookup(q, parentid);
		err = -EINVAL;
		if (parent == NULL)
			goto failure;
	}

	err = -ENOBUFS;
	cl = kzalloc(sizeof(*cl), GFP_KERNEL);
	if (cl == NULL)
		goto failure;

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, &cl->rate_est,
					qdisc_root_sleeping_lock(sch),
					tca[TCA_RATE]);
		if (err) {
			kfree(cl);
			goto failure;
		}
	}

	cl->R_tab = rtab;
	rtab = NULL;
	cl->refcnt = 1;
	cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
	if (!cl->q)
		cl->q = &noop_qdisc;
	cl->common.classid = classid;
	cl->tparent = parent;
	cl->qdisc = sch;
	cl->allot = parent->allot;
	cl->quantum = cl->allot;
	cl->weight = cl->R_tab->rate.rate;

	sch_tree_lock(sch);
	cbq_link_class(cl);
	cl->borrow = cl->tparent;
	if (cl->tparent != &q->link)
		cl->share = cl->tparent;
	cbq_adjust_levels(parent);
	cl->minidle = -0x7FFFFFFF;
	cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
	cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
	if (cl->ewma_log == 0)
		cl->ewma_log = q->link.ewma_log;
	if (cl->maxidle == 0)
		cl->maxidle = q->link.maxidle;
	if (cl->avpkt == 0)
		cl->avpkt = q->link.avpkt;
	cl->overlimit = cbq_ovl_classic;
	if (tb[TCA_CBQ_OVL_STRATEGY])
		cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY]));
#ifdef CONFIG_NET_CLS_ACT
	if (tb[TCA_CBQ_POLICE])
		cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE]));
#endif
	if (tb[TCA_CBQ_FOPT])
		cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

failure:
	qdisc_put_rtab(rtab);
	return err;
}
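
/*
 * Illustrative userspace path into cbq_change_class() (device name and
 * numbers are placeholders):
 *
 *	tc qdisc add dev eth0 root handle 1: cbq bandwidth 100Mbit \
 *		avpkt 1000
 *	tc class add dev eth0 parent 1: classid 1:10 cbq \
 *		bandwidth 100Mbit rate 10Mbit allot 1514 prio 5 \
 *		avpkt 1000 bounded
 *
 * The first command sets up q->link; the second arrives here with
 * cl == NULL and allocates a new class, inheriting ewma_log, maxidle
 * and avpkt from the link class whenever they are not given explicitly.
 */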

static int cbq_delete(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;
	unsigned int qlen;

	if (cl->filters || cl->children || cl == &q->link)
		return -EBUSY;

	sch_tree_lock(sch);

	qlen = cl->q->q.qlen;
	qdisc_reset(cl->q);
	qdisc_tree_decrease_qlen(cl->q, qlen);

	if (cl->next_alive)
		cbq_deactivate_class(cl);

	if (q->tx_borrowed == cl)
		q->tx_borrowed = q->tx_class;
	if (q->tx_class == cl) {
		q->tx_class = NULL;
		q->tx_borrowed = NULL;
	}
#ifdef CONFIG_NET_CLS_ACT
	if (q->rx_class == cl)
		q->rx_class = NULL;
#endif

	cbq_unlink_class(cl);
	cbq_adjust_levels(cl->tparent);
	cl->defmap = 0;
	cbq_sync_defmap(cl);

	cbq_rmprio(q, cl);
	sch_tree_unlock(sch);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	return 0;
}

static struct tcf_proto **cbq_find_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl == NULL)
		cl = &q->link;

	return &cl->filter_list;
}

static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *p = (struct cbq_class *)parent;
	struct cbq_class *cl = cbq_class_lookup(q, classid);

	if (cl) {
		if (p && p->level <= cl->level)
			return 0;
		cl->filters++;
		return (unsigned long)cl;
	}
	return 0;
}

static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	cl->filters--;
}

static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	unsigned int h;

	if (arg->stop)
		return;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}
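
/*
 * The walker protocol: arg->skip tells cbq_walk() how many classes to
 * pass over (so an interrupted netlink dump can resume where it
 * stopped), arg->count tracks progress, and a negative return from
 * arg->fn aborts the walk.  A minimal callback sketch (count_one is a
 * placeholder name):
 *
 *	static int count_one(struct Qdisc *sch, unsigned long cl,
 *			     struct qdisc_walker *w)
 *	{
 *		return 0;
 *	}
 *
 * Walking with struct qdisc_walker w = { .fn = count_one }; leaves the
 * number of visited classes in w.count.
 */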

static const struct Qdisc_class_ops cbq_class_ops = {
	.graft		=	cbq_graft,
	.leaf		=	cbq_leaf,
	.qlen_notify	=	cbq_qlen_notify,
	.get		=	cbq_get,
	.put		=	cbq_put,
	.change		=	cbq_change_class,
	.delete		=	cbq_delete,
	.walk		=	cbq_walk,
	.tcf_chain	=	cbq_find_tcf,
	.bind_tcf	=	cbq_bind_filter,
	.unbind_tcf	=	cbq_unbind_filter,
	.dump		=	cbq_dump_class,
	.dump_stats	=	cbq_dump_class_stats,
};

static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&cbq_class_ops,
	.id		=	"cbq",
	.priv_size	=	sizeof(struct cbq_sched_data),
	.enqueue	=	cbq_enqueue,
	.dequeue	=	cbq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	cbq_drop,
	.init		=	cbq_init,
	.reset		=	cbq_reset,
	.destroy	=	cbq_destroy,
	.change		=	NULL,
	.dump		=	cbq_dump,
	.dump_stats	=	cbq_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init cbq_module_init(void)
{
	return register_qdisc(&cbq_qdisc_ops);
}
static void __exit cbq_module_exit(void)
{
	unregister_qdisc(&cbq_qdisc_ops);
}
module_init(cbq_module_init)
module_exit(cbq_module_exit)
MODULE_LICENSE("GPL");