/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *						 original idea by Martin Devera
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>


/*	Simple Token Bucket Filter.
	=======================================

	SOURCE.
	-------

	None.

	Description.
	------------

	A data flow obeys TBF with rate R and depth B, if for any
	time interval t_i...t_f the number of transmitted bits
	does not exceed B + R*(t_f-t_i).

	Packetized version of this definition:
	The sequence of packets of sizes s_i served at moments t_i
	obeys TBF, if for any i<=k:

	s_i+....+s_k <= B + R*(t_k - t_i)
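
	For example (illustrative numbers, not from the original text):
	with R = 125000 bytes/sec (1 Mbit/s) and B = 10000 bytes, a
	back-to-back burst of up to 10000 bytes may pass at once, while
	any longer stretch of traffic is held to 125000 bytes/sec on
	average over the interval containing it.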

	Algorithm.
	----------

	Let N(t_i) be B/R initially and N(t) grow continuously with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in the queue has length S, it may be
	transmitted only at the time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.



	Actually, QoS requires two TBFs to be applied to a data stream.
	One of them controls the steady-state burst size, while the other,
	with rate P (peak rate) and depth M (equal to the link MTU),
	limits bursts on a smaller time scale.

	It is easy to see that P>R and B>M. If P is infinity, this double
	TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as:

	lat = max ((L-B)/R, (L-M)/P)

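	For example (illustrative numbers): with limit L = 20000 bytes,
	B = 10000 bytes, R = 125000 bytes/sec and no peak rate, the
	worst-case queueing latency is (L-B)/R = 10000/125000 = 80 ms.
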

	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake it up
	when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not awakened by an EOI for some previous packet,
	TBF can stop its activity for 1/HZ.


	This means that, with depth B, the maximal rate is

	R_crit = B*HZ

	E.g. for 10 Mbit ethernet and HZ=100 the minimal allowed B is
	~10 Kbytes (B_min = R/HZ = 1.25 Mbytes/sec / 100 = 12.5 Kbytes).

	Note that the peak-rate TBF is much tougher: with MTU 1500 and
	HZ=100, P_crit = M*HZ = 150 Kbytes/sec. So, if you need greater
	peak rates, use Alpha with HZ=1000 :-)

	With classful TBF, the limit is kept only for backwards compatibility.
	It is passed to the default bfifo qdisc - if the inner qdisc is
	changed, the limit is no longer effective.
*/
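
/* Illustrative usage (not part of the original source): this qdisc is
 * normally configured from userspace with iproute2's tc, e.g.:
 *
 *	tc qdisc add dev eth0 root tbf rate 1mbit burst 10kb latency 70ms
 *
 * Here "rate" fills q->rate, "burst" determines q->buffer (the token
 * bucket depth, stored as nanoseconds of accumulated credit), and
 * "latency" bounds the backlog via the byte limit of the inner bfifo.
 * The exact option names are those of tc-tbf(8).
 */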
100 | ||
101 | struct tbf_sched_data { | |
102 | /* Parameters */ | |
103 | u32 limit; /* Maximal length of backlog: bytes */ | |
104 | u32 max_size; | |
105 | s64 buffer; /* Token bucket depth/rate: MUST BE >= MTU/B */ | |
106 | s64 mtu; | |
107 | struct psched_ratecfg rate; | |
108 | struct psched_ratecfg peak; | |
109 | ||
110 | /* Variables */ | |
111 | s64 tokens; /* Current number of B tokens */ | |
112 | s64 ptokens; /* Current number of P tokens */ | |
113 | s64 t_c; /* Time check-point */ | |
114 | struct Qdisc *qdisc; /* Inner qdisc, default - bfifo queue */ | |
115 | struct qdisc_watchdog watchdog; /* Watchdog timer */ | |
116 | }; | |
117 | ||
118 | ||
/* Time to Length, convert time in ns to length in bytes
 * to determine how many bytes can be sent in a given time.
 */
static u64 psched_ns_t2l(const struct psched_ratecfg *r,
			 u64 time_in_ns)
{
	/* The formula is :
	 * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC
	 */
	u64 len = time_in_ns * r->rate_bytes_ps;

	do_div(len, NSEC_PER_SEC);

	/* On ATM link layers only 48 of every 53 cell bytes carry
	 * payload, so scale the length accordingly.
	 */
	if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) {
		do_div(len, 53);
		len = len * 48;
	}

	if (len > r->overhead)
		len -= r->overhead;
	else
		len = 0;

	return len;
}
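
/* For example (illustrative numbers): at r->rate_bytes_ps = 125000
 * (1 Mbit/s), time_in_ns = 8000000 (8 ms) yields len = 1000 bytes,
 * before the link-layer and overhead adjustments above.
 */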

/* GSO packet is too big, segment it so that tbf can transmit
 * each segment in time
 */
static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *segs, *nskb;
	netdev_features_t features = netif_skb_features(skb);
	unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
	int ret, nb;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs))
		return qdisc_drop(skb, sch, to_free);

	nb = 0;
	while (segs) {
		nskb = segs->next;
		segs->next = NULL;
		qdisc_skb_cb(segs)->pkt_len = segs->len;
		len += segs->len;
		ret = qdisc_enqueue(segs, q->qdisc, to_free);
		if (ret != NET_XMIT_SUCCESS) {
			if (net_xmit_drop_count(ret))
				qdisc_qstats_drop(sch);
		} else {
			nb++;
		}
		segs = nskb;
	}
	sch->q.qlen += nb;
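	/* One skb counted upstream has become nb segments; adjust the
	 * ancestors' qlen/backlog accounting to match.
	 */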
	if (nb > 1)
		qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	consume_skb(skb);
	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}

static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

	if (qdisc_pkt_len(skb) > q->max_size) {
		if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size)
			return tbf_segment(skb, sch, to_free);
		return qdisc_drop(skb, sch, to_free);
	}
	ret = qdisc_enqueue(skb, q->qdisc, to_free);
	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret))
			qdisc_qstats_drop(sch);
		return ret;
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static bool tbf_peak_present(const struct tbf_sched_data *q)
{
	return q->peak.rate_bytes_ps;
}

static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);

	if (skb) {
		s64 now;
		s64 toks;
		s64 ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		now = ktime_get_ns();
		toks = min_t(s64, now - q->t_c, q->buffer);

		if (tbf_peak_present(q)) {
			ptoks = toks + q->ptokens;
			if (ptoks > q->mtu)
				ptoks = q->mtu;
			ptoks -= (s64) psched_l2t_ns(&q->peak, len);
		}
		toks += q->tokens;
		if (toks > q->buffer)
			toks = q->buffer;
		toks -= (s64) psched_l2t_ns(&q->rate, len);

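		/* (toks|ptoks) >= 0 holds iff both values are non-negative:
		 * ORing the sign bits checks both token budgets at once.
		 */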
		if ((toks|ptoks) >= 0) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			qdisc_bstats_update(sch, skb);
			return skb;
		}

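		/* Out of tokens: sleep until the larger of the two deficits
		 * (-toks or -ptoks, in nanoseconds) has been replenished.
		 */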
		qdisc_watchdog_schedule_ns(&q->watchdog,
					   now + max_t(long, -toks, -ptoks));

		/* Maybe we have a shorter packet in the queue that could
		   be sent now. That sounds appealing, but it is wrong in
		   principle: we MUST NOT reorder packets under these
		   circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		qdisc_qstats_overlimit(sch);
	}
	return NULL;
}

static void tbf_reset(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	q->t_c = ktime_get_ns();
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	qdisc_watchdog_cancel(&q->watchdog);
}

static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
	[TCA_TBF_PARMS]		= { .len = sizeof(struct tc_tbf_qopt) },
	[TCA_TBF_RTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_PTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_RATE64]	= { .type = NLA_U64 },
	[TCA_TBF_PRATE64]	= { .type = NLA_U64 },
	[TCA_TBF_BURST]		= { .type = NLA_U32 },
	[TCA_TBF_PBURST]	= { .type = NLA_U32 },
};

static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_MAX + 1];
	struct tc_tbf_qopt *qopt;
	struct Qdisc *child = NULL;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;
	u64 max_size;
	s64 buffer, mtu;
	u64 rate64 = 0, prate64 = 0;

	err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy, NULL);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
					      tb[TCA_TBF_RTAB]));

	if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
					      tb[TCA_TBF_PTAB]));

	buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
	mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);

	if (tb[TCA_TBF_RATE64])
		rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
	psched_ratecfg_precompute(&rate, &qopt->rate, rate64);

	if (tb[TCA_TBF_BURST]) {
		max_size = nla_get_u32(tb[TCA_TBF_BURST]);
		buffer = psched_l2t_ns(&rate, max_size);
	} else {
		max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);
	}

	if (qopt->peakrate.rate) {
		if (tb[TCA_TBF_PRATE64])
			prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
		psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
		if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
			pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equal to rate %llu!\n",
					    peak.rate_bytes_ps, rate.rate_bytes_ps);
			err = -EINVAL;
			goto done;
		}

		if (tb[TCA_TBF_PBURST]) {
			u32 pburst = nla_get_u32(tb[TCA_TBF_PBURST]);
			max_size = min_t(u32, max_size, pburst);
			mtu = psched_l2t_ns(&peak, pburst);
		} else {
			max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
		}
	} else {
		memset(&peak, 0, sizeof(peak));
	}

	if (max_size < psched_mtu(qdisc_dev(sch)))
		pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u)!\n",
				    max_size, qdisc_dev(sch)->name,
				    psched_mtu(qdisc_dev(sch)));

	if (!max_size) {
		err = -EINVAL;
		goto done;
	}

	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}

		/* child is fifo, no need to check for noop_qdisc */
		qdisc_hash_add(child, true);
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
					  q->qdisc->qstats.backlog);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}
	q->limit = qopt->limit;
	if (tb[TCA_TBF_PBURST])
		q->mtu = mtu;
	else
		q->mtu = PSCHED_TICKS2NS(qopt->mtu);
	q->max_size = max_size;
	if (tb[TCA_TBF_BURST])
		q->buffer = buffer;
	else
		q->buffer = PSCHED_TICKS2NS(qopt->buffer);
	q->tokens = q->buffer;
	q->ptokens = q->mtu;

	memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
	memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));

	sch_tree_unlock(sch);
	err = 0;
done:
	return err;
}

static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_init(&q->watchdog, sch);
	q->qdisc = &noop_qdisc;

	if (opt == NULL)
		return -EINVAL;

	q->t_c = ktime_get_ns();

	return tbf_change(sch, opt);
}

static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
}

static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_tbf_qopt opt;

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	opt.limit = q->limit;
	psched_ratecfg_getrate(&opt.rate, &q->rate);
	if (tbf_peak_present(q))
		psched_ratecfg_getrate(&opt.peakrate, &q->peak);
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = PSCHED_NS2TICKS(q->mtu);
	opt.buffer = PSCHED_NS2TICKS(q->buffer);
	if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64_64bit(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps,
			      TCA_TBF_PAD))
		goto nla_put_failure;
	if (tbf_peak_present(q) &&
	    q->peak.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64_64bit(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps,
			      TCA_TBF_PAD))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long tbf_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops tbf_class_ops = {
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.find		=	tbf_find,
	.walk		=	tbf_walk,
	.dump		=	tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&tbf_class_ops,
	.id		=	"tbf",
	.priv_size	=	sizeof(struct tbf_sched_data),
	.enqueue	=	tbf_enqueue,
	.dequeue	=	tbf_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	tbf_init,
	.reset		=	tbf_reset,
	.destroy	=	tbf_destroy,
	.change		=	tbf_change,
	.dump		=	tbf_dump,
	.owner		=	THIS_MODULE,
};

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}
module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");