/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *						 original idea by Martin Devera
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>

/*	Simple Token Bucket Filter.
	=======================================

	SOURCE.
	-------

	None.

	Description.
	------------

	A data flow obeys TBF with rate R and depth B, if for any
	time interval t_i...t_f the number of transmitted bits
	does not exceed B + R*(t_f-t_i).

	Packetized version of this definition:
	The sequence of packets of sizes s_i served at moments t_i
	obeys TBF, if for any i<=k:

	s_i+....+s_k <= B + R*(t_k - t_i)

	Algorithm.
	----------

	Let N(t_i) be B/R initially and N(t) grow continuously with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in the queue has length S, it may be
	transmitted only at the time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.



	Actually, QoS requires two TBFs to be applied to a data stream.
	One of them controls the steady-state burst size, while the other,
	with rate P (peak rate) and depth M (equal to the link MTU),
	limits bursts on a smaller time scale.

	It is easy to see that P>R and B>M. If P is infinity, this double
	TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as:

	lat = max ((L-B)/R, (L-M)/P)


	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake it up
	when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not woken up by EOI for some previous packet,
	TBF can stop its activity for up to 1/HZ.


	This means that with depth B, the maximal rate is

	R_crit = B*HZ

	E.g. for 10Mbit ethernet and HZ=100 the minimal allowed B is ~10Kbytes.

	Note that the peak rate TBF is much tougher: with MTU 1500
	P_crit = 150Kbytes/sec. So, if you need greater peak
	rates, use alpha with HZ=1000 :-)

	With classful TBF, limit is just kept for backwards compatibility.
	It is passed to the default bfifo qdisc - if the inner qdisc is
	changed the limit is not effective anymore.
*/
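
/* Worked example for the latency estimate above (figures are illustrative
 * only): with R = 125000 bytes/s, B = 10000 bytes, P = 1250000 bytes/s,
 * M = 1500 bytes and a backlog of L = 60000 bytes, (L-B)/R = 0.40s while
 * (L-M)/P ~= 0.047s, so the estimated latency is 0.40s - the steady-state
 * bucket, not the peak bucket, dominates the queueing delay.
 */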

struct tbf_sched_data {
/* Parameters */
	u32		limit;		/* Maximal length of backlog: bytes */
	u32		max_size;
	s64		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	s64		mtu;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;

/* Variables */
	s64	tokens;			/* Current number of B tokens */
	s64	ptokens;		/* Current number of P tokens */
	s64	t_c;			/* Time check-point */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
	struct qdisc_watchdog watchdog;	/* Watchdog timer */
};
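
/* The token counters above are kept in nanoseconds of transmission time
 * rather than in bytes: buffer is the time needed to send a full burst at
 * the configured rate (B/R in the description above), and mtu is the time
 * needed to send a peak-sized burst at the peak rate (M/P).
 */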

/* Time to Length, convert time in ns to length in bytes
 * to determine how many bytes can be sent in the given time.
 */
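/* For example (illustrative numbers): at rate_bytes_ps = 125000000
 * (1 Gbit/s), a time_in_ns of 1000000 (1 ms) yields len = 125000 bytes;
 * on an ATM link layer this is further scaled to (125000/53)*48 = 113184
 * bytes (48 payload bytes per 53-byte cell) before r->overhead is
 * subtracted.
 */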
static u64 psched_ns_t2l(const struct psched_ratecfg *r,
			 u64 time_in_ns)
{
	/* The formula is:
	 * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC
	 */
	u64 len = time_in_ns * r->rate_bytes_ps;

	do_div(len, NSEC_PER_SEC);

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) {
		do_div(len, 53);
		len = len * 48;
	}

	if (len > r->overhead)
		len -= r->overhead;
	else
		len = 0;

	return len;
}

/* GSO packet is too big, segment it so that tbf can transmit
 * each segment in time
 */
static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *segs, *nskb;
	netdev_features_t features = netif_skb_features(skb);
	unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
	int ret, nb;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs))
		return qdisc_drop(skb, sch, to_free);

	nb = 0;
	while (segs) {
		nskb = segs->next;
		segs->next = NULL;
		qdisc_skb_cb(segs)->pkt_len = segs->len;
		len += segs->len;
		ret = qdisc_enqueue(segs, q->qdisc, to_free);
		if (ret != NET_XMIT_SUCCESS) {
			if (net_xmit_drop_count(ret))
				qdisc_qstats_drop(sch);
		} else {
			nb++;
		}
		segs = nskb;
	}
	sch->q.qlen += nb;
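	/* One oversized skb has been replaced by nb segments totalling len
	 * bytes; let the ancestor qdiscs correct their qlen/backlog counts
	 * accordingly.
	 */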
	if (nb > 1)
		qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	consume_skb(skb);
	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}

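/* Enqueue: packets larger than max_size can never accumulate enough tokens
 * to be sent in one piece, so they are dropped - unless they are GSO
 * aggregates whose individual segments fit, in which case they are split
 * up by tbf_segment() above.
 */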
static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

	if (qdisc_pkt_len(skb) > q->max_size) {
		if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size)
			return tbf_segment(skb, sch, to_free);
		return qdisc_drop(skb, sch, to_free);
	}
	ret = qdisc_enqueue(skb, q->qdisc, to_free);
	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret))
			qdisc_qstats_drop(sch);
		return ret;
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static bool tbf_peak_present(const struct tbf_sched_data *q)
{
	return q->peak.rate_bytes_ps;
}

static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);

	if (skb) {
		s64 now;
		s64 toks;
		s64 ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		now = ktime_get_ns();
		toks = min_t(s64, now - q->t_c, q->buffer);

		if (tbf_peak_present(q)) {
			ptoks = toks + q->ptokens;
			if (ptoks > q->mtu)
				ptoks = q->mtu;
			ptoks -= (s64) psched_l2t_ns(&q->peak, len);
		}
		toks += q->tokens;
		if (toks > q->buffer)
			toks = q->buffer;
		toks -= (s64) psched_l2t_ns(&q->rate, len);

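		/* toks and ptoks are the rate and peak-rate token balances
		 * (in ns) left over after charging for this packet; OR-ing
		 * two s64 values is negative iff at least one of them is
		 * negative, so the packet may go out only when both balances
		 * are non-negative.
		 */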
		if ((toks|ptoks) >= 0) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			qdisc_bstats_update(sch, skb);
			return skb;
		}

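		/* Not enough tokens yet: -toks (and -ptoks) is the time in ns
		 * until the corresponding bucket goes non-negative again, so
		 * arm the watchdog for the later of the two deadlines.
		 */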
		qdisc_watchdog_schedule_ns(&q->watchdog,
					   now + max_t(long, -toks, -ptoks));

		/* Maybe we have a shorter packet in the queue,
		   which could be sent now. It sounds cool,
		   but it is wrong in principle:
		   we MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		qdisc_qstats_overlimit(sch);
	}
	return NULL;
}

static void tbf_reset(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	q->t_c = ktime_get_ns();
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	qdisc_watchdog_cancel(&q->watchdog);
}

static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
	[TCA_TBF_PARMS]   = { .len = sizeof(struct tc_tbf_qopt) },
	[TCA_TBF_RTAB]    = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_PTAB]    = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_RATE64]  = { .type = NLA_U64 },
	[TCA_TBF_PRATE64] = { .type = NLA_U64 },
	[TCA_TBF_BURST]   = { .type = NLA_U32 },
	[TCA_TBF_PBURST]  = { .type = NLA_U32 },
};

static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_MAX + 1];
	struct tc_tbf_qopt *qopt;
	struct Qdisc *child = NULL;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;
	u64 max_size;
	s64 buffer, mtu;
	u64 rate64 = 0, prate64 = 0;

	err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy, NULL);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
					      tb[TCA_TBF_RTAB]));

	if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
					      tb[TCA_TBF_PTAB]));

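	/* qopt->buffer and qopt->mtu arrive from userspace in psched ticks;
	 * convert them to nanoseconds (clamped to a u32 worth of ns). They
	 * may be recomputed below if explicit TCA_TBF_BURST/TCA_TBF_PBURST
	 * byte values were supplied.
	 */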
	buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
	mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);

	if (tb[TCA_TBF_RATE64])
		rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
	psched_ratecfg_precompute(&rate, &qopt->rate, rate64);

	if (tb[TCA_TBF_BURST]) {
		max_size = nla_get_u32(tb[TCA_TBF_BURST]);
		buffer = psched_l2t_ns(&rate, max_size);
	} else {
		max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);
	}

	if (qopt->peakrate.rate) {
		if (tb[TCA_TBF_PRATE64])
			prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
		psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
		if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
			pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equal to rate %llu !\n",
					    peak.rate_bytes_ps, rate.rate_bytes_ps);
			err = -EINVAL;
			goto done;
		}

		if (tb[TCA_TBF_PBURST]) {
			u32 pburst = nla_get_u32(tb[TCA_TBF_PBURST]);
			max_size = min_t(u32, max_size, pburst);
			mtu = psched_l2t_ns(&peak, pburst);
		} else {
			max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
		}
	} else {
		memset(&peak, 0, sizeof(peak));
	}

	if (max_size < psched_mtu(qdisc_dev(sch)))
		pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u) !\n",
				    max_size, qdisc_dev(sch)->name,
				    psched_mtu(qdisc_dev(sch)));

	if (!max_size) {
		err = -EINVAL;
		goto done;
	}

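	/* The limit only has an effect on the (default) fifo child: if an
	 * inner qdisc is already attached, fifo_set_limit() forwards the new
	 * limit to it (a no-op for non-fifo children); otherwise a bfifo of
	 * that size is created now.
	 */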
	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}

		/* child is fifo, no need to check for noop_qdisc */
		qdisc_hash_add(child, true);
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
					  q->qdisc->qstats.backlog);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}
	q->limit = qopt->limit;
	if (tb[TCA_TBF_PBURST])
		q->mtu = mtu;
	else
		q->mtu = PSCHED_TICKS2NS(qopt->mtu);
	q->max_size = max_size;
	if (tb[TCA_TBF_BURST])
		q->buffer = buffer;
	else
		q->buffer = PSCHED_TICKS2NS(qopt->buffer);
	q->tokens = q->buffer;
	q->ptokens = q->mtu;

	memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
	memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));

	sch_tree_unlock(sch);
	err = 0;
done:
	return err;
}

static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_init(&q->watchdog, sch);
	q->qdisc = &noop_qdisc;

	if (opt == NULL)
		return -EINVAL;

	q->t_c = ktime_get_ns();

	return tbf_change(sch, opt);
}

static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
}

static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_tbf_qopt opt;

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	opt.limit = q->limit;
	psched_ratecfg_getrate(&opt.rate, &q->rate);
	if (tbf_peak_present(q))
		psched_ratecfg_getrate(&opt.peakrate, &q->peak);
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = PSCHED_NS2TICKS(q->mtu);
	opt.buffer = PSCHED_NS2TICKS(q->buffer);
	if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
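	/* Rates that do not fit into the 32-bit tc_ratespec fields are
	 * additionally exported via the 64-bit TCA_TBF_RATE64 and
	 * TCA_TBF_PRATE64 attributes.
	 */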
	if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64_64bit(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps,
			      TCA_TBF_PAD))
		goto nla_put_failure;
	if (tbf_peak_present(q) &&
	    q->peak.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64_64bit(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps,
			      TCA_TBF_PAD))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long tbf_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

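/* TBF is classful only in a degenerate sense: it exposes a single class
 * (minor 1) that holds the inner qdisc, which is why tbf_find() always
 * returns 1 and tbf_walk() visits exactly one class.
 */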
static const struct Qdisc_class_ops tbf_class_ops = {
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.find		=	tbf_find,
	.walk		=	tbf_walk,
	.dump		=	tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&tbf_class_ops,
	.id		=	"tbf",
	.priv_size	=	sizeof(struct tbf_sched_data),
	.enqueue	=	tbf_enqueue,
	.dequeue	=	tbf_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	tbf_init,
	.reset		=	tbf_reset,
	.destroy	=	tbf_destroy,
	.change		=	tbf_change,
	.dump		=	tbf_dump,
	.owner		=	THIS_MODULE,
};

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}
module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");