/*
 * net/sched/sch_choke.c	CHOKE scheduler
 *
 * Copyright (c) 2011 Stephen Hemminger <shemminger@vyatta.com>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>
#include <net/red.h>
#include <net/flow_dissector.h>

/*
   CHOKe stateless AQM for fair bandwidth allocation
   =================================================

   CHOKe (CHOose and Keep for responsive flows, CHOose and Kill for
   unresponsive flows) is a variant of RED that penalizes misbehaving flows but
   maintains no flow state. The difference from RED is an additional step
   during the enqueuing process. If average queue size is over the
   low threshold (qmin), a packet is chosen at random from the queue.
   If both the new and chosen packet are from the same flow, both
   are dropped. Unlike RED, CHOKe is not really a "classful" qdisc because it
   needs to access packets in queue randomly. It has a minimal class
   interface to allow overriding the builtin flow classifier with
   filters.

   Source:
   R. Pan, B. Prabhakar, and K. Psounis, "CHOKe, A Stateless
   Active Queue Management Scheme for Approximating Fair Bandwidth Allocation",
   IEEE INFOCOM, 2000.

   A. Tang, J. Wang, S. Low, "Understanding CHOKe: Throughput and Spatial
   Characteristics", IEEE/ACM Transactions on Networking, 2004

 */
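
/*
 * Illustrative configuration (a sketch only; parameter names follow the
 * tc-choke(8) front end, and the device and rates are made-up examples):
 *
 *	tc qdisc add dev eth0 parent 1:1 handle 10: \
 *		choke limit 1000 bandwidth 10mbit ecn
 *
 * "limit" bounds the queue in packets (capped by CHOKE_MAX_QUEUE below) and
 * "ecn" makes responsive flows get marked rather than dropped when possible.
 */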

/* Upper bound on size of sk_buff table (packets) */
#define CHOKE_MAX_QUEUE	(128*1024 - 1)

struct choke_sched_data {
/* Parameters */
	u32		 limit;
	unsigned char	 flags;

	struct red_parms parms;

/* Variables */
	struct red_vars  vars;
	struct tcf_proto __rcu *filter_list;
	struct {
		u32	prob_drop;	/* Early probability drops */
		u32	prob_mark;	/* Early probability marks */
		u32	forced_drop;	/* Forced drops, qavg > max_thresh */
		u32	forced_mark;	/* Forced marks, qavg > max_thresh */
		u32	pdrop;		/* Drops due to queue limits */
		u32	other;		/* Drops due to drop() calls */
		u32	matched;	/* Drops to flow match */
	} stats;

	unsigned int	 head;
	unsigned int	 tail;

	unsigned int	 tab_mask; /* size - 1 */

	struct sk_buff **tab;
};
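
/*
 * q->tab is a power-of-two sized ring buffer of skb pointers indexed by
 * head/tail and masked with tab_mask.  Dropping a packet by index leaves a
 * NULL "hole" in the table; the zap helpers below advance head past holes
 * and pull tail back over trailing holes.
 */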

/* number of elements in queue including holes */
static unsigned int choke_len(const struct choke_sched_data *q)
{
	return (q->tail - q->head) & q->tab_mask;
}
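
/*
 * The mask in choke_len() makes the subtraction wrap correctly: e.g. with
 * tab_mask = 7, head = 6 and tail = 2, (2 - 6) & 7 = 4 slots are in use
 * (slots 6, 7, 0 and 1, some of which may be holes).
 */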

/* Is ECN parameter configured */
static int use_ecn(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

/* Should packets over max just be dropped (versus marked) */
static int use_harddrop(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

/* Move head pointer forward to skip over holes */
static void choke_zap_head_holes(struct choke_sched_data *q)
{
	do {
		q->head = (q->head + 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->head] == NULL);
}

/* Move tail pointer backwards to reuse holes */
static void choke_zap_tail_holes(struct choke_sched_data *q)
{
	do {
		q->tail = (q->tail - 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->tail] == NULL);
}

/* Drop packet from queue array by creating a "hole" */
static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx,
			      struct sk_buff **to_free)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = q->tab[idx];

	q->tab[idx] = NULL;

	if (idx == q->head)
		choke_zap_head_holes(q);
	if (idx == q->tail)
		choke_zap_tail_holes(q);

	qdisc_qstats_backlog_dec(sch, skb);
	qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
	qdisc_drop(skb, sch, to_free);
	--sch->q.qlen;
}

struct choke_skb_cb {
	u16			classid;
	u8			keys_valid;
	struct flow_keys_digest	keys;
};

static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb));
	return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
}
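
/*
 * Per-packet state (classid and the cached flow key digest) is kept in the
 * qdisc private area of skb->cb; qdisc_cb_private_validate() ensures the
 * struct fits in that area.
 */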

static inline void choke_set_classid(struct sk_buff *skb, u16 classid)
{
	choke_skb_cb(skb)->classid = classid;
}

static u16 choke_get_classid(const struct sk_buff *skb)
{
	return choke_skb_cb(skb)->classid;
}

/*
 * Compare flow of two packets
 *  Returns true only if source and destination address and port match.
 *          false for special cases
 */
static bool choke_match_flow(struct sk_buff *skb1,
			     struct sk_buff *skb2)
{
	struct flow_keys temp;

	if (skb1->protocol != skb2->protocol)
		return false;

	if (!choke_skb_cb(skb1)->keys_valid) {
		choke_skb_cb(skb1)->keys_valid = 1;
		skb_flow_dissect_flow_keys(skb1, &temp, 0);
		make_flow_keys_digest(&choke_skb_cb(skb1)->keys, &temp);
	}

	if (!choke_skb_cb(skb2)->keys_valid) {
		choke_skb_cb(skb2)->keys_valid = 1;
		skb_flow_dissect_flow_keys(skb2, &temp, 0);
		make_flow_keys_digest(&choke_skb_cb(skb2)->keys, &temp);
	}

	return !memcmp(&choke_skb_cb(skb1)->keys,
		       &choke_skb_cb(skb2)->keys,
		       sizeof(choke_skb_cb(skb1)->keys));
}
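
/*
 * Flow keys are dissected at most once per packet: the digest is cached in
 * choke_skb_cb and reused on later comparisons via keys_valid.
 */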

/*
 * Classify flow using either:
 *  1. pre-existing classification result in skb
 *  2. fast internal classification
 *  3. use TC filter based classification
 */
static bool choke_classify(struct sk_buff *skb,
			   struct Qdisc *sch, int *qerr)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	fl = rcu_dereference_bh(q->filter_list);
	result = tc_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return false;
		}
#endif
		choke_set_classid(skb, TC_H_MIN(res.classid));
		return true;
	}

	return false;
}

/*
 * Select a packet at random from queue
 * HACK: since queue can have holes from previous deletions, retry several
 * times to find a random skb but then just give up and return the head
 * Will return NULL if queue is empty (q->head == q->tail)
 */
static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
					 unsigned int *pidx)
{
	struct sk_buff *skb;
	int retrys = 3;

	do {
		*pidx = (q->head + prandom_u32_max(choke_len(q))) & q->tab_mask;
		skb = q->tab[*pidx];
		if (skb)
			return skb;
	} while (--retrys > 0);

	return q->tab[*pidx = q->head];
}
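
/*
 * Bounding the retries keeps the random pick O(1); after three unlucky hits
 * on holes we simply fall back to the packet at the head of the queue.
 */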

/*
 * Compare new packet with random packet in queue
 * returns true if matched and sets *pidx
 */
static bool choke_match_random(const struct choke_sched_data *q,
			       struct sk_buff *nskb,
			       unsigned int *pidx)
{
	struct sk_buff *oskb;

	if (q->head == q->tail)
		return false;

	oskb = choke_peek_random(q, pidx);
	if (rcu_access_pointer(q->filter_list))
		return choke_get_classid(nskb) == choke_get_classid(oskb);

	return choke_match_flow(oskb, nskb);
}

static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	struct choke_sched_data *q = qdisc_priv(sch);
	const struct red_parms *p = &q->parms;

	if (rcu_access_pointer(q->filter_list)) {
		/* If using external classifiers, get result and record it. */
		if (!choke_classify(skb, sch, &ret))
			goto other_drop;	/* Packet was eaten by filter */
	}

	choke_skb_cb(skb)->keys_valid = 0;
	/* Compute average queue usage (see RED) */
	q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	/* Is queue small? */
	if (q->vars.qavg <= p->qth_min)
		q->vars.qcount = -1;
	else {
		unsigned int idx;

		/* Draw a packet at random from queue and compare flow */
		if (choke_match_random(q, skb, &idx)) {
			q->stats.matched++;
			choke_drop_by_idx(sch, idx, to_free);
			goto congestion_drop;
		}

		/* Queue is large, always mark/drop */
		if (q->vars.qavg > p->qth_max) {
			q->vars.qcount = -1;

			qdisc_qstats_overlimit(sch);
			if (use_harddrop(q) || !use_ecn(q) ||
			    !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}

			q->stats.forced_mark++;
		} else if (++q->vars.qcount) {
			if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
				q->vars.qcount = 0;
				q->vars.qR = red_random(p);

				qdisc_qstats_overlimit(sch);
				if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
					q->stats.prob_drop++;
					goto congestion_drop;
				}

				q->stats.prob_mark++;
			}
		} else
			q->vars.qR = red_random(p);
	}

	/* Admit new packet */
	if (sch->q.qlen < q->limit) {
		q->tab[q->tail] = skb;
		q->tail = (q->tail + 1) & q->tab_mask;
		++sch->q.qlen;
		qdisc_qstats_backlog_inc(sch, skb);
		return NET_XMIT_SUCCESS;
	}

	q->stats.pdrop++;
	return qdisc_drop(skb, sch, to_free);

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;

other_drop:
	if (ret & __NET_XMIT_BYPASS)
		qdisc_qstats_drop(sch);
	__qdisc_drop(skb, to_free);
	return ret;
}
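
/*
 * Summary of the enqueue decision above:
 *   qavg <= qth_min              -> admit (subject to the hard limit)
 *   random peer from same flow   -> drop both packets (the CHOKe step)
 *   qavg >  qth_max              -> ECN-mark or drop (forced)
 *   qth_min < qavg <= qth_max    -> ECN-mark or drop with RED probability
 *   sch->q.qlen >= limit         -> tail drop (pdrop)
 */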

static struct sk_buff *choke_dequeue(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (q->head == q->tail) {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
		return NULL;
	}

	skb = q->tab[q->head];
	q->tab[q->head] = NULL;
	choke_zap_head_holes(q);
	--sch->q.qlen;
	qdisc_qstats_backlog_dec(sch, skb);
	qdisc_bstats_update(sch, skb);

	return skb;
}

static void choke_reset(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	while (q->head != q->tail) {
		struct sk_buff *skb = q->tab[q->head];

		q->head = (q->head + 1) & q->tab_mask;
		if (!skb)
			continue;
		rtnl_qdisc_drop(skb, sch);
	}

	sch->q.qlen = 0;
	sch->qstats.backlog = 0;
	memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
	q->head = q->tail = 0;
	red_restart(&q->vars);
}

static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
	[TCA_CHOKE_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_CHOKE_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_CHOKE_MAX_P]	= { .type = NLA_U32 },
};


static void choke_free(void *addr)
{
	kvfree(addr);
}

static int choke_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CHOKE_MAX + 1];
	const struct tc_red_qopt *ctl;
	int err;
	struct sk_buff **old = NULL;
	unsigned int mask;
	u32 max_P;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CHOKE_MAX, opt, choke_policy);
	if (err < 0)
		return err;

	if (tb[TCA_CHOKE_PARMS] == NULL ||
	    tb[TCA_CHOKE_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_CHOKE_PARMS]);

	if (ctl->limit > CHOKE_MAX_QUEUE)
		return -EINVAL;

	mask = roundup_pow_of_two(ctl->limit + 1) - 1;
	if (mask != q->tab_mask) {
		struct sk_buff **ntab;

		ntab = kcalloc(mask + 1, sizeof(struct sk_buff *),
			       GFP_KERNEL | __GFP_NOWARN);
		if (!ntab)
			ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
		if (!ntab)
			return -ENOMEM;

		sch_tree_lock(sch);
		old = q->tab;
		if (old) {
			unsigned int oqlen = sch->q.qlen, tail = 0;
			unsigned dropped = 0;

			while (q->head != q->tail) {
				struct sk_buff *skb = q->tab[q->head];

				q->head = (q->head + 1) & q->tab_mask;
				if (!skb)
					continue;
				if (tail < mask) {
					ntab[tail++] = skb;
					continue;
				}
				dropped += qdisc_pkt_len(skb);
				qdisc_qstats_backlog_dec(sch, skb);
				--sch->q.qlen;
				rtnl_qdisc_drop(skb, sch);
			}
			qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
			q->head = 0;
			q->tail = tail;
		}

		q->tab_mask = mask;
		q->tab = ntab;
	} else
		sch_tree_lock(sch);

	q->flags = ctl->flags;
	q->limit = ctl->limit;

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_CHOKE_STAB]),
		      max_P);
	red_set_vars(&q->vars);

	if (q->head == q->tail)
		red_end_of_idle_period(&q->vars);

	sch_tree_unlock(sch);
	choke_free(old);
	return 0;
}
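
/*
 * A change of "limit" resizes the table to roundup_pow_of_two(limit + 1)
 * entries: existing packets are migrated to the new table and any that no
 * longer fit are dropped, with qlen and backlog adjusted accordingly.
 */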

static int choke_init(struct Qdisc *sch, struct nlattr *opt)
{
	return choke_change(sch, opt);
}

static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct tc_choke_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.matched = q->stats.matched,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static void choke_destroy(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	choke_free(q->tab);
}

static struct sk_buff *choke_peek_head(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	return (q->head != q->tail) ? q->tab[q->head] : NULL;
}

static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
	.id		=	"choke",
	.priv_size	=	sizeof(struct choke_sched_data),

	.enqueue	=	choke_enqueue,
	.dequeue	=	choke_dequeue,
	.peek		=	choke_peek_head,
	.init		=	choke_init,
	.destroy	=	choke_destroy,
	.reset		=	choke_reset,
	.change		=	choke_change,
	.dump		=	choke_dump,
	.dump_stats	=	choke_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init choke_module_init(void)
{
	return register_qdisc(&choke_qdisc_ops);
}

static void __exit choke_module_exit(void)
{
	unregister_qdisc(&choke_qdisc_ops);
}

module_init(choke_module_init)
module_exit(choke_module_exit)

MODULE_LICENSE("GPL");