// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
 *
 * Copyright (C) 2013-2015 Eric Dumazet <edumazet@google.com>
 *
 * Meant to be used mostly for locally generated traffic:
 * fast classification depends on skb->sk being set before reaching us.
 * If not (router workload), we use rxhash as a fallback, with a 32-bit wide hash.
 * All packets belonging to a socket are considered a 'flow'.
 *
 * Flows are dynamically allocated and stored in a hash table of RB trees.
 * They are also part of one Round Robin list ('new' or 'old' flows).
 *
 * Burst avoidance (aka pacing) capability:
 *
 * A transport (e.g. TCP) can set a rate in sk->sk_pacing_rate, enqueue a
 * bunch of packets, and this packet scheduler adds delay between
 * packets to respect the rate limitation.
 *
 * enqueue():
 * - look up one RB tree (out of 1024 or more) to find the flow.
 *   If the flow does not exist, create it and add it to the tree.
 *   Add the skb to the per-flow list of skbs (a FIFO).
 * - Use a special FIFO for high-priority packets.
 *
 * dequeue(): serves flows in Round Robin.
 * Note: when a flow becomes empty, we do not immediately remove it from
 * the RB trees, for performance reasons (it is expected to send additional
 * packets, or the SLAB cache will reuse the socket for another flow).
 */
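/* Example userspace setup (illustrative only, not part of this file): fq is
 * usually installed as a root qdisc with iproute2's tc; the device name and
 * parameter values below are arbitrary:
 *
 *   tc qdisc replace dev eth0 root fq
 *   tc qdisc change dev eth0 root fq limit 10000 flow_limit 100 maxrate 1gbit
 *
 * Each option maps to one of the TCA_FQ_* netlink attributes handled in
 * fq_change() below.
 */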

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/hash.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/tcp.h>

struct fq_skb_cb {
        u64 time_to_send;
};

static inline struct fq_skb_cb *fq_skb_cb(struct sk_buff *skb)
{
        qdisc_cb_private_validate(skb, sizeof(struct fq_skb_cb));
        return (struct fq_skb_cb *)qdisc_skb_cb(skb)->data;
}

/*
 * Per flow structure, dynamically allocated.
 * If packets have a monotonically increasing time_to_send, they are placed
 * in O(1) at the tail of a linear list (head, tail); otherwise they are
 * placed in an rbtree (t_root).
 */
struct fq_flow {
        struct rb_root t_root;
        struct sk_buff *head;           /* list of skbs for this flow: first skb */
        union {
                struct sk_buff *tail;   /* last skb in the list */
                unsigned long  age;     /* jiffies when flow was emptied, for gc */
        };
        struct rb_node fq_node;         /* anchor in fq_root[] trees */
        struct sock    *sk;
        int            qlen;            /* number of packets in flow queue */
        int            credit;
        u32            socket_hash;     /* sk_hash */
        struct fq_flow *next;           /* next pointer in RR lists, or &detached */

        struct rb_node rate_node;       /* anchor in q->delayed tree */
        u64            time_next_packet;
};

struct fq_flow_head {
        struct fq_flow *first;
        struct fq_flow *last;
};

struct fq_sched_data {
        struct fq_flow_head new_flows;

        struct fq_flow_head old_flows;

        struct rb_root delayed;         /* for rate limited flows */
        u64 time_next_delayed_flow;
        unsigned long unthrottle_latency_ns;

        struct fq_flow internal;        /* for non-classified or high-prio packets */
        u32 quantum;
        u32 initial_quantum;
        u32 flow_refill_delay;
        u32 flow_plimit;                /* max packets per flow */
        unsigned long flow_max_rate;    /* optional max rate per flow */
        u64 ce_threshold;
        u32 orphan_mask;                /* mask for orphaned skb */
        u32 low_rate_threshold;
        struct rb_root *fq_root;
        u8 rate_enable;
        u8 fq_trees_log;

        u32 flows;
        u32 inactive_flows;
        u32 throttled_flows;

        u64 stat_gc_flows;
        u64 stat_internal_packets;
        u64 stat_throttled;
        u64 stat_ce_mark;
        u64 stat_flows_plimit;
        u64 stat_pkts_too_long;
        u64 stat_allocation_errors;
        struct qdisc_watchdog watchdog;
};

/* Special values for f->next: &detached marks a flow that is not on the
 * old/new RR lists, &throttled marks a flow parked in the q->delayed rbtree.
 */
static struct fq_flow detached, throttled;

static void fq_flow_set_detached(struct fq_flow *f)
{
        f->next = &detached;
        f->age = jiffies;
}

static bool fq_flow_is_detached(const struct fq_flow *f)
{
        return f->next == &detached;
}

static bool fq_flow_is_throttled(const struct fq_flow *f)
{
        return f->next == &throttled;
}

static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
{
        if (head->first)
                head->last->next = flow;
        else
                head->first = flow;
        head->last = flow;
        flow->next = NULL;
}

static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
        rb_erase(&f->rate_node, &q->delayed);
        q->throttled_flows--;
        fq_flow_add_tail(&q->old_flows, f);
}

static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
        struct rb_node **p = &q->delayed.rb_node, *parent = NULL;

        while (*p) {
                struct fq_flow *aux;

                parent = *p;
                aux = rb_entry(parent, struct fq_flow, rate_node);
                if (f->time_next_packet >= aux->time_next_packet)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&f->rate_node, parent, p);
        rb_insert_color(&f->rate_node, &q->delayed);
        q->throttled_flows++;
        q->stat_throttled++;

        f->next = &throttled;
        if (q->time_next_delayed_flow > f->time_next_packet)
                q->time_next_delayed_flow = f->time_next_packet;
}

static struct kmem_cache *fq_flow_cachep __read_mostly;

/* limit number of collected flows per round */
#define FQ_GC_MAX 8
#define FQ_GC_AGE (3*HZ)

static bool fq_gc_candidate(const struct fq_flow *f)
{
        return fq_flow_is_detached(f) &&
               time_after(jiffies, f->age + FQ_GC_AGE);
}

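/* Walk the rbtree on the path toward @sk and opportunistically free up to
 * FQ_GC_MAX flows that have been detached for longer than FQ_GC_AGE.
 */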
static void fq_gc(struct fq_sched_data *q,
                  struct rb_root *root,
                  struct sock *sk)
{
        struct fq_flow *f, *tofree[FQ_GC_MAX];
        struct rb_node **p, *parent;
        int fcnt = 0;

        p = &root->rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;

                f = rb_entry(parent, struct fq_flow, fq_node);
                if (f->sk == sk)
                        break;

                if (fq_gc_candidate(f)) {
                        tofree[fcnt++] = f;
                        if (fcnt == FQ_GC_MAX)
                                break;
                }

                if (f->sk > sk)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }

        q->flows -= fcnt;
        q->inactive_flows -= fcnt;
        q->stat_gc_flows += fcnt;
        while (fcnt) {
                struct fq_flow *f = tofree[--fcnt];

                rb_erase(&f->fq_node, root);
                kmem_cache_free(fq_flow_cachep, f);
        }
}

static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
{
        struct rb_node **p, *parent;
        struct sock *sk = skb->sk;
        struct rb_root *root;
        struct fq_flow *f;

        /* warning: no starvation prevention... */
        if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
                return &q->internal;

        /* SYNACK messages are attached to a TCP_NEW_SYN_RECV request socket
         * or a listener (SYNCOOKIE mode).
         * 1) request sockets are not full-blown,
         *    they do not contain sk_pacing_rate
         * 2) They are not part of a 'flow' yet
         * 3) We do not want to rate limit them (e.g. during a SYN flood),
         *    especially if the listener set SO_MAX_PACING_RATE
         * 4) We pretend they are orphaned
         */
        if (!sk || sk_listener(sk)) {
                unsigned long hash = skb_get_hash(skb) & q->orphan_mask;

                /* By forcing the low order bit to 1, we make sure we do not
                 * collide with a local flow (socket pointers are word aligned)
                 */
                sk = (struct sock *)((hash << 1) | 1UL);
                skb_orphan(skb);
        } else if (sk->sk_state == TCP_CLOSE) {
                unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
                /*
                 * Sockets in TCP_CLOSE are not connected.
                 * The typical use case is UDP sockets: they can send packets
                 * with sendto() to many different destinations.
                 * We probably could use a generic bit advertising
                 * non-connected sockets, instead of sk_state == TCP_CLOSE,
                 * if we cared enough.
                 */
                sk = (struct sock *)((hash << 1) | 1UL);
        }

        root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)];

        if (q->flows >= (2U << q->fq_trees_log) &&
            q->inactive_flows > q->flows/2)
                fq_gc(q, root, sk);

        p = &root->rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;

                f = rb_entry(parent, struct fq_flow, fq_node);
                if (f->sk == sk) {
                        /* socket might have been reallocated, so check
                         * if its sk_hash is the same.
                         * If not, we need to refill credit with the
                         * initial quantum.
                         */
                        if (unlikely(skb->sk == sk &&
                                     f->socket_hash != sk->sk_hash)) {
                                f->credit = q->initial_quantum;
                                f->socket_hash = sk->sk_hash;
                                if (q->rate_enable)
                                        smp_store_release(&sk->sk_pacing_status,
                                                          SK_PACING_FQ);
                                if (fq_flow_is_throttled(f))
                                        fq_flow_unset_throttled(q, f);
                                f->time_next_packet = 0ULL;
                        }
                        return f;
                }
                if (f->sk > sk)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }

        f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!f)) {
                q->stat_allocation_errors++;
                return &q->internal;
        }
        /* f->t_root is already zeroed after kmem_cache_zalloc() */

        fq_flow_set_detached(f);
        f->sk = sk;
        if (skb->sk == sk) {
                f->socket_hash = sk->sk_hash;
                if (q->rate_enable)
                        smp_store_release(&sk->sk_pacing_status,
                                          SK_PACING_FQ);
        }
        f->credit = q->initial_quantum;

        rb_link_node(&f->fq_node, parent, p);
        rb_insert_color(&f->fq_node, root);

        q->flows++;
        q->inactive_flows++;
        return f;
}

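/* Return (without dequeuing) the skb with the smallest time_to_send, looking
 * at both the in-order linear list and the rbtree of out-of-order skbs.
 */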
static struct sk_buff *fq_peek(struct fq_flow *flow)
{
        struct sk_buff *skb = skb_rb_first(&flow->t_root);
        struct sk_buff *head = flow->head;

        if (!skb)
                return head;

        if (!head)
                return skb;

        if (fq_skb_cb(skb)->time_to_send < fq_skb_cb(head)->time_to_send)
                return skb;
        return head;
}

static void fq_erase_head(struct Qdisc *sch, struct fq_flow *flow,
                          struct sk_buff *skb)
{
        if (skb == flow->head) {
                flow->head = skb->next;
        } else {
                rb_erase(&skb->rbnode, &flow->t_root);
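                /* skb->rbnode shares storage with skb->next/prev/dev (they
                 * live in the same union in struct sk_buff), so restore
                 * skb->dev after taking the skb out of the rbtree.
                 */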
                skb->dev = qdisc_dev(sch);
        }
}

/* remove one skb from head of flow queue */
static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
{
        struct sk_buff *skb = fq_peek(flow);

        if (skb) {
                fq_erase_head(sch, flow, skb);
                skb_mark_not_on_list(skb);
                flow->qlen--;
                qdisc_qstats_backlog_dec(sch, skb);
                sch->q.qlen--;
        }
        return skb;
}

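/* Queue @skb on @flow, ordered by time_to_send: O(1) tail append when
 * timestamps arrive in order, rbtree insertion otherwise.
 */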
static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
{
        struct rb_node **p, *parent;
        struct sk_buff *head, *aux;

        fq_skb_cb(skb)->time_to_send = skb->tstamp ?: ktime_get_ns();

        head = flow->head;
        if (!head ||
            fq_skb_cb(skb)->time_to_send >= fq_skb_cb(flow->tail)->time_to_send) {
                if (!head)
                        flow->head = skb;
                else
                        flow->tail->next = skb;
                flow->tail = skb;
                skb->next = NULL;
                return;
        }

        p = &flow->t_root.rb_node;
        parent = NULL;

        while (*p) {
                parent = *p;
                aux = rb_to_skb(parent);
                if (fq_skb_cb(skb)->time_to_send >= fq_skb_cb(aux)->time_to_send)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&skb->rbnode, parent, p);
        rb_insert_color(&skb->rbnode, &flow->t_root);
}

static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                      struct sk_buff **to_free)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct fq_flow *f;

        if (unlikely(sch->q.qlen >= sch->limit))
                return qdisc_drop(skb, sch, to_free);

        f = fq_classify(skb, q);
        if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
                q->stat_flows_plimit++;
                return qdisc_drop(skb, sch, to_free);
        }

        f->qlen++;
        qdisc_qstats_backlog_inc(sch, skb);
        if (fq_flow_is_detached(f)) {
                fq_flow_add_tail(&q->new_flows, f);
                if (time_after(jiffies, f->age + q->flow_refill_delay))
                        f->credit = max_t(u32, f->credit, q->quantum);
                q->inactive_flows--;
        }

        /* Note: this overwrites f->age */
        flow_queue_add(f, skb);

        if (unlikely(f == &q->internal)) {
                q->stat_internal_packets++;
        }
        sch->q.qlen++;

        return NET_XMIT_SUCCESS;
}

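/* Move flows whose release time has passed from the q->delayed rbtree back
 * onto the old_flows list, and fold the observed unthrottle delay into a
 * simple EWMA (1/8 weight per sample) for diagnostics.
 */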
static void fq_check_throttled(struct fq_sched_data *q, u64 now)
{
        unsigned long sample;
        struct rb_node *p;

        if (q->time_next_delayed_flow > now)
                return;

        /* Update unthrottle latency EWMA.
         * This is cheap and can help in diagnosing timer/latency problems.
         */
        sample = (unsigned long)(now - q->time_next_delayed_flow);
        q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
        q->unthrottle_latency_ns += sample >> 3;

        q->time_next_delayed_flow = ~0ULL;
        while ((p = rb_first(&q->delayed)) != NULL) {
                struct fq_flow *f = rb_entry(p, struct fq_flow, rate_node);

                if (f->time_next_packet > now) {
                        q->time_next_delayed_flow = f->time_next_packet;
                        break;
                }
                fq_flow_unset_throttled(q, f);
        }
}

static struct sk_buff *fq_dequeue(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct fq_flow_head *head;
        struct sk_buff *skb;
        struct fq_flow *f;
        unsigned long rate;
        u32 plen;
        u64 now;

        if (!sch->q.qlen)
                return NULL;

        skb = fq_dequeue_head(sch, &q->internal);
        if (skb)
                goto out;

        now = ktime_get_ns();
        fq_check_throttled(q, now);
begin:
        head = &q->new_flows;
        if (!head->first) {
                head = &q->old_flows;
                if (!head->first) {
                        if (q->time_next_delayed_flow != ~0ULL)
                                qdisc_watchdog_schedule_ns(&q->watchdog,
                                                           q->time_next_delayed_flow);
                        return NULL;
                }
        }
        f = head->first;

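        /* Deficit Round Robin: a flow that exhausted its credit gets a fresh
         * quantum and is rotated to the tail of the old_flows list.
         */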
        if (f->credit <= 0) {
                f->credit += q->quantum;
                head->first = f->next;
                fq_flow_add_tail(&q->old_flows, f);
                goto begin;
        }

        skb = fq_peek(f);
        if (skb) {
                u64 time_next_packet = max_t(u64, fq_skb_cb(skb)->time_to_send,
                                             f->time_next_packet);

                if (now < time_next_packet) {
                        head->first = f->next;
                        f->time_next_packet = time_next_packet;
                        fq_flow_set_throttled(q, f);
                        goto begin;
                }
                if ((s64)(now - time_next_packet - q->ce_threshold) > 0) {
                        INET_ECN_set_ce(skb);
                        q->stat_ce_mark++;
                }
        }

        skb = fq_dequeue_head(sch, f);
        if (!skb) {
                head->first = f->next;
                /* force a pass through old_flows to prevent starvation */
                if ((head == &q->new_flows) && q->old_flows.first) {
                        fq_flow_add_tail(&q->old_flows, f);
                } else {
                        fq_flow_set_detached(f);
                        q->inactive_flows++;
                }
                goto begin;
        }
        prefetch(&skb->end);
        plen = qdisc_pkt_len(skb);
        f->credit -= plen;

        if (!q->rate_enable)
                goto out;

        rate = q->flow_max_rate;

        /* If an EDT time was provided for this skb, we need to
         * update f->time_next_packet only if this qdisc enforces
         * a flow max rate.
         */
        if (!skb->tstamp) {
                if (skb->sk)
                        rate = min(skb->sk->sk_pacing_rate, rate);

                if (rate <= q->low_rate_threshold) {
                        f->credit = 0;
                } else {
                        plen = max(plen, q->quantum);
                        if (f->credit > 0)
                                goto out;
                }
        }
        if (rate != ~0UL) {
                u64 len = (u64)plen * NSEC_PER_SEC;

                if (likely(rate))
                        len = div64_ul(len, rate);
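                /* Worked example (illustrative numbers): a 1514 byte packet
                 * at rate = 12500000 B/s (100 Mbit/s) gives
                 * len = 1514 * NSEC_PER_SEC / 12500000 ~= 121 usec until the
                 * flow may send its next packet.
                 */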
                /* Since the socket rate can change later,
                 * clamp the delay to 1 second.
                 * Really, providers of too-big packets should be fixed!
                 */
                if (unlikely(len > NSEC_PER_SEC)) {
                        len = NSEC_PER_SEC;
                        q->stat_pkts_too_long++;
                }
                /* Account for scheduler/timer drift:
                 * f->time_next_packet was set when the prior packet was sent,
                 * and the current time (@now) can be too late by tens of usec.
                 */
                if (f->time_next_packet)
                        len -= min(len/2, now - f->time_next_packet);
                f->time_next_packet = now + len;
        }
out:
        qdisc_bstats_update(sch, skb);
        return skb;
}

static void fq_flow_purge(struct fq_flow *flow)
{
        struct rb_node *p = rb_first(&flow->t_root);

        while (p) {
                struct sk_buff *skb = rb_to_skb(p);

                p = rb_next(p);
                rb_erase(&skb->rbnode, &flow->t_root);
                rtnl_kfree_skbs(skb, skb);
        }
        rtnl_kfree_skbs(flow->head, flow->tail);
        flow->head = NULL;
        flow->qlen = 0;
}

static void fq_reset(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct rb_root *root;
        struct rb_node *p;
        struct fq_flow *f;
        unsigned int idx;

        sch->q.qlen = 0;
        sch->qstats.backlog = 0;

        fq_flow_purge(&q->internal);

        if (!q->fq_root)
                return;

        for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
                root = &q->fq_root[idx];
                while ((p = rb_first(root)) != NULL) {
                        f = rb_entry(p, struct fq_flow, fq_node);
                        rb_erase(p, root);

                        fq_flow_purge(f);

                        kmem_cache_free(fq_flow_cachep, f);
                }
        }
        q->new_flows.first = NULL;
        q->old_flows.first = NULL;
        q->delayed = RB_ROOT;
        q->flows = 0;
        q->inactive_flows = 0;
        q->throttled_flows = 0;
}

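/* Migrate all flows from the old hash array to the new one, freeing flows
 * that became gc candidates instead of re-inserting them.
 */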
static void fq_rehash(struct fq_sched_data *q,
                      struct rb_root *old_array, u32 old_log,
                      struct rb_root *new_array, u32 new_log)
{
        struct rb_node *op, **np, *parent;
        struct rb_root *oroot, *nroot;
        struct fq_flow *of, *nf;
        int fcnt = 0;
        u32 idx;

        for (idx = 0; idx < (1U << old_log); idx++) {
                oroot = &old_array[idx];
                while ((op = rb_first(oroot)) != NULL) {
                        rb_erase(op, oroot);
                        of = rb_entry(op, struct fq_flow, fq_node);
                        if (fq_gc_candidate(of)) {
                                fcnt++;
                                kmem_cache_free(fq_flow_cachep, of);
                                continue;
                        }
                        nroot = &new_array[hash_ptr(of->sk, new_log)];

                        np = &nroot->rb_node;
                        parent = NULL;
                        while (*np) {
                                parent = *np;

                                nf = rb_entry(parent, struct fq_flow, fq_node);
                                BUG_ON(nf->sk == of->sk);

                                if (nf->sk > of->sk)
                                        np = &parent->rb_right;
                                else
                                        np = &parent->rb_left;
                        }

                        rb_link_node(&of->fq_node, parent, np);
                        rb_insert_color(&of->fq_node, nroot);
                }
        }
        q->flows -= fcnt;
        q->inactive_flows -= fcnt;
        q->stat_gc_flows += fcnt;
}

static void fq_free(void *addr)
{
        kvfree(addr);
}

static int fq_resize(struct Qdisc *sch, u32 log)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct rb_root *array;
        void *old_fq_root;
        u32 idx;

        if (q->fq_root && log == q->fq_trees_log)
                return 0;

        /* If XPS was set up, we can allocate memory on the right NUMA node */
        array = kvmalloc_node(sizeof(struct rb_root) << log, GFP_KERNEL | __GFP_RETRY_MAYFAIL,
                              netdev_queue_numa_node_read(sch->dev_queue));
        if (!array)
                return -ENOMEM;

        for (idx = 0; idx < (1U << log); idx++)
                array[idx] = RB_ROOT;

        sch_tree_lock(sch);

        old_fq_root = q->fq_root;
        if (old_fq_root)
                fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);

        q->fq_root = array;
        q->fq_trees_log = log;

        sch_tree_unlock(sch);

        fq_free(old_fq_root);

        return 0;
}

static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
        [TCA_FQ_PLIMIT]                 = { .type = NLA_U32 },
        [TCA_FQ_FLOW_PLIMIT]            = { .type = NLA_U32 },
        [TCA_FQ_QUANTUM]                = { .type = NLA_U32 },
        [TCA_FQ_INITIAL_QUANTUM]        = { .type = NLA_U32 },
        [TCA_FQ_RATE_ENABLE]            = { .type = NLA_U32 },
        [TCA_FQ_FLOW_DEFAULT_RATE]      = { .type = NLA_U32 },
        [TCA_FQ_FLOW_MAX_RATE]          = { .type = NLA_U32 },
        [TCA_FQ_BUCKETS_LOG]            = { .type = NLA_U32 },
        [TCA_FQ_FLOW_REFILL_DELAY]      = { .type = NLA_U32 },
        [TCA_FQ_ORPHAN_MASK]            = { .type = NLA_U32 },
        [TCA_FQ_LOW_RATE_THRESHOLD]     = { .type = NLA_U32 },
        [TCA_FQ_CE_THRESHOLD]           = { .type = NLA_U32 },
};

static int fq_change(struct Qdisc *sch, struct nlattr *opt,
                     struct netlink_ext_ack *extack)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_FQ_MAX + 1];
        int err, drop_count = 0;
        unsigned int drop_len = 0;
        u32 fq_log;

        if (!opt)
                return -EINVAL;

        err = nla_parse_nested_deprecated(tb, TCA_FQ_MAX, opt, fq_policy,
                                          NULL);
        if (err < 0)
                return err;

        sch_tree_lock(sch);

        fq_log = q->fq_trees_log;

        if (tb[TCA_FQ_BUCKETS_LOG]) {
                u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);

                if (nval >= 1 && nval <= ilog2(256*1024))
                        fq_log = nval;
                else
                        err = -EINVAL;
        }
        if (tb[TCA_FQ_PLIMIT])
                sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);

        if (tb[TCA_FQ_FLOW_PLIMIT])
                q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);

        if (tb[TCA_FQ_QUANTUM]) {
                u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);

                if (quantum > 0 && quantum <= (1 << 20)) {
                        q->quantum = quantum;
                } else {
                        NL_SET_ERR_MSG_MOD(extack, "invalid quantum");
                        err = -EINVAL;
                }
        }

        if (tb[TCA_FQ_INITIAL_QUANTUM])
                q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);

        if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
                pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
                                    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));

        if (tb[TCA_FQ_FLOW_MAX_RATE]) {
                u32 rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);

                q->flow_max_rate = (rate == ~0U) ? ~0UL : rate;
        }
        if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
                q->low_rate_threshold =
                        nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]);

        if (tb[TCA_FQ_RATE_ENABLE]) {
                u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);

                if (enable <= 1)
                        q->rate_enable = enable;
                else
                        err = -EINVAL;
        }

        if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
                u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);

                q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
        }

        if (tb[TCA_FQ_ORPHAN_MASK])
                q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);

        if (tb[TCA_FQ_CE_THRESHOLD])
                q->ce_threshold = (u64)NSEC_PER_USEC *
                                  nla_get_u32(tb[TCA_FQ_CE_THRESHOLD]);

        if (!err) {
                sch_tree_unlock(sch);
                err = fq_resize(sch, fq_log);
                sch_tree_lock(sch);
        }
        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = fq_dequeue(sch);

                if (!skb)
                        break;
                drop_len += qdisc_pkt_len(skb);
                rtnl_kfree_skbs(skb, skb);
                drop_count++;
        }
        qdisc_tree_reduce_backlog(sch, drop_count, drop_len);

        sch_tree_unlock(sch);
        return err;
}

static void fq_destroy(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);

        fq_reset(sch);
        fq_free(q->fq_root);
        qdisc_watchdog_cancel(&q->watchdog);
}

static int fq_init(struct Qdisc *sch, struct nlattr *opt,
                   struct netlink_ext_ack *extack)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        int err;

        sch->limit = 10000;
        q->flow_plimit = 100;
        q->quantum = 2 * psched_mtu(qdisc_dev(sch));
        q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch));
        q->flow_refill_delay = msecs_to_jiffies(40);
        q->flow_max_rate = ~0UL;
        q->time_next_delayed_flow = ~0ULL;
        q->rate_enable = 1;
        q->new_flows.first = NULL;
        q->old_flows.first = NULL;
        q->delayed = RB_ROOT;
        q->fq_root = NULL;
        q->fq_trees_log = ilog2(1024);
        q->orphan_mask = 1024 - 1;
        q->low_rate_threshold = 550000 / 8;

        /* Default ce_threshold of 4294 seconds */
        q->ce_threshold = (u64)NSEC_PER_USEC * ~0U;

        qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);

        if (opt)
                err = fq_change(sch, opt, extack);
        else
                err = fq_resize(sch, q->fq_trees_log);

        return err;
}

static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        u64 ce_threshold = q->ce_threshold;
        struct nlattr *opts;

        opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;

        /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */

        do_div(ce_threshold, NSEC_PER_USEC);

        if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
            nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
            nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
            nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
            nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
            nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE,
                        min_t(unsigned long, q->flow_max_rate, ~0U)) ||
            nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
                        jiffies_to_usecs(q->flow_refill_delay)) ||
            nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
            nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
                        q->low_rate_threshold) ||
            nla_put_u32(skb, TCA_FQ_CE_THRESHOLD, (u32)ce_threshold) ||
            nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
                goto nla_put_failure;

        return nla_nest_end(skb, opts);

nla_put_failure:
        return -1;
}

static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct tc_fq_qd_stats st;

        sch_tree_lock(sch);

        st.gc_flows = q->stat_gc_flows;
        st.highprio_packets = q->stat_internal_packets;
        st.tcp_retrans = 0;
        st.throttled = q->stat_throttled;
        st.flows_plimit = q->stat_flows_plimit;
        st.pkts_too_long = q->stat_pkts_too_long;
        st.allocation_errors = q->stat_allocation_errors;
        st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
        st.flows = q->flows;
        st.inactive_flows = q->inactive_flows;
        st.throttled_flows = q->throttled_flows;
        st.unthrottle_latency_ns = min_t(unsigned long,
                                         q->unthrottle_latency_ns, ~0U);
        st.ce_mark = q->stat_ce_mark;
        sch_tree_unlock(sch);

        return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
        .id             = "fq",
        .priv_size      = sizeof(struct fq_sched_data),

        .enqueue        = fq_enqueue,
        .dequeue        = fq_dequeue,
        .peek           = qdisc_peek_dequeued,
        .init           = fq_init,
        .reset          = fq_reset,
        .destroy        = fq_destroy,
        .change         = fq_change,
        .dump           = fq_dump,
        .dump_stats     = fq_dump_stats,
        .owner          = THIS_MODULE,
};

static int __init fq_module_init(void)
{
        int ret;

        fq_flow_cachep = kmem_cache_create("fq_flow_cache",
                                           sizeof(struct fq_flow),
                                           0, 0, NULL);
        if (!fq_flow_cachep)
                return -ENOMEM;

        ret = register_qdisc(&fq_qdisc_ops);
        if (ret)
                kmem_cache_destroy(fq_flow_cachep);
        return ret;
}

static void __exit fq_module_exit(void)
{
        unregister_qdisc(&fq_qdisc_ops);
        kmem_cache_destroy(fq_flow_cachep);
}

module_init(fq_module_init)
module_exit(fq_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");