/*
 * net/sched/sch_sfq.c	Stochastic Fairness Queueing discipline.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
/*	Stochastic Fairness Queuing algorithm.
 *	=======================================
 *
 *	Source:
 *	Paul E. McKenney "Stochastic Fairness Queuing",
 *	IEEE INFOCOMM'90 Proceedings, San Francisco, 1990.
 *
 *	Paul E. McKenney "Stochastic Fairness Queuing",
 *	"Interworking: Research and Experience", v.2, 1991, p.113-131.
 *
 *	See also:
 *	M. Shreedhar and George Varghese "Efficient Fair
 *	Queuing using Deficit Round Robin", Proc. SIGCOMM 95.
 *
 *	This is not the thing that is usually called (W)FQ nowadays.
 *	It does not use any timestamp mechanism, but instead
 *	processes queues in round-robin order.
 *
 *	ADVANTAGE:
 *
 *	- It is very cheap. Both CPU and memory requirements are minimal.
 *
 *	DRAWBACKS:
 *
 *	- "Stochastic" -> It is not 100% fair.
 *	When hash collisions occur, several flows are considered as one.
 *
 *	- "Round-robin" -> It introduces larger delays than virtual clock
 *	based schemes, and should not be used for isolating interactive
 *	traffic from non-interactive. This means that this scheduler
 *	should be used as a leaf of CBQ or P3, which put interactive traffic
 *	to a higher priority band.
 *
 *	We still need true WFQ for top level CSZ, but using WFQ
 *	for the best effort traffic is absolutely pointless:
 *	SFQ is superior for this purpose.
 *
 *	IMPLEMENTATION:
 *	This implementation limits:
 *	- maximal queue length per flow to 127 packets.
 *	- number of hash buckets to 65536.
 *
 *	It is easy to increase these values, but not in flight.
 */
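
/* Typical userspace setup, for illustration (device name and values are
 * examples, not mandated by this file):
 *
 *	tc qdisc add dev eth0 root handle 1: sfq perturb 10 quantum 1514
 *
 * 'perturb' reseeds the flow hash every 10 seconds; 'quantum' is the
 * per-round byte allotment and should be at least one MTU.
 */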
#define SFQ_MAX_DEPTH		127 /* max number of packets per flow */
#define SFQ_DEFAULT_FLOWS	128
#define SFQ_MAX_FLOWS		(0x10000 - SFQ_MAX_DEPTH - 1) /* max number of flows */
#define SFQ_EMPTY_SLOT		0xffff
#define SFQ_DEFAULT_HASH_DIVISOR 1024
/* We use 16 bits to store allot, and want to handle packets up to 64K.
 * Scale allot by 8 (1<<3) so that no overflow occurs.
 */
#define SFQ_ALLOT_SHIFT		3
#define SFQ_ALLOT_SIZE(X)	DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)
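
/* For instance, a 1514 byte packet costs SFQ_ALLOT_SIZE(1514) =
 * DIV_ROUND_UP(1514, 8) = 190 scaled units, and even a 64K packet
 * costs only 8192 units, well within a signed 16 bit 'allot'.
 */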
/* This type should contain at least SFQ_MAX_DEPTH + 1 + SFQ_MAX_FLOWS values */
typedef u16 sfq_index;

/*
 * We don't use pointers to save space.
 * Small indexes [0 ... SFQ_MAX_FLOWS - 1] are 'pointers' to slots[] array
 * while following values [SFQ_MAX_FLOWS ... SFQ_MAX_FLOWS + SFQ_MAX_DEPTH]
 * are 'pointers' to dep[] array
 */
struct sfq_head {
	sfq_index	next;
	sfq_index	prev;
};
struct sfq_slot {
	struct sk_buff	*skblist_next;
	struct sk_buff	*skblist_prev;
	sfq_index	qlen; /* number of skbs in skblist */
	sfq_index	next; /* next slot in sfq RR chain */
	struct sfq_head dep; /* anchor in dep[] chains */
	unsigned short	hash; /* hash value (index in ht[]) */
	short		allot; /* credit for this slot */

	unsigned int	backlog;
	struct red_vars vars;
};
struct sfq_sched_data {
/* frequently used fields */
	int		limit;		/* limit of total number of packets in this qdisc */
	unsigned int	divisor;	/* number of slots in hash table */
	u8		headdrop;
	u8		maxdepth;	/* limit of packets per flow */

	u32		perturbation;
	u8		cur_depth;	/* depth of longest slot */
	u8		flags;
	unsigned short	scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	sfq_index	*ht;		/* Hash table ('divisor' slots) */
	struct sfq_slot	*slots;		/* Flows table ('maxflows' entries) */

	struct red_parms *red_parms;
	struct tc_sfqred_stats stats;
	struct sfq_slot *tail;		/* current slot in round */

	struct sfq_head	dep[SFQ_MAX_DEPTH + 1];
					/* Linked lists of slots, indexed by depth
					 * dep[0] : list of unused flows
					 * dep[1] : list of flows with 1 packet
					 * dep[X] : list of flows with X packets
					 */

	unsigned int	maxflows;	/* number of flows in flows array */
	int		perturb_period;
	unsigned int	quantum;	/* Allotment per round: MUST BE >= MTU */
	struct timer_list perturb_timer;
};
/*
 * sfq_head are either in a sfq_slot or in dep[] array
 */
static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
{
	if (val < SFQ_MAX_FLOWS)
		return &q->slots[val].dep;
	return &q->dep[val - SFQ_MAX_FLOWS];
}
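
/* Example: with SFQ_MAX_FLOWS == 65408, val == 65411 does not name a
 * slot but 'points' at q->dep[3], the anchor of the list of flows
 * currently holding exactly 3 packets.
 */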
static unsigned int sfq_hash(const struct sfq_sched_data *q,
			     const struct sk_buff *skb)
{
	return skb_get_hash_perturb(skb, q->perturbation) & (q->divisor - 1);
}
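
/* The mask works because sfq_change() only accepts power-of-two
 * divisors: with the default divisor of 1024, (hash & 1023) picks one
 * of buckets 0..1023.
 */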
static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				 int *qerr)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->divisor)
		return TC_H_MIN(skb->priority);

	fl = rcu_dereference_bh(q->filter_list);
	if (!fl)
		return sfq_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tcf_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->divisor)
			return TC_H_MIN(res.classid);
	}
	return 0;
}
/*
 * x : slot number [0 .. SFQ_MAX_FLOWS - 1]
 */
static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	struct sfq_slot *slot = &q->slots[x];
	int qlen = slot->qlen;

	p = qlen + SFQ_MAX_FLOWS;
	n = q->dep[qlen].next;

	slot->dep.next = n;
	slot->dep.prev = p;

	q->dep[qlen].next = x;		/* sfq_dep_head(q, p)->next = x */
	sfq_dep_head(q, n)->prev = x;
}
#define sfq_unlink(q, x, n, p)			\
	do {					\
		n = q->slots[x].dep.next;	\
		p = q->slots[x].dep.prev;	\
		sfq_dep_head(q, p)->next = n;	\
		sfq_dep_head(q, n)->prev = p;	\
	} while (0)
static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = q->slots[x].qlen--;
	if (n == p && q->cur_depth == d)
		q->cur_depth--;
	sfq_link(q, x);
}
static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = ++q->slots[x].qlen;
	if (q->cur_depth < d)
		q->cur_depth = d;
	sfq_link(q, x);
}
/* helper functions : might be changed when/if skb use a standard list_head */

/* remove one skb from tail of slot queue */
static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_prev;

	slot->skblist_prev = skb->prev;
	skb->prev->next = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}
/* remove one skb from head of slot queue */
static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_next;

	slot->skblist_next = skb->next;
	skb->next->prev = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}
static inline void slot_queue_init(struct sfq_slot *slot)
{
	memset(slot, 0, sizeof(*slot));
	slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot;
}
/* add skb to slot queue (tail add) */
static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb)
{
	skb->prev = slot->skblist_prev;
	skb->next = (struct sk_buff *)slot;
	slot->skblist_prev->next = skb;
	slot->skblist_prev = skb;
}
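
/* Note: skblist_next/skblist_prev sit first in struct sfq_slot so that
 * the slot, cast to struct sk_buff *, can serve as the sentinel of its
 * own circular list; an empty slot is thus recognizable by
 * slot->skblist_next == (struct sk_buff *)slot.
 */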
static unsigned int sfq_drop(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index x, d = q->cur_depth;
	struct sk_buff *skb;
	unsigned int len;
	struct sfq_slot *slot;

	/* Queue is full! Find the longest slot and drop tail packet from it */
	if (d > 1) {
		x = q->dep[d].next;
		slot = &q->slots[x];
drop:
		skb = q->headdrop ? slot_dequeue_head(slot) : slot_dequeue_tail(slot);
		len = qdisc_pkt_len(skb);
		slot->backlog -= len;
		sfq_dec(q, x);
		sch->q.qlen--;
		qdisc_qstats_drop(sch);
		qdisc_qstats_backlog_dec(sch, skb);
		kfree_skb(skb);
		return len;
	}

	if (d == 1) {
		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
		x = q->tail->next;
		slot = &q->slots[x];
		q->tail->next = slot->next;
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		goto drop;
	}

	return 0;
}
/* Is ECN parameter configured */
static int sfq_prob_mark(const struct sfq_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

/* Should packets over max threshold just be marked */
static int sfq_hard_mark(const struct sfq_sched_data *q)
{
	return (q->flags & (TC_RED_ECN | TC_RED_HARDDROP)) == TC_RED_ECN;
}

static int sfq_headdrop(const struct sfq_sched_data *q)
{
	return q->headdrop;
}
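
/* Flag combinations, for reference:
 *	TC_RED_ECN alone		-> probabilistic and hard marking
 *	TC_RED_ECN | TC_RED_HARDDROP	-> probabilistic marking, hard drop
 *	neither				-> plain RED style drops
 */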
static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash, dropped;
	sfq_index x, qlen;
	struct sfq_slot *slot;
	int uninitialized_var(ret);
	struct sk_buff *head;
	int delta;

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
	hash--;

	x = q->ht[hash];
	slot = &q->slots[x];
	if (x == SFQ_EMPTY_SLOT) {
		x = q->dep[0].next; /* get a free slot */
		if (x >= SFQ_MAX_FLOWS)
			return qdisc_drop(skb, sch, to_free);
		q->ht[hash] = x;
		slot = &q->slots[x];
		slot->hash = hash;
		slot->backlog = 0; /* should already be 0 anyway... */
		red_set_vars(&slot->vars);
		goto enqueue;
	}
	if (q->red_parms) {
		slot->vars.qavg = red_calc_qavg_no_idle_time(q->red_parms,
							&slot->vars,
							slot->backlog);
		switch (red_action(q->red_parms,
				   &slot->vars,
				   slot->vars.qavg)) {
		case RED_DONT_MARK:
			break;

		case RED_PROB_MARK:
			qdisc_qstats_overlimit(sch);
			if (sfq_prob_mark(q)) {
				/* We know we have at least one packet in queue */
				if (sfq_headdrop(q) &&
				    INET_ECN_set_ce(slot->skblist_next)) {
					q->stats.prob_mark_head++;
					break;
				}
				if (INET_ECN_set_ce(skb)) {
					q->stats.prob_mark++;
					break;
				}
			}
			q->stats.prob_drop++;
			goto congestion_drop;

		case RED_HARD_MARK:
			qdisc_qstats_overlimit(sch);
			if (sfq_hard_mark(q)) {
				/* We know we have at least one packet in queue */
				if (sfq_headdrop(q) &&
				    INET_ECN_set_ce(slot->skblist_next)) {
					q->stats.forced_mark_head++;
					break;
				}
				if (INET_ECN_set_ce(skb)) {
					q->stats.forced_mark++;
					break;
				}
			}
			q->stats.forced_drop++;
			goto congestion_drop;
		}
	}

	if (slot->qlen >= q->maxdepth) {
congestion_drop:
		if (!sfq_headdrop(q))
			return qdisc_drop(skb, sch, to_free);

		/* We know we have at least one packet in queue */
		head = slot_dequeue_head(slot);
		delta = qdisc_pkt_len(head) - qdisc_pkt_len(skb);
		sch->qstats.backlog -= delta;
		slot->backlog -= delta;
		qdisc_drop(head, sch, to_free);

		slot_queue_add(slot, skb);
		qdisc_tree_reduce_backlog(sch, 0, delta);
		return NET_XMIT_CN;
	}

enqueue:
	qdisc_qstats_backlog_inc(sch, skb);
	slot->backlog += qdisc_pkt_len(skb);
	slot_queue_add(slot, skb);
	sfq_inc(q, x);
	if (slot->qlen == 1) {		/* The flow is new */
		if (q->tail == NULL) {	/* It is the first flow */
			slot->next = x;
		} else {
			slot->next = q->tail->next;
			q->tail->next = x;
		}
		/* We put this flow at the end of our flow list.
		 * This might sound unfair for a new flow to wait after old ones,
		 * but we could end up servicing new flows only, and freeze old ones.
		 */
		q->tail = slot;
		/* We could use a bigger initial quantum for new flows */
		slot->allot = q->scaled_quantum;
	}
	if (++sch->q.qlen <= q->limit)
		return NET_XMIT_SUCCESS;

	qlen = slot->qlen;
	dropped = sfq_drop(sch);
	/* Return Congestion Notification only if we dropped a packet
	 * from this flow.
	 */
	if (qlen != slot->qlen) {
		qdisc_tree_reduce_backlog(sch, 0, dropped - qdisc_pkt_len(skb));
		return NET_XMIT_CN;
	}

	/* As we dropped a packet, better let upper stack know this */
	qdisc_tree_reduce_backlog(sch, 1, dropped);
	return NET_XMIT_SUCCESS;
}
static struct sk_buff *
sfq_dequeue(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	sfq_index a, next_a;
	struct sfq_slot *slot;

	/* No active slots */
	if (q->tail == NULL)
		return NULL;

next_slot:
	a = q->tail->next;
	slot = &q->slots[a];
	if (slot->allot <= 0) {
		q->tail = slot;
		slot->allot += q->scaled_quantum;
		goto next_slot;
	}
	skb = slot_dequeue_head(slot);
	sfq_dec(q, a);
	qdisc_bstats_update(sch, skb);
	sch->q.qlen--;
	qdisc_qstats_backlog_dec(sch, skb);
	slot->backlog -= qdisc_pkt_len(skb);
	/* Is the slot empty? */
	if (slot->qlen == 0) {
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		next_a = slot->next;
		if (a == next_a) {
			q->tail = NULL; /* no more active slots */
			return skb;
		}
		q->tail->next = next_a;
	} else {
		slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb));
	}
	return skb;
}
static void
sfq_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = sfq_dequeue(sch)) != NULL)
		rtnl_kfree_skbs(skb, skb);
}
/*
 * When q->perturbation is changed, we rehash all queued skbs
 * to avoid OOO (Out Of Order) effects.
 * We don't use sfq_dequeue()/sfq_enqueue() because we don't want to change
 * counters.
 */
static void sfq_rehash(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	int i;
	struct sfq_slot *slot;
	struct sk_buff_head list;
	int dropped = 0;
	unsigned int drop_len = 0;

	__skb_queue_head_init(&list);

	for (i = 0; i < q->maxflows; i++) {
		slot = &q->slots[i];
		if (!slot->qlen)
			continue;
		while (slot->qlen) {
			skb = slot_dequeue_head(slot);
			sfq_dec(q, i);
			__skb_queue_tail(&list, skb);
		}
		slot->backlog = 0;
		red_set_vars(&slot->vars);
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
	}
	q->tail = NULL;

	while ((skb = __skb_dequeue(&list)) != NULL) {
		unsigned int hash = sfq_hash(q, skb);
		sfq_index x = q->ht[hash];

		slot = &q->slots[x];
		if (x == SFQ_EMPTY_SLOT) {
			x = q->dep[0].next; /* get a free slot */
			if (x >= SFQ_MAX_FLOWS) {
drop:
				qdisc_qstats_backlog_dec(sch, skb);
				drop_len += qdisc_pkt_len(skb);
				kfree_skb(skb);
				dropped++;
				continue;
			}
			q->ht[hash] = x;
			slot = &q->slots[x];
			slot->hash = hash;
		}
		if (slot->qlen >= q->maxdepth)
			goto drop;
		slot_queue_add(slot, skb);
		if (q->red_parms)
			slot->vars.qavg = red_calc_qavg(q->red_parms,
							&slot->vars,
							slot->backlog);
		slot->backlog += qdisc_pkt_len(skb);
		sfq_inc(q, x);
		if (slot->qlen == 1) {		/* The flow is new */
			if (q->tail == NULL) {	/* It is the first flow */
				slot->next = x;
			} else {
				slot->next = q->tail->next;
				q->tail->next = x;
			}
			q->tail = slot;
			slot->allot = q->scaled_quantum;
		}
	}
	sch->q.qlen -= dropped;
	qdisc_tree_reduce_backlog(sch, dropped, drop_len);
}
static void sfq_perturbation(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct sfq_sched_data *q = qdisc_priv(sch);
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	q->perturbation = prandom_u32();
	if (!q->filter_list && q->tail)
		sfq_rehash(sch);
	spin_unlock(root_lock);

	if (q->perturb_period)
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
}
static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tc_sfq_qopt *ctl = nla_data(opt);
	struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
	unsigned int qlen, dropped = 0;
	struct red_parms *p = NULL;

	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
		return -EINVAL;
	if (opt->nla_len >= nla_attr_size(sizeof(*ctl_v1)))
		ctl_v1 = nla_data(opt);
	if (ctl->divisor &&
	    (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
		return -EINVAL;
	if (ctl_v1 && ctl_v1->qth_min) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return -ENOMEM;
	}
	sch_tree_lock(sch);
	if (ctl->quantum) {
		q->quantum = ctl->quantum;
		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	}
	q->perturb_period = ctl->perturb_period * HZ;
	if (ctl->flows)
		q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
	if (ctl->divisor) {
		q->divisor = ctl->divisor;
		q->maxflows = min_t(u32, q->maxflows, q->divisor);
	}
	if (ctl_v1) {
		if (ctl_v1->depth)
			q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
		if (p) {
			swap(q->red_parms, p);
			red_set_parms(q->red_parms,
				      ctl_v1->qth_min, ctl_v1->qth_max,
				      ctl_v1->Wlog,
				      ctl_v1->Plog, ctl_v1->Scell_log,
				      NULL,
				      ctl_v1->max_P);
		}
		q->flags = ctl_v1->flags;
		q->headdrop = ctl_v1->headdrop;
	}
	if (ctl->limit) {
		q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows);
		q->maxflows = min_t(u32, q->maxflows, q->limit);
	}

	qlen = sch->q.qlen;
	while (sch->q.qlen > q->limit)
		dropped += sfq_drop(sch);
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
		q->perturbation = prandom_u32();
	}
	sch_tree_unlock(sch);
	kfree(p);
	return 0;
}
static void *sfq_alloc(size_t sz)
{
	return kvmalloc(sz, GFP_KERNEL);
}

static void sfq_free(void *addr)
{
	kvfree(addr);
}
static void sfq_destroy(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	q->perturb_period = 0;
	del_timer_sync(&q->perturb_timer);
	sfq_free(q->ht);
	sfq_free(q->slots);
	kfree(q->red_parms);
}
static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	int i;
	int err;

	setup_deferrable_timer(&q->perturb_timer, sfq_perturbation,
			       (unsigned long)sch);

	err = tcf_block_get(&q->block, &q->filter_list);
	if (err)
		return err;

	for (i = 0; i < SFQ_MAX_DEPTH + 1; i++) {
		q->dep[i].next = i + SFQ_MAX_FLOWS;
		q->dep[i].prev = i + SFQ_MAX_FLOWS;
	}

	q->limit = SFQ_MAX_DEPTH;
	q->maxdepth = SFQ_MAX_DEPTH;
	q->cur_depth = 0;
	q->tail = NULL;
	q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
	q->maxflows = SFQ_DEFAULT_FLOWS;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	q->perturb_period = 0;
	q->perturbation = prandom_u32();

	if (opt) {
		int err = sfq_change(sch, opt);

		if (err)
			return err;
	}

	q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
	q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
	if (!q->ht || !q->slots) {
		/* Note: sfq_destroy() will be called by our caller */
		return -ENOMEM;
	}

	for (i = 0; i < q->divisor; i++)
		q->ht[i] = SFQ_EMPTY_SLOT;

	for (i = 0; i < q->maxflows; i++) {
		slot_queue_init(&q->slots[i]);
		sfq_link(q, i);
	}
	if (q->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}
static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_sfq_qopt_v1 opt;
	struct red_parms *p = q->red_parms;

	memset(&opt, 0, sizeof(opt));
	opt.v0.quantum	= q->quantum;
	opt.v0.perturb_period = q->perturb_period / HZ;
	opt.v0.limit	= q->limit;
	opt.v0.divisor	= q->divisor;
	opt.v0.flows	= q->maxflows;
	opt.depth	= q->maxdepth;
	opt.headdrop	= q->headdrop;

	if (p) {
		opt.qth_min	= p->qth_min >> p->Wlog;
		opt.qth_max	= p->qth_max >> p->Wlog;
		opt.Wlog	= p->Wlog;
		opt.Plog	= p->Plog;
		opt.Scell_log	= p->Scell_log;
		opt.max_P	= p->max_P;
	}
	memcpy(&opt.stats, &q->stats, sizeof(opt.stats));
	opt.flags	= q->flags;

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void sfq_put(struct Qdisc *q, unsigned long cl)
{
}
static struct tcf_block *sfq_tcf_block(struct Qdisc *sch, unsigned long cl)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}
*sch
, unsigned long cl
,
838 struct sk_buff
*skb
, struct tcmsg
*tcm
)
840 tcm
->tcm_handle
|= TC_H_MIN(cl
);
844 static int sfq_dump_class_stats(struct Qdisc
*sch
, unsigned long cl
,
847 struct sfq_sched_data
*q
= qdisc_priv(sch
);
848 sfq_index idx
= q
->ht
[cl
- 1];
849 struct gnet_stats_queue qs
= { 0 };
850 struct tc_sfq_xstats xstats
= { 0 };
852 if (idx
!= SFQ_EMPTY_SLOT
) {
853 const struct sfq_slot
*slot
= &q
->slots
[idx
];
855 xstats
.allot
= slot
->allot
<< SFQ_ALLOT_SHIFT
;
856 qs
.qlen
= slot
->qlen
;
857 qs
.backlog
= slot
->backlog
;
859 if (gnet_stats_copy_queue(d
, NULL
, &qs
, qs
.qlen
) < 0)
861 return gnet_stats_copy_app(d
, &xstats
, sizeof(xstats
));
static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->divisor; i++) {
		if (q->ht[i] == SFQ_EMPTY_SLOT ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}
static const struct Qdisc_class_ops sfq_class_ops = {
	.leaf		=	sfq_leaf,
	.get		=	sfq_get,
	.put		=	sfq_put,
	.tcf_block	=	sfq_tcf_block,
	.bind_tcf	=	sfq_bind,
	.unbind_tcf	=	sfq_put,
	.dump		=	sfq_dump_class,
	.dump_stats	=	sfq_dump_class_stats,
	.walk		=	sfq_walk,
};
static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
	.cl_ops		=	&sfq_class_ops,
	.id		=	"sfq",
	.priv_size	=	sizeof(struct sfq_sched_data),
	.enqueue	=	sfq_enqueue,
	.dequeue	=	sfq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	sfq_init,
	.reset		=	sfq_reset,
	.destroy	=	sfq_destroy,
	.change		=	NULL,
	.dump		=	sfq_dump,
	.owner		=	THIS_MODULE,
};
static int __init sfq_module_init(void)
{
	return register_qdisc(&sfq_qdisc_ops);
}

static void __exit sfq_module_exit(void)
{
	unregister_qdisc(&sfq_qdisc_ops);
}

module_init(sfq_module_init)
module_exit(sfq_module_exit)
MODULE_LICENSE("GPL");