/*
 * Fair Queue CoDel discipline
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>
/*
 * Packets are classified (internal classifier or external) on flows.
 * This is a Stochastic model (as we use a hash, several flows
 * might be hashed to the same slot).
 * Each flow has a CoDel managed queue.
 * Flows are linked onto two (Round Robin) lists,
 * so that new flows have priority over old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO).
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow).
 */
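
/* Example: a minimal sketch of how this qdisc is typically attached with
 * the tc(8) CLI (illustrative only; the device name and values below are
 * assumptions, not taken from this file):
 *
 *   tc qdisc add dev eth0 root fq_codel limit 10240 flows 1024 \
 *	target 5ms interval 100ms ecn
 *
 * target/interval/ce_threshold are expressed in time units, limit in
 * packets, memory_limit in bytes (see fq_codel_change() below).
 */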
struct fq_codel_flow {
	struct sk_buff	  *head;
	struct sk_buff	  *tail;
	struct list_head  flowchain;
	int		  deficit;
	u32		  dropped; /* number of drops (or ECN marks) on this flow */
	struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */
struct fq_codel_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
	u32		*backlogs;	/* backlog table [flows_cnt] */
	u32		flows_cnt;	/* number of flows */
	u32		quantum;	/* psched_mtu(qdisc_dev(sch)); */
	u32		drop_batch_size;
	u32		memory_limit;
	struct codel_params cparams;
	struct codel_stats cstats;
	u32		memory_usage;
	u32		drop_overmemory;
	u32		drop_overlimit;
	u32		new_flow_count;

	struct list_head new_flows;	/* list of new flows */
	struct list_head old_flows;	/* list of old flows */
};
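
/* Hash a packet into one of flows_cnt buckets. Collisions are expected
 * and accepted: this is what makes the model stochastic.
 */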
static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
				  struct sk_buff *skb)
{
	return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
}
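
/* Map a packet to a flow index in [1, flows_cnt], or 0 to drop it.
 * skb->priority may select a flow directly; otherwise the external
 * classifier (if any) is consulted, with the hash as fallback.
 */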
static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	filter = rcu_dereference_bh(q->filter_list);
	if (!filter)
		return fq_codel_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, filter, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}
/* helper functions : might be changed when/if skb uses a standard list_head */

/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb->next = NULL;
	return skb;
}
/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
				  struct sk_buff *skb)
{
	if (flow->head == NULL)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}
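
/* Drop from the flow with the largest backlog; called when the qdisc is
 * over its packet or memory limit. Returns the index of the culled flow.
 */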
static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
				  struct sk_buff **to_free)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int maxbacklog = 0, idx = 0, i, len;
	struct fq_codel_flow *flow;
	unsigned int threshold;
	unsigned int mem = 0;

	/* Queue is full! Find the fat flow and drop packet(s) from it.
	 * This might sound expensive, but with 1024 flows, we scan
	 * 4KB of memory, and we don't need to handle a complex tree
	 * in fast path (packet queue/enqueue) with many cache misses.
	 * In stress mode, we'll try to drop 64 packets from the flow,
	 * amortizing this linear lookup to one cache line per drop.
	 */
	for (i = 0; i < q->flows_cnt; i++) {
		if (q->backlogs[i] > maxbacklog) {
			maxbacklog = q->backlogs[i];
			idx = i;
		}
	}

	/* Our goal is to drop half of this fat flow backlog */
	threshold = maxbacklog >> 1;

	flow = &q->flows[idx];
	len = 0;
	i = 0;
	do {
		skb = dequeue_head(flow);
		len += qdisc_pkt_len(skb);
		mem += get_codel_cb(skb)->mem_usage;
		__qdisc_drop(skb, to_free);
	} while (++i < max_packets && len < threshold);

	flow->dropped += i;
	q->backlogs[idx] -= len;
	q->memory_usage -= mem;
	sch->qstats.drops += i;
	sch->qstats.backlog -= len;
	sch->q.qlen -= i;
	return idx;
}
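
/* Enqueue entry point: classify the packet onto a flow, timestamp it for
 * CoDel, and shed load from the fattest flow if over the packet or
 * memory limit.
 */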
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			    struct sk_buff **to_free)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int idx, prev_backlog, prev_qlen;
	struct fq_codel_flow *flow;
	int uninitialized_var(ret);
	unsigned int pkt_len;
	bool memory_limited;

	idx = fq_codel_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
	idx--;

	codel_set_enqueue_time(skb);
	flow = &q->flows[idx];
	flow_queue_add(flow, skb);
	q->backlogs[idx] += qdisc_pkt_len(skb);
	qdisc_qstats_backlog_inc(sch, skb);

	if (list_empty(&flow->flowchain)) {
		list_add_tail(&flow->flowchain, &q->new_flows);
		q->new_flow_count++;
		flow->deficit = q->quantum;
		flow->dropped = 0;
	}
	get_codel_cb(skb)->mem_usage = skb->truesize;
	q->memory_usage += get_codel_cb(skb)->mem_usage;
	memory_limited = q->memory_usage > q->memory_limit;
	if (++sch->q.qlen <= sch->limit && !memory_limited)
		return NET_XMIT_SUCCESS;

	prev_backlog = sch->qstats.backlog;
	prev_qlen = sch->q.qlen;

	/* save this packet length as it might be dropped by fq_codel_drop() */
	pkt_len = qdisc_pkt_len(skb);
	/* fq_codel_drop() is quite expensive, as it performs a linear search
	 * in q->backlogs[] to find a fat flow.
	 * So instead of dropping a single packet, drop half of its backlog,
	 * capped at 64 packets, to avoid too big a CPU spike here.
	 */
	ret = fq_codel_drop(sch, q->drop_batch_size, to_free);

	prev_qlen -= sch->q.qlen;
	prev_backlog -= sch->qstats.backlog;
	q->drop_overlimit += prev_qlen;
	if (memory_limited)
		q->drop_overmemory += prev_qlen;

	/* As we dropped packet(s), better let upper stack know this.
	 * If we dropped a packet for this flow, return NET_XMIT_CN,
	 * but in this case, our parents won't increase their backlogs.
	 */
	if (ret == idx) {
		qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
					  prev_backlog - pkt_len);
		return NET_XMIT_CN;
	}
	qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
	return NET_XMIT_SUCCESS;
}
/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct fq_codel_flow *flow;
	struct sk_buff *skb = NULL;

	flow = container_of(vars, struct fq_codel_flow, cvars);
	if (flow->head) {
		skb = dequeue_head(flow);
		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
		q->memory_usage -= get_codel_cb(skb)->mem_usage;
		sch->q.qlen--;
		sch->qstats.backlog -= qdisc_pkt_len(skb);
	}
	return skb;
}
static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	kfree_skb(skb);
	qdisc_qstats_drop(sch);
}
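
/* Dequeue entry point: pick the next flow in DRR order (new flows first),
 * then let CoDel decide whether its head packet is delivered or dropped.
 */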
static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct fq_codel_flow *flow;
	struct list_head *head;
	u32 prev_drop_count, prev_ecn_mark;
	unsigned int prev_backlog;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}
	flow = list_first_entry(head, struct fq_codel_flow, flowchain);

	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	prev_drop_count = q->cstats.drop_count;
	prev_ecn_mark = q->cstats.ecn_mark;
	prev_backlog = sch->qstats.backlog;

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
			    &flow->cvars, &q->cstats, qdisc_pkt_len,
			    codel_get_enqueue_time, drop_func, dequeue_func);

	flow->dropped += q->cstats.drop_count - prev_drop_count;
	flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}
	qdisc_bstats_update(sch, skb);
	flow->deficit -= qdisc_pkt_len(skb);
	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */
	if (q->cstats.drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
					  q->cstats.drop_len);
		q->cstats.drop_count = 0;
		q->cstats.drop_len = 0;
	}
	return skb;
}
static void fq_codel_flow_purge(struct fq_codel_flow *flow)
{
	rtnl_kfree_skbs(flow->head, flow->tail);
	flow->head = NULL;
}
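
/* Reset: purge every flow and return all book-keeping to its empty state. */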
static void fq_codel_reset(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	for (i = 0; i < q->flows_cnt; i++) {
		struct fq_codel_flow *flow = q->flows + i;

		fq_codel_flow_purge(flow);
		INIT_LIST_HEAD(&flow->flowchain);
		codel_vars_init(&flow->cvars);
	}
	memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
	sch->q.qlen = 0;
	sch->qstats.backlog = 0;
	q->memory_usage = 0;
}
static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
	[TCA_FQ_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_ECN]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_FLOWS]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_QUANTUM]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_DROP_BATCH_SIZE] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
};
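
/* Apply netlink configuration. target/interval/ce_threshold arrive in
 * usec and are converted to codel time units; flows_cnt can only be set
 * before the flow table has been allocated.
 */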
static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy);
	if (err < 0)
		return err;
	if (tb[TCA_FQ_CODEL_FLOWS]) {
		if (q->flows)
			return -EINVAL;
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
		if (!q->flows_cnt ||
		    q->flows_cnt > 65536)
			return -EINVAL;
	}
	sch_tree_lock(sch);

	if (tb[TCA_FQ_CODEL_TARGET]) {
		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);

		q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_INTERVAL]) {
		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

		q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

	if (tb[TCA_FQ_CODEL_ECN])
		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

	if (tb[TCA_FQ_CODEL_QUANTUM])
		q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));

	if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
		q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));

	if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
		q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));

	while (sch->q.qlen > sch->limit ||
	       q->memory_usage > q->memory_limit) {
		struct sk_buff *skb = fq_codel_dequeue(sch);

		q->cstats.drop_len += qdisc_pkt_len(skb);
		rtnl_kfree_skbs(skb, skb);
		q->cstats.drop_count++;
	}
	qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
	q->cstats.drop_count = 0;
	q->cstats.drop_len = 0;

	sch_tree_unlock(sch);
	return 0;
}
static void *fq_codel_zalloc(size_t sz)
{
	void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);

	if (!ptr)
		ptr = vzalloc(sz);
	return ptr;
}

static void fq_codel_free(void *addr)
{
	kvfree(addr);
}
static void fq_codel_destroy(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	fq_codel_free(q->backlogs);
	fq_codel_free(q->flows);
}
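
/* Set defaults (10240 packet limit, 32 MB memory limit), apply any
 * netlink options, then allocate the flow and backlog tables.
 */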
static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	sch->limit = 10*1024;
	q->flows_cnt = 1024;
	q->memory_limit = 32 << 20; /* 32 MBytes */
	q->drop_batch_size = 64;
	q->quantum = psched_mtu(qdisc_dev(sch));
	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	codel_params_init(&q->cparams);
	codel_stats_init(&q->cstats);
	q->cparams.ecn = true;
	q->cparams.mtu = psched_mtu(qdisc_dev(sch));

	if (opt) {
		int err = fq_codel_change(sch, opt);

		if (err)
			return err;
	}

	if (!q->flows) {
		q->flows = fq_codel_zalloc(q->flows_cnt *
					   sizeof(struct fq_codel_flow));
		if (!q->flows)
			return -ENOMEM;
		q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32));
		if (!q->backlogs) {
			fq_codel_free(q->flows);
			return -ENOMEM;
		}
		for (i = 0; i < q->flows_cnt; i++) {
			struct fq_codel_flow *flow = q->flows + i;

			INIT_LIST_HEAD(&flow->flowchain);
			codel_vars_init(&flow->cvars);
		}
	}
	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}
static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
			codel_time_to_us(q->cparams.target)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
			sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
			codel_time_to_us(q->cparams.interval)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
			q->cparams.ecn) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
			q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
			q->drop_batch_size) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
			q->memory_limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
			q->flows_cnt))
		goto nla_put_failure;

	if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD &&
	    nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
			codel_time_to_us(q->cparams.ce_threshold)))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}
static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tc_fq_codel_xstats st = {
		.type				= TCA_FQ_CODEL_XSTATS_QDISC,
	};
	struct list_head *pos;

	st.qdisc_stats.maxpacket = q->cstats.maxpacket;
	st.qdisc_stats.drop_overlimit = q->drop_overlimit;
	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
	st.qdisc_stats.new_flow_count = q->new_flow_count;
	st.qdisc_stats.ce_mark = q->cstats.ce_mark;
	st.qdisc_stats.memory_usage = q->memory_usage;
	st.qdisc_stats.drop_overmemory = q->drop_overmemory;

	sch_tree_lock(sch);
	list_for_each(pos, &q->new_flows)
		st.qdisc_stats.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.qdisc_stats.old_flows_len++;
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}
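
/* fq_codel has no configurable classes: the class ops below only expose
 * each hash bucket (flow) as a read-only pseudo class for stats dumping.
 */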
static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}
static unsigned long fq_codel_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}
static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
				   u32 classid)
{
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}
static void fq_codel_put(struct Qdisc *q, unsigned long cl)
{
}
static struct tcf_proto __rcu **fq_codel_find_tcf(struct Qdisc *sch,
						  unsigned long cl)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}
static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
			       struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}
static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				     struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	u32 idx = cl - 1;
	struct gnet_stats_queue qs = { 0 };
	struct tc_fq_codel_xstats xstats;

	if (idx < q->flows_cnt) {
		const struct fq_codel_flow *flow = &q->flows[idx];
		const struct sk_buff *skb;

		memset(&xstats, 0, sizeof(xstats));
		xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
		xstats.class_stats.deficit = flow->deficit;
		xstats.class_stats.ldelay =
			codel_time_to_us(flow->cvars.ldelay);
		xstats.class_stats.count = flow->cvars.count;
		xstats.class_stats.lastcount = flow->cvars.lastcount;
		xstats.class_stats.dropping = flow->cvars.dropping;
		if (flow->cvars.dropping) {
			codel_tdiff_t delta = flow->cvars.drop_next -
					      codel_get_time();

			xstats.class_stats.drop_next = (delta >= 0) ?
				codel_time_to_us(delta) :
				-codel_time_to_us(-delta);
		}
		if (flow->head) {
			sch_tree_lock(sch);
			skb = flow->head;
			while (skb) {
				qs.qlen++;
				skb = skb->next;
			}
			sch_tree_unlock(sch);
		}
		qs.backlog = q->backlogs[idx];
		qs.drops = flow->dropped;
	}
	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
		return -1;
	if (idx < q->flows_cnt)
		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
	return 0;
}
static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->flows_cnt; i++) {
		if (list_empty(&q->flows[i].flowchain) ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}
static const struct Qdisc_class_ops fq_codel_class_ops = {
	.leaf		=	fq_codel_leaf,
	.get		=	fq_codel_get,
	.put		=	fq_codel_put,
	.tcf_chain	=	fq_codel_find_tcf,
	.bind_tcf	=	fq_codel_bind,
	.unbind_tcf	=	fq_codel_put,
	.dump		=	fq_codel_dump_class,
	.dump_stats	=	fq_codel_dump_class_stats,
	.walk		=	fq_codel_walk,
};
static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
	.cl_ops		=	&fq_codel_class_ops,
	.id		=	"fq_codel",
	.priv_size	=	sizeof(struct fq_codel_sched_data),
	.enqueue	=	fq_codel_enqueue,
	.dequeue	=	fq_codel_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	fq_codel_init,
	.reset		=	fq_codel_reset,
	.destroy	=	fq_codel_destroy,
	.change		=	fq_codel_change,
	.dump		=	fq_codel_dump,
	.dump_stats	=	fq_codel_dump_stats,
	.owner		=	THIS_MODULE,
};
static int __init fq_codel_module_init(void)
{
	return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
	unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");