#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
	struct tc_ratespec	rate;
	u32			data[256];
	struct qdisc_rate_table	*next;
	int			refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
};

struct qdisc_size_table {
	struct rcu_head		rcu;
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

38
fd2c3ef7 39struct Qdisc {
520ac30f
ED
40 int (*enqueue)(struct sk_buff *skb,
41 struct Qdisc *sch,
42 struct sk_buff **to_free);
43 struct sk_buff * (*dequeue)(struct Qdisc *sch);
05bdd2f1 44 unsigned int flags;
b00355db 45#define TCQ_F_BUILTIN 1
fd245a4a
ED
46#define TCQ_F_INGRESS 2
47#define TCQ_F_CAN_BYPASS 4
48#define TCQ_F_MQROOT 8
1abbe139
ED
49#define TCQ_F_ONETXQUEUE 0x10 /* dequeue_skb() can assume all skbs are for
50 * q->dev_queue : It can test
51 * netif_xmit_frozen_or_stopped() before
52 * dequeueing next packet.
53 * Its true for MQ/MQPRIO slaves, or non
54 * multiqueue device.
55 */
b00355db 56#define TCQ_F_WARN_NONWC (1 << 16)
22e0f8b9 57#define TCQ_F_CPUSTATS 0x20 /* run using percpu statistics */
4eaf3b84
ED
58#define TCQ_F_NOPARENT 0x40 /* root of its hierarchy :
59 * qdisc_tree_decrease_qlen() should stop.
60 */
45203a3b 61 u32 limit;
05bdd2f1 62 const struct Qdisc_ops *ops;
a2da570d 63 struct qdisc_size_table __rcu *stab;
5e140dfc 64 struct list_head list;
1da177e4
LT
65 u32 handle;
66 u32 parent;
72b25a91
DM
67 void *u32_node;
68
5e140dfc 69 struct netdev_queue *dev_queue;
5e140dfc 70
45203a3b 71 struct gnet_stats_rate_est64 rate_est;
0d32ef8c
ED
72 struct gnet_stats_basic_cpu __percpu *cpu_bstats;
73 struct gnet_stats_queue __percpu *cpu_qstats;
74
5e140dfc
ED
75 /*
76 * For performance sake on SMP, we put highly modified fields at the end
77 */
4d202a0d 78 struct sk_buff *gso_skb ____cacheline_aligned_in_smp;
5e140dfc 79 struct sk_buff_head q;
0d32ef8c 80 struct gnet_stats_basic_packed bstats;
f9eb8aea 81 seqcount_t running;
0d32ef8c 82 struct gnet_stats_queue qstats;
4d202a0d
ED
83 unsigned long state;
84 struct Qdisc *next_sched;
85 struct sk_buff *skb_bad_txq;
79640a4c 86 struct rcu_head rcu_head;
45203a3b
ED
87 int padded;
88 atomic_t refcnt;
89
90 spinlock_t busylock ____cacheline_aligned_in_smp;
1da177e4
LT
91};
static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc_is_running(qdisc))
		return false;
	/* Variant of write_seqcount_begin() telling lockdep a trylock
	 * was attempted.
	 */
	raw_write_seqcount_begin(&qdisc->running);
	seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
	return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	write_seqcount_end(&qdisc->running);
}

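/* Editor's note: the "running" seqcount doubles as a trylock. Below is a
 * usage sketch of the owner/back-off pattern used by the transmit path;
 * example_qdisc_drain() is a hypothetical helper added for illustration,
 * not part of the original header.
 */
static inline void example_qdisc_drain(struct Qdisc *q)
{
	if (qdisc_run_begin(q)) {
		struct sk_buff *skb;

		/* We own the qdisc until qdisc_run_end(); a real caller
		 * would hand each skb to the driver instead of freeing it.
		 */
		while ((skb = q->dequeue(q)) != NULL)
			kfree_skb(skb);
		qdisc_run_end(q);
	}
	/* else: another CPU already owns this qdisc and will flush it */
}
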
static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}

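/* Editor's sketch (hypothetical helper, not in the original header):
 * bulk dequeue is only safe when every skb targets the same TX queue,
 * and BQL bounds how many bytes may be pulled in one go. A caller might
 * compute its byte budget like this:
 */
static inline int example_bulk_budget(const struct Qdisc *q,
				      const struct netdev_queue *txq)
{
	if (!qdisc_may_bulk(q))
		return 0;
	/* dql_avail() can go negative once the limit is overshot */
	return max_t(int, qdisc_avail_bulklimit(txq), 0);
}
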
struct Qdisc_class_ops {
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*get)(struct Qdisc *, u32 classid);
	void			(*put)(struct Qdisc *, unsigned long);
	int			(*change)(struct Qdisc *, u32, u32,
					struct nlattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long);
	bool			(*tcf_cl_offload)(u32 classid);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					struct gnet_dump *);
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct nlattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct nlattr *arg);
	void			(*attach)(struct Qdisc *);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};

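/* Editor's note (illustrative, not part of the original header): a
 * minimal classless qdisc fills in just the datapath and housekeeping
 * hooks and registers the table with register_qdisc() from
 * net/pkt_sched.h. The example_* names are hypothetical:
 *
 *	static struct Qdisc_ops example_qdisc_ops __read_mostly = {
 *		.id		= "example",
 *		.priv_size	= 0,
 *		.enqueue	= example_enqueue,
 *		.dequeue	= example_dequeue,
 *		.peek		= qdisc_peek_head,
 *		.init		= example_init,
 *		.reset		= qdisc_reset_queue,
 *		.owner		= THIS_MODULE,
 *	};
 */
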
struct tcf_result {
	unsigned long	class;
	u32		classid;
};

struct tcf_proto_ops {
	struct list_head	head;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto *);
	bool			(*destroy)(struct tcf_proto *, bool);

	unsigned long		(*get)(struct tcf_proto *, u32 handle);
	int			(*change)(struct net *net, struct sk_buff *,
					struct tcf_proto *, unsigned long,
					u32 handle, struct nlattr **,
					unsigned long *, bool);
	int			(*delete)(struct tcf_proto *, unsigned long);
	void			(*walk)(struct tcf_proto *, struct tcf_walker *arg);

	/* rtnetlink specific */
	int			(*dump)(struct net *, struct tcf_proto *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);

	struct module		*owner;
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu	*next;
	void __rcu		*root;
	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	const struct tcf_proto_ops	*ops;
	struct rcu_head		rcu;
};

struct qdisc_skb_cb {
	unsigned int		pkt_len;
	u16			slave_dev_queue_mapping;
	u16			tc_classid;
#define QDISC_CB_PRIV_LEN 20
	unsigned char		data[QDISC_CB_PRIV_LEN];
};

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

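/* Editor's sketch (hypothetical type, not in the original header): a
 * qdisc needing per-packet scratch state overlays its own struct on the
 * private cb area and lets the build fail if it would overflow:
 */
struct example_skb_cb {
	unsigned int	magic;
};

static inline struct example_skb_cb *example_skb_cb(struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct example_skb_cb));
	return (struct example_skb_cb *)qdisc_skb_cb(skb)->data;
}
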
static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest. This allows changing some aspect of the qdisc tree's
 * configuration while blocking out asynchronous qdisc access
 * in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us. Otherwise we'll potentially lock the wrong qdisc
 * root. This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return &root->running;
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

#define tcf_tree_lock(tp)	sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)	sch_tree_unlock((tp)->q)

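/* Editor's sketch (hypothetical helper, not in the original header):
 * control-path code mutates qdisc configuration under sch_tree_lock(),
 * which grabs the root's lock and thereby excludes the datapath:
 */
static inline void example_set_limit(struct Qdisc *sch, u32 limit)
{
	sch_tree_lock(sch);
	sch->limit = limit;	/* packet processing is blocked out here */
	sch_tree_unlock(sch);
}
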
extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
			default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}

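/* Editor's sketch (hypothetical types, not in the original header): a
 * classful qdisc embeds Qdisc_class_common in its per-class structure
 * and recovers the outer object with container_of() after the lookup:
 */
struct example_class {
	struct Qdisc_class_common common;
	u32			quantum;
};

static inline struct example_class *
example_class_find(struct Qdisc_class_hash *hash, u32 classid)
{
	struct Qdisc_class_common *cl = qdisc_class_find(hash, classid);

	return cl ? container_of(cl, struct example_class, common) : NULL;
}
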
int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
			       unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
bool tcf_destroy(struct tcf_proto *tp, bool force);
void tcf_destroy_chain(struct tcf_proto __rcu **fl);
int skb_do_redirect(struct sk_buff *);

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return G_TC_AT(skb->tc_verd) & AT_INGRESS;
#else
	return false;
#endif
}

/* Reset all TX qdiscs of a device, starting at the given queue index. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (q->q.qlen) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

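/* Editor's note (illustrative, not part of the original header): a
 * parent qdisc filters a child's enqueue return code through
 * net_xmit_drop_count() so packets "stolen" by actions are not counted
 * as drops, roughly:
 *
 *	ret = qdisc_enqueue(skb, child, to_free);
 *	if (ret != NET_XMIT_SUCCESS) {
 *		if (net_xmit_drop_count(ret))
 *			qdisc_qstats_drop(sch);
 *		return ret;
 *	}
 */
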
static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, to_free);
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
				  __u64 bytes, __u32 packets)
{
	bstats->bytes += bytes;
	bstats->packets += packets;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	_bstats_update(bstats,
		       qdisc_pkt_len(skb),
		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				      __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	_bstats_update(&bstats->bstats, bytes, packets);
	u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				     const struct sk_buff *skb)
{
	u64_stats_update_begin(&bstats->syncp);
	bstats_update(&bstats->bstats, skb);
	u64_stats_update_end(&bstats->syncp);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
	qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
	qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
	qstats_drop_inc(this_cpu_ptr(sch->cpu_qstats));
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	__skb_queue_tail(list, skb);
	qdisc_qstats_backlog_inc(sch, skb);

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	return __qdisc_dequeue_head(sch, &sch->q);
}

/* Instead of calling kfree_skb() while root qdisc lock is held,
 * queue the skb for future freeing at end of __dev_xmit_skb()
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
	skb->next = *to_free;
	*to_free = skb;
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct sk_buff_head *list,
						   struct sk_buff **to_free)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		__qdisc_drop(skb, to_free);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
						 struct sk_buff **to_free)
{
	return __qdisc_queue_drop_head(sch, &sch->q, to_free);
}

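/* Editor's sketch (hypothetical, not in the original header): the
 * helpers above combine into a byte-limited FIFO datapath in the style
 * of sch_fifo.c:
 */
static inline int example_bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
					struct sk_buff **to_free)
{
	if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
		return qdisc_enqueue_tail(skb, sch);

	/* over limit: defer the actual kfree_skb() to the caller */
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);
	return NET_XMIT_DROP;
}
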
static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	return skb_peek(&sch->q);
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb) {
			/* it's still part of the queue */
			qdisc_qstats_backlog_inc(sch, sch->gso_skb);
			sch->q.qlen++;
		}
	}

	return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = sch->gso_skb;

	if (skb) {
		sch->gso_skb = NULL;
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}

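/* Editor's sketch (hypothetical, not in the original header): a
 * non-work-conserving qdisc peeks first and only commits the dequeue
 * once it decides the packet may be released (cf. sch_tbf.c):
 */
static inline struct sk_buff *example_shaper_dequeue(struct Qdisc *sch,
						     bool may_send)
{
	struct sk_buff *skb = qdisc_peek_dequeued(sch);

	if (!skb || !may_send)
		return NULL;	/* packet stays queued for a later try */

	return qdisc_dequeue_peeked(sch);
}
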
static inline void __qdisc_reset_queue(struct sk_buff_head *list)
{
	/*
	 * We do not know the backlog in bytes of this list, it
	 * is up to the caller to correct it
	 */
	if (!skb_queue_empty(list)) {
		rtnl_kfree_skbs(list->next, list->prev);
		__skb_queue_head_init(list);
	}
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(&sch->q);
	sch->qstats.backlog = 0;
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL) {
		qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
		qdisc_reset(old);
	}
	sch_tree_unlock(sch);

	return old;
}

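/* Editor's sketch (hypothetical, not in the original header): a classful
 * qdisc's ->graft() operation typically reduces to qdisc_replace() on
 * the slot holding the old child:
 */
static inline int example_graft(struct Qdisc *sch, struct Qdisc *new,
				struct Qdisc **slot, struct Qdisc **old)
{
	*old = qdisc_replace(sch, new ? new : &noop_qdisc, slot);
	return 0;
}
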
static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	rtnl_kfree_skbs(skb, skb);
	qdisc_qstats_drop(sch);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}

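/* Editor's worked example (illustrative numbers): with cell_log = 3
 * (8-byte cells) and zero cell_align/overhead, a 1000-byte packet maps
 * to slot 1000 >> 3 = 125, so qdisc_l2t() returns rtab->data[125], the
 * precomputed transmit time for that size bucket in PSCHED ticks. For
 * slots above 255 the result is extrapolated as data[255] scaled by
 * (slot >> 8) plus the data[slot & 0xFF] remainder, which keeps the
 * table itself at 256 entries.
 */
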
struct psched_ratecfg {
	u64	rate_bytes_ps; /* bytes per second */
	u32	mult;
	u16	overhead;
	u8	linklayer;
	u8	shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}

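/* Editor's note (illustrative): psched_ratecfg_precompute() chooses
 * mult and shift so that (len * mult) >> shift approximates
 * len * NSEC_PER_SEC / rate_bytes_ps without a 64-bit division on the
 * fast path. At rate_bytes_ps = 125000000 (1 Gbit/s) that is 8 ns per
 * byte, so a 1500-byte frame costs about 12 us. The ATM branch first
 * rounds the length up to whole 53-byte cells of 48 payload bytes each.
 */
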
void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* legacy struct tc_ratespec has a 32bit @rate field;
	 * Qdiscs using a 64bit rate should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

#endif