#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
	struct tc_ratespec	rate;
	u32			data[256];
	struct qdisc_rate_table	*next;
	int			refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
};

struct qdisc_size_table {
	struct rcu_head		rcu;
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
	struct sk_buff	*head;
	struct sk_buff	*tail;
	__u32		qlen;
	spinlock_t	lock;
};

struct Qdisc {
	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
	unsigned int		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue: it can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeueing the next packet.
				      * It's true for MQ/MQPRIO slaves, or for a
				      * non-multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy:
				      * qdisc_tree_decrease_qlen() should stop.
				      */
	u32			limit;
	const struct Qdisc_ops	*ops;
	struct qdisc_size_table	__rcu *stab;
	struct hlist_node	hash;
	u32			handle;
	u32			parent;
	void			*u32_node;

	struct netdev_queue	*dev_queue;

	struct gnet_stats_rate_est64	rate_est;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;

	/*
	 * For performance sake on SMP, we put highly modified fields at the end
	 */
	struct sk_buff		*gso_skb ____cacheline_aligned_in_smp;
	struct qdisc_skb_head	q;
	struct gnet_stats_basic_packed bstats;
	seqcount_t		running;
	struct gnet_stats_queue	qstats;
	unsigned long		state;
	struct Qdisc		*next_sched;
	struct sk_buff		*skb_bad_txq;
	struct rcu_head		rcu_head;
	int			padded;
	atomic_t		refcnt;

	spinlock_t		busylock ____cacheline_aligned_in_smp;
};

static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc_is_running(qdisc))
		return false;
	/* Variant of write_seqcount_begin() telling lockdep a trylock
	 * was attempted.
	 */
	raw_write_seqcount_begin(&qdisc->running);
	seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
	return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	write_seqcount_end(&qdisc->running);
}
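
/* Editorial example (not part of the original header): a minimal sketch of
 * the trylock pattern qdisc_run_begin()/qdisc_run_end() support, loosely
 * modeled on __dev_xmit_skb() and __qdisc_run() in the core. The function
 * name sample_try_run() is hypothetical.
 */
static inline void sample_try_run(struct Qdisc *q)
{
	if (qdisc_run_begin(q)) {
		/* This CPU now owns the running seqcount and may
		 * dequeue and transmit packets from q.
		 */
		qdisc_run_end(q);
	}
	/* else: another CPU is already running this qdisc */
}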

static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}
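
/* Editorial sketch: how a dequeue path can combine qdisc_may_bulk() and
 * qdisc_avail_bulklimit() to bound bulk dequeue by the BQL budget, in the
 * spirit of try_bulk_dequeue_skb() in net/sched/sch_generic.c.
 * sample_bulk_budget() is a hypothetical name.
 */
static inline int sample_bulk_budget(const struct Qdisc *q)
{
	if (!qdisc_may_bulk(q))
		return 0;	/* shared txq: no bulking assumptions */
	return qdisc_avail_bulklimit(q->dev_queue);
}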

struct Qdisc_class_ops {
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*get)(struct Qdisc *, u32 classid);
	void			(*put)(struct Qdisc *, unsigned long);
	int			(*change)(struct Qdisc *, u32, u32,
					struct nlattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long);
	bool			(*tcf_cl_offload)(u32 classid);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					struct gnet_dump *);
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct nlattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct nlattr *arg);
	void			(*attach)(struct Qdisc *);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};

struct tcf_result {
	unsigned long	class;
	u32		classid;
};

struct tcf_proto_ops {
	struct list_head	head;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto *);
	bool			(*destroy)(struct tcf_proto *, bool);

	unsigned long		(*get)(struct tcf_proto *, u32 handle);
	int			(*change)(struct net *net, struct sk_buff *,
					struct tcf_proto *, unsigned long,
					u32 handle, struct nlattr **,
					unsigned long *, bool);
	int			(*delete)(struct tcf_proto *, unsigned long);
	void			(*walk)(struct tcf_proto *, struct tcf_walker *arg);

	/* rtnetlink specific */
	int			(*dump)(struct net *, struct tcf_proto *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);

	struct module		*owner;
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu	*next;
	void __rcu		*root;
	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	const struct tcf_proto_ops	*ops;
	struct rcu_head		rcu;
};

struct qdisc_skb_cb {
	unsigned int		pkt_len;
	u16			slave_dev_queue_mapping;
	u16			tc_classid;
#define QDISC_CB_PRIV_LEN 20
	unsigned char		data[QDISC_CB_PRIV_LEN];
};

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}
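
/* Editorial example: how a qdisc typically layers private per-packet state
 * on top of qdisc_skb_cb and validates it at build time, in the style of
 * netem_skb_cb() in net/sched/sch_netem.c. struct sample_skb_cb and
 * sample_skb_cb() are hypothetical.
 */
struct sample_skb_cb {
	u64	time_to_send;
};

static inline struct sample_skb_cb *sample_skb_cb(struct sk_buff *skb)
{
	/* fails the build if the private area outgrows skb->cb */
	qdisc_cb_private_validate(skb, sizeof(struct sample_skb_cb));
	return (struct sample_skb_cb *)qdisc_skb_cb(skb)->data;
}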

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest. This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us. Otherwise we'll potentially lock the wrong qdisc
 * root. This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return &root->running;
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

#define tcf_tree_lock(tp)	sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)	sch_tree_unlock((tp)->q)

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
			default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}
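
/* Editorial example: typical use of the class hash from a classful qdisc's
 * lookup path, in the style of htb_find() in net/sched/sch_htb.c.
 * struct sample_class and sample_find() are hypothetical.
 */
struct sample_class {
	struct Qdisc_class_common common;
	/* per-class scheduling state would follow here */
};

static inline struct sample_class *sample_find(struct Qdisc_class_hash *hash,
					       u32 classid)
{
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(hash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct sample_class, common);
}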

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
			       unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
bool tcf_destroy(struct tcf_proto *tp, bool force);
void tcf_destroy_chain(struct tcf_proto __rcu **fl);
int skb_do_redirect(struct sk_buff *);

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return G_TC_AT(skb->tc_verd) & AT_INGRESS;
#else
	return false;
#endif
}

/* Reset all TX qdiscs greater than the given index of a device. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (q->q.qlen) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, to_free);
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
				  __u64 bytes, __u32 packets)
{
	bstats->bytes += bytes;
	bstats->packets += packets;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	_bstats_update(bstats,
		       qdisc_pkt_len(skb),
		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				      __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	_bstats_update(&bstats->bstats, bytes, packets);
	u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				     const struct sk_buff *skb)
{
	u64_stats_update_begin(&bstats->syncp);
	bstats_update(&bstats->bstats, skb);
	u64_stats_update_end(&bstats->syncp);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
	qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
	qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}
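
/* Editorial sketch: how a classful parent hands a packet to a child qdisc
 * with qdisc_enqueue() and keeps its own counters consistent, loosely
 * following prio_enqueue() in net/sched/sch_prio.c. sample_parent_enqueue()
 * is hypothetical; the child would normally come from the qdisc's classifier.
 */
static inline int sample_parent_enqueue(struct sk_buff *skb, struct Qdisc *sch,
					struct Qdisc *child,
					struct sk_buff **to_free)
{
	int ret = qdisc_enqueue(skb, child, to_free);

	if (ret == NET_XMIT_SUCCESS) {
		qdisc_qstats_backlog_inc(sch, skb);
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		qdisc_qstats_drop(sch);
	}
	return ret;
}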

static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
	qh->head = NULL;
	qh->tail = NULL;
	qh->qlen = 0;
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct qdisc_skb_head *qh)
{
	struct sk_buff *last = qh->tail;

	if (last) {
		skb->next = NULL;
		last->next = skb;
		qh->tail = skb;
	} else {
		qh->tail = skb;
		qh->head = skb;
	}
	qh->qlen++;
	qdisc_qstats_backlog_inc(sch, skb);

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
	struct sk_buff *skb = qh->head;

	if (likely(skb != NULL)) {
		qh->head = skb->next;
		qh->qlen--;
		if (qh->head == NULL)
			qh->tail = NULL;
		skb->next = NULL;
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}

/* Instead of calling kfree_skb() while root qdisc lock is held,
 * queue the skb for future freeing at end of __dev_xmit_skb()
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
	skb->next = *to_free;
	*to_free = skb;
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct qdisc_skb_head *qh,
						   struct sk_buff **to_free)
{
	struct sk_buff *skb = __qdisc_dequeue_head(qh);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		__qdisc_drop(skb, to_free);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
						 struct sk_buff **to_free)
{
	return __qdisc_queue_drop_head(sch, &sch->q, to_free);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	const struct qdisc_skb_head *qh = &sch->q;

	return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb) {
			/* it's still part of the queue */
			qdisc_qstats_backlog_inc(sch, sch->gso_skb);
			sch->q.qlen++;
		}
	}

	return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = sch->gso_skb;

	if (skb) {
		sch->gso_skb = NULL;
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}
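
/* Editorial sketch of the peek-then-dequeue discipline for a shaping
 * (non-work-conserving) qdisc, loosely following tbf_dequeue() in
 * net/sched/sch_tbf.c: peek first, test conformance, and only then commit
 * via qdisc_dequeue_peeked(). sample_shaper_dequeue() is hypothetical and
 * the conformance test is abstracted into the can_send_now argument.
 */
static inline struct sk_buff *sample_shaper_dequeue(struct Qdisc *sch,
						    bool can_send_now)
{
	struct sk_buff *skb = qdisc_peek_dequeued(sch);

	if (!skb || !can_send_now)
		return NULL;	/* leave the skb accounted as still queued */

	skb = qdisc_dequeue_peeked(sch);
	qdisc_bstats_update(sch, skb);
	return skb;
}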

static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
	/*
	 * We do not know the backlog in bytes of this list, it
	 * is up to the caller to correct it
	 */
	ASSERT_RTNL();
	if (qh->qlen) {
		rtnl_kfree_skbs(qh->head, qh->tail);

		qh->head = NULL;
		qh->tail = NULL;
		qh->qlen = 0;
	}
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(&sch->q);
	sch->qstats.backlog = 0;
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL) {
		qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
		qdisc_reset(old);
	}
	sch_tree_unlock(sch);

	return old;
}
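
/* Editorial example: how a qdisc's ->graft() operation typically uses
 * qdisc_replace() to swap in a new child, as in tbf_graft() in
 * net/sched/sch_tbf.c. sample_graft() is hypothetical; pchild would point
 * into the qdisc's private data.
 */
static inline int sample_graft(struct Qdisc *sch, struct Qdisc *new,
			       struct Qdisc **pchild, struct Qdisc **old)
{
	if (new == NULL)
		new = &noop_qdisc;	/* never leave a child slot empty */

	*old = qdisc_replace(sch, new, pchild);
	return 0;
}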

static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	rtnl_kfree_skbs(skb, skb);
	qdisc_qstats_drop(sch);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}
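
/* Editorial example: the classic tail-drop FIFO enqueue built from the
 * helpers above, essentially pfifo_enqueue() in net/sched/sch_fifo.c.
 * sample_pfifo_enqueue() is hypothetical only in name.
 */
static inline int sample_pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff **to_free)
{
	if (likely(sch->q.qlen < sch->limit))
		return qdisc_enqueue_tail(skb, sch);

	return qdisc_drop(skb, sch, to_free);
}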

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}
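
/* Editorial worked example: with cell_log = 3 (8-byte cells) and zero
 * cell_align/overhead, a 100-byte packet maps to slot 100 >> 3 = 12, so
 * its transmission time is rtab->data[12] ticks; packets spanning more
 * than 255 cells are extrapolated from data[255] plus a low-slot remainder.
 */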

struct psched_ratecfg {
	u64	rate_bytes_ps; /* bytes per second */
	u32	mult;
	u16	overhead;
	u8	linklayer;
	u8	shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}
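
/* Editorial worked example: assuming psched_ratecfg_precompute() filled in
 * mult/shift for rate_bytes_ps = 125000 (1 Mbit/s), a 1500-byte packet
 * takes 1500 * NSEC_PER_SEC / 125000 = 12,000,000 ns, and
 * ((u64)len * r->mult) >> r->shift is an integer approximation of that.
 * The ATM branch first rounds len up to 48-byte cells carried in 53-byte
 * frames before applying the same scaling.
 */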

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* The legacy struct tc_ratespec has a 32bit @rate field;
	 * a Qdisc using a 64bit rate should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

#endif