#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
	struct tc_ratespec rate;
	u32		data[256];
	struct qdisc_rate_table *next;
	int		refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_RUNNING,
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
};

struct qdisc_size_table {
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

struct Qdisc {
	int			(*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
	struct sk_buff *	(*dequeue)(struct Qdisc *dev);
	unsigned		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_THROTTLED		2
#define TCQ_F_INGRESS		4
#define TCQ_F_CAN_BYPASS	8
#define TCQ_F_MQROOT		16
#define TCQ_F_WARN_NONWC	(1 << 16)
	int			padded;
	struct Qdisc_ops	*ops;
	struct qdisc_size_table	*stab;
	struct list_head	list;
	u32			handle;
	u32			parent;
	atomic_t		refcnt;
	struct gnet_stats_rate_est	rate_est;
	int			(*reshape_fail)(struct sk_buff *skb,
						struct Qdisc *q);

	void			*u32_node;

	/* This field is deprecated, but it is still used by CBQ
	 * and it will live until a better solution is invented.
	 */
	struct Qdisc		*__parent;
	struct netdev_queue	*dev_queue;
	struct Qdisc		*next_sched;

	struct sk_buff		*gso_skb;
	/*
	 * For performance's sake on SMP, we put the most frequently
	 * modified fields at the end.
	 */
	unsigned long		state;
	struct sk_buff_head	q;
	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue	qstats;
};

struct Qdisc_class_ops {
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*get)(struct Qdisc *, u32 classid);
	void			(*put)(struct Qdisc *, unsigned long);
	int			(*change)(struct Qdisc *, u32, u32,
					struct nlattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_proto **	(*tcf_chain)(struct Qdisc *, unsigned long);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					struct gnet_dump *);
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int			(*enqueue)(struct sk_buff *, struct Qdisc *);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);
	unsigned int		(*drop)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct nlattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct nlattr *arg);
	void			(*attach)(struct Qdisc *);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};

struct tcf_result {
	unsigned long	class;
	u32		classid;
};

struct tcf_proto_ops {
	struct tcf_proto_ops	*next;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *, struct tcf_proto *,
					struct tcf_result *);
	int			(*init)(struct tcf_proto *);
	void			(*destroy)(struct tcf_proto *);

	unsigned long		(*get)(struct tcf_proto *, u32 handle);
	void			(*put)(struct tcf_proto *, unsigned long);
	int			(*change)(struct tcf_proto *, unsigned long,
					u32 handle, struct nlattr **,
					unsigned long *);
	int			(*delete)(struct tcf_proto *, unsigned long);
	void			(*walk)(struct tcf_proto *, struct tcf_walker *arg);

	/* rtnetlink specific */
	int			(*dump)(struct tcf_proto *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);

	struct module		*owner;
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto	*next;
	void			*root;
	int			(*classify)(struct sk_buff *, struct tcf_proto *,
					struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	struct tcf_proto_ops	*ops;
};

struct qdisc_skb_cb {
	unsigned int	pkt_len;
	char		data[];
};

static inline int qdisc_qlen(struct Qdisc *q)
{
	return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}
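
/*
 * Illustrative sketch (not part of the original header): a qdisc that
 * needs per-packet private state can layer its own control block over
 * struct qdisc_skb_cb, using the trailing data[] area.  The names
 * "example_skb_cb" and "example_cb" are hypothetical; real qdiscs such
 * as netem use this same pattern.
 */
struct example_skb_cb {
	struct qdisc_skb_cb	base;		/* must stay first */
	u32			enqueue_time;	/* hypothetical private field */
};

static inline struct example_skb_cb *example_cb(struct sk_buff *skb)
{
	/* the private cb must fit inside skb->cb alongside the base */
	return (struct example_skb_cb *)qdisc_skb_cb(skb);
}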

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc;
}

static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest.  This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us.  Otherwise we'll potentially lock the wrong qdisc
 * root.  This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

#define tcf_tree_lock(tp)	sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)	sch_tree_unlock((tp)->q)
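
/*
 * Illustrative sketch (not part of the original header): configuration
 * updates run with RTNL held and take the tree lock so the packet
 * paths never observe a half-updated qdisc.  "example_set_flags" is a
 * hypothetical helper.
 */
static inline void example_set_flags(struct Qdisc *sch, unsigned new_flags)
{
	sch_tree_lock(sch);	/* blocks enqueue/dequeue on this tree */
	sch->flags |= new_flags;
	sch_tree_unlock(sch);
}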

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *n;
	unsigned int h;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, n, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}

extern int qdisc_class_hash_init(struct Qdisc_class_hash *);
extern void qdisc_class_hash_insert(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_remove(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
extern void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
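
/*
 * Illustrative sketch (not part of the original header): a classful
 * qdisc embeds struct Qdisc_class_common at the start of its per-class
 * structure and resolves classids through qdisc_class_find().  The
 * "example_class"/"example_class_lookup" names are hypothetical.
 */
struct example_class {
	struct Qdisc_class_common common;	/* must stay first */
	/* ... per-class scheduling state ... */
};

static inline struct example_class *
example_class_lookup(struct Qdisc_class_hash *hash, u32 classid)
{
	struct Qdisc_class_common *cl = qdisc_class_find(hash, classid);

	return cl ? container_of(cl, struct example_class, common) : NULL;
}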

extern void dev_init_scheduler(struct net_device *dev);
extern void dev_shutdown(struct net_device *dev);
extern void dev_activate(struct net_device *dev);
extern void dev_deactivate(struct net_device *dev);
extern struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
				     struct Qdisc *qdisc);
extern void qdisc_reset(struct Qdisc *qdisc);
extern void qdisc_destroy(struct Qdisc *qdisc);
extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
				 struct Qdisc_ops *ops);
extern struct Qdisc *qdisc_create_dflt(struct net_device *dev,
				       struct netdev_queue *dev_queue,
				       struct Qdisc_ops *ops, u32 parentid);
extern void qdisc_calculate_pkt_len(struct sk_buff *skb,
				    struct qdisc_size_table *stab);
extern void tcf_destroy(struct tcf_proto *tp);
extern void tcf_destroy_chain(struct tcf_proto **fl);

/* Reset all TX qdiscs of a device.  */
static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		qdisc_reset(netdev_get_tx_queue(dev, i)->qdisc);
}

/* Are all TX queues of the device empty?  */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = txq->qdisc;

		if (q->q.qlen)
			return false;
	}
	return true;
}

/* Are any of the TX qdiscs changing?  */
static inline bool qdisc_tx_changing(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (txq->qdisc != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues?  */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (txq->qdisc != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	if (sch->stab)
		qdisc_calculate_pkt_len(skb, sch->stab);
#endif
	return sch->enqueue(skb, sch);
}

static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
{
	qdisc_skb_cb(skb)->pkt_len = skb->len;
	return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}
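
/*
 * Illustrative sketch (not part of the original header): a parent
 * qdisc enqueueing into a hypothetical child typically propagates the
 * return code like this, so that packets stolen by an action
 * (__NET_XMIT_STOLEN) are not counted as local drops.
 */
static inline int example_enqueue_child(struct sk_buff *skb,
					struct Qdisc *sch,
					struct Qdisc *child)
{
	int ret = qdisc_enqueue(skb, child);

	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret))
			sch->qstats.drops++;
		return ret;
	}

	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}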

static inline void __qdisc_update_bstats(struct Qdisc *sch, unsigned int len)
{
	sch->bstats.bytes += len;
	sch->bstats.packets++;
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	__skb_queue_tail(list, skb);
	sch->qstats.backlog += qdisc_pkt_len(skb);
	__qdisc_update_bstats(sch, qdisc_pkt_len(skb));

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= qdisc_pkt_len(skb);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	return __qdisc_dequeue_head(sch, &sch->q);
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __qdisc_dequeue_head(sch, list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch)
{
	return __qdisc_queue_drop_head(sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue_tail(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= qdisc_pkt_len(skb);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
	return __qdisc_dequeue_tail(sch, &sch->q);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	return skb_peek(&sch->q);
}
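
/*
 * Illustrative sketch (not part of the original header): the helpers
 * above are sufficient to express a minimal tail-drop FIFO, similar in
 * spirit to pfifo.  The fixed EXAMPLE_FIFO_LIMIT is hypothetical; a
 * real qdisc would make the limit configurable via ->init()/->change().
 */
#define EXAMPLE_FIFO_LIMIT	128

static inline int example_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	if (likely(skb_queue_len(&sch->q) < EXAMPLE_FIFO_LIMIT))
		return qdisc_enqueue_tail(skb, sch);

	/* queue full: drop the new packet and account for it */
	kfree_skb(skb);
	sch->qstats.drops++;
	return NET_XMIT_DROP;
}

/* dequeue and peek would simply be qdisc_dequeue_head()/qdisc_peek_head() */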

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb)
			/* it's still part of the queue */
			sch->q.qlen++;
	}

	return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = sch->gso_skb;

	if (skb) {
		sch->gso_skb = NULL;
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}
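
/*
 * Illustrative sketch (not part of the original header): a shaping
 * qdisc peeks at its inner child and only commits the dequeue once the
 * packet may actually be sent; qdisc_dequeue_peeked() picks up the skb
 * stashed by the child's qdisc_peek_dequeued()-based ->peek().  The
 * "can_send" readiness test is hypothetical.
 */
static inline struct sk_buff *example_shaper_dequeue(struct Qdisc *sch,
						     struct Qdisc *child,
						     bool can_send)
{
	struct sk_buff *skb = child->ops->peek(child);

	if (skb == NULL || !can_send)
		return NULL;	/* packet stays queued in the child */

	skb = qdisc_dequeue_peeked(child);
	if (likely(skb != NULL))
		sch->q.qlen--;	/* parent mirrors the child's qlen */

	return skb;
}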

static inline void __qdisc_reset_queue(struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	/*
	 * We do not know the backlog in bytes of this list;
	 * it is up to the caller to correct it.
	 */
	__skb_queue_purge(list);
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(sch, &sch->q);
	sch->qstats.backlog = 0;
}

static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
					      struct sk_buff_head *list)
{
	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
	return __qdisc_queue_drop(sch, &sch->q);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);
	sch->qstats.drops++;

	return NET_XMIT_DROP;
}

static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
	sch->qstats.drops++;

#ifdef CONFIG_NET_CLS_ACT
	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
		goto drop;

	return NET_XMIT_SUCCESS;

drop:
#endif
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}
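
/*
 * Illustrative sketch (not part of the original header): token-bucket
 * style qdiscs charge each packet its transmission time via
 * qdisc_l2t().  The token arithmetic below is a simplified,
 * hypothetical version of what e.g. TBF does.
 */
static inline long example_charge_packet(struct qdisc_rate_table *rtab,
					 long tokens, unsigned int pktlen)
{
	/* remaining tokens after "sending" pktlen bytes at rtab's rate */
	return tokens - (long)qdisc_l2t(rtab, pktlen);
}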

#ifdef CONFIG_NET_CLS_ACT
static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n = skb_clone(skb, gfp_mask);

	if (n) {
		n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
		n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
		n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
	}
	return n;
}
#endif

#endif	/* __NET_SCHED_GENERIC_H */