#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table
{
	struct tc_ratespec rate;
	u32		data[256];
	struct qdisc_rate_table *next;
	int		refcnt;
};

enum qdisc_state_t
{
	__QDISC_STATE_RUNNING,
	__QDISC_STATE_SCHED,
};

struct qdisc_size_table {
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

struct Qdisc
{
	int			(*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
	struct sk_buff *	(*dequeue)(struct Qdisc *dev);
	unsigned		flags;
#define TCQ_F_BUILTIN	1
#define TCQ_F_THROTTLED	2
#define TCQ_F_INGRESS	4
	int			padded;
	struct Qdisc_ops	*ops;
	struct qdisc_size_table	*stab;
	u32			handle;
	u32			parent;
	atomic_t		refcnt;
	unsigned long		state;
	struct sk_buff		*gso_skb;
	struct sk_buff_head	q;
	struct netdev_queue	*dev_queue;
	struct Qdisc		*next_sched;
	struct list_head	list;

	struct gnet_stats_basic	bstats;
	struct gnet_stats_queue	qstats;
	struct gnet_stats_rate_est	rate_est;
	struct rcu_head		q_rcu;
	int			(*reshape_fail)(struct sk_buff *skb,
					struct Qdisc *q);

	void			*u32_node;

	/* This field is deprecated, but it is still used by CBQ
	 * and it will live until a better solution is invented.
	 */
	struct Qdisc		*__parent;
};

struct Qdisc_class_ops
{
	/* Child qdisc manipulation */
	int			(*graft)(struct Qdisc *, unsigned long cl,
					struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*get)(struct Qdisc *, u32 classid);
	void			(*put)(struct Qdisc *, unsigned long);
	int			(*change)(struct Qdisc *, u32, u32,
					struct nlattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker * arg);

	/* Filter manipulation */
	struct tcf_proto **	(*tcf_chain)(struct Qdisc *, unsigned long);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg*);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					struct gnet_dump *);
};

struct Qdisc_ops
{
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int			(*enqueue)(struct sk_buff *, struct Qdisc *);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	int			(*requeue)(struct sk_buff *, struct Qdisc *);
	unsigned int		(*drop)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct nlattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct nlattr *arg);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};

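/*
 * Illustrative sketch (not part of the original header): a minimal
 * classless qdisc could point its Qdisc_ops at the generic queue
 * helpers defined further down in this file.  The name "example_fifo"
 * and the ops variable are invented for the example; registration
 * would normally go through register_qdisc() declared in
 * net/pkt_sched.h.
 *
 *	static struct Qdisc_ops example_fifo_qdisc_ops = {
 *		.id		= "example_fifo",
 *		.priv_size	= 0,
 *		.enqueue	= qdisc_enqueue_tail,
 *		.dequeue	= qdisc_dequeue_head,
 *		.requeue	= qdisc_requeue,
 *		.drop		= qdisc_queue_drop,
 *		.owner		= THIS_MODULE,
 *	};
 */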

struct tcf_result
{
	unsigned long	class;
	u32		classid;
};

struct tcf_proto_ops
{
	struct tcf_proto_ops	*next;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff*, struct tcf_proto*,
					struct tcf_result *);
	int			(*init)(struct tcf_proto*);
	void			(*destroy)(struct tcf_proto*);

	unsigned long		(*get)(struct tcf_proto*, u32 handle);
	void			(*put)(struct tcf_proto*, unsigned long);
	int			(*change)(struct tcf_proto*, unsigned long,
					u32 handle, struct nlattr **,
					unsigned long *);
	int			(*delete)(struct tcf_proto*, unsigned long);
	void			(*walk)(struct tcf_proto*, struct tcf_walker *arg);

	/* rtnetlink specific */
	int			(*dump)(struct tcf_proto*, unsigned long,
					struct sk_buff *skb, struct tcmsg*);

	struct module		*owner;
};

struct tcf_proto
{
	/* Fast access part */
	struct tcf_proto	*next;
	void			*root;
	int			(*classify)(struct sk_buff*, struct tcf_proto*,
					struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	struct tcf_proto_ops	*ops;
};

struct qdisc_skb_cb {
	unsigned int		pkt_len;
	char			data[];
};

static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest.  This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us.  Otherwise we'll potentially lock the wrong qdisc
 * root.  This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_lock(q));
}

static inline void sch_tree_unlock(struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_lock(q));
}

#define tcf_tree_lock(tp)	sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)	sch_tree_unlock((tp)->q)
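
/*
 * Usage sketch (hypothetical, not part of the original header): a
 * qdisc's ->change() handler typically brackets reconfiguration with
 * these helpers so the packet-processing paths cannot run against a
 * half-updated tree.  The handler, its private struct and the "limit"
 * field are invented; qdisc_priv() is assumed from net/pkt_sched.h.
 *
 *	static int example_change(struct Qdisc *sch, struct nlattr *opt)
 *	{
 *		struct example_sched_data *q = qdisc_priv(sch);
 *		u32 limit = nla_get_u32(opt);
 *
 *		sch_tree_lock(sch);
 *		q->limit = limit;
 *		sch_tree_unlock(sch);
 *		return 0;
 *	}
 */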

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;

struct Qdisc_class_common
{
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash
{
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *n;
	unsigned int h;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, n, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}
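
/*
 * Illustrative pattern (assumed, not mandated by this header): a
 * classful qdisc embeds Qdisc_class_common at the start of its own
 * class structure, inserts it with qdisc_class_hash_insert(), and maps
 * a classid back to the class via qdisc_class_find() + container_of().
 * "struct example_class" and example_class_find() are invented names.
 *
 *	struct example_class {
 *		struct Qdisc_class_common common;
 *		u32 quantum;
 *	};
 *
 *	static struct example_class *
 *	example_class_find(struct Qdisc_class_hash *hash, u32 classid)
 *	{
 *		struct Qdisc_class_common *clc;
 *
 *		clc = qdisc_class_find(hash, classid);
 *		return clc ? container_of(clc, struct example_class, common)
 *			   : NULL;
 *	}
 */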

extern int qdisc_class_hash_init(struct Qdisc_class_hash *);
extern void qdisc_class_hash_insert(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_remove(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
extern void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

extern void dev_init_scheduler(struct net_device *dev);
extern void dev_shutdown(struct net_device *dev);
extern void dev_activate(struct net_device *dev);
extern void dev_deactivate(struct net_device *dev);
extern void qdisc_reset(struct Qdisc *qdisc);
extern void qdisc_destroy(struct Qdisc *qdisc);
extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
				 struct Qdisc_ops *ops);
extern struct Qdisc *qdisc_create_dflt(struct net_device *dev,
				       struct netdev_queue *dev_queue,
				       struct Qdisc_ops *ops, u32 parentid);
extern void qdisc_calculate_pkt_len(struct sk_buff *skb,
				    struct qdisc_size_table *stab);
extern void tcf_destroy(struct tcf_proto *tp);
extern void tcf_destroy_chain(struct tcf_proto **fl);

/* Reset all TX qdiscs of a device. */
static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++)
		qdisc_reset(netdev_get_tx_queue(dev, i)->qdisc);
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = txq->qdisc;

		if (q->q.qlen)
			return false;
	}
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (txq->qdisc != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (txq->qdisc != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif
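
/*
 * Illustrative use (assumed pattern, not part of the original header):
 * when a child enqueue fails, a parent qdisc usually counts a drop only
 * if the packet was really dropped rather than stolen by an action,
 * which is what net_xmit_drop_count() distinguishes.  The function name
 * and the "child stored in the private area" layout below are invented.
 *
 *	static int example_parent_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 *	{
 *		struct Qdisc *child = *(struct Qdisc **)qdisc_priv(sch);
 *		int ret = qdisc_enqueue(skb, child);
 *
 *		if (ret != NET_XMIT_SUCCESS) {
 *			if (net_xmit_drop_count(ret))
 *				sch->qstats.drops++;
 *			return ret;
 *		}
 *		sch->q.qlen++;
 *		return NET_XMIT_SUCCESS;
 *	}
 */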

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	if (sch->stab)
		qdisc_calculate_pkt_len(skb, sch->stab);
#endif
	return sch->enqueue(skb, sch);
}

static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
{
	qdisc_skb_cb(skb)->pkt_len = skb->len;
	return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}
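
/*
 * Informational sketch of how pkt_len flows through these helpers (a
 * summary of the code above, not additional API): the root enqueue
 * stamps skb->len into the skb control block, an optional size table
 * may rescale it, and all later byte accounting reads it back through
 * qdisc_pkt_len().
 *
 *	qdisc_enqueue_root(skb, sch)
 *	  -> qdisc_skb_cb(skb)->pkt_len = skb->len
 *	  -> qdisc_enqueue(skb, sch)
 *	       -> qdisc_calculate_pkt_len(skb, sch->stab)   (only if a stab is set)
 *	       -> sch->enqueue(skb, sch)
 *	  ... later: sch->qstats.backlog += qdisc_pkt_len(skb)
 */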

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	__skb_queue_tail(list, skb);
	sch->qstats.backlog += qdisc_pkt_len(skb);
	sch->bstats.bytes += qdisc_pkt_len(skb);
	sch->bstats.packets++;

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= qdisc_pkt_len(skb);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	return __qdisc_dequeue_head(sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue_tail(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= qdisc_pkt_len(skb);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
	return __qdisc_dequeue_tail(sch, &sch->q);
}

static inline int __qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch,
				  struct sk_buff_head *list)
{
	__skb_queue_head(list, skb);
	sch->qstats.backlog += qdisc_pkt_len(skb);
	sch->qstats.requeues++;

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_requeue(skb, sch, &sch->q);
}

static inline void __qdisc_reset_queue(struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	/*
	 * We do not know the backlog in bytes of this list, it
	 * is up to the caller to correct it
	 */
	__skb_queue_purge(list);
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(sch, &sch->q);
	sch->qstats.backlog = 0;
}

static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
					      struct sk_buff_head *list)
{
	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);
		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
	return __qdisc_queue_drop(sch, &sch->q);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);
	sch->qstats.drops++;

	return NET_XMIT_DROP;
}

static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
	sch->qstats.drops++;

#ifdef CONFIG_NET_CLS_ACT
	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
		goto drop;

	return NET_XMIT_SUCCESS;

drop:
#endif
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
   long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return (rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF]);
	return rtab->data[slot];
}
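
/*
 * Worked example (illustrative numbers only): with cell_align and
 * overhead both 0 and rate.cell_log = 3, a 1000-byte packet selects
 * slot = 1000 >> 3 = 125, so the lookup returns rtab->data[125], the
 * precomputed transmission time for that size at the configured rate.
 * Slots above 255 are approximated from data[255] and the low-order
 * slot, as in the code above.
 */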

#ifdef CONFIG_NET_CLS_ACT
static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n = skb_clone(skb, gfp_mask);

	if (n) {
		n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
		n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
		n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
	}
	return n;
}
#endif

#endif