/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_SCHED_H
#define __NET_PKT_SCHED_H

#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <net/sch_generic.h>
#include <net/net_namespace.h>
#include <uapi/linux/pkt_sched.h>

#define DEFAULT_TX_QUEUE_LEN    1000

struct qdisc_walker {
        int     stop;   /* set when the walk is aborted */
        int     skip;   /* number of initial entries to skip */
        int     count;  /* number of entries visited so far */
        int     (*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
};

static inline void *qdisc_priv(struct Qdisc *q)
{
        return &q->privdata;
}

static inline struct Qdisc *qdisc_from_priv(void *priv)
{
        return container_of(priv, struct Qdisc, privdata);
}

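/*
 * Example (illustrative sketch, not part of the original header): a
 * hypothetical qdisc "foo" keeps its state in the private area that
 * follows struct Qdisc and fetches it with qdisc_priv(). All "foo_"
 * names below are invented for illustration.
 *
 *      struct foo_sched_data {
 *              u32 limit;
 *      };
 *
 *      static int foo_init(struct Qdisc *sch, struct nlattr *opt,
 *                          struct netlink_ext_ack *extack)
 *      {
 *              struct foo_sched_data *q = qdisc_priv(sch);
 *
 *              q->limit = qdisc_dev(sch)->tx_queue_len;
 *              return 0;
 *      }
 *
 * The size of the private area is set by the priv_size field of the
 * qdisc's Qdisc_ops (e.g. .priv_size = sizeof(struct foo_sched_data)).
 */
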
/*
 * The timer resolution MUST be less than 10% of
 * min_schedulable_packet_size / bandwidth.
 *
 * A typical IP packet is ~512 bytes, hence:
 *
 * 0.5 Kbyte / 1 Mbyte/sec = 0.5 msec, so we need a 50 usec timer for
 * 10 Mbit Ethernet.
 *
 * A 10 msec resolution limits us to less than 50 Kbit/sec.
 *
 * The result: a 386 or 486 is not a good choice for a QoS router :-(
 *
 * Things are not as bad as they seem, because in the most critical
 * places we may use an artificial clock evaluated by integration of
 * the network data flow.
 */

typedef u64     psched_time_t;
typedef long    psched_tdiff_t;

/* Avoid doing 64 bit divide */
#define PSCHED_SHIFT                    6
#define PSCHED_TICKS2NS(x)              ((s64)(x) << PSCHED_SHIFT)
#define PSCHED_NS2TICKS(x)              ((x) >> PSCHED_SHIFT)

#define PSCHED_TICKS_PER_SEC            PSCHED_NS2TICKS(NSEC_PER_SEC)
#define PSCHED_PASTPERFECT              0

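/*
 * Worked example (editorial note, not from the original source): with
 * PSCHED_SHIFT = 6, one psched tick is 2^6 = 64 ns, so
 * PSCHED_TICKS_PER_SEC = 10^9 >> 6 = 15625000 ticks per second.
 * Converting between nanoseconds and ticks therefore costs only a
 * shift instead of a 64-bit divide.
 */
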
static inline psched_time_t psched_get_time(void)
{
        return PSCHED_NS2TICKS(ktime_get_ns());
}

static inline psched_tdiff_t
psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound)
{
        return min(tv1 - tv2, bound);
}

struct qdisc_watchdog {
        u64             last_expires;
        struct hrtimer  timer;
        struct Qdisc    *qdisc;
};

void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
                                 clockid_t clockid);
void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);

void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
                                      u64 delta_ns);

static inline void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd,
                                              u64 expires)
{
        qdisc_watchdog_schedule_range_ns(wd, expires, 0ULL);
}

static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
                                           psched_time_t expires)
{
        qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires));
}

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);

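/*
 * Example (illustrative sketch, not from this header): a rate-limiting
 * qdisc typically arms the watchdog from its ->dequeue() when the next
 * packet may not leave yet; the watchdog's hrtimer then reschedules the
 * qdisc at the given time. The "foo_" names and the watchdog/next_tx_ns
 * fields of foo_sched_data are invented for illustration.
 *
 *      static struct sk_buff *foo_dequeue(struct Qdisc *sch)
 *      {
 *              struct foo_sched_data *q = qdisc_priv(sch);
 *              u64 now = ktime_get_ns();
 *
 *              if (now < q->next_tx_ns) {
 *                      qdisc_watchdog_schedule_ns(&q->watchdog,
 *                                                 q->next_tx_ns);
 *                      return NULL;
 *              }
 *              return qdisc_dequeue_head(sch);
 *      }
 *
 * The watchdog is set up once in ->init() with qdisc_watchdog_init()
 * and torn down in ->destroy() with qdisc_watchdog_cancel().
 */
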
extern struct Qdisc_ops pfifo_qdisc_ops;
extern struct Qdisc_ops bfifo_qdisc_ops;
extern struct Qdisc_ops pfifo_head_drop_qdisc_ops;

int fifo_set_limit(struct Qdisc *q, unsigned int limit);
struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
                               unsigned int limit,
                               struct netlink_ext_ack *extack);

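/*
 * Example (illustrative sketch, not from this header): shaping qdiscs
 * such as TBF use fifo_create_dflt() to attach a default bfifo child
 * sized from a configured limit. A minimal use, with "limit" and
 * "extack" assumed to come from the caller's own configuration path:
 *
 *      struct Qdisc *child;
 *
 *      child = fifo_create_dflt(sch, &bfifo_qdisc_ops, limit, extack);
 *      if (IS_ERR(child))
 *              return PTR_ERR(child);
 */
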
int register_qdisc(struct Qdisc_ops *qops);
int unregister_qdisc(struct Qdisc_ops *qops);
void qdisc_get_default(char *id, size_t len);
int qdisc_set_default(const char *id);

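/*
 * Example (illustrative sketch, not from this header): a qdisc module
 * registers its Qdisc_ops at load time and unregisters it on unload.
 * The "foo" names are invented for illustration.
 *
 *      static struct Qdisc_ops foo_qdisc_ops __read_mostly = {
 *              .id             = "foo",
 *              .priv_size      = sizeof(struct foo_sched_data),
 *              .enqueue        = foo_enqueue,
 *              .dequeue        = foo_dequeue,
 *              .peek           = qdisc_peek_dequeued,
 *              .init           = foo_init,
 *              .owner          = THIS_MODULE,
 *      };
 *
 *      static int __init foo_module_init(void)
 *      {
 *              return register_qdisc(&foo_qdisc_ops);
 *      }
 *
 *      static void __exit foo_module_exit(void)
 *      {
 *              unregister_qdisc(&foo_qdisc_ops);
 *      }
 *      module_init(foo_module_init);
 *      module_exit(foo_module_exit);
 */
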
void qdisc_hash_add(struct Qdisc *q, bool invisible);
void qdisc_hash_del(struct Qdisc *q);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle);
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
                                        struct nlattr *tab,
                                        struct netlink_ext_ack *extack);
void qdisc_put_rtab(struct qdisc_rate_table *tab);
void qdisc_put_stab(struct qdisc_size_table *tab);
void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc);
bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
                     struct net_device *dev, struct netdev_queue *txq,
                     spinlock_t *root_lock, bool validate);

void __qdisc_run(struct Qdisc *q);

static inline void qdisc_run(struct Qdisc *q)
{
        if (qdisc_run_begin(q)) {
                /* NOLOCK qdisc must check 'state' under the qdisc seqlock
                 * to avoid racing with dev_qdisc_reset()
                 */
                if (!(q->flags & TCQ_F_NOLOCK) ||
                    likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
                        __qdisc_run(q);
                qdisc_run_end(q);
        }
}

/* Calculate the maximal packet size that the hard_start_xmit
 * routine of this device can see.
 */
static inline unsigned int psched_mtu(const struct net_device *dev)
{
        return dev->mtu + dev->hard_header_len;
}

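/*
 * Worked example (editorial note, not from the original source): for a
 * standard Ethernet device, psched_mtu() yields 1500 (mtu) + 14
 * (hard_header_len, i.e. ETH_HLEN) = 1514 bytes.
 */
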
static inline struct net *qdisc_net(struct Qdisc *q)
{
        return dev_net(q->dev_queue->dev);
}

struct tc_cbs_qopt_offload {
        u8 enable;
        s32 queue;
        s32 hicredit;
        s32 locredit;
        s32 idleslope;
        s32 sendslope;
};

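/*
 * Example (illustrative sketch, not from this header): a driver
 * receives struct tc_cbs_qopt_offload through its ndo_setup_tc()
 * callback when the CBS qdisc requests hardware offload.
 * foo_setup_cbs() is an invented driver helper.
 *
 *      static int foo_setup_tc(struct net_device *dev,
 *                              enum tc_setup_type type, void *type_data)
 *      {
 *              switch (type) {
 *              case TC_SETUP_QDISC_CBS:
 *                      return foo_setup_cbs(dev, type_data);
 *              default:
 *                      return -EOPNOTSUPP;
 *              }
 *      }
 */
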
struct tc_etf_qopt_offload {
        u8 enable;
        s32 queue;
};

struct tc_taprio_sched_entry {
        u8 command; /* TC_TAPRIO_CMD_* */

        /* The gate_mask on the offloading side refers to traffic classes */
        u32 gate_mask;
        u32 interval;
};

struct tc_taprio_qopt_offload {
        u8 enable;
        ktime_t base_time;
        u64 cycle_time;
        u64 cycle_time_extension;

        size_t num_entries;
        struct tc_taprio_sched_entry entries[];
};

/* Reference counting */
struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
                                                  *offload);
void taprio_offload_free(struct tc_taprio_qopt_offload *offload);

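/*
 * Example (illustrative sketch, not from this header): a driver that
 * needs to keep the taprio schedule past the ndo_setup_tc() call takes
 * its own reference and drops it when done. struct foo_priv and
 * foo_apply_entry() are invented for illustration.
 *
 *      static int foo_setup_taprio(struct foo_priv *priv,
 *                                  struct tc_taprio_qopt_offload *offload)
 *      {
 *              size_t i;
 *
 *              priv->taprio = taprio_offload_get(offload);
 *              for (i = 0; i < offload->num_entries; i++)
 *                      foo_apply_entry(priv, &offload->entries[i]);
 *              return 0;
 *      }
 *
 *      static void foo_release_taprio(struct foo_priv *priv)
 *      {
 *              taprio_offload_free(priv->taprio);
 *              priv->taprio = NULL;
 *      }
 */
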
#endif /* __NET_PKT_SCHED_H */