#ifndef __LINUX_NETDEVICE_WRAPPER_H
#define __LINUX_NETDEVICE_WRAPPER_H 1

#include_next <linux/netdevice.h>
#include <linux/if_bridge.h>

struct net;

#include <linux/version.h>

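/* Several IFF_* priv_flags postdate the oldest kernels this header supports.
 * Defining a missing flag to 0 keeps "dev->priv_flags & FLAG" tests
 * compiling everywhere; on old kernels they simply evaluate to false.
 */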
#ifndef IFF_TX_SKB_SHARING
#define IFF_TX_SKB_SHARING 0
#endif

#ifndef IFF_OVS_DATAPATH
#define IFF_OVS_DATAPATH 0
#else
#define HAVE_OVS_DATAPATH
#endif

#ifndef IFF_LIVE_ADDR_CHANGE
#define IFF_LIVE_ADDR_CHANGE 0
#endif

#ifndef IFF_OPENVSWITCH
#define IFF_OPENVSWITCH 0
#endif

#ifndef to_net_dev
#define to_net_dev(class) container_of(class, struct net_device, NETDEV_DEV_MEMBER)
#endif

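/* Kernels that predate NET_NAME_UNKNOWN also predate the name_assign_type
 * parameter of alloc_netdev().  Map the modern four-argument form onto
 * alloc_netdev_mq() with a single TX queue, discarding the assign type.
 */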
#ifndef HAVE_NET_NAME_UNKNOWN
#undef alloc_netdev
#define NET_NAME_UNKNOWN 0
#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
        alloc_netdev_mq(sizeof_priv, name, setup, 1)
#endif

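/* Batched unregistration (unregister_netdevice_queue()/_many()) appeared in
 * 2.6.33.  On older kernels, unregister each device immediately and make the
 * batch flush a no-op; the "head" list argument is ignored.
 */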
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)
#define unregister_netdevice_queue(dev, head) unregister_netdevice(dev)
#define unregister_netdevice_many(head)
#endif

#ifndef HAVE_DEV_DISABLE_LRO
extern void dev_disable_lro(struct net_device *dev);
#endif

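/* Where the RCU lookup does not exist, fall back to taking dev_base_lock
 * around the plain lookup.  As with the upstream function, no reference is
 * taken on the returned device.
 */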
#ifndef HAVE_DEV_GET_BY_INDEX_RCU
static inline struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
        struct net_device *dev;

        read_lock(&dev_base_lock);
        dev = __dev_get_by_index(net, ifindex);
        read_unlock(&dev_base_lock);

        return dev;
}
#endif

#ifndef NETIF_F_FSO
#define NETIF_F_FSO 0
#endif

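/* Before netdev_features_t existed, feature flags were a plain u32, so
 * mirror that layout here.
 */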
#ifndef HAVE_NETDEV_FEATURES_T
typedef u32 netdev_features_t;
#endif

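/* "rpl_" ("replacement") is the convention used throughout this compat
 * layer: the upstream symbol is #defined to an rpl_-prefixed local variant,
 * so call sites elsewhere in the tree stay unchanged.
 */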
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0)
#define OVS_USE_COMPAT_GSO_SEGMENTATION
#endif

#ifdef OVS_USE_COMPAT_GSO_SEGMENTATION
/* Define a compat version to handle MPLS segmentation offload. */
#define __skb_gso_segment rpl__skb_gso_segment
struct sk_buff *rpl__skb_gso_segment(struct sk_buff *skb,
                                     netdev_features_t features,
                                     bool tx_path);

#define skb_gso_segment rpl_skb_gso_segment
static inline
struct sk_buff *rpl_skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
        return rpl__skb_gso_segment(skb, features, true);
}
#endif

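/* On kernels whose netif_needs_gso() also takes a struct net_device
 * argument, provide the two-argument form the rest of this tree calls:
 * software GSO is needed if the device cannot segment this skb with the
 * given features, or if the checksum state is one GSO cannot preserve.
 */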
#ifdef HAVE_NETIF_NEEDS_GSO_NETDEV
#define netif_needs_gso rpl_netif_needs_gso
static inline bool netif_needs_gso(struct sk_buff *skb,
                                   netdev_features_t features)
{
        return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
                unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
                         (skb->ip_summed != CHECKSUM_UNNECESSARY)));
}
#endif

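/* netdev_master_upper_dev_link() has had several upstream signatures: the
 * original two-argument form, a form adding upper_priv/upper_info, a Red Hat
 * backport (the _RH variant), and finally a form with a netlink extack
 * argument.  The nested conditionals below normalize all of them to the
 * five-argument call used by this tree.
 */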
#ifndef HAVE_NETDEV_MASTER_UPPER_DEV_LINK_PRIV
#ifndef HAVE_NETDEV_MASTER_UPPER_DEV_LINK_RH
static inline int rpl_netdev_master_upper_dev_link(struct net_device *dev,
                                                   struct net_device *upper_dev,
                                                   void *upper_priv,
                                                   void *upper_info, void *extack)
{
        return netdev_master_upper_dev_link(dev, upper_dev);
}
#define netdev_master_upper_dev_link rpl_netdev_master_upper_dev_link
#else /* #ifndef HAVE_NETDEV_MASTER_UPPER_DEV_LINK_RH */
static inline int rpl_netdev_master_upper_dev_link(struct net_device *dev,
                                                   struct net_device *upper_dev,
                                                   void *upper_priv,
                                                   void *upper_info, void *extack)
{
        return netdev_master_upper_dev_link(dev, upper_dev,
                                            upper_priv, upper_info);
}
#undef netdev_master_upper_dev_link
#define netdev_master_upper_dev_link rpl_netdev_master_upper_dev_link
#endif /* #else HAVE_NETDEV_MASTER_UPPER_DEV_LINK_RH */
#else /* #ifndef HAVE_NETDEV_MASTER_UPPER_DEV_LINK_PRIV */
#ifndef HAVE_UPPER_DEV_LINK_EXTACK
static inline int rpl_netdev_master_upper_dev_link(struct net_device *dev,
                                                   struct net_device *upper_dev,
                                                   void *upper_priv,
                                                   void *upper_info, void *extack)
{
        return netdev_master_upper_dev_link(dev, upper_dev, upper_priv,
                                            upper_info);
}
#define netdev_master_upper_dev_link rpl_netdev_master_upper_dev_link
#endif /* #ifndef HAVE_UPPER_DEV_LINK_EXTACK */
#endif /* #else HAVE_NETDEV_MASTER_UPPER_DEV_LINK_PRIV */

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0)
#define dev_queue_xmit rpl_dev_queue_xmit
int rpl_dev_queue_xmit(struct sk_buff *skb);
#endif

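/* Before 3.11 there was no struct netdev_notifier_info: notifier callbacks
 * received the struct net_device pointer directly, so the conversion is an
 * identity function.
 */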
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0)
static inline struct net_device *rpl_netdev_notifier_info_to_dev(void *info)
{
        return info;
}
#define netdev_notifier_info_to_dev rpl_netdev_notifier_info_to_dev
#endif

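/* Upstream renamed struct pcpu_tstats to pcpu_sw_netstats; map the new name
 * onto the old type where only the old one exists.
 */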
#ifndef HAVE_PCPU_SW_NETSTATS
#define pcpu_sw_netstats pcpu_tstats
#endif

#if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)
/* Use the compat version on all Red Hat releases before RHEL 8. */
#undef netdev_alloc_pcpu_stats
#endif

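/* Open-coded equivalent of the upstream helper: allocate per-CPU stats and
 * initialize each CPU's u64_stats syncp.  Typical use (caller-side sketch):
 *
 *      struct pcpu_sw_netstats __percpu *stats =
 *              netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 */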
#ifndef netdev_alloc_pcpu_stats
#define netdev_alloc_pcpu_stats(type)                                   \
({                                                                      \
        typeof(type) __percpu *pcpu_stats = alloc_percpu(type);         \
        if (pcpu_stats) {                                               \
                int ____i;                                              \
                for_each_possible_cpu(____i) {                          \
                        typeof(type) *stat;                             \
                        stat = per_cpu_ptr(pcpu_stats, ____i);          \
                        u64_stats_init(&stat->syncp);                   \
                }                                                       \
        }                                                               \
        pcpu_stats;                                                     \
})
#endif

#ifndef HAVE_DEV_RECURSION_LEVEL
static inline bool dev_recursion_level(void) { return false; }
#endif

#ifndef NET_NAME_USER
#define NET_NAME_USER 3
#endif

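/* Remote checksum offload support for GRO.  On kernels without struct
 * gro_remcsum the hooks compile away entirely; kernels that have it but are
 * older than 4.3 get a local skb_gro_remcsum_process() that also handles
 * the "nopartial" mode.
 */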
#ifndef HAVE_GRO_REMCSUM
struct gro_remcsum {
};

#define skb_gro_remcsum_init(grc)
#define skb_gro_remcsum_cleanup(a1, a2)
#else
#if LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0)

#define skb_gro_remcsum_process rpl_skb_gro_remcsum_process
static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
                                            unsigned int off, size_t hdrlen,
                                            int start, int offset,
                                            struct gro_remcsum *grc,
                                            bool nopartial)
{
        __wsum delta;
        size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);

        BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);

        if (!nopartial) {
                NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
                return ptr;
        }

        ptr = skb_gro_header_fast(skb, off);
        if (skb_gro_header_hard(skb, off + plen)) {
                ptr = skb_gro_header_slow(skb, off + plen, off);
                if (!ptr)
                        return NULL;
        }

        delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
                               start, offset);

        /* Adjust skb->csum since we changed the packet */
        NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);

        grc->offset = off + hdrlen + offset;
        grc->delta = delta;

        return ptr;
}
#endif
#endif

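/* Where struct rtnl_link_stats64 is missing, rpl_dev_get_stats() provides a
 * replacement that fills caller-provided 64-bit storage.
 */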
#ifndef HAVE_RTNL_LINK_STATS64
#define dev_get_stats rpl_dev_get_stats
struct rtnl_link_stats64 *rpl_dev_get_stats(struct net_device *dev,
                                            struct rtnl_link_stats64 *storage);
#endif

#if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)
/* Only required on RHEL 6. */
#define dev_get_stats dev_get_stats64
#endif

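/* Minimal fallbacks for kernels that predate the netdev_* printk helpers.
 * Each invocation issues two printk() calls, so the device name and the
 * message may land on separate log lines; that is a limitation of this
 * simple form.
 */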
#ifndef netdev_dbg
#define netdev_dbg(__dev, format, args...)                      \
do {                                                            \
        printk(KERN_DEBUG "%s ", __dev->name);                  \
        printk(KERN_DEBUG format, ##args);                      \
} while (0)
#endif

#ifndef netdev_info
#define netdev_info(__dev, format, args...)                     \
do {                                                            \
        printk(KERN_INFO "%s ", __dev->name);                   \
        printk(KERN_INFO format, ##args);                       \
} while (0)
#endif

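/* When the compat (non-upstream) tunnel implementation is in use, OVS also
 * supplies its own metadata-dst filler for tunnel devices.
 */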
#ifndef USE_UPSTREAM_TUNNEL
#define dev_fill_metadata_dst ovs_dev_fill_metadata_dst
int ovs_dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
#endif

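/* Netdevice notifier events for pushing VXLAN/Geneve receive-offload state
 * to devices; define them where the kernel lacks them.
 */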
#ifndef NETDEV_OFFLOAD_PUSH_VXLAN
#define NETDEV_OFFLOAD_PUSH_VXLAN 0x001C
#endif

#ifndef NETDEV_OFFLOAD_PUSH_GENEVE
#define NETDEV_OFFLOAD_PUSH_GENEVE 0x001D
#endif

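/* Kernels without IFF_PHONY_HEADROOM cannot propagate forwarding headroom
 * between stacked devices, so these helpers degrade to no-ops.
 */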
#ifndef HAVE_IFF_PHONY_HEADROOM

#define IFF_PHONY_HEADROOM 0
static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
{
        return 0;
}

static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
{
}

/* set the device rx headroom to the dev's default */
static inline void netdev_reset_rx_headroom(struct net_device *dev)
{
}

#endif

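/* Unlike most flags above, IFF_NO_QUEUE can be detected with a plain #ifdef,
 * so also record its availability for other compat code.
 */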
#ifdef IFF_NO_QUEUE
#define HAVE_IFF_NO_QUEUE
#else
#define IFF_NO_QUEUE 0
#endif

#ifndef HAVE_SKB_CSUM_HWOFFLOAD_HELP
static inline int skb_csum_hwoffload_help(struct sk_buff *skb,
                                          const netdev_features_t features)
{
        /* Approximating with skb_checksum_help() is less accurate on older
         * kernels, but it was sufficient for a long time.  If you care about
         * ensuring that upstream commit 7529390d08f0 has the same effect on
         * older kernels, consider backporting the following commits:
         * b72b5bf6a8fc ("net: introduce skb_crc32c_csum_help")
         * 43c26a1a4593 ("net: more accurate checksumming in validate_xmit_skb()")
         */
        return skb_checksum_help(skb);
}
#endif

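/* Undo the header pulls done by a failed GSO segmentation attempt, restoring
 * protocol, MAC/network offsets, and mac_len so the caller can hand the skb
 * back or free it in a consistent state.
 */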
#ifndef HAVE_SKB_GSO_ERROR_UNWIND
static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
                                        int pulled_hlen, u16 mac_offset,
                                        int mac_len)
{
        skb->protocol = protocol;
        skb->encapsulation = 1;
        skb_push(skb, pulled_hlen);
        skb_reset_transport_header(skb);
        skb->mac_header = mac_offset;
        skb->network_header = skb->mac_header + mac_len;
        skb->mac_len = mac_len;
}
#endif
#endif /* __LINUX_NETDEVICE_WRAPPER_H */