]> git.proxmox.com Git - mirror_ovs.git/blob - datapath/linux/compat/include/linux/netdevice.h
datapath: more accurate checksumming in queue_userspace_packet()
[mirror_ovs.git] / datapath / linux / compat / include / linux / netdevice.h
1 #ifndef __LINUX_NETDEVICE_WRAPPER_H
2 #define __LINUX_NETDEVICE_WRAPPER_H 1
3
4 #include_next <linux/netdevice.h>
5 #include <linux/if_bridge.h>
6
7 struct net;
8
9 #include <linux/version.h>
10
/* Fallbacks for net_device priv_flags that may not exist on older
 * kernels.  Defining an absent flag as 0 lets code test or OR it
 * unconditionally with no effect. */
#ifndef IFF_TX_SKB_SHARING
#define IFF_TX_SKB_SHARING 0
#endif

#ifndef IFF_OVS_DATAPATH
#define IFF_OVS_DATAPATH 0
#else
/* The kernel can mark a device as an OVS datapath port. */
#define HAVE_OVS_DATAPATH
#endif

#ifndef IFF_LIVE_ADDR_CHANGE
#define IFF_LIVE_ADDR_CHANGE 0
#endif
24
/* IFF_NO_QUEUE deliberately gets NO fallback definition here: the
 * "#ifdef IFF_NO_QUEUE" probe further down this header must still be
 * able to tell whether the kernel provides the flag (it defines
 * HAVE_IFF_NO_QUEUE when present and supplies the 0 fallback itself).
 * An early fallback would make that probe succeed unconditionally,
 * defining HAVE_IFF_NO_QUEUE even on kernels without the flag. */
#ifndef IFF_OPENVSWITCH
#define IFF_OPENVSWITCH 0
#endif
31
/* Map an embedded struct device back to its containing net_device.
 * NETDEV_DEV_MEMBER is presumably the configure-detected name of the
 * embedded member (it changed across kernel versions) — confirm against
 * the build system. */
#ifndef to_net_dev
#define to_net_dev(class) container_of(class, struct net_device, NETDEV_DEV_MEMBER)
#endif

/* Kernels without NET_NAME_* take alloc_netdev() without the
 * name_assign_type argument; emulate the modern four-argument signature.
 * The name_assign_type argument is accepted but ignored. */
#ifndef HAVE_NET_NAME_UNKNOWN
#undef alloc_netdev
#define NET_NAME_UNKNOWN 0
#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
	alloc_netdev_mq(sizeof_priv, name, setup, 1)
#endif
42
/* Before 2.6.33 there was no deferred-unregister API: map the queued
 * variant onto an immediate unregister_netdevice(), and make the batch
 * flush a no-op (every device was already unregistered immediately). */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)
#define unregister_netdevice_queue(dev, head) unregister_netdevice(dev)
#define unregister_netdevice_many(head)
#endif

#ifndef HAVE_DEV_DISABLE_LRO
/* Backport of dev_disable_lro(); presumably implemented elsewhere in
 * the compat code — confirm. */
extern void dev_disable_lro(struct net_device *dev);
#endif
51
#ifndef HAVE_DEV_GET_BY_INDEX_RCU
/* Fallback for kernels lacking dev_get_by_index_rcu(): look the device
 * up under the dev_base_lock read lock instead of RCU.  As with the
 * __dev_get_by_index() it wraps, no reference is taken on the returned
 * device, so the caller is responsible for keeping it alive. */
static inline struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	read_unlock(&dev_base_lock);

	return dev;
}
#endif
64
/* NETIF_F_FSO may be absent on older kernels; 0 disables it in any
 * feature mask it is OR'd into. */
#ifndef NETIF_F_FSO
#define NETIF_F_FSO 0
#endif

/* Older kernels use a plain u32 for the feature mask instead of the
 * netdev_features_t typedef. */
#ifndef HAVE_NETDEV_FEATURES_T
typedef u32 netdev_features_t;
#endif

/* Pre-3.19 kernels need the compat GSO segmentation path defined
 * below (see the MPLS note there). */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0)
#define OVS_USE_COMPAT_GSO_SEGMENTATION
#endif
76
#ifdef OVS_USE_COMPAT_GSO_SEGMENTATION
/* define compat version to handle MPLS segmentation offload. */
#define __skb_gso_segment rpl__skb_gso_segment
struct sk_buff *rpl__skb_gso_segment(struct sk_buff *skb,
				     netdev_features_t features,
				     bool tx_path);

/* Transmit-path convenience wrapper: same as rpl__skb_gso_segment()
 * with tx_path fixed to true, mirroring upstream skb_gso_segment(). */
#define skb_gso_segment rpl_skb_gso_segment
static inline
struct sk_buff *rpl_skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
	return rpl__skb_gso_segment(skb, features, true);
}
#endif
91
#ifdef HAVE_NETIF_NEEDS_GSO_NETDEV
/* Two-argument replacement used when the kernel's netif_needs_gso()
 * presumably takes a net_device argument (per the configure-detected
 * HAVE_NETIF_NEEDS_GSO_NETDEV — confirm).  Returns true when 'skb'
 * must be segmented in software: either the features cannot GSO this
 * packet, or its checksum state is unsuitable for offload. */
#define netif_needs_gso rpl_netif_needs_gso
static inline bool netif_needs_gso(struct sk_buff *skb,
				   netdev_features_t features)
{
	if (!skb_is_gso(skb))
		return false;

	if (!skb_gso_ok(skb, features))
		return true;

	/* GSO itself is supported; still segment if the checksum has not
	 * been prepared for hardware completion. */
	return unlikely(skb->ip_summed != CHECKSUM_PARTIAL &&
			skb->ip_summed != CHECKSUM_UNNECESSARY);
}
#endif
102
#ifndef HAVE_NETDEV_MASTER_UPPER_DEV_LINK_PRIV
/* Older netdev_master_upper_dev_link() takes no upper_priv/upper_info
 * arguments; this shim accepts and discards them so callers can use the
 * modern four-argument form. */
static inline int rpl_netdev_master_upper_dev_link(struct net_device *dev,
					       struct net_device *upper_dev,
					       void *upper_priv, void *upper_info)
{
	return netdev_master_upper_dev_link(dev, upper_dev);
}
/* Defined after the function above so the call inside it still reaches
 * the kernel's two-argument implementation. */
#define netdev_master_upper_dev_link rpl_netdev_master_upper_dev_link

#endif
113
/* Pre-3.16 kernels route transmission through a compat implementation;
 * presumably defined elsewhere in the compat code — confirm. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0)
#define dev_queue_xmit rpl_dev_queue_xmit
int rpl_dev_queue_xmit(struct sk_buff *skb);
#endif

/* Before 3.11, netdevice notifiers received the net_device pointer
 * directly rather than a notifier_info wrapper, so the conversion is
 * just an identity cast. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0)
static inline struct net_device *rpl_netdev_notifier_info_to_dev(void *info)
{
	return info;
}
#define netdev_notifier_info_to_dev rpl_netdev_notifier_info_to_dev
#endif
126
/* Renamed upstream: older kernels call the per-CPU software netstats
 * struct pcpu_tstats. */
#ifndef HAVE_PCPU_SW_NETSTATS
#define pcpu_sw_netstats pcpu_tstats
#endif

/* Use the compat version below for all Red Hat releases before RHEL 8. */
#if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)
#undef netdev_alloc_pcpu_stats
#endif
135
#ifndef netdev_alloc_pcpu_stats
/* Allocate a per-CPU instance of 'type' and initialize each CPU copy's
 * u64_stats 'syncp' member.  Evaluates (as a GNU statement expression)
 * to the per-CPU pointer, or NULL if alloc_percpu() fails; 'type' must
 * therefore have a 'syncp' member. */
#define netdev_alloc_pcpu_stats(type) \
({ \
	typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
	if (pcpu_stats) { \
		int ____i; \
		for_each_possible_cpu(____i) { \
			typeof(type) *stat; \
			stat = per_cpu_ptr(pcpu_stats, ____i); \
			u64_stats_init(&stat->syncp); \
		} \
	} \
	pcpu_stats; \
})
#endif
151
#ifndef HAVE_DEV_RECURSION_LEVEL
/* Kernel tracks no xmit recursion level; always report "no recursion". */
static inline bool dev_recursion_level(void) { return false; }
#endif

/* Value matches the upstream NET_NAME_USER (userspace-assigned name). */
#ifndef NET_NAME_USER
#define NET_NAME_USER 3
#endif
159
#ifndef HAVE_GRO_REMCSUM
/* Kernel has no GRO remote-checksum-offload support at all: provide an
 * empty state struct and no-op init/cleanup macros so callers compile
 * without further #ifdefs. */
struct gro_remcsum {
};

#define skb_gro_remcsum_init(grc)
#define skb_gro_remcsum_cleanup(a1, a2)
#else
#if LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0)

/* Backport of the >= 4.3 skb_gro_remcsum_process() on top of the
 * older gro_remcsum support.
 *
 * Undoes remote checksum offload for a packet during GRO: recomputes
 * the checksum at 'start'/'offset' relative to the header at
 * 'off' + 'hdrlen' via remcsum_adjust(), folds the resulting delta into
 * the GRO checksum state, and records offset/delta in 'grc' so
 * skb_gro_remcsum_cleanup() can restore the packet later.  Returns a
 * pointer to the (possibly re-pulled) header, or NULL if the needed
 * bytes cannot be made linear.
 * NOTE(review): appears to mirror the upstream implementation —
 * confirm against the target kernel version. */
#define skb_gro_remcsum_process rpl_skb_gro_remcsum_process
static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
					    unsigned int off, size_t hdrlen,
					    int start, int offset,
					    struct gro_remcsum *grc,
					    bool nopartial)
{
	__wsum delta;
	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);

	/* GRO must already have validated the packet checksum. */
	BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);

	if (!nopartial) {
		/* Partial mode: only record where remote checksum processing
		 * starts; leave the packet untouched. */
		NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
		return ptr;
	}

	/* Make sure 'plen' bytes of the header are linear, falling back to
	 * the slow pull when the fast path cannot cover them. */
	ptr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, off + plen)) {
		ptr = skb_gro_header_slow(skb, off + plen, off);
		if (!ptr)
			return NULL;
	}

	delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
			       start, offset);

	/* Adjust skb->csum since we changed the packet */
	NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);

	/* Remember what we changed so cleanup can undo it. */
	grc->offset = off + hdrlen + offset;
	grc->delta = delta;

	return ptr;
}
#endif
#endif
206
#ifndef HAVE_RTNL_LINK_STATS64
/* Backported 64-bit dev_get_stats(); the replacement fills 'storage'
 * and presumably returns it — see the compat implementation. */
#define dev_get_stats rpl_dev_get_stats
struct rtnl_link_stats64 *rpl_dev_get_stats(struct net_device *dev,
					    struct rtnl_link_stats64 *storage);
#endif

#if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)
/* Only required on RHEL 6. */
#define dev_get_stats dev_get_stats64
#endif
217
/* Minimal netdev_dbg()/netdev_info() fallbacks for kernels lacking
 * them.  Each emits two printk() calls (device name, then the message),
 * so the two halves may be interleaved with concurrent log output. */
#ifndef netdev_dbg
#define netdev_dbg(__dev, format, args...) \
	do { \
		printk(KERN_DEBUG "%s ", __dev->name); \
		printk(KERN_DEBUG format, ##args); \
	} while (0)
#endif

#ifndef netdev_info
#define netdev_info(__dev, format, args...) \
	do { \
		printk(KERN_INFO "%s ", __dev->name); \
		printk(KERN_INFO format, ##args); \
	} while (0)

#endif
234
/* When not using the kernel's own tunnel implementation, route
 * metadata-dst filling to the OVS compat implementation. */
#ifndef USE_UPSTREAM_TUNNEL
#define dev_fill_metadata_dst ovs_dev_fill_metadata_dst
int ovs_dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
#endif

/* Netdevice notifier event numbers for tunnel offload pushes;
 * presumably matching the upstream values — confirm. */
#ifndef NETDEV_OFFLOAD_PUSH_VXLAN
#define NETDEV_OFFLOAD_PUSH_VXLAN 0x001C
#endif

#ifndef NETDEV_OFFLOAD_PUSH_GENEVE
#define NETDEV_OFFLOAD_PUSH_GENEVE 0x001D
#endif
247
#ifndef HAVE_IFF_PHONY_HEADROOM

/* Kernels without IFF_PHONY_HEADROOM cannot propagate headroom hints
 * between stacked devices; stub out the flag and all accessors. */
#define IFF_PHONY_HEADROOM 0
/* No forwarding headroom is tracked on these kernels. */
static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
{
	return 0;
}

/* Requested rx headroom is silently ignored. */
static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
{
}

/* set the device rx headroom to the dev's default */
static inline void netdev_reset_rx_headroom(struct net_device *dev)
{
}

#endif
266
/* Advertise kernel support for IFF_NO_QUEUE, falling back to 0 where
 * the flag is unknown.  NOTE(review): if IFF_NO_QUEUE was already given
 * a fallback definition earlier in this header, this probe always takes
 * the #ifdef branch — confirm the intended detection order. */
#ifdef IFF_NO_QUEUE
#define HAVE_IFF_NO_QUEUE
#else
#define IFF_NO_QUEUE 0
#endif
272
#ifndef HAVE_SKB_CSUM_HWOFFLOAD_HELP
/* Fallback for kernels lacking skb_csum_hwoffload_help(): resolve any
 * pending checksum entirely in software, ignoring 'features'. */
static inline int skb_csum_hwoffload_help(struct sk_buff *skb,
					  const netdev_features_t features)
{
	/* It's less accurate to approximate to this for older kernels, but
	 * it was sufficient for a long time. If you care about ensuring that
	 * upstream commit 7529390d08f0 has the same effect on older kernels,
	 * consider backporting the following commits:
	 * b72b5bf6a8fc ("net: introduce skb_crc32c_csum_help")
	 * 43c26a1a4593 ("net: more accurate checksumming in validate_xmit_skb()")
	 */
	return skb_checksum_help(skb);
}
#endif
287
288 #endif /* __LINUX_NETDEVICE_WRAPPER_H */