git.proxmox.com Git - ovs.git/blob - datapath/linux/compat/include/linux/netdevice.h
datapath: compat: Do not use upstream fill-meta-data function for compat tunnel
#ifndef __LINUX_NETDEVICE_WRAPPER_H
#define __LINUX_NETDEVICE_WRAPPER_H 1

#include_next <linux/netdevice.h>
#include <linux/if_bridge.h>

struct net;

#include <linux/version.h>

#ifndef IFF_TX_SKB_SHARING
#define IFF_TX_SKB_SHARING 0
#endif

#ifndef IFF_OVS_DATAPATH
#define IFF_OVS_DATAPATH 0
#else
#define HAVE_OVS_DATAPATH
#endif

#ifndef IFF_LIVE_ADDR_CHANGE
#define IFF_LIVE_ADDR_CHANGE 0
#endif

/* Detect IFF_NO_QUEUE support before the flag is ever defined to 0, so
 * that HAVE_IFF_NO_QUEUE is set only when the kernel really provides it. */
#ifdef IFF_NO_QUEUE
#define HAVE_IFF_NO_QUEUE
#else
#define IFF_NO_QUEUE 0
#endif
#ifndef IFF_OPENVSWITCH
#define IFF_OPENVSWITCH 0
#endif

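/*
 * Note on the IFF_* fallbacks above: defining a missing flag to 0 keeps
 * "dev->priv_flags |= IFF_FOO;" compiling as a no-op on kernels that do
 * not know the flag, so callers need no version checks.  A minimal
 * illustration (hypothetical caller, not part of this header):
 *
 *	static void hypothetical_setup(struct net_device *dev)
 *	{
 *		dev->priv_flags |= IFF_OVS_DATAPATH | IFF_NO_QUEUE;
 *	}
 *
 * On an old kernel both flags expand to 0 and the statement is harmless.
 */
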
#ifndef to_net_dev
#define to_net_dev(class) container_of(class, struct net_device, NETDEV_DEV_MEMBER)
#endif

#ifndef HAVE_NET_NAME_UNKNOWN
#undef alloc_netdev
#define NET_NAME_UNKNOWN 0
#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
	alloc_netdev_mq(sizeof_priv, name, setup, 1)
#endif

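/*
 * Usage sketch for the alloc_netdev() fallback above (illustration only;
 * "struct my_priv" and "internal_dev_setup" are hypothetical names):
 *
 *	struct net_device *dev;
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "ovs%d",
 *			   NET_NAME_UNKNOWN, internal_dev_setup);
 *
 * On kernels without HAVE_NET_NAME_UNKNOWN this expands to
 * alloc_netdev_mq(sizeof(struct my_priv), "ovs%d", internal_dev_setup, 1),
 * i.e. a single TX queue, with the name_assign_type argument dropped.
 */
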
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)
#define unregister_netdevice_queue(dev, head) unregister_netdevice(dev)
#define unregister_netdevice_many(head)
#endif

#ifndef HAVE_DEV_DISABLE_LRO
extern void dev_disable_lro(struct net_device *dev);
#endif

#ifndef HAVE_DEV_GET_BY_INDEX_RCU
static inline struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	read_unlock(&dev_base_lock);

	return dev;
}
#endif

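/*
 * The dev_get_by_index_rcu() fallback above cannot rely on RCU on old
 * kernels, so it briefly takes dev_base_lock around __dev_get_by_index()
 * instead.  Callers written against the upstream API still hold
 * rcu_read_lock(), e.g.:
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_index_rcu(net, ifindex);
 *	if (dev)
 *		...;	// use dev; no reference is taken
 *	rcu_read_unlock();
 *
 * As with the upstream variant, no reference count is taken on the
 * returned device.
 */
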
#ifndef NETIF_F_FSO
#define NETIF_F_FSO 0
#endif

#ifndef HAVE_NETDEV_FEATURES_T
typedef u32 netdev_features_t;
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0)
#define OVS_USE_COMPAT_GSO_SEGMENTATION
#endif

#ifdef OVS_USE_COMPAT_GSO_SEGMENTATION
/* Define a compat version to handle MPLS segmentation offload. */
#define __skb_gso_segment rpl__skb_gso_segment
struct sk_buff *rpl__skb_gso_segment(struct sk_buff *skb,
				     netdev_features_t features,
				     bool tx_path);

#define skb_gso_segment rpl_skb_gso_segment
static inline
struct sk_buff *rpl_skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
	return rpl__skb_gso_segment(skb, features, true);
}
#endif

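/*
 * Typical use of the skb_gso_segment() wrapper above (sketch only; error
 * handling abbreviated):
 *
 *	struct sk_buff *segs;
 *
 *	segs = skb_gso_segment(skb, features);
 *	if (IS_ERR_OR_NULL(segs))
 *		return -EINVAL;
 *
 * The bool argument of __skb_gso_segment() selects the transmit path
 * (true here), matching the upstream helper it replaces.
 */
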
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)
#define netif_skb_features rpl_netif_skb_features
netdev_features_t rpl_netif_skb_features(struct sk_buff *skb);
#endif

#ifdef HAVE_NETIF_NEEDS_GSO_NETDEV
#define netif_needs_gso rpl_netif_needs_gso
static inline bool netif_needs_gso(struct sk_buff *skb,
				   netdev_features_t features)
{
	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
		unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
			 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)

/* Xen dom0 networking assumes that dev->master is a bond device, and it
 * tries to access the bond private structure through the dev->master
 * pointer on the receive path, which causes a panic.  Therefore it is
 * better not to backport this API.
 */
static inline int netdev_master_upper_dev_link(struct net_device *dev,
					       struct net_device *upper_dev)
{
	return 0;
}

static inline void netdev_upper_dev_unlink(struct net_device *dev,
					   struct net_device *upper_dev)
{
}

static inline struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
	return NULL;
}
#endif

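/*
 * Because the stubs above do nothing, netdev_master_upper_dev_get()
 * always returns NULL on kernels before 3.9; callers must treat a NULL
 * master as "no upper device" rather than an error, e.g.:
 *
 *	struct net_device *master = netdev_master_upper_dev_get(dev);
 *
 *	if (master)
 *		...;	// only reached on kernels with real upper-dev links
 */
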
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0)
#define dev_queue_xmit rpl_dev_queue_xmit
int rpl_dev_queue_xmit(struct sk_buff *skb);
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0)
static inline struct net_device *netdev_notifier_info_to_dev(void *info)
{
	return info;
}
#endif

#ifndef HAVE_PCPU_SW_NETSTATS
#define pcpu_sw_netstats pcpu_tstats
#endif

#if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)
/* Use the compat version on all Red Hat releases before RHEL 8. */
#undef netdev_alloc_pcpu_stats
#endif

#ifndef netdev_alloc_pcpu_stats
#define netdev_alloc_pcpu_stats(type)				\
({								\
	typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
	if (pcpu_stats) {					\
		int ____i;					\
		for_each_possible_cpu(____i) {			\
			typeof(type) *stat;			\
			stat = per_cpu_ptr(pcpu_stats, ____i);	\
			u64_stats_init(&stat->syncp);		\
		}						\
	}							\
	pcpu_stats;						\
})
#endif

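/*
 * Usage sketch for the netdev_alloc_pcpu_stats() fallback above: it
 * allocates a per-CPU instance of the given type and initializes each
 * instance's "syncp" member, so the type must embed a
 * struct u64_stats_sync named syncp, e.g.:
 *
 *	struct pcpu_sw_netstats __percpu *stats;
 *
 *	stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 *	if (!stats)
 *		return -ENOMEM;
 */
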
#ifndef HAVE_DEV_RECURSION_LEVEL
static inline bool dev_recursion_level(void) { return false; }
#endif

#ifndef NET_NAME_USER
#define NET_NAME_USER 3
#endif

#ifndef HAVE_GRO_REMCSUM
struct gro_remcsum {
};

#define skb_gro_remcsum_init(grc)
#define skb_gro_remcsum_cleanup(a1, a2)
#else
#if LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0)

#define skb_gro_remcsum_process rpl_skb_gro_remcsum_process
static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
					    unsigned int off, size_t hdrlen,
					    int start, int offset,
					    struct gro_remcsum *grc,
					    bool nopartial)
{
	__wsum delta;
	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);

	BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);

	if (!nopartial) {
		NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
		return ptr;
	}

	ptr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, off + plen)) {
		ptr = skb_gro_header_slow(skb, off + plen, off);
		if (!ptr)
			return NULL;
	}

	delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
			       start, offset);

	/* Adjust skb->csum since we changed the packet */
	NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);

	grc->offset = off + hdrlen + offset;
	grc->delta = delta;

	return ptr;
}
#endif
#endif

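/*
 * The skb_gro_remcsum_process() backport above mirrors the upstream
 * remote-checksum-offload helper: with nopartial set it patches the inner
 * checksum in place via remcsum_adjust() and folds the resulting delta
 * back into NAPI_GRO_CB(skb)->csum; otherwise it only records where
 * remote checksum processing starts and leaves the packet untouched.
 * Tunnel GRO receive paths (e.g. VXLAN with remote checksum offload) are
 * the intended callers.
 */
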
#ifndef HAVE_RTNL_LINK_STATS64
#define dev_get_stats rpl_dev_get_stats
struct rtnl_link_stats64 *rpl_dev_get_stats(struct net_device *dev,
					    struct rtnl_link_stats64 *storage);
#endif

#if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)
/* Only required on RHEL 6. */
#define dev_get_stats dev_get_stats64
#endif

#ifndef netdev_dbg
#define netdev_dbg(__dev, format, args...)		\
do {							\
	printk(KERN_DEBUG "%s ", __dev->name);		\
	printk(KERN_DEBUG format, ##args);		\
} while (0)
#endif

#ifndef netdev_info
#define netdev_info(__dev, format, args...)		\
do {							\
	printk(KERN_INFO "%s ", __dev->name);		\
	printk(KERN_INFO format, ##args);		\
} while (0)
#endif

#ifndef USE_UPSTREAM_TUNNEL
#define dev_fill_metadata_dst ovs_dev_fill_metadata_dst
int ovs_dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
#endif

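/*
 * When the compat (non-upstream) tunnel implementation is built,
 * dev_fill_metadata_dst() is redirected to ovs_dev_fill_metadata_dst()
 * above, so metadata destinations are filled in by the OVS compat code
 * rather than by the kernel's own tunnel helpers.
 */
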
#ifndef NETDEV_OFFLOAD_PUSH_VXLAN
#define NETDEV_OFFLOAD_PUSH_VXLAN 0x001C
#endif

#ifndef NETDEV_OFFLOAD_PUSH_GENEVE
#define NETDEV_OFFLOAD_PUSH_GENEVE 0x001D
#endif

#ifndef HAVE_IFF_PHONY_HEADROOM

#define IFF_PHONY_HEADROOM 0
static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
{
	return 0;
}

static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
{
}

/* Set the device rx headroom to the dev's default. */
static inline void netdev_reset_rx_headroom(struct net_device *dev)
{
}

#endif

#endif /* __LINUX_NETDEVICE_WRAPPER_H */