1 #ifndef __LINUX_NETDEVICE_WRAPPER_H
2 #define __LINUX_NETDEVICE_WRAPPER_H 1
4 #include_next <linux/netdevice.h>
5 #include <linux/if_bridge.h>
9 #include <linux/version.h>
/*
 * Compatibility fallbacks for net_device priv_flags bits that older
 * kernels do not define.  Defining a missing flag as 0 makes tests
 * like "priv_flags & IFF_FOO" and updates "priv_flags |= IFF_FOO"
 * compile and behave as no-ops.
 */
#ifndef IFF_TX_SKB_SHARING
#define IFF_TX_SKB_SHARING 0
#endif

#ifndef IFF_OVS_DATAPATH
#define IFF_OVS_DATAPATH 0
#else
/* The running kernel already knows about OVS datapath ports. */
#define HAVE_OVS_DATAPATH
#endif

#ifndef IFF_LIVE_ADDR_CHANGE
#define IFF_LIVE_ADDR_CHANGE 0
#endif

#ifndef IFF_NO_QUEUE
#define IFF_NO_QUEUE 0
#endif

#ifndef IFF_OPENVSWITCH
#define IFF_OPENVSWITCH 0
#endif
33 #define to_net_dev(class) container_of(class, struct net_device, NETDEV_DEV_MEMBER)
#ifndef HAVE_NET_NAME_UNKNOWN
/*
 * Kernels without interface-name origin tracking: ignore the
 * name_assign_type argument and fall back to the single-queue
 * allocator.  The kernel's own 3-argument alloc_netdev macro must be
 * removed before redefining it with 4 arguments.
 */
#undef alloc_netdev
#define NET_NAME_UNKNOWN 0
#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
	alloc_netdev_mq(sizeof_priv, name, setup, 1)
#endif
43 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)
44 #define unregister_netdevice_queue(dev, head) unregister_netdevice(dev)
45 #define unregister_netdevice_many(head)
#ifndef HAVE_DEV_DISABLE_LRO
/* Backported dev_disable_lro(); implementation lives in compat code. */
extern void dev_disable_lro(struct net_device *dev);
#endif
52 #ifndef HAVE_DEV_GET_BY_INDEX_RCU
53 static inline struct net_device
*dev_get_by_index_rcu(struct net
*net
, int ifindex
)
55 struct net_device
*dev
;
57 read_lock(&dev_base_lock
);
58 dev
= __dev_get_by_index(net
, ifindex
);
59 read_unlock(&dev_base_lock
);
69 #ifndef HAVE_NETDEV_FEATURES_T
70 typedef u32 netdev_features_t
;
73 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0)
74 #define OVS_USE_COMPAT_GSO_SEGMENTATION
#ifdef OVS_USE_COMPAT_GSO_SEGMENTATION
/* define compat version to handle MPLS segmentation offload. */
#define __skb_gso_segment rpl__skb_gso_segment
struct sk_buff *rpl__skb_gso_segment(struct sk_buff *skb,
				     netdev_features_t features,
				     bool tx_path);

#define skb_gso_segment rpl_skb_gso_segment
/* Two-argument wrapper: segmentation here is always on the tx path. */
static inline struct sk_buff *rpl_skb_gso_segment(struct sk_buff *skb,
						  netdev_features_t features)
{
	return rpl__skb_gso_segment(skb, features, true);
}
#endif
92 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)
93 #define netif_skb_features rpl_netif_skb_features
94 netdev_features_t
rpl_netif_skb_features(struct sk_buff
*skb
);
#ifdef HAVE_NETIF_NEEDS_GSO_NETDEV
/*
 * Provide the two-argument netif_needs_gso() form used by this code
 * base.  Software GSO is required when the skb is GSO but the given
 * features cannot segment it, or when the checksum state is neither
 * CHECKSUM_PARTIAL nor CHECKSUM_UNNECESSARY.
 */
#define netif_needs_gso rpl_netif_needs_gso
static inline bool netif_needs_gso(struct sk_buff *skb,
				   netdev_features_t features)
{
	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
		unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
			 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
}
#endif
108 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)
110 /* XEN dom0 networking assumes dev->master is bond device
111 * and it tries to access bond private structure from dev->master
112 * ptr on receive path. This causes panic. Therefore it is better
113 * not to backport this API.
115 static inline int netdev_master_upper_dev_link(struct net_device
*dev
,
116 struct net_device
*upper_dev
)
121 static inline void netdev_upper_dev_unlink(struct net_device
*dev
,
122 struct net_device
*upper_dev
)
126 static inline struct net_device
*netdev_master_upper_dev_get(struct net_device
*dev
)
132 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0)
133 #define dev_queue_xmit rpl_dev_queue_xmit
134 int rpl_dev_queue_xmit(struct sk_buff
*skb
);
137 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0)
138 static inline struct net_device
*netdev_notifier_info_to_dev(void *info
)
144 #ifndef HAVE_PCPU_SW_NETSTATS
145 #define pcpu_sw_netstats pcpu_tstats
148 #if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)
149 /* Use compat version for all redhas releases */
150 #undef netdev_alloc_pcpu_stats
153 #ifndef netdev_alloc_pcpu_stats
154 #define netdev_alloc_pcpu_stats(type) \
156 typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
159 for_each_possible_cpu(____i) { \
160 typeof(type) *stat; \
161 stat = per_cpu_ptr(pcpu_stats, ____i); \
162 u64_stats_init(&stat->syncp); \
169 #ifndef HAVE_DEV_RECURSION_LEVEL
170 static inline bool dev_recursion_level(void) { return false; }
173 #ifndef NET_NAME_USER
174 #define NET_NAME_USER 3
177 #ifndef HAVE_GRO_REMCSUM
181 #define skb_gro_remcsum_init(grc)
182 #define skb_gro_remcsum_cleanup(a1, a2)
184 #if LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0)
186 #define skb_gro_remcsum_process rpl_skb_gro_remcsum_process
187 static inline void *skb_gro_remcsum_process(struct sk_buff
*skb
, void *ptr
,
188 unsigned int off
, size_t hdrlen
,
189 int start
, int offset
,
190 struct gro_remcsum
*grc
,
194 size_t plen
= hdrlen
+ max_t(size_t, offset
+ sizeof(u16
), start
);
196 BUG_ON(!NAPI_GRO_CB(skb
)->csum_valid
);
199 NAPI_GRO_CB(skb
)->gro_remcsum_start
= off
+ hdrlen
+ start
;
203 ptr
= skb_gro_header_fast(skb
, off
);
204 if (skb_gro_header_hard(skb
, off
+ plen
)) {
205 ptr
= skb_gro_header_slow(skb
, off
+ plen
, off
);
210 delta
= remcsum_adjust(ptr
+ hdrlen
, NAPI_GRO_CB(skb
)->csum
,
213 /* Adjust skb->csum since we changed the packet */
214 NAPI_GRO_CB(skb
)->csum
= csum_add(NAPI_GRO_CB(skb
)->csum
, delta
);
216 grc
->offset
= off
+ hdrlen
+ offset
;
224 #ifndef HAVE_RTNL_LINK_STATS64
225 #define dev_get_stats rpl_dev_get_stats
226 struct rtnl_link_stats64
*rpl_dev_get_stats(struct net_device
*dev
,
227 struct rtnl_link_stats64
*storage
);
230 #if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)
231 /* Only required on RHEL 6. */
232 #define dev_get_stats dev_get_stats64
#ifndef netdev_dbg
/* Minimal netdev_dbg() fallback: prefix the message with the device name. */
#define netdev_dbg(__dev, format, args...)		\
do {							\
	printk(KERN_DEBUG "%s ", __dev->name);		\
	printk(KERN_DEBUG format, ##args);		\
} while (0)
#endif

#ifndef netdev_info
/* Minimal netdev_info() fallback: prefix the message with the device name. */
#define netdev_info(__dev, format, args...)		\
do {							\
	printk(KERN_INFO "%s ", __dev->name);		\
	printk(KERN_INFO format, ##args);		\
} while (0)
#endif
252 #ifndef HAVE_NDO_FILL_METADATA_DST
253 #define dev_fill_metadata_dst ovs_dev_fill_metadata_dst
254 int ovs_dev_fill_metadata_dst(struct net_device
*dev
, struct sk_buff
*skb
);
256 #endif /* __LINUX_NETDEVICE_WRAPPER_H */