/* Open vSwitch compat wrapper for <linux/skbuff.h>
 * (datapath/linux/compat/include/linux/skbuff.h).
 * Provides backports of sk_buff helpers for older kernels. */
1 #ifndef __LINUX_SKBUFF_WRAPPER_H
2 #define __LINUX_SKBUFF_WRAPPER_H 1
4 #include <linux/version.h>
5 #include <linux/types.h>
7 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0)
8 /* This should be before skbuff.h to make sure that we rewrite
12 int pskb_expand_head(struct sk_buff
*skb
, int nhead
, int ntail
,
14 #define pskb_expand_head rpl_pskb_expand_head
17 #include_next <linux/skbuff.h>
18 #include <linux/jhash.h>
#ifndef HAVE_IGNORE_DF_RENAME
/* skb->local_df was renamed to skb->ignore_df upstream; map the new name. */
#define ignore_df local_df
#endif
25 #ifndef HAVE_NULL_COMPUTE_PSEUDO
26 static inline __wsum
null_compute_pseudo(struct sk_buff
*skb
, int proto
)
32 #ifndef HAVE_SKB_CHECKSUM_CONVERT
33 static inline bool __skb_checksum_convert_check(struct sk_buff
*skb
)
35 #ifdef HAVE_SKBUFF_CSUM_VALID
36 return (skb
->ip_summed
== CHECKSUM_NONE
&& skb
->csum_valid
);
38 return skb
->ip_summed
== CHECKSUM_NONE
;
42 static inline void __skb_checksum_convert(struct sk_buff
*skb
,
43 __sum16 check
, __wsum pseudo
)
46 skb
->ip_summed
= CHECKSUM_COMPLETE
;
49 #define skb_checksum_try_convert(skb, proto, check, compute_pseudo) \
51 if (__skb_checksum_convert_check(skb)) \
52 __skb_checksum_convert(skb, check, \
53 compute_pseudo(skb, proto)); \
#ifndef SKB_CHECKSUM_SIMPLE_VALIDATE

#ifndef __skb_checksum_validate
/* Compat no-op checksum validation that always reports success (0):
 * older kernels track no csum_valid state to consult.
 * NOTE(review): the macro body (orig. lines 63-68) was lost in extraction;
 * restored from upstream ovs.git -- verify. */
#define __skb_checksum_validate(skb, proto, complete,			\
				zero_okay, check, compute_pseudo)	\
({									\
	__sum16 __ret = 0;						\
	__ret;								\
})
#endif

#define skb_checksum_simple_validate(skb)				\
	__skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)

#endif /* SKB_CHECKSUM_SIMPLE_VALIDATE */
73 #ifndef HAVE_SKB_COPY_FROM_LINEAR_DATA_OFFSET
74 static inline void skb_copy_from_linear_data_offset(const struct sk_buff
*skb
,
75 const int offset
, void *to
,
76 const unsigned int len
)
78 memcpy(to
, skb
->data
+ offset
, len
);
81 static inline void skb_copy_to_linear_data_offset(struct sk_buff
*skb
,
84 const unsigned int len
)
86 memcpy(skb
->data
+ offset
, from
, len
);
89 #endif /* !HAVE_SKB_COPY_FROM_LINEAR_DATA_OFFSET */
91 #ifndef HAVE_SKB_INNER_TRANSPORT_OFFSET
92 static inline int skb_inner_transport_offset(const struct sk_buff
*skb
)
94 return skb_inner_transport_header(skb
) - skb
->data
;
98 #ifndef HAVE_SKB_RESET_TAIL_POINTER
99 static inline void skb_reset_tail_pointer(struct sk_buff
*skb
)
101 skb
->tail
= skb
->data
;
#ifndef NET_SKB_PAD
/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 16 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 16 bytes of
 * headroom, you should not reduce this.
 */
/* NOTE(review): the surrounding #ifndef/#define/#endif lines were lost in
 * extraction; restored from upstream -- verify against ovs.git. */
#define NET_SKB_PAD	16
#endif
123 #ifndef HAVE_SKB_COW_HEAD
124 static inline int __skb_cow(struct sk_buff
*skb
, unsigned int headroom
,
129 if (headroom
< NET_SKB_PAD
)
130 headroom
= NET_SKB_PAD
;
131 if (headroom
> skb_headroom(skb
))
132 delta
= headroom
- skb_headroom(skb
);
135 return pskb_expand_head(skb
, ALIGN(delta
, NET_SKB_PAD
), 0,
140 static inline int skb_cow_head(struct sk_buff
*skb
, unsigned int headroom
)
142 return __skb_cow(skb
, headroom
, skb_header_cloned(skb
));
144 #endif /* !HAVE_SKB_COW_HEAD */
146 #ifndef HAVE_SKB_DST_ACCESSOR_FUNCS
147 static inline struct dst_entry
*skb_dst(const struct sk_buff
*skb
)
149 return (struct dst_entry
*)skb
->dst
;
152 static inline void skb_dst_set(struct sk_buff
*skb
, struct dst_entry
*dst
)
157 static inline struct rtable
*skb_rtable(const struct sk_buff
*skb
)
159 return (struct rtable
*)skb
->dst
;
/* Old kernels used the single CHECKSUM_HW value for both directions. */
#ifndef CHECKSUM_PARTIAL
#define CHECKSUM_PARTIAL CHECKSUM_HW
#endif
#ifndef CHECKSUM_COMPLETE
#define CHECKSUM_COMPLETE CHECKSUM_HW
#endif
170 #ifndef HAVE_SKB_WARN_LRO
172 static inline bool skb_warn_if_lro(const struct sk_buff
*skb
)
177 extern void __skb_warn_lro_forwarding(const struct sk_buff
*skb
);
179 static inline bool skb_warn_if_lro(const struct sk_buff
*skb
)
181 /* LRO sets gso_size but not gso_type, whereas if GSO is really
182 * wanted then gso_type will be set. */
183 struct skb_shared_info
*shinfo
= skb_shinfo(skb
);
184 if (shinfo
->gso_size
!= 0 && unlikely(shinfo
->gso_type
== 0)) {
185 __skb_warn_lro_forwarding(skb
);
190 #endif /* NETIF_F_LRO */
191 #endif /* HAVE_SKB_WARN_LRO */
#ifndef HAVE_CONSUME_SKB
/* consume_skb() (free without drop accounting) falls back to kfree_skb(). */
#define consume_skb kfree_skb
#endif
197 #ifndef HAVE_SKB_FRAG_PAGE
198 #include <linux/mm.h>
200 static inline struct page
*skb_frag_page(const skb_frag_t
*frag
)
205 static inline void __skb_frag_set_page(skb_frag_t
*frag
, struct page
*page
)
209 static inline void skb_frag_size_set(skb_frag_t
*frag
, unsigned int size
)
213 static inline void __skb_frag_ref(skb_frag_t
*frag
)
215 get_page(skb_frag_page(frag
));
217 static inline void __skb_frag_unref(skb_frag_t
*frag
)
219 put_page(skb_frag_page(frag
));
222 static inline void skb_frag_ref(struct sk_buff
*skb
, int f
)
224 __skb_frag_ref(&skb_shinfo(skb
)->frags
[f
]);
227 static inline void skb_frag_unref(struct sk_buff
*skb
, int f
)
229 __skb_frag_unref(&skb_shinfo(skb
)->frags
[f
]);
234 #ifndef HAVE_SKB_RESET_MAC_LEN
235 static inline void skb_reset_mac_len(struct sk_buff
*skb
)
237 skb
->mac_len
= skb
->network_header
- skb
->mac_header
;
241 #ifndef HAVE_SKB_UNCLONE
242 static inline int skb_unclone(struct sk_buff
*skb
, gfp_t pri
)
244 might_sleep_if(pri
& __GFP_WAIT
);
247 return pskb_expand_head(skb
, 0, 0, pri
);
253 #ifndef HAVE_SKB_ORPHAN_FRAGS
254 static inline int skb_orphan_frags(struct sk_buff
*skb
, gfp_t gfp_mask
)
260 #ifndef HAVE_SKB_GET_HASH
261 #define skb_get_hash skb_get_rxhash
262 #endif /* HAVE_SKB_GET_HASH */
264 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)
265 #define skb_zerocopy_headlen rpl_skb_zerocopy_headlen
266 unsigned int rpl_skb_zerocopy_headlen(const struct sk_buff
*from
);
269 #ifndef HAVE_SKB_ZEROCOPY
270 #define skb_zerocopy rpl_skb_zerocopy
271 int rpl_skb_zerocopy(struct sk_buff
*to
, struct sk_buff
*from
, int len
,
#ifndef HAVE_SKB_CLEAR_HASH
/* Clear whatever rxhash state this kernel tracks.
 * NOTE(review): most of the body was lost in extraction; restored from
 * upstream ovs.git (rxhash / l4_rxhash variants) -- verify. */
static inline void skb_clear_hash(struct sk_buff *skb)
{
#ifdef HAVE_RXHASH
	skb->rxhash = 0;
#endif
#if defined(HAVE_L4_RXHASH) && !defined(HAVE_RHEL_OVS_HOOK)
	skb->l4_rxhash = 0;
#endif
}
#endif
#ifndef HAVE_SKB_HAS_FRAG_LIST
/* skb_has_frags() was renamed skb_has_frag_list() upstream. */
#define skb_has_frag_list skb_has_frags
#endif
291 #ifndef HAVE___SKB_FILL_PAGE_DESC
292 static inline void __skb_fill_page_desc(struct sk_buff
*skb
, int i
,
293 struct page
*page
, int off
, int size
)
295 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
297 __skb_frag_set_page(frag
, page
);
298 frag
->page_offset
= off
;
299 skb_frag_size_set(frag
, size
);
303 #ifndef HAVE_SKB_ENSURE_WRITABLE
304 #define skb_ensure_writable rpl_skb_ensure_writable
305 int rpl_skb_ensure_writable(struct sk_buff
*skb
, int write_len
);
308 #ifndef HAVE___SKB_VLAN_POP
309 #define __skb_vlan_pop rpl___skb_vlan_pop
310 int rpl___skb_vlan_pop(struct sk_buff
*skb
, u16
*vlan_tci
);
313 #ifndef HAVE_SKB_VLAN_POP
314 #define skb_vlan_pop rpl_skb_vlan_pop
315 int rpl_skb_vlan_pop(struct sk_buff
*skb
);
318 #ifndef HAVE_SKB_VLAN_PUSH
319 #define skb_vlan_push rpl_skb_vlan_push
320 int rpl_skb_vlan_push(struct sk_buff
*skb
, __be16 vlan_proto
, u16 vlan_tci
);
323 #ifndef HAVE_KFREE_SKB_LIST
324 void rpl_kfree_skb_list(struct sk_buff
*segs
);
325 #define kfree_skb_list rpl_kfree_skb_list
328 #ifndef HAVE_SKB_CHECKSUM_START_OFFSET
329 static inline int skb_checksum_start_offset(const struct sk_buff
*skb
)
331 return skb
->csum_start
- skb_headroom(skb
);
335 #if LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0)
336 #define skb_postpull_rcsum rpl_skb_postpull_rcsum
337 static inline void skb_postpull_rcsum(struct sk_buff
*skb
,
338 const void *start
, unsigned int len
)
340 if (skb
->ip_summed
== CHECKSUM_COMPLETE
)
341 skb
->csum
= csum_sub(skb
->csum
, csum_partial(start
, len
, 0));
342 else if (skb
->ip_summed
== CHECKSUM_PARTIAL
&&
343 skb_checksum_start_offset(skb
) < 0)
344 skb
->ip_summed
= CHECKSUM_NONE
;
347 #define skb_pull_rcsum rpl_skb_pull_rcsum
348 static inline unsigned char *skb_pull_rcsum(struct sk_buff
*skb
, unsigned int len
)
350 unsigned char *data
= skb
->data
;
352 BUG_ON(len
> skb
->len
);
353 __skb_pull(skb
, len
);
354 skb_postpull_rcsum(skb
, data
, len
);
360 #if LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0)
361 #define skb_scrub_packet rpl_skb_scrub_packet
362 void rpl_skb_scrub_packet(struct sk_buff
*skb
, bool xnet
);
365 #define skb_pop_mac_header rpl_skb_pop_mac_header
366 static inline void skb_pop_mac_header(struct sk_buff
*skb
)
368 skb
->mac_header
= skb
->network_header
;
#ifndef HAVE_SKB_CLEAR_HASH_IF_NOT_L4
/* Clear the flow hash unless it was computed from L4 headers.
 * NOTE(review): the entire body (orig. lines 373-378) was lost in
 * extraction; restored from upstream ovs.git (conditional on
 * HAVE_L4_RXHASH) -- verify. */
static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
{
#ifdef HAVE_L4_RXHASH
	if (!skb->l4_rxhash)
#endif
		skb_clear_hash(skb);
}
#endif
379 #ifndef HAVE_SKB_POSTPUSH_RCSUM
380 static inline void skb_postpush_rcsum(struct sk_buff
*skb
,
381 const void *start
, unsigned int len
)
383 /* For performing the reverse operation to skb_postpull_rcsum(),
384 * we can instead of ...
386 * skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
388 * ... just use this equivalent version here to save a few
389 * instructions. Feeding csum of 0 in csum_partial() and later
390 * on adding skb->csum is equivalent to feed skb->csum in the
393 if (skb
->ip_summed
== CHECKSUM_COMPLETE
)
394 skb
->csum
= csum_partial(start
, len
, skb
->csum
);
398 #define skb_checksum_start rpl_skb_checksum_start
399 static inline unsigned char *skb_checksum_start(const struct sk_buff
*skb
)
401 return skb
->head
+ skb
->csum_start
;
404 #ifndef HAVE_LCO_CSUM
405 static inline __wsum
lco_csum(struct sk_buff
*skb
)
407 unsigned char *csum_start
= skb_checksum_start(skb
);
408 unsigned char *l4_hdr
= skb_transport_header(skb
);
411 /* Start with complement of inner checksum adjustment */
412 partial
= ~csum_unfold(*(__force __sum16
*)(csum_start
+
415 /* Add in checksum of our headers (incl. outer checksum
416 * adjustment filled in by caller) and return result.
418 return csum_partial(l4_hdr
, csum_start
- l4_hdr
, partial
);
422 #ifndef HAVE_SKB_NFCT
423 static inline struct nf_conntrack
*skb_nfct(const struct sk_buff
*skb
)
425 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
#ifndef HAVE_SKB_PUT_ZERO
/* skb_put() followed by zeroing of the newly added bytes.
 * NOTE(review): the memset/return lines (orig. 437-442) were lost in
 * extraction; restored from upstream -- verify. */
static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memset(tmp, 0, len);

	return tmp;
}
#endif
#ifndef HAVE_SKB_GSO_IPXIP6
#define SKB_GSO_IPXIP6 (1 << 10)
#endif
448 #ifndef HAVE_SKB_SET_INNER_IPPROTO
449 static inline void skb_set_inner_ipproto(struct sk_buff
*skb
,
#ifndef HAVE_NF_RESET_CT
/* nf_reset() was renamed nf_reset_ct() in Linux 5.4. */
#define nf_reset_ct nf_reset
#endif