#ifndef __LINUX_SKBUFF_WRAPPER_H
#define __LINUX_SKBUFF_WRAPPER_H 1

#include <linux/version.h>
#include <linux/types.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0)
/* This should be before skbuff.h to make sure that we rewrite
 * the calls there. */
struct sk_buff;

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
                     gfp_t gfp_mask);
#define pskb_expand_head rpl_pskb_expand_head
#endif

#include_next <linux/skbuff.h>
#include <linux/jhash.h>

#ifndef HAVE_IGNORE_DF_RENAME
#define ignore_df local_df
#endif

#ifndef HAVE_NULL_COMPUTE_PSEUDO
static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
{
        return 0;
}
#endif

#ifndef HAVE_SKB_CHECKSUM_CONVERT
static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
{
#ifdef HAVE_SKBUFF_CSUM_VALID
        return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid);
#else
        return skb->ip_summed == CHECKSUM_NONE;
#endif
}

static inline void __skb_checksum_convert(struct sk_buff *skb,
                                          __sum16 check, __wsum pseudo)
{
        skb->csum = ~pseudo;
        skb->ip_summed = CHECKSUM_COMPLETE;
}

#define skb_checksum_try_convert(skb, proto, check, compute_pseudo)    \
do {                                                                    \
        if (__skb_checksum_convert_check(skb))                          \
                __skb_checksum_convert(skb, check,                      \
                                       compute_pseudo(skb, proto));    \
} while (0)

#endif
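
/*
 * Illustrative usage (a sketch, not part of the original header): on a
 * tunnel receive path this macro is typically used to upgrade a validated
 * CHECKSUM_NONE into CHECKSUM_COMPLETE before the inner packet is parsed,
 * e.g. for UDP with the in-kernel pseudo-header helper:
 *
 *      skb_checksum_try_convert(skb, IPPROTO_UDP, udp_hdr(skb)->check,
 *                               inet_compute_pseudo);
 *
 * The fallback above only converts when ip_summed is CHECKSUM_NONE (and,
 * where the field exists, csum_valid is set), mirroring the upstream
 * semantics.
 */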

#ifndef SKB_CHECKSUM_SIMPLE_VALIDATE

#ifndef __skb_checksum_validate
#define __skb_checksum_validate(skb, proto, complete,                   \
                                zero_okay, check, compute_pseudo)       \
({                                                                      \
        __sum16 __ret = 0;                                              \
        __ret;                                                          \
})
#endif

#define skb_checksum_simple_validate(skb)                               \
        __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
#endif

#ifndef HAVE_SKB_COPY_FROM_LINEAR_DATA_OFFSET
static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
                                                    const int offset, void *to,
                                                    const unsigned int len)
{
        memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
                                                  const int offset,
                                                  const void *from,
                                                  const unsigned int len)
{
        memcpy(skb->data + offset, from, len);
}

#endif /* !HAVE_SKB_COPY_FROM_LINEAR_DATA_OFFSET */

#ifndef HAVE_SKB_INNER_TRANSPORT_OFFSET
static inline int skb_inner_transport_offset(const struct sk_buff *skb)
{
        return skb_inner_transport_header(skb) - skb->data;
}
#endif

#ifndef HAVE_SKB_RESET_TAIL_POINTER
static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
        skb->tail = skb->data;
}
#endif
/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 16 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 16 bytes of
 * headroom; you should not reduce this.
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD 16
#endif

#ifndef HAVE_SKB_COW_HEAD
static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
                            int cloned)
{
        int delta = 0;

        if (headroom < NET_SKB_PAD)
                headroom = NET_SKB_PAD;
        if (headroom > skb_headroom(skb))
                delta = headroom - skb_headroom(skb);

        if (delta || cloned)
                return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
                                        GFP_ATOMIC);
        return 0;
}

static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
        return __skb_cow(skb, headroom, skb_header_cloned(skb));
}
#endif /* !HAVE_SKB_COW_HEAD */
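
/*
 * Illustrative usage (sketch): callers normally invoke skb_cow_head()
 * before pushing an encapsulation header so the headroom is private and
 * large enough, e.g.:
 *
 *      if (skb_cow_head(skb, VLAN_HLEN) < 0)
 *              return -ENOMEM;
 *      __skb_push(skb, VLAN_HLEN);
 *
 * The fallback rounds any extra headroom up to NET_SKB_PAD and goes
 * through pskb_expand_head() when the header area is shared.
 */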

#ifndef HAVE_SKB_DST_ACCESSOR_FUNCS
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
        return (struct dst_entry *)skb->dst;
}

static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
        skb->dst = dst;
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
        return (struct rtable *)skb->dst;
}
#endif

#ifndef CHECKSUM_PARTIAL
#define CHECKSUM_PARTIAL CHECKSUM_HW
#endif
#ifndef CHECKSUM_COMPLETE
#define CHECKSUM_COMPLETE CHECKSUM_HW
#endif

#ifndef HAVE_SKB_WARN_LRO
#ifndef NETIF_F_LRO
static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
        return false;
}
#else
extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
        /* LRO sets gso_size but not gso_type, whereas if GSO is really
         * wanted then gso_type will be set. */
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) {
                __skb_warn_lro_forwarding(skb);
                return true;
        }
        return false;
}
#endif /* NETIF_F_LRO */
#endif /* HAVE_SKB_WARN_LRO */

#ifndef HAVE_CONSUME_SKB
#define consume_skb kfree_skb
#endif

#ifndef HAVE_SKB_FRAG_PAGE
#include <linux/mm.h>

static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
        return frag->page;
}

static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
{
        frag->page = page;
}

static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
        frag->size = size;
}

static inline void __skb_frag_ref(skb_frag_t *frag)
{
        get_page(skb_frag_page(frag));
}

static inline void __skb_frag_unref(skb_frag_t *frag)
{
        put_page(skb_frag_page(frag));
}

static inline void skb_frag_ref(struct sk_buff *skb, int f)
{
        __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}

static inline void skb_frag_unref(struct sk_buff *skb, int f)
{
        __skb_frag_unref(&skb_shinfo(skb)->frags[f]);
}

#endif

#ifndef HAVE_SKB_RESET_MAC_LEN
static inline void skb_reset_mac_len(struct sk_buff *skb)
{
        skb->mac_len = skb->network_header - skb->mac_header;
}
#endif

#ifndef HAVE_SKB_UNCLONE
static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
        might_sleep_if(pri & __GFP_WAIT);

        if (skb_cloned(skb))
                return pskb_expand_head(skb, 0, 0, pri);

        return 0;
}
#endif

#ifndef HAVE_SKB_ORPHAN_FRAGS
static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
        return 0;
}
#endif

#ifndef HAVE_SKB_GET_HASH
#define skb_get_hash skb_get_rxhash
#endif /* HAVE_SKB_GET_HASH */

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)
#define skb_zerocopy_headlen rpl_skb_zerocopy_headlen
unsigned int rpl_skb_zerocopy_headlen(const struct sk_buff *from);
#endif

#ifndef HAVE_SKB_ZEROCOPY
#define skb_zerocopy rpl_skb_zerocopy
int rpl_skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len,
                     int hlen);
#endif

#ifndef HAVE_SKB_CLEAR_HASH
static inline void skb_clear_hash(struct sk_buff *skb)
{
#ifdef HAVE_RXHASH
        skb->rxhash = 0;
#endif
#if defined(HAVE_L4_RXHASH) && !defined(HAVE_RHEL_OVS_HOOK)
        skb->l4_rxhash = 0;
#endif
}
#endif

#ifndef HAVE_SKB_HAS_FRAG_LIST
#define skb_has_frag_list skb_has_frags
#endif

#ifndef HAVE___SKB_FILL_PAGE_DESC
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
                                        struct page *page, int off, int size)
{
        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

        __skb_frag_set_page(frag, page);
        frag->page_offset = off;
        skb_frag_size_set(frag, size);
}
#endif

#ifndef HAVE_SKB_ENSURE_WRITABLE
#define skb_ensure_writable rpl_skb_ensure_writable
int rpl_skb_ensure_writable(struct sk_buff *skb, int write_len);
#endif

#ifndef HAVE___SKB_VLAN_POP
#define __skb_vlan_pop rpl___skb_vlan_pop
int rpl___skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
#endif

#ifndef HAVE_SKB_VLAN_POP
#define skb_vlan_pop rpl_skb_vlan_pop
int rpl_skb_vlan_pop(struct sk_buff *skb);
#endif

#ifndef HAVE_SKB_VLAN_PUSH
#define skb_vlan_push rpl_skb_vlan_push
int rpl_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
#endif
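
/*
 * Illustrative usage of the VLAN helpers declared above (sketch): pushing
 * an 802.1Q tag follows the upstream calling convention, e.g.:
 *
 *      err = skb_vlan_push(skb, htons(ETH_P_8021Q), vlan_tci);
 *      if (err)
 *              return err;
 *
 * On kernels that already provide skb_vlan_push()/skb_vlan_pop() the
 * defines above are skipped and the native implementations are used.
 */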

#ifndef HAVE_KFREE_SKB_LIST
void rpl_kfree_skb_list(struct sk_buff *segs);
#define kfree_skb_list rpl_kfree_skb_list
#endif

#ifndef HAVE_SKB_CHECKSUM_START_OFFSET
static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
        return skb->csum_start - skb_headroom(skb);
}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0)
#define skb_postpull_rcsum rpl_skb_postpull_rcsum
static inline void skb_postpull_rcsum(struct sk_buff *skb,
                                      const void *start, unsigned int len)
{
        if (skb->ip_summed == CHECKSUM_COMPLETE)
                skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
        else if (skb->ip_summed == CHECKSUM_PARTIAL &&
                 skb_checksum_start_offset(skb) < 0)
                skb->ip_summed = CHECKSUM_NONE;
}

#define skb_pull_rcsum rpl_skb_pull_rcsum
static inline unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
{
        unsigned char *data = skb->data;

        BUG_ON(len > skb->len);
        __skb_pull(skb, len);
        skb_postpull_rcsum(skb, data, len);
        return skb->data;
}

#endif
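
/*
 * Illustrative usage of the backported pull helpers (sketch): when an
 * outer header is stripped on receive, the checksum state must follow the
 * pull, e.g. with hdr_len standing in for the encapsulation header length:
 *
 *      if (unlikely(!pskb_may_pull(skb, hdr_len)))
 *              goto drop;
 *      skb_pull_rcsum(skb, hdr_len);
 *      skb_reset_network_header(skb);
 */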

#if LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0)
#define skb_scrub_packet rpl_skb_scrub_packet
void rpl_skb_scrub_packet(struct sk_buff *skb, bool xnet);
#endif

#define skb_pop_mac_header rpl_skb_pop_mac_header
static inline void skb_pop_mac_header(struct sk_buff *skb)
{
        skb->mac_header = skb->network_header;
}

#ifndef HAVE_SKB_CLEAR_HASH_IF_NOT_L4
static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
{
        if (!skb->l4_rxhash)
                skb_clear_hash(skb);
}
#endif

#ifndef HAVE_SKB_POSTPUSH_RCSUM
static inline void skb_postpush_rcsum(struct sk_buff *skb,
                                      const void *start, unsigned int len)
{
        /* To perform the reverse operation of skb_postpull_rcsum(),
         * instead of ...
         *
         * skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
         *
         * ... we can use this equivalent version to save a few
         * instructions: feeding a csum of 0 into csum_partial() and
         * adding skb->csum afterwards is equivalent to feeding skb->csum
         * in the first place.
         */
        if (skb->ip_summed == CHECKSUM_COMPLETE)
                skb->csum = csum_partial(start, len, skb->csum);
}
#endif
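
/*
 * Illustrative counterpart to skb_postpush_rcsum() (sketch): after copying
 * hdr_len bytes of a new header in front of the data, the complete
 * checksum is extended over the freshly written region:
 *
 *      __skb_push(skb, hdr_len);
 *      memcpy(skb->data, hdr, hdr_len);
 *      skb_postpush_rcsum(skb, skb->data, hdr_len);
 *
 * hdr and hdr_len are placeholders for the header being prepended.
 */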

#define skb_checksum_start rpl_skb_checksum_start
static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
{
        return skb->head + skb->csum_start;
}

#ifndef HAVE_LCO_CSUM
static inline __wsum lco_csum(struct sk_buff *skb)
{
        unsigned char *csum_start = skb_checksum_start(skb);
        unsigned char *l4_hdr = skb_transport_header(skb);
        __wsum partial;

        /* Start with complement of inner checksum adjustment */
        partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
                                                    skb->csum_offset));

        /* Add in checksum of our headers (incl. outer checksum
         * adjustment filled in by caller) and return result.
         */
        return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
}
#endif
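
/*
 * Illustrative consumer of lco_csum() (sketch): with local checksum
 * offload, the outer UDP checksum of a tunnel is derived from the inner
 * checksum instead of summing the payload, roughly as udp_set_csum() does:
 *
 *      uh->check = ~udp_v4_check(len, saddr, daddr, lco_csum(skb));
 *      if (uh->check == 0)
 *              uh->check = CSUM_MANGLED_0;
 *
 * uh, len, saddr and daddr refer to the outer UDP/IP header fields and are
 * placeholders here.
 */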

#ifndef HAVE_SKB_NFCT
static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
        return skb->nfct;
#else
        return NULL;
#endif
}
#endif

#ifndef HAVE_SKB_PUT_ZERO
static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
{
        void *tmp = skb_put(skb, len);

        memset(tmp, 0, len);

        return tmp;
}
#endif
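
/*
 * Illustrative usage of the skb_put_zero() fallback (sketch): appending a
 * zero-initialised header at the tail in a single call, e.g.:
 *
 *      struct gre_base_hdr *greh = skb_put_zero(skb, sizeof(*greh));
 *      greh->protocol = htons(ETH_P_TEB);
 *
 * The helper simply combines skb_put() with memset() on kernels that lack
 * the native implementation.
 */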

#ifndef HAVE_SKB_GSO_IPXIP6
#define SKB_GSO_IPXIP6 (1 << 10)
#endif

#ifndef HAVE_SKB_SET_INNER_IPPROTO
static inline void skb_set_inner_ipproto(struct sk_buff *skb,
                                         __u8 ipproto)
{
}
#endif

#ifndef HAVE_NF_RESET_CT
#define nf_reset_ct nf_reset
#endif

#endif