/*
 * Compat wrapper around <linux/skbuff.h>: backfills sk_buff helpers that
 * are missing on older kernels.
 */
#ifndef __LINUX_SKBUFF_WRAPPER_H
#define __LINUX_SKBUFF_WRAPPER_H 1

#include_next <linux/skbuff.h>

#include <linux/version.h>
8 #ifndef HAVE_SKB_COPY_FROM_LINEAR_DATA_OFFSET
9 static inline void skb_copy_from_linear_data_offset(const struct sk_buff
*skb
,
10 const int offset
, void *to
,
11 const unsigned int len
)
13 memcpy(to
, skb
->data
+ offset
, len
);
16 static inline void skb_copy_to_linear_data_offset(struct sk_buff
*skb
,
19 const unsigned int len
)
21 memcpy(skb
->data
+ offset
, from
, len
);
24 #endif /* !HAVE_SKB_COPY_FROM_LINEAR_DATA_OFFSET */
26 #ifndef HAVE_SKB_RESET_TAIL_POINTER
27 static inline void skb_reset_tail_pointer(struct sk_buff
*skb
)
29 skb
->tail
= skb
->data
;
/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 16 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 16 bytes of
 * headroom, you should not reduce this.
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	16
#endif
51 #ifndef HAVE_SKB_COW_HEAD
52 static inline int __skb_cow(struct sk_buff
*skb
, unsigned int headroom
,
57 if (headroom
< NET_SKB_PAD
)
58 headroom
= NET_SKB_PAD
;
59 if (headroom
> skb_headroom(skb
))
60 delta
= headroom
- skb_headroom(skb
);
63 return pskb_expand_head(skb
, ALIGN(delta
, NET_SKB_PAD
), 0,
68 static inline int skb_cow_head(struct sk_buff
*skb
, unsigned int headroom
)
70 return __skb_cow(skb
, headroom
, skb_header_cloned(skb
));
72 #endif /* !HAVE_SKB_COW_HEAD */
74 #ifndef HAVE_SKB_DST_ACCESSOR_FUNCS
75 static inline struct dst_entry
*skb_dst(const struct sk_buff
*skb
)
77 return (struct dst_entry
*)skb
->dst
;
80 static inline void skb_dst_set(struct sk_buff
*skb
, struct dst_entry
*dst
)
85 static inline struct rtable
*skb_rtable(const struct sk_buff
*skb
)
87 return (struct rtable
*)skb
->dst
;
/* Old kernels have the single CHECKSUM_HW value where newer ones
 * distinguish CHECKSUM_PARTIAL (tx) from CHECKSUM_COMPLETE (rx). */
#ifndef CHECKSUM_PARTIAL
#define CHECKSUM_PARTIAL CHECKSUM_HW
#endif
#ifndef CHECKSUM_COMPLETE
#define CHECKSUM_COMPLETE CHECKSUM_HW
#endif
98 #ifndef HAVE_SKBUFF_HEADER_HELPERS
99 static inline unsigned char *skb_transport_header(const struct sk_buff
*skb
)
104 static inline void skb_reset_transport_header(struct sk_buff
*skb
)
106 skb
->h
.raw
= skb
->data
;
109 static inline void skb_set_transport_header(struct sk_buff
*skb
,
112 skb
->h
.raw
= skb
->data
+ offset
;
115 static inline unsigned char *skb_network_header(const struct sk_buff
*skb
)
120 static inline void skb_reset_network_header(struct sk_buff
*skb
)
122 skb
->nh
.raw
= skb
->data
;
125 static inline void skb_set_network_header(struct sk_buff
*skb
, const int offset
)
127 skb
->nh
.raw
= skb
->data
+ offset
;
130 static inline unsigned char *skb_mac_header(const struct sk_buff
*skb
)
135 static inline void skb_reset_mac_header(struct sk_buff
*skb
)
137 skb
->mac_header
= skb
->data
;
140 static inline void skb_set_mac_header(struct sk_buff
*skb
, const int offset
)
142 skb
->mac
.raw
= skb
->data
+ offset
;
145 static inline int skb_transport_offset(const struct sk_buff
*skb
)
147 return skb_transport_header(skb
) - skb
->data
;
150 static inline int skb_network_offset(const struct sk_buff
*skb
)
152 return skb_network_header(skb
) - skb
->data
;
155 static inline void skb_copy_to_linear_data(struct sk_buff
*skb
,
157 const unsigned int len
)
159 memcpy(skb
->data
, from
, len
);
161 #endif /* !HAVE_SKBUFF_HEADER_HELPERS */
163 #ifndef HAVE_SKB_WARN_LRO
165 static inline bool skb_warn_if_lro(const struct sk_buff
*skb
)
170 extern void __skb_warn_lro_forwarding(const struct sk_buff
*skb
);
172 static inline bool skb_warn_if_lro(const struct sk_buff
*skb
)
174 /* LRO sets gso_size but not gso_type, whereas if GSO is really
175 * wanted then gso_type will be set. */
176 struct skb_shared_info
*shinfo
= skb_shinfo(skb
);
177 if (shinfo
->gso_size
!= 0 && unlikely(shinfo
->gso_type
== 0)) {
178 __skb_warn_lro_forwarding(skb
);
183 #endif /* NETIF_F_LRO */
184 #endif /* HAVE_SKB_WARN_LRO */
#ifndef HAVE_CONSUME_SKB
/* Old kernels lack consume_skb(); fall back to plain kfree_skb(). */
#define consume_skb kfree_skb
#endif
190 #ifndef HAVE_SKB_FRAG_PAGE
191 static inline struct page
*skb_frag_page(const skb_frag_t
*frag
)
197 #ifndef HAVE_SKB_RESET_MAC_LEN
198 static inline void skb_reset_mac_len(struct sk_buff
*skb
)
200 skb
->mac_len
= skb
->network_header
- skb
->mac_header
;
204 #ifndef HAVE_SKB_UNCLONE
205 static inline int skb_unclone(struct sk_buff
*skb
, gfp_t pri
)
207 might_sleep_if(pri
& __GFP_WAIT
);
210 return pskb_expand_head(skb
, 0, 0, pri
);
216 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
217 extern u32
__skb_get_rxhash(struct sk_buff
*skb
);
218 static inline __u32
skb_get_rxhash(struct sk_buff
*skb
)
220 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,34)
224 return __skb_get_rxhash(skb
);