/*
 * datapath/linux/compat/include/linux/skbuff.h
 * (Open vSwitch datapath compat wrapper around <linux/skbuff.h>;
 * skb_unclone is defined conditionally in this compat code.)
 */
1 #ifndef __LINUX_SKBUFF_WRAPPER_H
2 #define __LINUX_SKBUFF_WRAPPER_H 1
3
4 #include_next <linux/skbuff.h>
5
6 #include <linux/version.h>
7
8 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
9 /* In version 2.6.24 the return type of skb_headroom() changed from 'int' to
10 * 'unsigned int'. We use skb_headroom() as one arm of a min(a,b) invocation
11 * in make_writable() in actions.c, so we need the correct type. */
12 #define skb_headroom rpl_skb_headroom
13 static inline unsigned int rpl_skb_headroom(const struct sk_buff *skb)
14 {
15 return skb->data - skb->head;
16 }
17 #endif
18
19 #ifndef HAVE_SKB_COPY_FROM_LINEAR_DATA_OFFSET
20 static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
21 const int offset, void *to,
22 const unsigned int len)
23 {
24 memcpy(to, skb->data + offset, len);
25 }
26
27 static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
28 const int offset,
29 const void *from,
30 const unsigned int len)
31 {
32 memcpy(skb->data + offset, from, len);
33 }
34
35 #endif /* !HAVE_SKB_COPY_FROM_LINEAR_DATA_OFFSET */
36
37 #ifndef HAVE_SKB_RESET_TAIL_POINTER
/* Point skb->tail at the start of the data area (compat for kernels
 * that lack skb_reset_tail_pointer(); here 'tail' is assigned a plain
 * pointer, so this only targets kernels where tail is not an offset). */
static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}
42 #endif
43 /*
44 * The networking layer reserves some headroom in skb data (via
45 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
46 * the header has to grow. In the default case, if the header has to grow
47 * 16 bytes or less we avoid the reallocation.
48 *
49 * Unfortunately this headroom changes the DMA alignment of the resulting
50 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
51 * on some architectures. An architecture can override this value,
52 * perhaps setting it to a cacheline in size (since that will maintain
53 * cacheline alignment of the DMA). It must be a power of 2.
54 *
55 * Various parts of the networking layer expect at least 16 bytes of
56 * headroom, you should not reduce this.
57 */
58 #ifndef NET_SKB_PAD
59 #define NET_SKB_PAD 16
60 #endif
61
62 #ifndef HAVE_SKB_COW_HEAD
63 static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
64 int cloned)
65 {
66 int delta = 0;
67
68 if (headroom < NET_SKB_PAD)
69 headroom = NET_SKB_PAD;
70 if (headroom > skb_headroom(skb))
71 delta = headroom - skb_headroom(skb);
72
73 if (delta || cloned)
74 return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
75 GFP_ATOMIC);
76 return 0;
77 }
78
/* Ensure 'headroom' bytes of writable space ahead of skb->data,
 * treating a cloned header as needing reallocation. */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}
83 #endif /* !HAVE_SKB_COW_HEAD */
84
85 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
/* Pre-2.6.23 kernels cannot tell whether a clone's header may safely be
 * written in place, so conservatively report "not writable" and force
 * callers down the copy path.  Return 0 rather than 'false': the return
 * type is int and 'false' is not guaranteed to be defined on the very
 * old kernels this compat branch targets. */
static inline int skb_clone_writable(struct sk_buff *skb, int len)
{
	return 0;
}
90 #endif
91
92 #ifndef HAVE_SKB_DST_ACCESSOR_FUNCS
/* Compat accessors for kernels that predate the skb_dst()/skb_dst_set()
 * helpers: on those kernels skb->dst is the raw dst_entry pointer. */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	return (struct dst_entry *)skb->dst;
}

static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->dst = dst;
}

/* Return the skb's dst reinterpreted as its containing routing entry. */
static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb->dst;
}
107 #endif
108
109 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
110 /* Emulate Linux 2.6.17 and later behavior, in which kfree_skb silently ignores
111 * null pointer arguments. */
112 #define kfree_skb(skb) kfree_skb_maybe_null(skb)
static inline void kfree_skb_maybe_null(struct sk_buff *skb)
{
	if (likely(skb != NULL))
		/* Parenthesizing the name suppresses the function-like
		 * macro above, so this calls the real kfree_skb(). */
		(kfree_skb)(skb);
}
118 #endif
119
120
121 #ifndef CHECKSUM_PARTIAL
122 #define CHECKSUM_PARTIAL CHECKSUM_HW
123 #endif
124 #ifndef CHECKSUM_COMPLETE
125 #define CHECKSUM_COMPLETE CHECKSUM_HW
126 #endif
127
128 #ifdef HAVE_MAC_RAW
129 #define mac_header mac.raw
130 #define network_header nh.raw
131 #define transport_header h.raw
132 #endif
133
134 #ifndef HAVE_SKBUFF_HEADER_HELPERS
/* Compat versions of the skb header accessors for older kernels that
 * keep the transport/network/MAC header pointers in the h/nh/mac
 * unions of struct sk_buff rather than as separate fields. */
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->h.raw;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->h.raw = skb->data;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb->h.raw = skb->data + offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->nh.raw;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->nh.raw = skb->data;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb->nh.raw = skb->data + offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->mac.raw;
}
170
171 static inline void skb_reset_mac_header(struct sk_buff *skb)
172 {
173 skb->mac_header = skb->data;
174 }
175
static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb->mac.raw = skb->data + offset;
}

/* Offset of the transport header from the start of the linear data. */
static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

/* Offset of the network header from the start of the linear data. */
static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

/* Copy 'len' bytes from 'from' to the start of the linear data area. */
static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}
197 #endif /* !HAVE_SKBUFF_HEADER_HELPERS */
198
199 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
200 #warning "TSO/UFO not supported on kernels earlier than 2.6.18"
201
/* GSO does not exist on kernels before 2.6.18 (see #warning above),
 * so no skb is ever a GSO skb. */
static inline int skb_is_gso(const struct sk_buff *skb)
{
	return 0;
}

/* Stub: with no GSO support there is never anything to segment.
 * Always returns NULL. */
static inline struct sk_buff *skb_gso_segment(struct sk_buff *skb,
					      int features)
{
	return NULL;
}
212 #endif /* before 2.6.18 */
213
214 #ifndef HAVE_SKB_WARN_LRO
215 #ifndef NETIF_F_LRO
/* Without NETIF_F_LRO the kernel cannot produce LRO packets at all,
 * so there is never anything to warn about. */
static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	return false;
}
220 #else
221 extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);
222
223 static inline bool skb_warn_if_lro(const struct sk_buff *skb)
224 {
225 /* LRO sets gso_size but not gso_type, whereas if GSO is really
226 * wanted then gso_type will be set. */
227 struct skb_shared_info *shinfo = skb_shinfo(skb);
228 if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) {
229 __skb_warn_lro_forwarding(skb);
230 return true;
231 }
232 return false;
233 }
234 #endif /* NETIF_F_LRO */
235 #endif /* HAVE_SKB_WARN_LRO */
236
237 #ifndef HAVE_CONSUME_SKB
238 #define consume_skb kfree_skb
239 #endif
240
241 #ifndef HAVE_SKB_FRAG_PAGE
/* Compat for kernels whose skb_frag_t stores the page pointer directly
 * rather than providing the skb_frag_page() accessor. */
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
	return frag->page;
}
246 #endif
247
248 #ifndef HAVE_SKB_RESET_MAC_LEN
/* Set mac_len to the distance from the MAC header to the network
 * header, i.e. the length of the link-layer header. */
static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}
253 #endif
254
255 #ifndef HAVE_SKB_UNCLONE
256 static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
257 {
258 might_sleep_if(pri & __GFP_WAIT);
259
260 if (skb_cloned(skb))
261 return pskb_expand_head(skb, 0, 0, pri);
262
263 return 0;
264 }
265 #endif
266
267 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
268 extern u32 __skb_get_rxhash(struct sk_buff *skb);
269 static inline __u32 skb_get_rxhash(struct sk_buff *skb)
270 {
271 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,34)
272 if (!skb->rxhash)
273 #endif
274 return __skb_get_rxhash(skb);
275 }
276 #endif
277
278 #endif