/* datapath/linux/compat/skbuff-openvswitch.c */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>

#include "gso.h"

#if !defined(HAVE_SKB_WARN_LRO) && defined(NETIF_F_LRO)

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

void __skb_warn_lro_forwarding(const struct sk_buff *skb)
{
	pr_warn("%s: received packets cannot be forwarded while LRO is enabled\n",
		skb->dev->name);
}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)

static inline bool head_frag(const struct sk_buff *skb)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
	/* skb->head_frag was introduced in 3.5. */
	return skb->head_frag;
#else
	return false;
#endif
}

/**
 *	skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
 *	@from: source buffer
 *
 *	Calculates the amount of linear headroom needed in the 'to' skb passed
 *	into skb_zerocopy().
 */
unsigned int
rpl_skb_zerocopy_headlen(const struct sk_buff *from)
{
	unsigned int hlen = 0;

	/* Copy the head when it cannot be shared as a page fragment: it is
	 * not page-backed, smaller than a cache line, or the source already
	 * uses every frag slot.
	 */
	if (!head_frag(from) ||
	    skb_headlen(from) < L1_CACHE_BYTES ||
	    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
		hlen = skb_headlen(from);

	/* A frag list cannot be shared either; copy the whole payload. */
	if (skb_has_frag_list(from))
		hlen = from->len;

	return hlen;
}
EXPORT_SYMBOL_GPL(rpl_skb_zerocopy_headlen);
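
/* skb_zerocopy(), below, pairs with this helper: callers size the linear
 * area of the destination skb with skb_zerocopy_headlen() and pass the
 * result back in as 'hlen'.
 */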

#ifndef HAVE_SKB_ZEROCOPY
/**
 *	skb_zerocopy - Zero copy skb to skb
 *	@to: destination buffer
 *	@from: source buffer
 *	@len: number of bytes to copy from source buffer
 *	@hlen: size of linear headroom in destination buffer
 *
 *	Copies up to `len` bytes from `from` to `to` by creating references
 *	to the frags in the source buffer.
 *
 *	The `hlen` as calculated by skb_zerocopy_headlen() specifies the
 *	headroom in the `to` buffer.
 *
 *	Return value:
 *	0: everything is OK
 *	-ENOMEM: couldn't orphan frags of @from due to lack of memory
 *	-EFAULT: skb_copy_bits() found some problem with skb geometry
 */
int
rpl_skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
{
	int i, j = 0;
	int plen = 0; /* length of skb->head fragment */
	int ret;
	struct page *page;
	unsigned int offset;

	BUG_ON(!head_frag(from) && !hlen);

	/* don't bother with small payloads */
	if (len <= skb_tailroom(to))
		return skb_copy_bits(from, 0, skb_put(to, len), len);

	if (hlen) {
		ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
		if (unlikely(ret))
			return ret;
		len -= hlen;
	} else {
		/* Share the head itself as the first page fragment. */
		plen = min_t(int, skb_headlen(from), len);
		if (plen) {
			page = virt_to_head_page(from->head);
			offset = from->data - (unsigned char *)page_address(page);
			__skb_fill_page_desc(to, 0, page, offset, plen);
			get_page(page);
			j = 1;
			len -= plen;
		}
	}

	to->truesize += len + plen;
	to->len += len + plen;
	to->data_len += len + plen;

	if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
		skb_tx_error(from);
		return -ENOMEM;
	}

	/* Reference the source frags, truncating the last one to 'len'. */
	for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
		if (!len)
			break;
		skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
		skb_shinfo(to)->frags[j].size = min_t(int,
						      skb_shinfo(to)->frags[j].size,
						      len);
		len -= skb_shinfo(to)->frags[j].size;
		skb_frag_ref(to, j);
		j++;
	}
	skb_shinfo(to)->nr_frags = j;

	return 0;
}
EXPORT_SYMBOL_GPL(rpl_skb_zerocopy);
#endif /* !HAVE_SKB_ZEROCOPY */
#endif /* < 3.14 */
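
#if 0
/* Usage sketch (illustration only, not compiled): a hypothetical caller
 * pairs skb_zerocopy_headlen() with skb_zerocopy() so the destination is
 * allocated with exactly the linear headroom the copy needs. Assumes the
 * compat headers remap both names to the rpl_ variants above.
 */
static struct sk_buff *example_zerocopy_clone(struct sk_buff *from, int len)
{
	unsigned int hlen = skb_zerocopy_headlen(from);
	struct sk_buff *to = alloc_skb(hlen, GFP_ATOMIC);

	if (!to)
		return NULL;
	if (skb_zerocopy(to, from, len, hlen)) {
		kfree_skb(to);
		return NULL;
	}
	return to;
}
#endif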

#ifndef HAVE_SKB_ENSURE_WRITABLE
int rpl_skb_ensure_writable(struct sk_buff *skb, int write_len)
{
	if (!pskb_may_pull(skb, write_len))
		return -ENOMEM;

	/* An unshared skb, or a clone still writable up to write_len, can
	 * be modified in place.
	 */
	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
		return 0;

	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(rpl_skb_ensure_writable);
#endif

#ifndef HAVE_SKB_VLAN_POP
/* remove VLAN header from packet and update csum accordingly. */
static int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
{
	struct vlan_hdr *vhdr;
	unsigned int offset = skb->data - skb_mac_header(skb);
	int err;

	__skb_push(skb, offset);
	err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
	if (unlikely(err))
		goto pull;

	skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);

	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
	*vlan_tci = ntohs(vhdr->h_vlan_TCI);

	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
	__skb_pull(skb, VLAN_HLEN);

	vlan_set_encap_proto(skb, vhdr);
	skb->mac_header += VLAN_HLEN;

	if (skb_network_offset(skb) < ETH_HLEN)
		skb_set_network_header(skb, ETH_HLEN);

	skb_reset_mac_len(skb);
pull:
	__skb_pull(skb, offset);

	return err;
}
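
/* The memmove()/__skb_pull() pair above rewrites the frame like so
 * (illustration):
 *
 *   before: | dst MAC (6) | src MAC (6) | TPID (2) | TCI (2) | type | ...
 *   after:                | dst MAC (6) | src MAC (6)        | type | ...
 *
 * The two MAC addresses slide forward by VLAN_HLEN (4) bytes and the
 * now-unused bytes at the head of the buffer are pulled off.
 */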

int rpl_skb_vlan_pop(struct sk_buff *skb)
{
	u16 vlan_tci;
	__be16 vlan_proto;
	int err;

	if (likely(skb_vlan_tag_present(skb))) {
		skb->vlan_tci = 0;
	} else {
		if (unlikely((skb->protocol != htons(ETH_P_8021Q) &&
			      skb->protocol != htons(ETH_P_8021AD)) ||
			     skb->len < VLAN_ETH_HLEN))
			return 0;

		err = __skb_vlan_pop(skb, &vlan_tci);
		if (err)
			return err;
	}
	/* move next vlan tag to hw accel tag */
	if (likely((skb->protocol != htons(ETH_P_8021Q) &&
		    skb->protocol != htons(ETH_P_8021AD)) ||
		   skb->len < VLAN_ETH_HLEN))
		return 0;

	vlan_proto = htons(ETH_P_8021Q);
	err = __skb_vlan_pop(skb, &vlan_tci);
	if (unlikely(err))
		return err;

	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
	return 0;
}
EXPORT_SYMBOL_GPL(rpl_skb_vlan_pop);
#endif
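
#if 0
/* Usage sketch (illustration only, not compiled): strip every VLAN tag
 * from a possibly double-tagged (QinQ) frame. Each pop clears the
 * hw-accel tag and, if another tag follows in the packet data, promotes
 * it into the hw-accel slot, so the loop makes progress every round.
 */
static int example_strip_all_tags(struct sk_buff *skb)
{
	while (skb_vlan_tag_present(skb) ||
	       ((skb->protocol == htons(ETH_P_8021Q) ||
		 skb->protocol == htons(ETH_P_8021AD)) &&
		skb->len >= VLAN_ETH_HLEN)) {
		int err = skb_vlan_pop(skb);

		if (err)
			return err;
	}
	return 0;
}
#endif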

#ifndef HAVE_SKB_VLAN_PUSH
int rpl_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
{
	if (skb_vlan_tag_present(skb)) {
		unsigned int offset = skb->data - skb_mac_header(skb);
		int err;

		/* __vlan_insert_tag() expects skb->data to point at the mac
		 * header, so move skb->data there before calling it and
		 * restore the original position afterwards.
		 */
		__skb_push(skb, offset);
		err = __vlan_insert_tag(skb, skb->vlan_proto,
					skb_vlan_tag_get(skb));
		if (err)
			return err;
		skb->mac_len += VLAN_HLEN;
		__skb_pull(skb, offset);

		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->csum = csum_add(skb->csum, csum_partial(skb->data
					+ (2 * ETH_ALEN), VLAN_HLEN, 0));
	}
	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
	return 0;
}
EXPORT_SYMBOL_GPL(rpl_skb_vlan_push);
#endif
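
#if 0
/* Usage sketch (illustration only, not compiled): replace the outermost
 * VLAN tag by popping it and pushing a fresh 802.1Q tag.
 */
static int example_retag(struct sk_buff *skb, u16 new_tci)
{
	int err = skb_vlan_pop(skb);

	if (err)
		return err;
	return skb_vlan_push(skb, htons(ETH_P_8021Q), new_tci);
}
#endif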

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0)
int rpl_pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
			 gfp_t gfp_mask)
{
	int err;
	int inner_mac_offset, inner_nw_offset, inner_transport_offset;

	/* Save the inner header offsets: pskb_expand_head() on these
	 * kernels does not preserve them across the reallocation.
	 */
	inner_mac_offset = skb_inner_mac_offset(skb);
	inner_nw_offset = skb_inner_network_offset(skb);
	inner_transport_offset = ovs_skb_inner_transport_offset(skb);

	/* Undo the compat macro so this calls the kernel's own function. */
#undef pskb_expand_head
	err = pskb_expand_head(skb, nhead, ntail, gfp_mask);
	if (err)
		return err;

	skb_set_inner_mac_header(skb, inner_mac_offset);
	skb_set_inner_network_header(skb, inner_nw_offset);
	skb_set_inner_transport_header(skb, inner_transport_offset);

	return 0;
}
EXPORT_SYMBOL(rpl_pskb_expand_head);
#endif /* < 3.18 */
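
#if 0
/* Usage sketch (illustration only, not compiled): grow headroom before
 * prepending an encapsulation header; the wrapper above keeps the inner
 * mac/network/transport offsets intact across the reallocation.
 */
static int example_make_headroom(struct sk_buff *skb, unsigned int needed)
{
	if (skb_headroom(skb) >= needed)
		return 0;
	/* Call the wrapper directly; the compat headers normally remap
	 * pskb_expand_head() to it (the #undef above bypassed that here).
	 */
	return rpl_pskb_expand_head(skb, needed - skb_headroom(skb), 0,
				    GFP_ATOMIC);
}
#endif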