datapath/linux/compat/skbuff-openvswitch.c (mirror_ovs.git)
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>

#include "gso.h"

#if !defined(HAVE_SKB_WARN_LRO) && defined(NETIF_F_LRO)

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

void __skb_warn_lro_forwarding(const struct sk_buff *skb)
{
	if (net_ratelimit())
		pr_warn("%s: received packets cannot be forwarded while LRO is enabled\n",
			skb->dev->name);
}

#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)

static inline bool head_frag(const struct sk_buff *skb)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
	return skb->head_frag;
#else
	return false;
#endif
}

/**
 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
 * @from: source buffer
 *
 * Calculates the amount of linear headroom needed in the 'to' skb passed
 * into skb_zerocopy().
 */
unsigned int
rpl_skb_zerocopy_headlen(const struct sk_buff *from)
{
	unsigned int hlen = 0;

	if (!head_frag(from) ||
	    skb_headlen(from) < L1_CACHE_BYTES ||
	    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
		hlen = skb_headlen(from);

	if (skb_has_frag_list(from))
		hlen = from->len;

	return hlen;
}
EXPORT_SYMBOL_GPL(rpl_skb_zerocopy_headlen);
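
/*
 * Usage sketch (illustrative, assuming the compat headers map
 * skb_zerocopy_headlen() onto the rpl_ variant above): the returned
 * length sizes the linear area of the destination skb before the
 * zerocopy itself:
 *
 *	unsigned int hlen = skb_zerocopy_headlen(from);
 *	struct sk_buff *to = alloc_skb(hlen, GFP_ATOMIC);
 */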

#ifndef HAVE_SKB_ZEROCOPY
/**
 * skb_zerocopy - Zero copy skb to skb
 * @to: destination buffer
 * @from: source buffer
 * @len: number of bytes to copy from source buffer
 * @hlen: size of linear headroom in destination buffer
 *
 * Copies up to @len bytes from @from to @to by creating references
 * to the frags in the source buffer.
 *
 * The @hlen as calculated by skb_zerocopy_headlen() specifies the
 * headroom in the @to buffer.
 *
 * Return value:
 * 0: everything is OK
 * -ENOMEM: couldn't orphan frags of @from due to lack of memory
 * -EFAULT: skb_copy_bits() found some problem with skb geometry
 */
int
rpl_skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
{
	int i, j = 0;
	int plen = 0; /* length of skb->head fragment */
	int ret;
	struct page *page;
	unsigned int offset;

	BUG_ON(!head_frag(from) && !hlen);

	/* don't bother with small payloads */
	if (len <= skb_tailroom(to))
		return skb_copy_bits(from, 0, skb_put(to, len), len);

	if (hlen) {
		ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
		if (unlikely(ret))
			return ret;
		len -= hlen;
	} else {
		plen = min_t(int, skb_headlen(from), len);
		if (plen) {
			page = virt_to_head_page(from->head);
			offset = from->data - (unsigned char *)page_address(page);
			__skb_fill_page_desc(to, 0, page, offset, plen);
			get_page(page);
			j = 1;
			len -= plen;
		}
	}

	to->truesize += len + plen;
	to->len += len + plen;
	to->data_len += len + plen;

	if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
		skb_tx_error(from);
		return -ENOMEM;
	}

	for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
		if (!len)
			break;
		skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
		skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
		len -= skb_shinfo(to)->frags[j].size;
		skb_frag_ref(to, j);
		j++;
	}
	skb_shinfo(to)->nr_frags = j;

	return 0;
}
EXPORT_SYMBOL_GPL(rpl_skb_zerocopy);
#endif /* HAVE_SKB_ZEROCOPY */
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) */
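
/*
 * Usage sketch (illustrative): a caller that sized 'to' via
 * skb_zerocopy_headlen() would hand both buffers over and unwind on
 * the error codes documented above:
 *
 *	err = skb_zerocopy(to, from, from->len, hlen);
 *	if (unlikely(err)) {
 *		kfree_skb(to);
 *		return err;
 *	}
 */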

#ifndef HAVE_SKB_ENSURE_WRITABLE
int rpl_skb_ensure_writable(struct sk_buff *skb, int write_len)
{
	if (!pskb_may_pull(skb, write_len))
		return -ENOMEM;

	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
		return 0;

	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(rpl_skb_ensure_writable);
#endif /* HAVE_SKB_ENSURE_WRITABLE */
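
/*
 * Usage sketch (illustrative): callers make the first write_len bytes
 * private before editing headers in place; 'new_dst' is a hypothetical
 * address:
 *
 *	err = skb_ensure_writable(skb, ETH_HLEN);
 *	if (unlikely(err))
 *		return err;
 *	ether_addr_copy(eth_hdr(skb)->h_dest, new_dst);
 */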

#ifndef HAVE_SKB_VLAN_POP
/* remove VLAN header from packet and update csum accordingly. */
static int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
{
	struct vlan_hdr *vhdr;
	unsigned int offset = skb->data - skb_mac_header(skb);
	int err;

	__skb_push(skb, offset);
	err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
	if (unlikely(err))
		goto pull;

	skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);

	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
	*vlan_tci = ntohs(vhdr->h_vlan_TCI);

	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
	__skb_pull(skb, VLAN_HLEN);

	vlan_set_encap_proto(skb, vhdr);
	skb->mac_header += VLAN_HLEN;

	if (skb_network_offset(skb) < ETH_HLEN)
		skb_set_network_header(skb, ETH_HLEN);

	skb_reset_mac_len(skb);
pull:
	__skb_pull(skb, offset);

	return err;
}

int rpl_skb_vlan_pop(struct sk_buff *skb)
{
	u16 vlan_tci;
	__be16 vlan_proto;
	int err;

	if (likely(skb_vlan_tag_present(skb))) {
		skb->vlan_tci = 0;
	} else {
		if (unlikely((skb->protocol != htons(ETH_P_8021Q) &&
			      skb->protocol != htons(ETH_P_8021AD)) ||
			     skb->len < VLAN_ETH_HLEN))
			return 0;

		err = __skb_vlan_pop(skb, &vlan_tci);
		if (err)
			return err;
	}
	/* move next vlan tag to hw accel tag */
	if (likely((skb->protocol != htons(ETH_P_8021Q) &&
		    skb->protocol != htons(ETH_P_8021AD)) ||
		   skb->len < VLAN_ETH_HLEN))
		return 0;

	/* capture the inner tag's protocol before the pop rewrites it */
	vlan_proto = skb->protocol;
	err = __skb_vlan_pop(skb, &vlan_tci);
	if (unlikely(err))
		return err;

	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
	return 0;
}
EXPORT_SYMBOL_GPL(rpl_skb_vlan_pop);
#endif /* HAVE_SKB_VLAN_POP */
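
/*
 * Usage sketch (illustrative, assuming skb_vlan_pop() maps onto the
 * rpl_ variant above): stripping the outermost VLAN tag is just
 *
 *	err = skb_vlan_pop(skb);
 *	if (unlikely(err))
 *		return err;
 *
 * and any next (inner) tag is promoted into the hardware-accelerated
 * tag, so popping again strips it in turn.
 */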

#ifndef HAVE_SKB_VLAN_PUSH
int rpl_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
{
	if (skb_vlan_tag_present(skb)) {
		unsigned int offset = skb->data - skb_mac_header(skb);
		int err;

		/* __vlan_insert_tag() expects skb->data to point at the
		 * mac header. So move skb->data there before calling it
		 * and restore the original position afterwards.
		 */
		__skb_push(skb, offset);
		err = __vlan_insert_tag(skb, skb->vlan_proto,
					skb_vlan_tag_get(skb));
		if (err)
			return err;
		skb->mac_len += VLAN_HLEN;
		__skb_pull(skb, offset);

		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->csum = csum_add(skb->csum, csum_partial(skb->data
					+ (2 * ETH_ALEN), VLAN_HLEN, 0));
	}
	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
	return 0;
}
EXPORT_SYMBOL_GPL(rpl_skb_vlan_push);
#endif /* HAVE_SKB_VLAN_PUSH */
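
/*
 * Usage sketch (illustrative): tagging a frame with 802.1Q VID 100,
 * priority 0, assuming skb_vlan_push() maps onto the rpl_ variant
 * above:
 *
 *	err = skb_vlan_push(skb, htons(ETH_P_8021Q), 100);
 *	if (unlikely(err))
 *		return err;
 *
 * An already-present accelerated tag is written into the packet first,
 * and the new tag becomes the accelerated one.
 */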

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0)
int rpl_pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
			 gfp_t gfp_mask)
{
	int err;
	int inner_mac_offset, inner_nw_offset, inner_transport_offset;

	inner_mac_offset = skb_inner_mac_offset(skb);
	inner_nw_offset = skb_inner_network_offset(skb);
	inner_transport_offset = ovs_skb_inner_transport_offset(skb);

	/* Call the kernel's pskb_expand_head(), not this wrapper. */
#undef pskb_expand_head
	err = pskb_expand_head(skb, nhead, ntail, gfp_mask);
	if (err)
		return err;

	skb_set_inner_mac_header(skb, inner_mac_offset);
	skb_set_inner_network_header(skb, inner_nw_offset);
	skb_set_inner_transport_header(skb, inner_transport_offset);

	return 0;
}
EXPORT_SYMBOL(rpl_pskb_expand_head);
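
/*
 * Usage sketch (illustrative): growing headroom before pushing a
 * tunnel header, with the inner offsets preserved across the
 * reallocation; 'needed' is a hypothetical local:
 *
 *	if (skb_headroom(skb) < needed) {
 *		err = pskb_expand_head(skb, needed - skb_headroom(skb),
 *				       0, GFP_ATOMIC);
 *		if (unlikely(err))
 *			return err;
 *	}
 */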

#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) */