]> git.proxmox.com Git - ovs.git/blob - datapath/linux/compat/gso.c
datapath: Move segmentation compatibility code into a compatibility function
[ovs.git] / datapath / linux / compat / gso.c
1 /*
2 * Copyright (c) 2007-2013 Nicira, Inc.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA
17 */
18
19 #include <linux/module.h>
20 #include <linux/if.h>
21 #include <linux/if_tunnel.h>
22 #include <linux/icmp.h>
23 #include <linux/in.h>
24 #include <linux/ip.h>
25 #include <linux/kernel.h>
26 #include <linux/kmod.h>
27 #include <linux/netdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/spinlock.h>
30
31 #include <net/gre.h>
32 #include <net/icmp.h>
33 #include <net/protocol.h>
34 #include <net/route.h>
35 #include <net/xfrm.h>
36
37 #include "gso.h"
38
/* On kernels older than 2.6.37 that lack the vendor VLAN-bug workaround,
 * TSO on VLAN-tagged packets is known to trigger driver bugs, so it is
 * disabled by default and may be re-enabled at runtime via this module
 * parameter.  On all other configurations the parameter degenerates to a
 * compile-time constant and TSO is always permitted. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) && \
	!defined(HAVE_VLAN_BUG_WORKAROUND)
#include <linux/module.h>

/* 0 = TSO disabled for VLAN packets (safe default); nonzero enables it. */
static int vlan_tso __read_mostly;
module_param(vlan_tso, int, 0644);
MODULE_PARM_DESC(vlan_tso, "Enable TSO for VLAN packets");
#else
#define vlan_tso true
#endif
49
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
/* Returns true if 'dev' can be trusted to transmit VLAN-tagged skbs via
 * hardware tag insertion.  Without HAVE_VLAN_BUG_WORKAROUND there is no
 * reliable way to query the driver, so assume it is buggy and force the
 * caller to insert the tag in software. */
static bool dev_supports_vlan_tx(struct net_device *dev)
{
#if defined(HAVE_VLAN_BUG_WORKAROUND)
	return dev->features & NETIF_F_HW_VLAN_TX;
#else
	/* Assume that the driver is buggy. */
	return false;
#endif
}
60
/* Compatibility wrapper for dev_queue_xmit() on kernels < 2.6.37.  If the
 * device cannot be trusted with hardware VLAN transmit, the VLAN tag is
 * inserted into the packet data in software and, when segmentation is
 * still required after that, GSO is performed here before each segment is
 * handed to the real dev_queue_xmit().
 *
 * Consumes 'skb'.  Returns a dev_queue_xmit() return code, or a negative
 * errno if tag insertion or segmentation fails. */
int rpl_dev_queue_xmit(struct sk_buff *skb)
{
#undef dev_queue_xmit	/* below, call the kernel's function, not this wrapper */
	int err = -ENOMEM;

	if (vlan_tx_tag_present(skb) && !dev_supports_vlan_tx(skb->dev)) {
		int features;	/* NOTE(review): netif_skb_features() returns
				 * netdev_features_t on newer kernels; an int
				 * could truncate high feature bits -- confirm
				 * against the compat definition in gso.h. */

		features = netif_skb_features(skb);

		/* Unless explicitly enabled, do not offload segmentation of
		 * VLAN packets to a driver we just declared untrustworthy. */
		if (!vlan_tso)
			features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
				      NETIF_F_UFO | NETIF_F_FSO);

		/* Push the tag into the packet data.  __vlan_put_tag() frees
		 * the skb and returns NULL on failure. */
		skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
		if (unlikely(!skb))
			return err;
		vlan_set_tci(skb, 0);

		if (netif_needs_gso(skb, features)) {
			struct sk_buff *nskb;

			nskb = skb_gso_segment(skb, features);
			if (!nskb) {
				/* NULL means no segmentation was needed after
				 * all; transmit the original skb, unsharing it
				 * first if it is cloned. */
				if (unlikely(skb_cloned(skb) &&
					     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
					goto drop;

				skb_shinfo(skb)->gso_type &= ~SKB_GSO_DODGY;
				goto xmit;
			}

			if (IS_ERR(nskb)) {
				err = PTR_ERR(nskb);
				goto drop;
			}
			consume_skb(skb);
			skb = nskb;

			/* Transmit each segment individually; only the last
			 * segment's return code is reported to the caller. */
			do {
				nskb = skb->next;
				skb->next = NULL;
				err = dev_queue_xmit(skb);
				skb = nskb;
			} while (skb);

			return err;
		}
	}
xmit:
	return dev_queue_xmit(skb);

drop:
	kfree_skb(skb);
	return err;
}
#endif /* kernel version < 2.6.37 */
118
119 static __be16 __skb_network_protocol(struct sk_buff *skb)
120 {
121 __be16 type = skb->protocol;
122 int vlan_depth = ETH_HLEN;
123
124 while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
125 struct vlan_hdr *vh;
126
127 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
128 return 0;
129
130 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
131 type = vh->h_vlan_encapsulated_proto;
132 vlan_depth += VLAN_HLEN;
133 }
134
135 return type;
136 }
137
138 static struct sk_buff *tnl_skb_gso_segment(struct sk_buff *skb,
139 netdev_features_t features,
140 bool tx_path)
141 {
142 struct iphdr *iph = ip_hdr(skb);
143 int pkt_hlen = skb_inner_network_offset(skb); /* inner l2 + tunnel hdr. */
144 int mac_offset = skb_inner_mac_offset(skb);
145 struct sk_buff *skb1 = skb;
146 struct sk_buff *segs;
147 __be16 proto = skb->protocol;
148 char cb[sizeof(skb->cb)];
149
150 /* setup whole inner packet to get protocol. */
151 __skb_pull(skb, mac_offset);
152 skb->protocol = __skb_network_protocol(skb);
153
154 /* setup l3 packet to gso, to get around segmentation bug on older kernel.*/
155 __skb_pull(skb, (pkt_hlen - mac_offset));
156 skb_reset_mac_header(skb);
157 skb_reset_network_header(skb);
158 skb_reset_transport_header(skb);
159
160 /* From 3.9 kernel skb->cb is used by skb gso. Therefore
161 * make copy of it to restore it back. */
162 memcpy(cb, skb->cb, sizeof(cb));
163
164 segs = __skb_gso_segment(skb, 0, tx_path);
165 if (!segs || IS_ERR(segs))
166 goto free;
167
168 skb = segs;
169 while (skb) {
170 __skb_push(skb, pkt_hlen);
171 skb_reset_mac_header(skb);
172 skb_reset_network_header(skb);
173 skb_set_transport_header(skb, sizeof(struct iphdr));
174 skb->mac_len = 0;
175
176 memcpy(ip_hdr(skb), iph, pkt_hlen);
177 memcpy(skb->cb, cb, sizeof(cb));
178 if (OVS_GSO_CB(skb)->fix_segment)
179 OVS_GSO_CB(skb)->fix_segment(skb);
180
181 skb->protocol = proto;
182 skb = skb->next;
183 }
184 free:
185 consume_skb(skb1);
186 return segs;
187 }
188
/* Compatibility replacement for ip_local_out() that performs tunnel GSO
 * in software when the running kernel cannot.  For GSO packets the outer
 * IP ID is captured before segmentation and stamped, incrementing, onto
 * each resulting segment.
 *
 * Consumes 'skb'.  Returns NETDEV_TX_OK on success, otherwise the last
 * failing ip_local_out() return code. */
int rpl_ip_local_out(struct sk_buff *skb)
{
	int ret = NETDEV_TX_OK;
	int id = -1;	/* outer IP ID to stamp on segments; -1 = not GSO */

	if (skb_is_gso(skb)) {
		struct iphdr *iph;

		iph = ip_hdr(skb);
		id = ntohs(iph->id);
		skb = tnl_skb_gso_segment(skb, 0, false);
		if (!skb || IS_ERR(skb))
			return 0;	/* segmentation failed; nothing left to send */
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int err;

		/* Resolve the pending partial checksum in software. */
		err = skb_checksum_help(skb);
		if (unlikely(err))
			return 0;
	}

	/* Transmit each segment (or the lone packet) individually. */
	while (skb) {
		struct sk_buff *next_skb = skb->next;
		struct iphdr *iph;
		int err;

		skb->next = NULL;

		iph = ip_hdr(skb);
		if (id >= 0)
			iph->id = htons(id++);

		/* Clear per-packet IP control-block state before handing the
		 * skb back to the stack. */
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

#undef ip_local_out	/* call the kernel's ip_local_out(), not this wrapper */
		err = ip_local_out(skb);
		if (unlikely(net_xmit_eval(err)))
			ret = err;

		skb = next_skb;
	}
	return ret;
}