]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - net/openvswitch/vport-gre.c
Merge tag 'arc-v3.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc
[mirror_ubuntu-bionic-kernel.git] / net / openvswitch / vport-gre.c
1 /*
2 * Copyright (c) 2007-2013 Nicira, Inc.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA
17 */
18
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
21 #include <linux/if.h>
22 #include <linux/skbuff.h>
23 #include <linux/ip.h>
24 #include <linux/if_tunnel.h>
25 #include <linux/if_vlan.h>
26 #include <linux/in.h>
27 #include <linux/in_route.h>
28 #include <linux/inetdevice.h>
29 #include <linux/jhash.h>
30 #include <linux/list.h>
31 #include <linux/kernel.h>
32 #include <linux/workqueue.h>
33 #include <linux/rculist.h>
34 #include <net/route.h>
35 #include <net/xfrm.h>
36
37 #include <net/icmp.h>
38 #include <net/ip.h>
39 #include <net/ip_tunnels.h>
40 #include <net/gre.h>
41 #include <net/net_namespace.h>
42 #include <net/netns/generic.h>
43 #include <net/protocol.h>
44
45 #include "datapath.h"
46 #include "vport.h"
47
/* Returns the least-significant 32 bits of a __be64. */
static __be32 be64_get_low32(__be64 x)
{
#ifdef __BIG_ENDIAN
	/* Host order equals network order: the big-endian value's low
	 * 32 bits are just the truncated low half of x.
	 */
	return (__force __be32)x;
#else
	/* On little-endian hosts the bytes that hold the big-endian
	 * value's low 32 bits sit in the upper half of the host-order
	 * word; shifting them down yields those bytes still in
	 * big-endian order, hence the direct __be32 cast.
	 */
	return (__force __be32)((__force u64)x >> 32);
#endif
}
57
58 static __be16 filter_tnl_flags(__be16 flags)
59 {
60 return flags & (TUNNEL_CSUM | TUNNEL_KEY);
61 }
62
63 static struct sk_buff *__build_header(struct sk_buff *skb,
64 int tunnel_hlen)
65 {
66 const struct ovs_key_ipv4_tunnel *tun_key = OVS_CB(skb)->tun_key;
67 struct tnl_ptk_info tpi;
68
69 skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM));
70 if (IS_ERR(skb))
71 return NULL;
72
73 tpi.flags = filter_tnl_flags(tun_key->tun_flags);
74 tpi.proto = htons(ETH_P_TEB);
75 tpi.key = be64_get_low32(tun_key->tun_id);
76 tpi.seq = 0;
77 gre_build_header(skb, &tpi, tunnel_hlen);
78
79 return skb;
80 }
81
/* Combine the GRE key and sequence number into a 64-bit tunnel ID:
 * the key supplies the low 32 bits and the sequence number the high
 * 32 bits of the big-endian result (the inverse of be64_get_low32()).
 */
static __be64 key_to_tunnel_id(__be32 key, __be32 seq)
{
#ifdef __BIG_ENDIAN
	/* Host order equals network order: place seq in the high half
	 * and key in the low half directly.
	 */
	return (__force __be64)((__force u64)seq << 32 | (__force u32)key);
#else
	/* On little-endian hosts the big-endian low half lives in the
	 * upper bytes of the host word, so key and seq swap positions.
	 */
	return (__force __be64)((__force u64)key << 32 | (__force u32)seq);
#endif
}
90
91 /* Called with rcu_read_lock and BH disabled. */
92 static int gre_rcv(struct sk_buff *skb,
93 const struct tnl_ptk_info *tpi)
94 {
95 struct ovs_key_ipv4_tunnel tun_key;
96 struct ovs_net *ovs_net;
97 struct vport *vport;
98 __be64 key;
99
100 ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
101 vport = rcu_dereference(ovs_net->vport_net.gre_vport);
102 if (unlikely(!vport))
103 return PACKET_REJECT;
104
105 key = key_to_tunnel_id(tpi->key, tpi->seq);
106 ovs_flow_tun_key_init(&tun_key, ip_hdr(skb), key,
107 filter_tnl_flags(tpi->flags));
108
109 ovs_vport_receive(vport, skb, &tun_key);
110 return PACKET_RCVD;
111 }
112
113 /* Called with rcu_read_lock and BH disabled. */
114 static int gre_err(struct sk_buff *skb, u32 info,
115 const struct tnl_ptk_info *tpi)
116 {
117 struct ovs_net *ovs_net;
118 struct vport *vport;
119
120 ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
121 vport = rcu_dereference(ovs_net->vport_net.gre_vport);
122
123 if (unlikely(!vport))
124 return PACKET_REJECT;
125 else
126 return PACKET_RCVD;
127 }
128
/* Encapsulate @skb in GRE over IPv4 according to OVS_CB(skb)->tun_key
 * and transmit it.
 *
 * Returns the number of bytes transmitted on success, a negative errno
 * on failure, or 0 when the packet was consumed and dropped internally
 * (see the __build_header() failure path below).
 */
static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
{
	struct net *net = ovs_dp_get_net(vport->dp);
	struct flowi4 fl;
	struct rtable *rt;
	int min_headroom;
	int tunnel_hlen;
	__be16 df;
	int err;

	/* A tunnel key must have been attached by a set-tunnel action. */
	if (unlikely(!OVS_CB(skb)->tun_key)) {
		err = -EINVAL;
		goto error;
	}

	/* Route lookup */
	memset(&fl, 0, sizeof(fl));
	fl.daddr = OVS_CB(skb)->tun_key->ipv4_dst;
	fl.saddr = OVS_CB(skb)->tun_key->ipv4_src;
	fl.flowi4_tos = RT_TOS(OVS_CB(skb)->tun_key->ipv4_tos);
	fl.flowi4_mark = skb->mark;
	fl.flowi4_proto = IPPROTO_GRE;

	rt = ip_route_output_key(net, &fl);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	tunnel_hlen = ip_gre_calc_hlen(OVS_CB(skb)->tun_key->tun_flags);

	/* Ensure headroom for link-layer, outer IP and GRE headers,
	 * plus a VLAN tag if one is pending out-of-band in the skb.
	 */
	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ tunnel_hlen + sizeof(struct iphdr)
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) +
						16);
		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
					0, GFP_ATOMIC);
		if (unlikely(err))
			goto err_free_rt;
	}

	/* Push any out-of-band VLAN tag into the packet data before
	 * encapsulating.
	 */
	if (vlan_tx_tag_present(skb)) {
		if (unlikely(!__vlan_put_tag(skb,
					     skb->vlan_proto,
					     vlan_tx_tag_get(skb)))) {
			/* NOTE(review): presumably __vlan_put_tag()
			 * consumed the skb on failure — confirm, since
			 * a negative return may make the caller free
			 * the skb as well.
			 */
			err = -ENOMEM;
			goto err_free_rt;
		}
		skb->vlan_tci = 0;
	}

	/* Push Tunnel header. */
	skb = __build_header(skb, tunnel_hlen);
	if (unlikely(!skb)) {
		/* NOTE(review): gre_handle_offloads() (inside
		 * __build_header()) already freed the skb here and its
		 * error code is discarded; err is deliberately 0,
		 * presumably so the caller records a drop without
		 * freeing the skb again — confirm against
		 * ovs_vport_send().
		 */
		err = 0;
		goto err_free_rt;
	}

	df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
		htons(IP_DF) : 0;

	/* Fragmentation policy is driven solely by the DF bit above. */
	skb->ignore_df = 1;

	return iptunnel_xmit(skb->sk, rt, skb, fl.saddr,
			     OVS_CB(skb)->tun_key->ipv4_dst, IPPROTO_GRE,
			     OVS_CB(skb)->tun_key->ipv4_tos,
			     OVS_CB(skb)->tun_key->ipv4_ttl, df, false);
err_free_rt:
	ip_rt_put(rt);
error:
	return err;
}
202
/* Registration record for the kernel's shared GRE demultiplexer.
 * NOTE(review): priority 1 appears intended to order this handler
 * relative to the built-in ip_gre driver — confirm the ordering
 * semantics against gre_cisco_register() in net/ipv4/gre_demux.c.
 */
static struct gre_cisco_protocol gre_protocol = {
	.handler = gre_rcv,
	.err_handler = gre_err,
	.priority = 1,
};
208
209 static int gre_ports;
210 static int gre_init(void)
211 {
212 int err;
213
214 gre_ports++;
215 if (gre_ports > 1)
216 return 0;
217
218 err = gre_cisco_register(&gre_protocol);
219 if (err)
220 pr_warn("cannot register gre protocol handler\n");
221
222 return err;
223 }
224
225 static void gre_exit(void)
226 {
227 gre_ports--;
228 if (gre_ports > 0)
229 return;
230
231 gre_cisco_unregister(&gre_protocol);
232 }
233
/* The port name is stored as the vport's private data at creation. */
static const char *gre_get_name(const struct vport *vport)
{
	const char *name = vport_priv(vport);

	return name;
}
238
239 static struct vport *gre_create(const struct vport_parms *parms)
240 {
241 struct net *net = ovs_dp_get_net(parms->dp);
242 struct ovs_net *ovs_net;
243 struct vport *vport;
244 int err;
245
246 err = gre_init();
247 if (err)
248 return ERR_PTR(err);
249
250 ovs_net = net_generic(net, ovs_net_id);
251 if (ovsl_dereference(ovs_net->vport_net.gre_vport)) {
252 vport = ERR_PTR(-EEXIST);
253 goto error;
254 }
255
256 vport = ovs_vport_alloc(IFNAMSIZ, &ovs_gre_vport_ops, parms);
257 if (IS_ERR(vport))
258 goto error;
259
260 strncpy(vport_priv(vport), parms->name, IFNAMSIZ);
261 rcu_assign_pointer(ovs_net->vport_net.gre_vport, vport);
262 return vport;
263
264 error:
265 gre_exit();
266 return vport;
267 }
268
269 static void gre_tnl_destroy(struct vport *vport)
270 {
271 struct net *net = ovs_dp_get_net(vport->dp);
272 struct ovs_net *ovs_net;
273
274 ovs_net = net_generic(net, ovs_net_id);
275
276 RCU_INIT_POINTER(ovs_net->vport_net.gre_vport, NULL);
277 ovs_vport_deferred_free(vport);
278 gre_exit();
279 }
280
/* Vport operations implementing OVS_VPORT_TYPE_GRE on top of the
 * kernel's GRE demux; gre_create() enforces at most one such vport
 * per net namespace.
 */
const struct vport_ops ovs_gre_vport_ops = {
	.type = OVS_VPORT_TYPE_GRE,
	.create = gre_create,
	.destroy = gre_tnl_destroy,
	.get_name = gre_get_name,
	.send = gre_tnl_send,
};