]> git.proxmox.com Git - mirror_ovs.git/blob - datapath/vport.c
datapath: work around the single GRE receive limitation.
[mirror_ovs.git] / datapath / vport.c
1 /*
2 * Copyright (c) 2007-2015 Nicira, Inc.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA
17 */
18
19 #include <linux/etherdevice.h>
20 #include <linux/if.h>
21 #include <linux/if_vlan.h>
22 #include <linux/jhash.h>
23 #include <linux/kernel.h>
24 #include <linux/list.h>
25 #include <linux/mutex.h>
26 #include <linux/percpu.h>
27 #include <linux/rcupdate.h>
28 #include <linux/rtnetlink.h>
29 #include <linux/compat.h>
30 #include <linux/module.h>
31 #include <linux/if_link.h>
32 #include <net/net_namespace.h>
33 #include <net/lisp.h>
34 #include <net/gre.h>
35 #include <net/geneve.h>
36 #include <net/stt.h>
37 #include <net/vxlan.h>
38
39 #include "datapath.h"
40 #include "gso.h"
41 #include "vport.h"
42 #include "vport-internal_dev.h"
43
44 static LIST_HEAD(vport_ops_list);
45 static bool compat_gre_loaded = false;
46
47 /* Protected by RCU read lock for reading, ovs_mutex for writing. */
48 static struct hlist_head *dev_table;
49 #define VPORT_HASH_BUCKETS 1024
50
51 /**
52 * ovs_vport_init - initialize vport subsystem
53 *
54 * Called at module load time to initialize the vport subsystem.
55 */
56 int ovs_vport_init(void)
57 {
58 int err;
59
60 dev_table = kzalloc(VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
61 GFP_KERNEL);
62 if (!dev_table)
63 return -ENOMEM;
64
65 err = lisp_init_module();
66 if (err)
67 goto err_lisp;
68 err = gre_init();
69 if (err && err != -EEXIST) {
70 goto err_gre;
71 } else {
72 if (err == -EEXIST) {
73 pr_warn("Cannot take GRE protocol rx entry"\
74 "- The GRE/ERSPAN rx feature not supported\n");
75 /* continue GRE tx */
76 }
77
78 err = ipgre_init();
79 if (err && err != -EEXIST)
80 goto err_ipgre;
81 compat_gre_loaded = true;
82 }
83 err = ip6gre_init();
84 if (err)
85 goto err_ip6gre;
86 err = ip6_tunnel_init();
87 if (err)
88 goto err_ip6_tunnel;
89 err = geneve_init_module();
90 if (err)
91 goto err_geneve;
92 err = vxlan_init_module();
93 if (err)
94 goto err_vxlan;
95 err = ovs_stt_init_module();
96 if (err)
97 goto err_stt;
98
99 return 0;
100 ovs_stt_cleanup_module();
101 err_stt:
102 vxlan_cleanup_module();
103 err_vxlan:
104 geneve_cleanup_module();
105 err_geneve:
106 ip6_tunnel_cleanup();
107 err_ip6_tunnel:
108 ip6gre_fini();
109 err_ip6gre:
110 ipgre_fini();
111 err_ipgre:
112 gre_exit();
113 err_gre:
114 lisp_cleanup_module();
115 err_lisp:
116 kfree(dev_table);
117 return err;
118 }
119
/**
 * ovs_vport_exit - shutdown vport subsystem
 *
 * Called at module exit time to shutdown the vport subsystem.
 */
void ovs_vport_exit(void)
{
	/* Only unwind the compat GRE rx/tx hooks if ovs_vport_init()
	 * actually registered them; otherwise the kernel's own GRE module
	 * owns the protocol entry.
	 */
	if (compat_gre_loaded) {
		gre_exit();
		ipgre_fini();
	}
	/* Tear down the remaining tunnel modules in the reverse of the
	 * ovs_vport_init() registration order, then release the name
	 * hash table.
	 */
	ovs_stt_cleanup_module();
	vxlan_cleanup_module();
	geneve_cleanup_module();
	ip6_tunnel_cleanup();
	ip6gre_fini();
	lisp_cleanup_module();
	kfree(dev_table);
}
139
140 static struct hlist_head *hash_bucket(const struct net *net, const char *name)
141 {
142 unsigned int hash = jhash(name, strlen(name), (unsigned long) net);
143 return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
144 }
145
146 int __ovs_vport_ops_register(struct vport_ops *ops)
147 {
148 int err = -EEXIST;
149 struct vport_ops *o;
150
151 ovs_lock();
152 list_for_each_entry(o, &vport_ops_list, list)
153 if (ops->type == o->type)
154 goto errout;
155
156 list_add_tail(&ops->list, &vport_ops_list);
157 err = 0;
158 errout:
159 ovs_unlock();
160 return err;
161 }
162 EXPORT_SYMBOL_GPL(__ovs_vport_ops_register);
163
/**
 * ovs_vport_ops_unregister - remove a previously registered vport type
 *
 * @ops: the ops table to remove from vport_ops_list.
 *
 * Takes ovs_mutex so the list update cannot race with registration or
 * with ovs_vport_lookup().
 */
void ovs_vport_ops_unregister(struct vport_ops *ops)
{
	ovs_lock();
	list_del(&ops->list);
	ovs_unlock();
}
EXPORT_SYMBOL_GPL(ovs_vport_ops_unregister);
171
172 /**
173 * ovs_vport_locate - find a port that has already been created
174 *
175 * @name: name of port to find
176 *
177 * Must be called with ovs or RCU read lock.
178 */
179 struct vport *ovs_vport_locate(const struct net *net, const char *name)
180 {
181 struct hlist_head *bucket = hash_bucket(net, name);
182 struct vport *vport;
183
184 hlist_for_each_entry_rcu(vport, bucket, hash_node)
185 if (!strcmp(name, ovs_vport_name(vport)) &&
186 net_eq(ovs_dp_get_net(vport->dp), net))
187 return vport;
188
189 return NULL;
190 }
191
192 /**
193 * ovs_vport_alloc - allocate and initialize new vport
194 *
195 * @priv_size: Size of private data area to allocate.
196 * @ops: vport device ops
197 *
198 * Allocate and initialize a new vport defined by @ops. The vport will contain
199 * a private data area of size @priv_size that can be accessed using
200 * vport_priv(). vports that are no longer needed should be released with
201 * vport_free().
202 */
203 struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
204 const struct vport_parms *parms)
205 {
206 struct vport *vport;
207 size_t alloc_size;
208
209 alloc_size = sizeof(struct vport);
210 if (priv_size) {
211 alloc_size = ALIGN(alloc_size, VPORT_ALIGN);
212 alloc_size += priv_size;
213 }
214
215 vport = kzalloc(alloc_size, GFP_KERNEL);
216 if (!vport)
217 return ERR_PTR(-ENOMEM);
218
219 vport->dp = parms->dp;
220 vport->port_no = parms->port_no;
221 vport->ops = ops;
222 INIT_HLIST_NODE(&vport->dp_hash_node);
223
224 if (ovs_vport_set_upcall_portids(vport, parms->upcall_portids)) {
225 kfree(vport);
226 return ERR_PTR(-EINVAL);
227 }
228
229 return vport;
230 }
231 EXPORT_SYMBOL_GPL(ovs_vport_alloc);
232
/**
 * ovs_vport_free - uninitialize and free vport
 *
 * @vport: vport to free
 *
 * Frees a vport allocated with vport_alloc() when it is no longer needed.
 *
 * The caller must ensure that an RCU grace period has passed since the last
 * time @vport was in a datapath.
 */
void ovs_vport_free(struct vport *vport)
{
	/* vport is freed from RCU callback or error path, Therefore
	 * it is safe to use raw dereference.
	 */
	kfree(rcu_dereference_raw(vport->upcall_portids));
	kfree(vport);
}
EXPORT_SYMBOL_GPL(ovs_vport_free);
252
253 static struct vport_ops *ovs_vport_lookup(const struct vport_parms *parms)
254 {
255 struct vport_ops *ops;
256
257 list_for_each_entry(ops, &vport_ops_list, list)
258 if (ops->type == parms->type)
259 return ops;
260
261 return NULL;
262 }
263
/**
 * ovs_vport_add - add vport device (for kernel callers)
 *
 * @parms: Information about new vport.
 *
 * Creates a new vport with the specified configuration (which is dependent on
 * device type). ovs_mutex must be held.
 *
 * Returns the new vport, or an ERR_PTR on failure; -EAGAIN means a
 * module for the requested type was just loaded and the caller should
 * restart the port-addition workflow.
 */
struct vport *ovs_vport_add(const struct vport_parms *parms)
{
	struct vport_ops *ops;
	struct vport *vport;

	ops = ovs_vport_lookup(parms);
	if (ops) {
		struct hlist_head *bucket;

		/* Pin the module implementing this vport type for the
		 * port's lifetime; released in ovs_vport_del().
		 */
		if (!try_module_get(ops->owner))
			return ERR_PTR(-EAFNOSUPPORT);

		vport = ops->create(parms);
		if (IS_ERR(vport)) {
			module_put(ops->owner);
			return vport;
		}

		/* Publish the port for RCU name lookups
		 * (ovs_vport_locate()).
		 */
		bucket = hash_bucket(ovs_dp_get_net(vport->dp),
				     ovs_vport_name(vport));
		hlist_add_head_rcu(&vport->hash_node, bucket);
		return vport;
	}

	/* GRE needs the compat rx entry taken at init time; if we could
	 * not take it then, loading a vport module will not help.
	 */
	if (parms->type == OVS_VPORT_TYPE_GRE && !compat_gre_loaded) {
		pr_warn("GRE protocol already loaded!\n");
		return ERR_PTR(-EAFNOSUPPORT);
	}
	/* Unlock to attempt module load and return -EAGAIN if load
	 * was successful as we need to restart the port addition
	 * workflow.
	 */
	ovs_unlock();
	request_module("vport-type-%d", parms->type);
	ovs_lock();

	if (!ovs_vport_lookup(parms))
		return ERR_PTR(-EAFNOSUPPORT);
	else
		return ERR_PTR(-EAGAIN);
}
313
314 /**
315 * ovs_vport_set_options - modify existing vport device (for kernel callers)
316 *
317 * @vport: vport to modify.
318 * @options: New configuration.
319 *
320 * Modifies an existing device with the specified configuration (which is
321 * dependent on device type). ovs_mutex must be held.
322 */
323 int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
324 {
325 if (!vport->ops->set_options)
326 return -EOPNOTSUPP;
327 return vport->ops->set_options(vport, options);
328 }
329
/**
 * ovs_vport_del - delete existing vport device
 *
 * @vport: vport to delete.
 *
 * Detaches @vport from its datapath and destroys it. ovs_mutex must be
 * held.
 */
void ovs_vport_del(struct vport *vport)
{
	ASSERT_OVSL();

	/* Unlink from the name hash first so RCU readers in
	 * ovs_vport_locate() stop finding the port before it is torn
	 * down; then drop the module reference taken in ovs_vport_add()
	 * and let the type-specific destroy free the port.
	 */
	hlist_del_rcu(&vport->hash_node);
	module_put(vport->ops->owner);
	vport->ops->destroy(vport);
}
346
347 /**
348 * ovs_vport_get_stats - retrieve device stats
349 *
350 * @vport: vport from which to retrieve the stats
351 * @stats: location to store stats
352 *
353 * Retrieves transmit, receive, and error stats for the given device.
354 *
355 * Must be called with ovs_mutex or rcu_read_lock.
356 */
357 void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
358 {
359 const struct rtnl_link_stats64 *dev_stats;
360 struct rtnl_link_stats64 temp;
361
362 dev_stats = dev_get_stats(vport->dev, &temp);
363 stats->rx_errors = dev_stats->rx_errors;
364 stats->tx_errors = dev_stats->tx_errors;
365 stats->tx_dropped = dev_stats->tx_dropped;
366 stats->rx_dropped = dev_stats->rx_dropped;
367
368 stats->rx_bytes = dev_stats->rx_bytes;
369 stats->rx_packets = dev_stats->rx_packets;
370 stats->tx_bytes = dev_stats->tx_bytes;
371 stats->tx_packets = dev_stats->tx_packets;
372 }
373
374 /**
375 * ovs_vport_get_options - retrieve device options
376 *
377 * @vport: vport from which to retrieve the options.
378 * @skb: sk_buff where options should be appended.
379 *
380 * Retrieves the configuration of the given device, appending an
381 * %OVS_VPORT_ATTR_OPTIONS attribute that in turn contains nested
382 * vport-specific attributes to @skb.
383 *
384 * Returns 0 if successful, -EMSGSIZE if @skb has insufficient room, or another
385 * negative error code if a real error occurred. If an error occurs, @skb is
386 * left unmodified.
387 *
388 * Must be called with ovs_mutex or rcu_read_lock.
389 */
390 int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
391 {
392 struct nlattr *nla;
393 int err;
394
395 if (!vport->ops->get_options)
396 return 0;
397
398 nla = nla_nest_start(skb, OVS_VPORT_ATTR_OPTIONS);
399 if (!nla)
400 return -EMSGSIZE;
401
402 err = vport->ops->get_options(vport, skb);
403 if (err) {
404 nla_nest_cancel(skb, nla);
405 return err;
406 }
407
408 nla_nest_end(skb, nla);
409 return 0;
410 }
411
/**
 * ovs_vport_set_upcall_portids - set upcall portids of @vport.
 *
 * @vport: vport to modify.
 * @ids: new configuration, an array of port ids.
 *
 * Sets the vport's upcall_portids to @ids.
 *
 * Returns 0 if successful, -EINVAL if @ids is zero length or cannot be parsed
 * as an array of U32, or -ENOMEM on allocation failure.
 *
 * Must be called with ovs_mutex.
 */
int ovs_vport_set_upcall_portids(struct vport *vport, const struct nlattr *ids)
{
	struct vport_portids *old, *vport_portids;

	/* The attribute payload must be a non-empty array of u32s. */
	if (!nla_len(ids) || nla_len(ids) % sizeof(u32))
		return -EINVAL;

	old = ovsl_dereference(vport->upcall_portids);

	/* vport_portids has a flexible ids[] tail; allocate header +
	 * payload in one block.
	 */
	vport_portids = kmalloc(sizeof(*vport_portids) + nla_len(ids),
				GFP_KERNEL);
	if (!vport_portids)
		return -ENOMEM;

	vport_portids->n_ids = nla_len(ids) / sizeof(u32);
	/* Precompute the reciprocal so ovs_vport_find_upcall_portid()
	 * can do hash % n_ids without a divide.
	 */
	vport_portids->rn_ids = reciprocal_value(vport_portids->n_ids);
	nla_memcpy(vport_portids->ids, ids, nla_len(ids));

	/* Publish the fully-initialized array before retiring the old
	 * one; readers under RCU see either the old or the new ids.
	 */
	rcu_assign_pointer(vport->upcall_portids, vport_portids);

	if (old)
		kfree_rcu(old, rcu);
	return 0;
}
449
450 /**
451 * ovs_vport_get_upcall_portids - get the upcall_portids of @vport.
452 *
453 * @vport: vport from which to retrieve the portids.
454 * @skb: sk_buff where portids should be appended.
455 *
456 * Retrieves the configuration of the given vport, appending the
457 * %OVS_VPORT_ATTR_UPCALL_PID attribute which is the array of upcall
458 * portids to @skb.
459 *
460 * Returns 0 if successful, -EMSGSIZE if @skb has insufficient room.
461 * If an error occurs, @skb is left unmodified. Must be called with
462 * ovs_mutex or rcu_read_lock.
463 */
464 int ovs_vport_get_upcall_portids(const struct vport *vport,
465 struct sk_buff *skb)
466 {
467 struct vport_portids *ids;
468
469 ids = rcu_dereference_ovsl(vport->upcall_portids);
470
471 if (vport->dp->user_features & OVS_DP_F_VPORT_PIDS)
472 return nla_put(skb, OVS_VPORT_ATTR_UPCALL_PID,
473 ids->n_ids * sizeof(u32), (void *)ids->ids);
474 else
475 return nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, ids->ids[0]);
476 }
477
/**
 * ovs_vport_find_upcall_portid - find the upcall portid to send upcall.
 *
 * @vport: vport from which the missed packet is received.
 * @skb: skb that the missed packet was received.
 *
 * Uses the skb_get_hash() to select the upcall portid to send the
 * upcall.
 *
 * Returns the portid of the target socket. Must be called with rcu_read_lock.
 */
u32 ovs_vport_find_upcall_portid(const struct vport *vport, struct sk_buff *skb)
{
	struct vport_portids *ids;
	u32 ids_index;
	u32 hash;

	ids = rcu_dereference(vport->upcall_portids);

	/* Fast path: a single portid of 0 — return 0 without hashing
	 * (presumably "no upcall socket"; callers treat portid 0 as such).
	 */
	if (ids->n_ids == 1 && ids->ids[0] == 0)
		return 0;

	hash = skb_get_hash(skb);
	/* ids_index = hash % n_ids, computed via the reciprocal value
	 * precomputed in ovs_vport_set_upcall_portids() to avoid a
	 * hardware divide on the packet path.
	 */
	ids_index = hash - ids->n_ids * reciprocal_divide(hash, ids->rn_ids);
	return ids->ids[ids_index];
}
504
/**
 * ovs_vport_receive - pass up received packet to the datapath for processing
 *
 * @vport: vport that received the packet
 * @skb: skb that was received
 * @tun_info: tunnel (if any) that carried packet
 *
 * Must be called with rcu_read_lock. The packet cannot be shared and
 * skb->data should point to the Ethernet header.
 */
int ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
		      const struct ip_tunnel_info *tun_info)
{
	struct sw_flow_key key;
	int error;

	OVS_CB(skb)->input_vport = vport;
	OVS_CB(skb)->mru = 0;
	OVS_CB(skb)->cutlen = 0;
	/* If the skb crosses into a different net namespace, scrub the
	 * namespace-specific state but keep skb->mark, and drop the
	 * tunnel metadata which is not meaningful in the new namespace.
	 */
	if (unlikely(dev_net(skb->dev) != ovs_dp_get_net(vport->dp))) {
		u32 mark;

		mark = skb->mark;
		skb_scrub_packet(skb, true);
		skb->mark = mark;
		tun_info = NULL;
	}

	ovs_skb_init_inner_protocol(skb);
	skb_clear_ovs_gso_cb(skb);
	/* Extract flow from 'skb' into 'key'. */
	error = ovs_flow_key_extract(tun_info, skb, &key);
	if (unlikely(error)) {
		/* Extraction failed; the packet is consumed either way. */
		kfree_skb(skb);
		return error;
	}
	ovs_dp_process_packet(skb, &key);
	return 0;
}
544
545 static int packet_length(const struct sk_buff *skb,
546 struct net_device *dev)
547 {
548 int length = skb->len - dev->hard_header_len;
549
550 if (!skb_vlan_tag_present(skb) &&
551 eth_type_vlan(skb->protocol))
552 length -= VLAN_HLEN;
553
554 /* Don't subtract for multiple VLAN tags. Most (all?) drivers allow
555 * (ETH_LEN + VLAN_HLEN) in addition to the mtu value, but almost none
556 * account for 802.1ad. e.g. is_skb_forwardable().
557 */
558
559 return length > 0 ? length: 0;
560 }
561
562 void ovs_vport_send(struct vport *vport, struct sk_buff *skb, u8 mac_proto)
563 {
564 int mtu = vport->dev->mtu;
565
566 switch (vport->dev->type) {
567 case ARPHRD_NONE:
568 if (mac_proto == MAC_PROTO_ETHERNET) {
569 skb_reset_network_header(skb);
570 skb_reset_mac_len(skb);
571 skb->protocol = htons(ETH_P_TEB);
572 } else if (mac_proto != MAC_PROTO_NONE) {
573 WARN_ON_ONCE(1);
574 goto drop;
575 }
576 break;
577 case ARPHRD_ETHER:
578 if (mac_proto != MAC_PROTO_ETHERNET)
579 goto drop;
580 break;
581 default:
582 goto drop;
583 }
584
585 if (unlikely(packet_length(skb, vport->dev) > mtu &&
586 !skb_is_gso(skb))) {
587 net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
588 vport->dev->name,
589 packet_length(skb, vport->dev), mtu);
590 vport->dev->stats.tx_errors++;
591 goto drop;
592 }
593
594 skb->dev = vport->dev;
595 vport->ops->send(skb);
596 return;
597
598 drop:
599 kfree_skb(skb);
600 }