]> git.proxmox.com Git - mirror_ovs.git/blob - datapath/vport.c
tests: Fix typo in comment.
[mirror_ovs.git] / datapath / vport.c
1 /*
2 * Copyright (c) 2007-2015 Nicira, Inc.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA
17 */
18
19 #include <linux/etherdevice.h>
20 #include <linux/if.h>
21 #include <linux/if_vlan.h>
22 #include <linux/jhash.h>
23 #include <linux/kernel.h>
24 #include <linux/list.h>
25 #include <linux/mutex.h>
26 #include <linux/percpu.h>
27 #include <linux/rcupdate.h>
28 #include <linux/rtnetlink.h>
29 #include <linux/compat.h>
30 #include <linux/module.h>
31 #include <linux/if_link.h>
32 #include <net/net_namespace.h>
33 #include <net/lisp.h>
34 #include <net/gre.h>
35 #include <net/geneve.h>
36 #include <net/vxlan.h>
37 #include <net/stt.h>
38
39 #include "datapath.h"
40 #include "gso.h"
41 #include "vport.h"
42 #include "vport-internal_dev.h"
43
44 static LIST_HEAD(vport_ops_list);
45
46 /* Protected by RCU read lock for reading, ovs_mutex for writing. */
47 static struct hlist_head *dev_table;
48 #define VPORT_HASH_BUCKETS 1024
49
50 /**
51 * ovs_vport_init - initialize vport subsystem
52 *
53 * Called at module load time to initialize the vport subsystem.
54 */
55 int ovs_vport_init(void)
56 {
57 int err;
58
59 dev_table = kzalloc(VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
60 GFP_KERNEL);
61 if (!dev_table)
62 return -ENOMEM;
63
64 err = lisp_init_module();
65 if (err)
66 goto err_lisp;
67 err = ipgre_init();
68 if (err)
69 goto err_gre;
70 err = geneve_init_module();
71 if (err)
72 goto err_geneve;
73
74 err = vxlan_init_module();
75 if (err)
76 goto err_vxlan;
77 err = ovs_stt_init_module();
78 if (err)
79 goto err_stt;
80 return 0;
81
82 err_stt:
83 vxlan_cleanup_module();
84 err_vxlan:
85 geneve_cleanup_module();
86 err_geneve:
87 ipgre_fini();
88 err_gre:
89 lisp_cleanup_module();
90 err_lisp:
91 kfree(dev_table);
92 return err;
93 }
94
/**
 * ovs_vport_exit - shutdown vport subsystem
 *
 * Called at module exit time to shutdown the vport subsystem.
 * Tears the tunnel compatibility modules down in the reverse order of
 * ovs_vport_init() and frees the port-name hash table.
 */
void ovs_vport_exit(void)
{
	ovs_stt_cleanup_module();
	vxlan_cleanup_module();
	geneve_cleanup_module();
	ipgre_fini();
	lisp_cleanup_module();
	kfree(dev_table);
}
109
110 static struct hlist_head *hash_bucket(const struct net *net, const char *name)
111 {
112 unsigned int hash = jhash(name, strlen(name), (unsigned long) net);
113 return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
114 }
115
116 int __ovs_vport_ops_register(struct vport_ops *ops)
117 {
118 int err = -EEXIST;
119 struct vport_ops *o;
120
121 ovs_lock();
122 list_for_each_entry(o, &vport_ops_list, list)
123 if (ops->type == o->type)
124 goto errout;
125
126 list_add_tail(&ops->list, &vport_ops_list);
127 err = 0;
128 errout:
129 ovs_unlock();
130 return err;
131 }
132 EXPORT_SYMBOL_GPL(__ovs_vport_ops_register);
133
/* Remove a vport type previously added with __ovs_vport_ops_register().
 * Takes ovs_mutex to serialize against concurrent walks of
 * vport_ops_list.
 */
void ovs_vport_ops_unregister(struct vport_ops *ops)
{
	ovs_lock();
	list_del(&ops->list);
	ovs_unlock();
}
EXPORT_SYMBOL_GPL(ovs_vport_ops_unregister);
141
142 /**
143 * ovs_vport_locate - find a port that has already been created
144 *
145 * @name: name of port to find
146 *
147 * Must be called with ovs or RCU read lock.
148 */
149 struct vport *ovs_vport_locate(const struct net *net, const char *name)
150 {
151 struct hlist_head *bucket = hash_bucket(net, name);
152 struct vport *vport;
153
154 hlist_for_each_entry_rcu(vport, bucket, hash_node)
155 if (!strcmp(name, ovs_vport_name(vport)) &&
156 net_eq(ovs_dp_get_net(vport->dp), net))
157 return vport;
158
159 return NULL;
160 }
161
162 /**
163 * ovs_vport_alloc - allocate and initialize new vport
164 *
165 * @priv_size: Size of private data area to allocate.
166 * @ops: vport device ops
167 *
168 * Allocate and initialize a new vport defined by @ops. The vport will contain
169 * a private data area of size @priv_size that can be accessed using
170 * vport_priv(). vports that are no longer needed should be released with
171 * vport_free().
172 */
173 struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
174 const struct vport_parms *parms)
175 {
176 struct vport *vport;
177 size_t alloc_size;
178
179 alloc_size = sizeof(struct vport);
180 if (priv_size) {
181 alloc_size = ALIGN(alloc_size, VPORT_ALIGN);
182 alloc_size += priv_size;
183 }
184
185 vport = kzalloc(alloc_size, GFP_KERNEL);
186 if (!vport)
187 return ERR_PTR(-ENOMEM);
188
189 vport->dp = parms->dp;
190 vport->port_no = parms->port_no;
191 vport->ops = ops;
192 INIT_HLIST_NODE(&vport->dp_hash_node);
193
194 if (ovs_vport_set_upcall_portids(vport, parms->upcall_portids)) {
195 kfree(vport);
196 return ERR_PTR(-EINVAL);
197 }
198
199 return vport;
200 }
201 EXPORT_SYMBOL_GPL(ovs_vport_alloc);
202
/**
 * ovs_vport_free - uninitialize and free vport
 *
 * @vport: vport to free
 *
 * Frees a vport allocated with vport_alloc() when it is no longer needed.
 *
 * The caller must ensure that an RCU grace period has passed since the last
 * time @vport was in a datapath.
 */
void ovs_vport_free(struct vport *vport)
{
	/* vport is freed from an RCU callback or an error path, therefore
	 * no readers remain and it is safe to use a raw dereference.
	 */
	kfree(rcu_dereference_raw(vport->upcall_portids));
	kfree(vport);
}
EXPORT_SYMBOL_GPL(ovs_vport_free);
222
223 static struct vport_ops *ovs_vport_lookup(const struct vport_parms *parms)
224 {
225 struct vport_ops *ops;
226
227 list_for_each_entry(ops, &vport_ops_list, list)
228 if (ops->type == parms->type)
229 return ops;
230
231 return NULL;
232 }
233
/**
 * ovs_vport_add - add vport device (for kernel callers)
 *
 * @parms: Information about new vport.
 *
 * Creates a new vport with the specified configuration (which is dependent on
 * device type). ovs_mutex must be held.
 *
 * Returns the new vport, or an ERR_PTR(): -EAFNOSUPPORT if no ops for
 * the type exist (even after attempting a module load), -EAGAIN if a
 * module load may have registered the type and the caller should retry,
 * or whatever ops->create() failed with.
 */
struct vport *ovs_vport_add(const struct vport_parms *parms)
{
	struct vport_ops *ops;
	struct vport *vport;

	ops = ovs_vport_lookup(parms);
	if (ops) {
		struct hlist_head *bucket;

		/* Pin the implementing module for the lifetime of the
		 * port; the reference is dropped in ovs_vport_del().
		 */
		if (!try_module_get(ops->owner))
			return ERR_PTR(-EAFNOSUPPORT);

		vport = ops->create(parms);
		if (IS_ERR(vport)) {
			module_put(ops->owner);
			return vport;
		}

		/* Publish the port in the name hash for ovs_vport_locate(). */
		bucket = hash_bucket(ovs_dp_get_net(vport->dp),
				     ovs_vport_name(vport));
		hlist_add_head_rcu(&vport->hash_node, bucket);
		return vport;
	}

	/* Unlock to attempt module load and return -EAGAIN if load
	 * was successful as we need to restart the port addition
	 * workflow.
	 */
	ovs_unlock();
	request_module("vport-type-%d", parms->type);
	ovs_lock();

	if (!ovs_vport_lookup(parms))
		return ERR_PTR(-EAFNOSUPPORT);
	else
		return ERR_PTR(-EAGAIN);
}
279
280 /**
281 * ovs_vport_set_options - modify existing vport device (for kernel callers)
282 *
283 * @vport: vport to modify.
284 * @options: New configuration.
285 *
286 * Modifies an existing device with the specified configuration (which is
287 * dependent on device type). ovs_mutex must be held.
288 */
289 int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
290 {
291 if (!vport->ops->set_options)
292 return -EOPNOTSUPP;
293 return vport->ops->set_options(vport, options);
294 }
295
/**
 * ovs_vport_del - delete existing vport device
 *
 * @vport: vport to delete.
 *
 * Detaches @vport from its datapath and destroys it. It is possible to fail
 * for reasons such as lack of memory. ovs_mutex must be held.
 */
void ovs_vport_del(struct vport *vport)
{
	ASSERT_OVSL();

	/* Unlink from the name hash first so new lookups cannot find the
	 * port; RCU readers may still see it until a grace period elapses.
	 */
	hlist_del_rcu(&vport->hash_node);
	/* Release the module reference taken in ovs_vport_add() while
	 * vport->ops is still safe to read, i.e. before destroy() tears
	 * the port down.
	 */
	module_put(vport->ops->owner);
	vport->ops->destroy(vport);
}
312
313 /**
314 * ovs_vport_get_stats - retrieve device stats
315 *
316 * @vport: vport from which to retrieve the stats
317 * @stats: location to store stats
318 *
319 * Retrieves transmit, receive, and error stats for the given device.
320 *
321 * Must be called with ovs_mutex or rcu_read_lock.
322 */
323 void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
324 {
325 const struct rtnl_link_stats64 *dev_stats;
326 struct rtnl_link_stats64 temp;
327
328 dev_stats = dev_get_stats(vport->dev, &temp);
329 stats->rx_errors = dev_stats->rx_errors;
330 stats->tx_errors = dev_stats->tx_errors;
331 stats->tx_dropped = dev_stats->tx_dropped;
332 stats->rx_dropped = dev_stats->rx_dropped;
333
334 stats->rx_bytes = dev_stats->rx_bytes;
335 stats->rx_packets = dev_stats->rx_packets;
336 stats->tx_bytes = dev_stats->tx_bytes;
337 stats->tx_packets = dev_stats->tx_packets;
338 }
339
340 /**
341 * ovs_vport_get_options - retrieve device options
342 *
343 * @vport: vport from which to retrieve the options.
344 * @skb: sk_buff where options should be appended.
345 *
346 * Retrieves the configuration of the given device, appending an
347 * %OVS_VPORT_ATTR_OPTIONS attribute that in turn contains nested
348 * vport-specific attributes to @skb.
349 *
350 * Returns 0 if successful, -EMSGSIZE if @skb has insufficient room, or another
351 * negative error code if a real error occurred. If an error occurs, @skb is
352 * left unmodified.
353 *
354 * Must be called with ovs_mutex or rcu_read_lock.
355 */
356 int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
357 {
358 struct nlattr *nla;
359 int err;
360
361 if (!vport->ops->get_options)
362 return 0;
363
364 nla = nla_nest_start(skb, OVS_VPORT_ATTR_OPTIONS);
365 if (!nla)
366 return -EMSGSIZE;
367
368 err = vport->ops->get_options(vport, skb);
369 if (err) {
370 nla_nest_cancel(skb, nla);
371 return err;
372 }
373
374 nla_nest_end(skb, nla);
375 return 0;
376 }
377
378 static void vport_portids_destroy_rcu_cb(struct rcu_head *rcu)
379 {
380 struct vport_portids *ids = container_of(rcu, struct vport_portids,
381 rcu);
382
383 kfree(ids);
384 }
385
386 /**
387 * ovs_vport_set_upcall_portids - set upcall portids of @vport.
388 *
389 * @vport: vport to modify.
390 * @ids: new configuration, an array of port ids.
391 *
392 * Sets the vport's upcall_portids to @ids.
393 *
394 * Returns 0 if successful, -EINVAL if @ids is zero length or cannot be parsed
395 * as an array of U32.
396 *
397 * Must be called with ovs_mutex.
398 */
399 int ovs_vport_set_upcall_portids(struct vport *vport, const struct nlattr *ids)
400 {
401 struct vport_portids *old, *vport_portids;
402
403 if (!nla_len(ids) || nla_len(ids) % sizeof(u32))
404 return -EINVAL;
405
406 old = ovsl_dereference(vport->upcall_portids);
407
408 vport_portids = kmalloc(sizeof(*vport_portids) + nla_len(ids),
409 GFP_KERNEL);
410 if (!vport_portids)
411 return -ENOMEM;
412
413 vport_portids->n_ids = nla_len(ids) / sizeof(u32);
414 vport_portids->rn_ids = reciprocal_value(vport_portids->n_ids);
415 nla_memcpy(vport_portids->ids, ids, nla_len(ids));
416
417 rcu_assign_pointer(vport->upcall_portids, vport_portids);
418
419 if (old)
420 call_rcu(&old->rcu, vport_portids_destroy_rcu_cb);
421 return 0;
422 }
423
424 /**
425 * ovs_vport_get_upcall_portids - get the upcall_portids of @vport.
426 *
427 * @vport: vport from which to retrieve the portids.
428 * @skb: sk_buff where portids should be appended.
429 *
430 * Retrieves the configuration of the given vport, appending the
431 * %OVS_VPORT_ATTR_UPCALL_PID attribute which is the array of upcall
432 * portids to @skb.
433 *
434 * Returns 0 if successful, -EMSGSIZE if @skb has insufficient room.
435 * If an error occurs, @skb is left unmodified. Must be called with
436 * ovs_mutex or rcu_read_lock.
437 */
438 int ovs_vport_get_upcall_portids(const struct vport *vport,
439 struct sk_buff *skb)
440 {
441 struct vport_portids *ids;
442
443 ids = rcu_dereference_ovsl(vport->upcall_portids);
444
445 if (vport->dp->user_features & OVS_DP_F_VPORT_PIDS)
446 return nla_put(skb, OVS_VPORT_ATTR_UPCALL_PID,
447 ids->n_ids * sizeof(u32), (void *)ids->ids);
448 else
449 return nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, ids->ids[0]);
450 }
451
452 /**
453 * ovs_vport_find_upcall_portid - find the upcall portid to send upcall.
454 *
455 * @vport: vport from which the missed packet is received.
456 * @skb: skb that the missed packet was received.
457 *
458 * Uses the skb_get_hash() to select the upcall portid to send the
459 * upcall.
460 *
461 * Returns the portid of the target socket. Must be called with rcu_read_lock.
462 */
463 u32 ovs_vport_find_upcall_portid(const struct vport *vport, struct sk_buff *skb)
464 {
465 struct vport_portids *ids;
466 u32 ids_index;
467 u32 hash;
468
469 ids = rcu_dereference(vport->upcall_portids);
470
471 if (ids->n_ids == 1 && ids->ids[0] == 0)
472 return 0;
473
474 hash = skb_get_hash(skb);
475 ids_index = hash - ids->n_ids * reciprocal_divide(hash, ids->rn_ids);
476 return ids->ids[ids_index];
477 }
478
/**
 * ovs_vport_receive - pass up received packet to the datapath for processing
 *
 * @vport: vport that received the packet
 * @skb: skb that was received
 * @tun_info: tunnel metadata (if any) that carried the packet
 *
 * Must be called with rcu_read_lock. The packet cannot be shared and
 * skb->data should point to the Ethernet header.
 */
int ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
		      const struct ip_tunnel_info *tun_info)
{
	struct sw_flow_key key;
	int error;

	OVS_CB(skb)->input_vport = vport;
	OVS_CB(skb)->mru = 0;
	if (unlikely(dev_net(skb->dev) != ovs_dp_get_net(vport->dp))) {
		u32 mark;

		/* The packet crossed a namespace boundary: scrub its
		 * namespace-local state but keep skb->mark, and drop the
		 * tunnel metadata, which belongs to the other namespace.
		 */
		mark = skb->mark;
		skb_scrub_packet(skb, true);
		skb->mark = mark;
		tun_info = NULL;
	}

	ovs_skb_init_inner_protocol(skb);
	skb_clear_ovs_gso_cb(skb);
	/* Extract flow from 'skb' into 'key'. */
	error = ovs_flow_key_extract(tun_info, skb, &key);
	if (unlikely(error)) {
		/* The skb is consumed on failure as well as on success. */
		kfree_skb(skb);
		return error;
	}
	ovs_dp_process_packet(skb, &key);
	return 0;
}
EXPORT_SYMBOL_GPL(ovs_vport_receive);
518
519 static void free_vport_rcu(struct rcu_head *rcu)
520 {
521 struct vport *vport = container_of(rcu, struct vport, rcu);
522
523 ovs_vport_free(vport);
524 }
525
526 void ovs_vport_deferred_free(struct vport *vport)
527 {
528 if (!vport)
529 return;
530
531 call_rcu(&vport->rcu, free_vport_rcu);
532 }
533 EXPORT_SYMBOL_GPL(ovs_vport_deferred_free);
534
/**
 * ovs_tunnel_get_egress_info - build egress tunnel metadata for an upcall
 *
 * @upcall: upcall info whose egress_tun_info and egress_tun_opts are
 *	filled in.
 * @net: network namespace to perform the route lookup in.
 * @skb: packet carrying tunnel metadata (read via skb_tunnel_info()).
 * @ipproto: IP protocol for the route lookup.
 * @tp_src: transport source port for the egress tunnel key.
 * @tp_dst: transport destination port for the egress tunnel key.
 *
 * Returns 0 on success, -EINVAL if @skb has no tunnel info or the
 * tunnel is not IPv4, or the error from the route lookup.
 */
int ovs_tunnel_get_egress_info(struct dp_upcall_info *upcall,
			       struct net *net,
			       struct sk_buff *skb,
			       u8 ipproto,
			       __be16 tp_src,
			       __be16 tp_dst)
{
	struct ip_tunnel_info *egress_tun_info = upcall->egress_tun_info;
	struct ip_tunnel_info *tun_info = skb_tunnel_info(skb);
	const struct ip_tunnel_key *tun_key;
	u32 skb_mark = skb->mark;
	struct rtable *rt;
	struct flowi4 fl;

	if (unlikely(!tun_info))
		return -EINVAL;
	if (ip_tunnel_info_af(tun_info) != AF_INET)
		return -EINVAL;

	tun_key = &tun_info->key;

	/* Route lookup to get source IP address.
	 * The process may need to be changed if the corresponding process
	 * in vports ops changed.
	 */
	rt = ovs_tunnel_route_lookup(net, tun_key, skb_mark, &fl, ipproto);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	/* Only fl.saddr is needed; release the route immediately. */
	ip_rt_put(rt);

	/* Generate egress_tun_info based on tun_info,
	 * saddr, tp_src and tp_dst
	 */
	ip_tunnel_key_init(&egress_tun_info->key,
			   fl.saddr, tun_key->u.ipv4.dst,
			   tun_key->tos,
			   tun_key->ttl,
			   tp_src, tp_dst,
			   tun_key->tun_id,
			   tun_key->tun_flags);
	egress_tun_info->options_len = tun_info->options_len;
	egress_tun_info->mode = tun_info->mode;
	upcall->egress_tun_opts = ip_tunnel_info_opts(tun_info);
	return 0;
}
EXPORT_SYMBOL_GPL(ovs_tunnel_get_egress_info);
582
583 int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
584 struct dp_upcall_info *upcall)
585 {
586 /* get_egress_tun_info() is only implemented on tunnel ports. */
587 if (unlikely(!vport->ops->get_egress_tun_info))
588 return -EINVAL;
589
590 return vport->ops->get_egress_tun_info(vport, skb, upcall);
591 }
592
593 static unsigned int packet_length(const struct sk_buff *skb)
594 {
595 unsigned int length = skb->len - ETH_HLEN;
596
597 if (skb->protocol == htons(ETH_P_8021Q))
598 length -= VLAN_HLEN;
599
600 return length;
601 }
602
603 void ovs_vport_send(struct vport *vport, struct sk_buff *skb)
604 {
605 int mtu = vport->dev->mtu;
606
607 if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
608 net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
609 vport->dev->name,
610 packet_length(skb), mtu);
611 vport->dev->stats.tx_errors++;
612 goto drop;
613 }
614
615 skb->dev = vport->dev;
616 vport->ops->send(skb);
617 return;
618
619 drop:
620 kfree_skb(skb);
621 }