]> git.proxmox.com Git - mirror_ovs.git/blob - datapath/vport.c
datapath: Drop support for kernel older than 3.10
[mirror_ovs.git] / datapath / vport.c
1 /*
2 * Copyright (c) 2007-2015 Nicira, Inc.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA
17 */
18
19 #include <linux/etherdevice.h>
20 #include <linux/if.h>
21 #include <linux/if_vlan.h>
22 #include <linux/jhash.h>
23 #include <linux/kernel.h>
24 #include <linux/list.h>
25 #include <linux/mutex.h>
26 #include <linux/percpu.h>
27 #include <linux/rcupdate.h>
28 #include <linux/rtnetlink.h>
29 #include <linux/compat.h>
30 #include <linux/module.h>
31 #include <linux/if_link.h>
32 #include <net/net_namespace.h>
33 #include <net/lisp.h>
34 #include <net/gre.h>
35 #include <net/geneve.h>
36 #include <net/route.h>
37 #include <net/stt.h>
38 #include <net/vxlan.h>
39
40 #include "datapath.h"
41 #include "gso.h"
42 #include "vport.h"
43 #include "vport-internal_dev.h"
44
45 static LIST_HEAD(vport_ops_list);
46
47 /* Protected by RCU read lock for reading, ovs_mutex for writing. */
48 static struct hlist_head *dev_table;
49 #define VPORT_HASH_BUCKETS 1024
50
/**
 *	ovs_vport_init - initialize vport subsystem
 *
 * Called at module load time to initialize the vport subsystem.
 *
 * Allocates the vport name hash table and brings up the compat tunnel
 * implementations (LISP, GRE, Geneve, VXLAN, STT) in order.  On failure,
 * the chained error labels tear down whatever was already initialized,
 * in reverse order, before freeing the table.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int ovs_vport_init(void)
{
	int err;

	dev_table = kzalloc(VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
			    GFP_KERNEL);
	if (!dev_table)
		return -ENOMEM;

	err = lisp_init_module();
	if (err)
		goto err_lisp;
	err = ipgre_init();
	if (err)
		goto err_gre;
	err = geneve_init_module();
	if (err)
		goto err_geneve;

	err = vxlan_init_module();
	if (err)
		goto err_vxlan;
	err = ovs_stt_init_module();
	if (err)
		goto err_stt;
	return 0;

	/* Unwind in the reverse order of initialization. */
err_stt:
	vxlan_cleanup_module();
err_vxlan:
	geneve_cleanup_module();
err_geneve:
	ipgre_fini();
err_gre:
	lisp_cleanup_module();
err_lisp:
	kfree(dev_table);
	return err;
}
95
/**
 *	ovs_vport_exit - shutdown vport subsystem
 *
 * Called at module exit time to shutdown the vport subsystem.
 *
 * Shuts down the compat tunnel implementations in the reverse order of
 * ovs_vport_init() and frees the vport name hash table.
 * NOTE(review): dev_table entries are not walked here; presumably all
 * vports have already been destroyed by module exit -- confirm against
 * datapath teardown.
 */
void ovs_vport_exit(void)
{
	ovs_stt_cleanup_module();
	vxlan_cleanup_module();
	geneve_cleanup_module();
	ipgre_fini();
	lisp_cleanup_module();
	kfree(dev_table);
}
110
111 static struct hlist_head *hash_bucket(const struct net *net, const char *name)
112 {
113 unsigned int hash = jhash(name, strlen(name), (unsigned long) net);
114 return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
115 }
116
/* Register a vport class so ovs_vport_add() can instantiate ports of its
 * type.  Takes ovs_mutex to protect vport_ops_list.
 *
 * Returns 0 on success or -EEXIST if a class with the same type is
 * already registered.
 */
int __ovs_vport_ops_register(struct vport_ops *ops)
{
	int err = -EEXIST;
	struct vport_ops *o;

	ovs_lock();
	list_for_each_entry(o, &vport_ops_list, list)
		if (ops->type == o->type)
			goto errout;

	list_add_tail(&ops->list, &vport_ops_list);
	err = 0;
errout:
	ovs_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(__ovs_vport_ops_register);
134
/* Remove a vport class from vport_ops_list under ovs_mutex.
 * NOTE(review): nothing here checks for live ports of this type;
 * presumably the caller guarantees none remain -- confirm.
 */
void ovs_vport_ops_unregister(struct vport_ops *ops)
{
	ovs_lock();
	list_del(&ops->list);
	ovs_unlock();
}
EXPORT_SYMBOL_GPL(ovs_vport_ops_unregister);
142
/**
 *	ovs_vport_locate - find a port that has already been created
 *
 * @net: network namespace the port must belong to
 * @name: name of port to find
 *
 * Returns the vport whose name and namespace both match, or NULL if
 * there is none.
 *
 * Must be called with ovs or RCU read lock.
 */
struct vport *ovs_vport_locate(const struct net *net, const char *name)
{
	struct hlist_head *bucket = hash_bucket(net, name);
	struct vport *vport;

	hlist_for_each_entry_rcu(vport, bucket, hash_node)
		if (!strcmp(name, ovs_vport_name(vport)) &&
		    net_eq(ovs_dp_get_net(vport->dp), net))
			return vport;

	return NULL;
}
162
/**
 *	ovs_vport_alloc - allocate and initialize new vport
 *
 * @priv_size: Size of private data area to allocate.
 * @ops: vport device ops
 * @parms: information about the new vport; its dp, port_no and
 *	upcall_portids are copied into the new vport.
 *
 * Allocate and initialize a new vport defined by @ops. The vport will contain
 * a private data area of size @priv_size that can be accessed using
 * vport_priv(). vports that are no longer needed should be released with
 * vport_free().
 *
 * Returns the new vport or an ERR_PTR (-ENOMEM on allocation failure,
 * -EINVAL if @parms->upcall_portids cannot be parsed).
 */
struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
			      const struct vport_parms *parms)
{
	struct vport *vport;
	size_t alloc_size;

	alloc_size = sizeof(struct vport);
	if (priv_size) {
		/* Align the private area so vport_priv() hands out a
		 * suitably aligned pointer.
		 */
		alloc_size = ALIGN(alloc_size, VPORT_ALIGN);
		alloc_size += priv_size;
	}

	vport = kzalloc(alloc_size, GFP_KERNEL);
	if (!vport)
		return ERR_PTR(-ENOMEM);

	vport->dp = parms->dp;
	vport->port_no = parms->port_no;
	vport->ops = ops;
	INIT_HLIST_NODE(&vport->dp_hash_node);

	if (ovs_vport_set_upcall_portids(vport, parms->upcall_portids)) {
		kfree(vport);
		return ERR_PTR(-EINVAL);
	}

	return vport;
}
EXPORT_SYMBOL_GPL(ovs_vport_alloc);
203
/**
 *	ovs_vport_free - uninitialize and free vport
 *
 * @vport: vport to free
 *
 * Frees a vport allocated with vport_alloc() when it is no longer needed.
 * Also frees the upcall portid array attached to the vport.
 *
 * The caller must ensure that an RCU grace period has passed since the last
 * time @vport was in a datapath.
 */
void ovs_vport_free(struct vport *vport)
{
	/* vport is freed from an RCU callback or an error path, therefore
	 * it is safe to use raw dereference.
	 */
	kfree(rcu_dereference_raw(vport->upcall_portids));
	kfree(vport);
}
EXPORT_SYMBOL_GPL(ovs_vport_free);
223
224 static struct vport_ops *ovs_vport_lookup(const struct vport_parms *parms)
225 {
226 struct vport_ops *ops;
227
228 list_for_each_entry(ops, &vport_ops_list, list)
229 if (ops->type == parms->type)
230 return ops;
231
232 return NULL;
233 }
234
/**
 *	ovs_vport_add - add vport device (for kernel callers)
 *
 * @parms: Information about new vport.
 *
 * Creates a new vport with the specified configuration (which is dependent on
 * device type). ovs_mutex must be held.
 *
 * Returns the new vport or an ERR_PTR.  -EAGAIN means the required vport
 * module was just loaded and the caller must restart the operation;
 * -EAFNOSUPPORT means no implementation for this type is available.
 */
struct vport *ovs_vport_add(const struct vport_parms *parms)
{
	struct vport_ops *ops;
	struct vport *vport;

	ops = ovs_vport_lookup(parms);
	if (ops) {
		struct hlist_head *bucket;

		/* Pin the implementing module for the life of the port;
		 * the reference is dropped in ovs_vport_del() or below on
		 * create() failure.
		 */
		if (!try_module_get(ops->owner))
			return ERR_PTR(-EAFNOSUPPORT);

		vport = ops->create(parms);
		if (IS_ERR(vport)) {
			module_put(ops->owner);
			return vport;
		}

		/* Publish the port in the name hash for ovs_vport_locate(). */
		bucket = hash_bucket(ovs_dp_get_net(vport->dp),
				     ovs_vport_name(vport));
		hlist_add_head_rcu(&vport->hash_node, bucket);
		return vport;
	}

	/* Unlock to attempt module load and return -EAGAIN if load
	 * was successful as we need to restart the port addition
	 * workflow.
	 */
	ovs_unlock();
	request_module("vport-type-%d", parms->type);
	ovs_lock();

	if (!ovs_vport_lookup(parms))
		return ERR_PTR(-EAFNOSUPPORT);
	else
		return ERR_PTR(-EAGAIN);
}
280
281 /**
282 * ovs_vport_set_options - modify existing vport device (for kernel callers)
283 *
284 * @vport: vport to modify.
285 * @options: New configuration.
286 *
287 * Modifies an existing device with the specified configuration (which is
288 * dependent on device type). ovs_mutex must be held.
289 */
290 int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
291 {
292 if (!vport->ops->set_options)
293 return -EOPNOTSUPP;
294 return vport->ops->set_options(vport, options);
295 }
296
/**
 *	ovs_vport_del - delete existing vport device
 *
 * @vport: vport to delete.
 *
 * Detaches @vport from its datapath and destroys it. ovs_mutex must be
 * held.
 */
void ovs_vport_del(struct vport *vport)
{
	ASSERT_OVSL();

	/* Unlink from the name hash; concurrent RCU readers may still see
	 * the port until a grace period elapses.  NOTE(review): destroy()
	 * implementations are expected to defer the actual free past that
	 * grace period (see ovs_vport_free()'s contract) -- confirm.
	 */
	hlist_del_rcu(&vport->hash_node);
	/* Release the module reference taken in ovs_vport_add(). */
	module_put(vport->ops->owner);
	vport->ops->destroy(vport);
}
313
314 /**
315 * ovs_vport_get_stats - retrieve device stats
316 *
317 * @vport: vport from which to retrieve the stats
318 * @stats: location to store stats
319 *
320 * Retrieves transmit, receive, and error stats for the given device.
321 *
322 * Must be called with ovs_mutex or rcu_read_lock.
323 */
324 void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
325 {
326 const struct rtnl_link_stats64 *dev_stats;
327 struct rtnl_link_stats64 temp;
328
329 dev_stats = dev_get_stats(vport->dev, &temp);
330 stats->rx_errors = dev_stats->rx_errors;
331 stats->tx_errors = dev_stats->tx_errors;
332 stats->tx_dropped = dev_stats->tx_dropped;
333 stats->rx_dropped = dev_stats->rx_dropped;
334
335 stats->rx_bytes = dev_stats->rx_bytes;
336 stats->rx_packets = dev_stats->rx_packets;
337 stats->tx_bytes = dev_stats->tx_bytes;
338 stats->tx_packets = dev_stats->tx_packets;
339 }
340
341 /**
342 * ovs_vport_get_options - retrieve device options
343 *
344 * @vport: vport from which to retrieve the options.
345 * @skb: sk_buff where options should be appended.
346 *
347 * Retrieves the configuration of the given device, appending an
348 * %OVS_VPORT_ATTR_OPTIONS attribute that in turn contains nested
349 * vport-specific attributes to @skb.
350 *
351 * Returns 0 if successful, -EMSGSIZE if @skb has insufficient room, or another
352 * negative error code if a real error occurred. If an error occurs, @skb is
353 * left unmodified.
354 *
355 * Must be called with ovs_mutex or rcu_read_lock.
356 */
357 int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
358 {
359 struct nlattr *nla;
360 int err;
361
362 if (!vport->ops->get_options)
363 return 0;
364
365 nla = nla_nest_start(skb, OVS_VPORT_ATTR_OPTIONS);
366 if (!nla)
367 return -EMSGSIZE;
368
369 err = vport->ops->get_options(vport, skb);
370 if (err) {
371 nla_nest_cancel(skb, nla);
372 return err;
373 }
374
375 nla_nest_end(skb, nla);
376 return 0;
377 }
378
/**
 *	ovs_vport_set_upcall_portids - set upcall portids of @vport.
 *
 * @vport: vport to modify.
 * @ids: new configuration, an array of port ids.
 *
 * Sets the vport's upcall_portids to @ids.
 *
 * Returns 0 if successful, -EINVAL if @ids is zero length or cannot be parsed
 * as an array of U32, -ENOMEM if the replacement array cannot be allocated.
 *
 * Must be called with ovs_mutex.
 */
int ovs_vport_set_upcall_portids(struct vport *vport, const struct nlattr *ids)
{
	struct vport_portids *old, *vport_portids;

	/* The attribute payload must be a non-empty array of u32 portids. */
	if (!nla_len(ids) || nla_len(ids) % sizeof(u32))
		return -EINVAL;

	old = ovsl_dereference(vport->upcall_portids);

	/* The id array lives at the tail of struct vport_portids. */
	vport_portids = kmalloc(sizeof(*vport_portids) + nla_len(ids),
				GFP_KERNEL);
	if (!vport_portids)
		return -ENOMEM;

	vport_portids->n_ids = nla_len(ids) / sizeof(u32);
	/* Precompute the reciprocal so ovs_vport_find_upcall_portid() can
	 * pick an id per packet without dividing.
	 */
	vport_portids->rn_ids = reciprocal_value(vport_portids->n_ids);
	nla_memcpy(vport_portids->ids, ids, nla_len(ids));

	/* Publish the fully initialized array before retiring the old one;
	 * RCU readers may still hold references to it.
	 */
	rcu_assign_pointer(vport->upcall_portids, vport_portids);

	if (old)
		kfree_rcu(old, rcu);
	return 0;
}
416
/**
 *	ovs_vport_get_upcall_portids - get the upcall_portids of @vport.
 *
 * @vport: vport from which to retrieve the portids.
 * @skb: sk_buff where portids should be appended.
 *
 * Retrieves the configuration of the given vport, appending the
 * %OVS_VPORT_ATTR_UPCALL_PID attribute which is the array of upcall
 * portids to @skb.
 *
 * Returns 0 if successful, -EMSGSIZE if @skb has insufficient room.
 * If an error occurs, @skb is left unmodified. Must be called with
 * ovs_mutex or rcu_read_lock.
 */
int ovs_vport_get_upcall_portids(const struct vport *vport,
				 struct sk_buff *skb)
{
	struct vport_portids *ids;

	ids = rcu_dereference_ovsl(vport->upcall_portids);

	/* Datapaths with OVS_DP_F_VPORT_PIDS get the full portid array;
	 * otherwise only the first id is reported (presumably for
	 * userspace that predates per-vport pid arrays -- confirm).
	 */
	if (vport->dp->user_features & OVS_DP_F_VPORT_PIDS)
		return nla_put(skb, OVS_VPORT_ATTR_UPCALL_PID,
			       ids->n_ids * sizeof(u32), (void *)ids->ids);
	else
		return nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, ids->ids[0]);
}
444
445 /**
446 * ovs_vport_find_upcall_portid - find the upcall portid to send upcall.
447 *
448 * @vport: vport from which the missed packet is received.
449 * @skb: skb that the missed packet was received.
450 *
451 * Uses the skb_get_hash() to select the upcall portid to send the
452 * upcall.
453 *
454 * Returns the portid of the target socket. Must be called with rcu_read_lock.
455 */
456 u32 ovs_vport_find_upcall_portid(const struct vport *vport, struct sk_buff *skb)
457 {
458 struct vport_portids *ids;
459 u32 ids_index;
460 u32 hash;
461
462 ids = rcu_dereference(vport->upcall_portids);
463
464 if (ids->n_ids == 1 && ids->ids[0] == 0)
465 return 0;
466
467 hash = skb_get_hash(skb);
468 ids_index = hash - ids->n_ids * reciprocal_divide(hash, ids->rn_ids);
469 return ids->ids[ids_index];
470 }
471
/**
 *	ovs_vport_receive - pass up received packet to the datapath for processing
 *
 * @vport: vport that received the packet
 * @skb: skb that was received
 * @tun_info: tunnel metadata (if any) that carried the packet
 *
 * Must be called with rcu_read_lock. The packet cannot be shared and
 * skb->data should point to the Ethernet header.
 *
 * Consumes @skb: it is freed here on error, otherwise handed to
 * ovs_dp_process_packet().  Returns 0 on success or a negative errno if
 * flow key extraction failed.
 */
int ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
		      const struct ip_tunnel_info *tun_info)
{
	struct sw_flow_key key;
	int error;

	OVS_CB(skb)->input_vport = vport;
	OVS_CB(skb)->mru = 0;
	if (unlikely(dev_net(skb->dev) != ovs_dp_get_net(vport->dp))) {
		u32 mark;

		/* Crossing a netns boundary: scrub the skb but preserve its
		 * mark, and drop tunnel metadata from the other namespace.
		 */
		mark = skb->mark;
		skb_scrub_packet(skb, true);
		skb->mark = mark;
		tun_info = NULL;
	}

	ovs_skb_init_inner_protocol(skb);
	skb_clear_ovs_gso_cb(skb);
	/* Extract flow from 'skb' into 'key'. */
	error = ovs_flow_key_extract(tun_info, skb, &key);
	if (unlikely(error)) {
		kfree_skb(skb);
		return error;
	}
	ovs_dp_process_packet(skb, &key);
	return 0;
}
EXPORT_SYMBOL_GPL(ovs_vport_receive);
511
512 static void free_vport_rcu(struct rcu_head *rcu)
513 {
514 struct vport *vport = container_of(rcu, struct vport, rcu);
515
516 ovs_vport_free(vport);
517 }
518
519 void ovs_vport_deferred_free(struct vport *vport)
520 {
521 if (!vport)
522 return;
523
524 call_rcu(&vport->rcu, free_vport_rcu);
525 }
526 EXPORT_SYMBOL_GPL(ovs_vport_deferred_free);
527
528 static struct rtable *ovs_tunnel_route_lookup(struct net *net,
529 const struct ip_tunnel_key *key,
530 u32 mark,
531 struct flowi4 *fl,
532 u8 protocol)
533 {
534 struct rtable *rt;
535
536 memset(fl, 0, sizeof(*fl));
537 fl->daddr = key->u.ipv4.dst;
538 fl->saddr = key->u.ipv4.src;
539 fl->flowi4_tos = RT_TOS(key->tos);
540 fl->flowi4_mark = mark;
541 fl->flowi4_proto = protocol;
542
543 rt = ip_route_output_key(net, fl);
544 return rt;
545 }
546
/* Compute egress tunnel info for an upcall: route the tunnel destination
 * to learn the egress source address, then combine it with @skb's tunnel
 * metadata and the given transport ports into upcall->egress_tun_info.
 * Only IPv4 tunnel metadata is supported.  Returns 0 or a negative errno.
 */
int ovs_tunnel_get_egress_info(struct dp_upcall_info *upcall,
			       struct net *net,
			       struct sk_buff *skb,
			       u8 ipproto,
			       __be16 tp_src,
			       __be16 tp_dst)
{
	struct ip_tunnel_info *egress_tun_info = upcall->egress_tun_info;
	struct ip_tunnel_info *tun_info = skb_tunnel_info(skb);
	const struct ip_tunnel_key *tun_key;
	u32 skb_mark = skb->mark;
	struct rtable *rt;
	struct flowi4 fl;

	if (unlikely(!tun_info))
		return -EINVAL;
	if (ip_tunnel_info_af(tun_info) != AF_INET)
		return -EINVAL;

	tun_key = &tun_info->key;

	/* Route lookup to get source IP address.
	 * The process may need to be changed if the corresponding process
	 * in vports ops changed.
	 */
	rt = ovs_tunnel_route_lookup(net, tun_key, skb_mark, &fl, ipproto);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	/* Only fl.saddr is needed; the route itself is not kept. */
	ip_rt_put(rt);

	/* Generate egress_tun_info based on tun_info,
	 * saddr, tp_src and tp_dst
	 */
	ip_tunnel_key_init(&egress_tun_info->key,
			   fl.saddr, tun_key->u.ipv4.dst,
			   tun_key->tos,
			   tun_key->ttl,
			   tp_src, tp_dst,
			   tun_key->tun_id,
			   tun_key->tun_flags);
	egress_tun_info->options_len = tun_info->options_len;
	egress_tun_info->mode = tun_info->mode;
	upcall->egress_tun_opts = ip_tunnel_info_opts(tun_info);
	return 0;
}
EXPORT_SYMBOL_GPL(ovs_tunnel_get_egress_info);
594
595 int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
596 struct dp_upcall_info *upcall)
597 {
598 /* get_egress_tun_info() is only implemented on tunnel ports. */
599 if (unlikely(!vport->ops->get_egress_tun_info))
600 return -EINVAL;
601
602 return vport->ops->get_egress_tun_info(vport, skb, upcall);
603 }
604
605 static unsigned int packet_length(const struct sk_buff *skb)
606 {
607 unsigned int length = skb->len - ETH_HLEN;
608
609 if (skb->protocol == htons(ETH_P_8021Q))
610 length -= VLAN_HLEN;
611
612 return length;
613 }
614
/* Transmit @skb on @vport, consuming it.  Packets larger than the device
 * MTU are dropped (counted in tx_errors) unless they are GSO packets,
 * which are segmented further down the stack.
 */
void ovs_vport_send(struct vport *vport, struct sk_buff *skb)
{
	int mtu = vport->dev->mtu;

	if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
		net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
				     vport->dev->name,
				     packet_length(skb), mtu);
		vport->dev->stats.tx_errors++;
		goto drop;
	}

	skb->dev = vport->dev;
	vport->ops->send(skb);
	return;

drop:
	kfree_skb(skb);
}