/* datapath/vport.c, from mirror_ovs.git at commit "datapath: pass mac_proto to ovs_vport_send" */
/*
 * Copyright (c) 2007-2015 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/module.h>
#include <linux/if_link.h>
#include <net/net_namespace.h>
#include <net/lisp.h>
#include <net/gre.h>
#include <net/geneve.h>
#include <net/stt.h>
#include <net/vxlan.h>

#include "datapath.h"
#include "gso.h"
#include "vport.h"
#include "vport-internal_dev.h"

static LIST_HEAD(vport_ops_list);

/* Protected by RCU read lock for reading, ovs_mutex for writing. */
static struct hlist_head *dev_table;
#define VPORT_HASH_BUCKETS 1024

/**
 * ovs_vport_init - initialize vport subsystem
 *
 * Called at module load time to initialize the vport subsystem.
 */
int ovs_vport_init(void)
{
	int err;

	dev_table = kzalloc(VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
			    GFP_KERNEL);
	if (!dev_table)
		return -ENOMEM;

	err = lisp_init_module();
	if (err)
		goto err_lisp;
	err = ipgre_init();
	if (err)
		goto err_gre;
	err = geneve_init_module();
	if (err)
		goto err_geneve;

	err = vxlan_init_module();
	if (err)
		goto err_vxlan;
	err = ovs_stt_init_module();
	if (err)
		goto err_stt;
	return 0;

err_stt:
	vxlan_cleanup_module();
err_vxlan:
	geneve_cleanup_module();
err_geneve:
	ipgre_fini();
err_gre:
	lisp_cleanup_module();
err_lisp:
	kfree(dev_table);
	return err;
}

/**
 * ovs_vport_exit - shutdown vport subsystem
 *
 * Called at module exit time to shutdown the vport subsystem.
 */
void ovs_vport_exit(void)
{
	ovs_stt_cleanup_module();
	vxlan_cleanup_module();
	geneve_cleanup_module();
	ipgre_fini();
	lisp_cleanup_module();
	kfree(dev_table);
}

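/* Vports are hashed into dev_table by name, keyed on the owning network
 * namespace, so identically named ports in different namespaces usually
 * land in different buckets.  VPORT_HASH_BUCKETS is a power of two, which
 * lets the hash be reduced with a simple mask below.
 */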
static struct hlist_head *hash_bucket(const struct net *net, const char *name)
{
	unsigned int hash = jhash(name, strlen(name), (unsigned long) net);
	return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
}

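/* Registration of a vport implementation.  A minimal sketch of how a vport
 * module is expected to hook in (illustrative only; everything except the
 * struct vport_ops fields and ovs_vport_ops_register() is a made-up name):
 *
 *	static struct vport_ops ovs_foo_vport_ops = {
 *		.type		= OVS_VPORT_TYPE_FOO,
 *		.create		= foo_create,
 *		.destroy	= foo_destroy,
 *		.send		= foo_xmit,
 *	};
 *
 *	static int __init ovs_foo_init(void)
 *	{
 *		return ovs_vport_ops_register(&ovs_foo_vport_ops);
 *	}
 *
 * ovs_vport_ops_register() (see vport.h) fills in ops->owner with
 * THIS_MODULE before calling __ovs_vport_ops_register() below, so that
 * ovs_vport_add() can take a module reference per port.
 */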
int __ovs_vport_ops_register(struct vport_ops *ops)
{
	int err = -EEXIST;
	struct vport_ops *o;

	ovs_lock();
	list_for_each_entry(o, &vport_ops_list, list)
		if (ops->type == o->type)
			goto errout;

	list_add_tail(&ops->list, &vport_ops_list);
	err = 0;
errout:
	ovs_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(__ovs_vport_ops_register);

void ovs_vport_ops_unregister(struct vport_ops *ops)
{
	ovs_lock();
	list_del(&ops->list);
	ovs_unlock();
}
EXPORT_SYMBOL_GPL(ovs_vport_ops_unregister);

/**
 * ovs_vport_locate - find a port that has already been created
 *
 * @net: network namespace the port belongs to
 * @name: name of port to find
 *
 * Must be called with ovs or RCU read lock.
 */
struct vport *ovs_vport_locate(const struct net *net, const char *name)
{
	struct hlist_head *bucket = hash_bucket(net, name);
	struct vport *vport;

	hlist_for_each_entry_rcu(vport, bucket, hash_node)
		if (!strcmp(name, ovs_vport_name(vport)) &&
		    net_eq(ovs_dp_get_net(vport->dp), net))
			return vport;

	return NULL;
}

/**
 * ovs_vport_alloc - allocate and initialize new vport
 *
 * @priv_size: Size of private data area to allocate.
 * @ops: vport device ops
 * @parms: information about the new vport
 *
 * Allocate and initialize a new vport defined by @ops. The vport will contain
 * a private data area of size @priv_size that can be accessed using
 * vport_priv(). vports that are no longer needed should be released with
 * vport_free().
 */
struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
			      const struct vport_parms *parms)
{
	struct vport *vport;
	size_t alloc_size;

	alloc_size = sizeof(struct vport);
	if (priv_size) {
		alloc_size = ALIGN(alloc_size, VPORT_ALIGN);
		alloc_size += priv_size;
	}

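	/* A single allocation holds both the generic struct vport and the
	 * implementation's private data: the private area begins at
	 * ALIGN(sizeof(struct vport), VPORT_ALIGN), which is the offset that
	 * vport_priv() and vport_from_priv() in vport.h translate to and from.
	 */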
	vport = kzalloc(alloc_size, GFP_KERNEL);
	if (!vport)
		return ERR_PTR(-ENOMEM);

	vport->dp = parms->dp;
	vport->port_no = parms->port_no;
	vport->ops = ops;
	INIT_HLIST_NODE(&vport->dp_hash_node);

	if (ovs_vport_set_upcall_portids(vport, parms->upcall_portids)) {
		kfree(vport);
		return ERR_PTR(-EINVAL);
	}

	return vport;
}
EXPORT_SYMBOL_GPL(ovs_vport_alloc);

/**
 * ovs_vport_free - uninitialize and free vport
 *
 * @vport: vport to free
 *
 * Frees a vport allocated with vport_alloc() when it is no longer needed.
 *
 * The caller must ensure that an RCU grace period has passed since the last
 * time @vport was in a datapath.
 */
void ovs_vport_free(struct vport *vport)
{
	/* vport is freed from an RCU callback or from an error path, so it
	 * is safe to use a raw dereference here.
	 */
	kfree(rcu_dereference_raw(vport->upcall_portids));
	kfree(vport);
}
EXPORT_SYMBOL_GPL(ovs_vport_free);

static struct vport_ops *ovs_vport_lookup(const struct vport_parms *parms)
{
	struct vport_ops *ops;

	list_for_each_entry(ops, &vport_ops_list, list)
		if (ops->type == parms->type)
			return ops;

	return NULL;
}

/**
 * ovs_vport_add - add vport device (for kernel callers)
 *
 * @parms: Information about new vport.
 *
 * Creates a new vport with the specified configuration (which is dependent on
 * device type). ovs_mutex must be held.
 */
struct vport *ovs_vport_add(const struct vport_parms *parms)
{
	struct vport_ops *ops;
	struct vport *vport;

	ops = ovs_vport_lookup(parms);
	if (ops) {
		struct hlist_head *bucket;

		if (!try_module_get(ops->owner))
			return ERR_PTR(-EAFNOSUPPORT);

		vport = ops->create(parms);
		if (IS_ERR(vport)) {
			module_put(ops->owner);
			return vport;
		}

		bucket = hash_bucket(ovs_dp_get_net(vport->dp),
				     ovs_vport_name(vport));
		hlist_add_head_rcu(&vport->hash_node, bucket);
		return vport;
	}

	/* Unlock to attempt module load and return -EAGAIN if load
	 * was successful as we need to restart the port addition
	 * workflow.
	 */
	ovs_unlock();
	request_module("vport-type-%d", parms->type);
	ovs_lock();

	if (!ovs_vport_lookup(parms))
		return ERR_PTR(-EAFNOSUPPORT);
	else
		return ERR_PTR(-EAGAIN);
}

/**
 * ovs_vport_set_options - modify existing vport device (for kernel callers)
 *
 * @vport: vport to modify.
 * @options: New configuration.
 *
 * Modifies an existing device with the specified configuration (which is
 * dependent on device type). ovs_mutex must be held.
 */
int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
{
	if (!vport->ops->set_options)
		return -EOPNOTSUPP;
	return vport->ops->set_options(vport, options);
}

/**
 * ovs_vport_del - delete existing vport device
 *
 * @vport: vport to delete.
 *
 * Detaches @vport from its datapath and destroys it. ovs_mutex must be
 * held.
 */
void ovs_vport_del(struct vport *vport)
{
	ASSERT_OVSL();

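	/* Unhash first so that no new lookups can find the port; the
	 * implementation's ->destroy() is then responsible for detaching the
	 * device and, after an RCU grace period (see ovs_vport_free()), for
	 * freeing the vport itself.
	 */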
	hlist_del_rcu(&vport->hash_node);
	module_put(vport->ops->owner);
	vport->ops->destroy(vport);
}

/**
 * ovs_vport_get_stats - retrieve device stats
 *
 * @vport: vport from which to retrieve the stats
 * @stats: location to store stats
 *
 * Retrieves transmit, receive, and error stats for the given device.
 *
 * Must be called with ovs_mutex or rcu_read_lock.
 */
void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
{
	const struct rtnl_link_stats64 *dev_stats;
	struct rtnl_link_stats64 temp;

	dev_stats = dev_get_stats(vport->dev, &temp);
	stats->rx_errors = dev_stats->rx_errors;
	stats->tx_errors = dev_stats->tx_errors;
	stats->tx_dropped = dev_stats->tx_dropped;
	stats->rx_dropped = dev_stats->rx_dropped;

	stats->rx_bytes = dev_stats->rx_bytes;
	stats->rx_packets = dev_stats->rx_packets;
	stats->tx_bytes = dev_stats->tx_bytes;
	stats->tx_packets = dev_stats->tx_packets;
}

/**
 * ovs_vport_get_options - retrieve device options
 *
 * @vport: vport from which to retrieve the options.
 * @skb: sk_buff where options should be appended.
 *
 * Retrieves the configuration of the given device, appending an
 * %OVS_VPORT_ATTR_OPTIONS attribute that in turn contains nested
 * vport-specific attributes to @skb.
 *
 * Returns 0 if successful, -EMSGSIZE if @skb has insufficient room, or another
 * negative error code if a real error occurred. If an error occurs, @skb is
 * left unmodified.
 *
 * Must be called with ovs_mutex or rcu_read_lock.
 */
int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
{
	struct nlattr *nla;
	int err;

	if (!vport->ops->get_options)
		return 0;

	nla = nla_nest_start(skb, OVS_VPORT_ATTR_OPTIONS);
	if (!nla)
		return -EMSGSIZE;

	err = vport->ops->get_options(vport, skb);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	}

	nla_nest_end(skb, nla);
	return 0;
}

/**
 * ovs_vport_set_upcall_portids - set upcall portids of @vport.
 *
 * @vport: vport to modify.
 * @ids: new configuration, an array of port ids.
 *
 * Sets the vport's upcall_portids to @ids.
 *
 * Returns 0 if successful, -EINVAL if @ids is zero length or cannot be parsed
 * as an array of U32.
 *
 * Must be called with ovs_mutex.
 */
int ovs_vport_set_upcall_portids(struct vport *vport, const struct nlattr *ids)
{
	struct vport_portids *old, *vport_portids;

	if (!nla_len(ids) || nla_len(ids) % sizeof(u32))
		return -EINVAL;

	old = ovsl_dereference(vport->upcall_portids);

	vport_portids = kmalloc(sizeof(*vport_portids) + nla_len(ids),
				GFP_KERNEL);
	if (!vport_portids)
		return -ENOMEM;

	vport_portids->n_ids = nla_len(ids) / sizeof(u32);
	vport_portids->rn_ids = reciprocal_value(vport_portids->n_ids);
	nla_memcpy(vport_portids->ids, ids, nla_len(ids));

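	/* Publish the new array first, then retire the old one after a grace
	 * period, so concurrent readers under rcu_read_lock() always see
	 * either the old or the new set of portids, never freed memory.
	 */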
	rcu_assign_pointer(vport->upcall_portids, vport_portids);

	if (old)
		kfree_rcu(old, rcu);
	return 0;
}

/**
 * ovs_vport_get_upcall_portids - get the upcall_portids of @vport.
 *
 * @vport: vport from which to retrieve the portids.
 * @skb: sk_buff where portids should be appended.
 *
 * Retrieves the configuration of the given vport, appending the
 * %OVS_VPORT_ATTR_UPCALL_PID attribute which is the array of upcall
 * portids to @skb.
 *
 * Returns 0 if successful, -EMSGSIZE if @skb has insufficient room.
 * If an error occurs, @skb is left unmodified. Must be called with
 * ovs_mutex or rcu_read_lock.
 */
int ovs_vport_get_upcall_portids(const struct vport *vport,
				 struct sk_buff *skb)
{
	struct vport_portids *ids;

	ids = rcu_dereference_ovsl(vport->upcall_portids);

	if (vport->dp->user_features & OVS_DP_F_VPORT_PIDS)
		return nla_put(skb, OVS_VPORT_ATTR_UPCALL_PID,
			       ids->n_ids * sizeof(u32), (void *)ids->ids);
	else
		return nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, ids->ids[0]);
}

/**
 * ovs_vport_find_upcall_portid - find the upcall portid to send upcall.
 *
 * @vport: vport from which the missed packet is received.
 * @skb: skb on which the missed packet was received.
 *
 * Uses skb_get_hash() to select the upcall portid to send the
 * upcall.
 *
 * Returns the portid of the target socket. Must be called with rcu_read_lock.
 */
u32 ovs_vport_find_upcall_portid(const struct vport *vport, struct sk_buff *skb)
{
	struct vport_portids *ids;
	u32 ids_index;
	u32 hash;

	ids = rcu_dereference(vport->upcall_portids);

	if (ids->n_ids == 1 && ids->ids[0] == 0)
		return 0;

	hash = skb_get_hash(skb);
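	/* hash % n_ids, computed with a precomputed reciprocal instead of a
	 * division: reciprocal_divide(hash, rn_ids) yields hash / n_ids, so
	 * the expression below reduces to the remainder.  E.g. with
	 * n_ids == 4 and hash == 10: 10 - 4 * 2 == 2.
	 */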
	ids_index = hash - ids->n_ids * reciprocal_divide(hash, ids->rn_ids);
	return ids->ids[ids_index];
}

/**
 * ovs_vport_receive - pass up received packet to the datapath for processing
 *
 * @vport: vport that received the packet
 * @skb: skb that was received
 * @tun_info: tunnel info (if any) that carried the packet
 *
 * Must be called with rcu_read_lock. The packet cannot be shared and
 * skb->data should point to the Ethernet header.
 */
int ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
		      const struct ip_tunnel_info *tun_info)
{
	struct sw_flow_key key;
	int error;

	OVS_CB(skb)->input_vport = vport;
	OVS_CB(skb)->mru = 0;
	OVS_CB(skb)->cutlen = 0;
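	/* If the packet crossed a network namespace boundary on its way in
	 * (e.g. it arrived on a device living in another netns), scrub the
	 * namespace-local state from the skb but preserve its mark, and drop
	 * the tunnel metadata, which is not meaningful in this datapath's
	 * namespace.
	 */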
	if (unlikely(dev_net(skb->dev) != ovs_dp_get_net(vport->dp))) {
		u32 mark;

		mark = skb->mark;
		skb_scrub_packet(skb, true);
		skb->mark = mark;
		tun_info = NULL;
	}

	ovs_skb_init_inner_protocol(skb);
	skb_clear_ovs_gso_cb(skb);
	/* Extract flow from 'skb' into 'key'. */
	error = ovs_flow_key_extract(tun_info, skb, &key);
	if (unlikely(error)) {
		kfree_skb(skb);
		return error;
	}
	ovs_dp_process_packet(skb, &key);
	return 0;
}

static unsigned int packet_length(const struct sk_buff *skb,
				  struct net_device *dev)
{
	unsigned int length = skb->len - dev->hard_header_len;

	if (!skb_vlan_tag_present(skb) &&
	    eth_type_vlan(skb->protocol))
		length -= VLAN_HLEN;

	/* Don't subtract for multiple VLAN tags. Most (all?) drivers allow
	 * (ETH_LEN + VLAN_HLEN) in addition to the mtu value, but almost none
	 * account for 802.1ad. e.g. is_skb_forwardable().
	 */

	return length;
}

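/* @mac_proto identifies the L2 header type the caller attached to @skb,
 * presumably one of the MAC_PROTO_* values from datapath.h (an Ethernet
 * header or no L2 header at all).
 */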
void ovs_vport_send(struct vport *vport, struct sk_buff *skb, u8 mac_proto)
{
	int mtu = vport->dev->mtu;

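	/* GSO packets may legitimately exceed the device MTU here, since they
	 * are segmented to MTU-sized frames further down the stack, so only
	 * non-GSO packets are length-checked and dropped.
	 */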
	if (unlikely(packet_length(skb, vport->dev) > mtu &&
		     !skb_is_gso(skb))) {
		net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
				     vport->dev->name,
				     packet_length(skb, vport->dev), mtu);
		vport->dev->stats.tx_errors++;
		goto drop;
	}

	skb->dev = vport->dev;
	vport->ops->send(skb);
	return;

drop:
	kfree_skb(skb);
}