datapath: Add generic virtual port layer.
[mirror_ovs.git] / datapath / vport.c
/*
 * Copyright (c) 2010 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

#include <linux/dcache.h>
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rtnetlink.h>

#include "vport.h"

extern struct vport_ops netdev_vport_ops;
extern struct vport_ops internal_vport_ops;
extern struct vport_ops gre_vport_ops;

static struct vport_ops *base_vport_ops_list[] = {
	&netdev_vport_ops,
	&internal_vport_ops,
};

static const struct vport_ops **vport_ops_list;
static int n_vport_types;

static struct hlist_head *dev_table;
#define VPORT_HASH_BUCKETS 1024

/* Both RTNL lock and vport_mutex need to be held when updating dev_table.
 *
 * If you use vport_locate and then perform some operations, you need to hold
 * one of these locks if you don't want the vport to be deleted out from under
 * you.
 *
 * If you get a reference to a vport through a dp_port, it is protected
 * by RCU and you need to hold rcu_read_lock instead when reading.
 *
 * If multiple locks are taken, the hierarchy is:
 * 1. RTNL
 * 2. DP
 * 3. vport
 */
static DEFINE_MUTEX(vport_mutex);

/**
 * vport_lock - acquire vport lock
 *
 * Acquire global vport lock. See above comment about locking requirements
 * and specific function definitions. May sleep.
 */
void
vport_lock(void)
{
	mutex_lock(&vport_mutex);
}

/**
 * vport_unlock - release vport lock
 *
 * Release lock acquired with vport_lock.
 */
void
vport_unlock(void)
{
	mutex_unlock(&vport_mutex);
}
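
/* Illustrative sketch (not part of the original file): a caller that wants to
 * look up a port by name and keep it from disappearing follows the hierarchy
 * described above -- RTNL first if it is needed, then any DP mutex, then the
 * vport lock -- and releases the locks in reverse order. The helper name
 * below is hypothetical.
 */
#if 0
static int example_find_port(const char *name)
{
	struct vport *vport;
	int err = 0;

	vport_lock();

	vport = vport_locate(name);	/* requires RTNL or the vport lock */
	if (!vport)
		err = -ENODEV;

	/* ... use the vport while the lock is still held ... */

	vport_unlock();
	return err;
}
#endif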

#define ASSERT_VPORT() do { \
	if (unlikely(!mutex_is_locked(&vport_mutex))) { \
		printk(KERN_ERR "openvswitch: vport lock not held at %s (%d)\n", \
			__FILE__, __LINE__); \
		dump_stack(); \
	} \
} while(0)

/**
 * vport_init - initialize vport subsystem
 *
 * Called at module load time to initialize the vport subsystem and any
 * compiled in vport types.
 */
int
vport_init(void)
{
	int err;
	int i;

	dev_table = kzalloc(VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
			    GFP_KERNEL);
	if (!dev_table) {
		err = -ENOMEM;
		goto error;
	}

	vport_ops_list = kmalloc(ARRAY_SIZE(base_vport_ops_list) *
				 sizeof(struct vport_ops *), GFP_KERNEL);
	if (!vport_ops_list) {
		err = -ENOMEM;
		goto error_dev_table;
	}

	for (i = 0; i < ARRAY_SIZE(base_vport_ops_list); i++) {
		struct vport_ops *new_ops = base_vport_ops_list[i];

		if (new_ops->get_stats && new_ops->flags & VPORT_F_GEN_STATS) {
			printk(KERN_INFO "openvswitch: both get_stats() and VPORT_F_GEN_STATS defined on vport %s, dropping VPORT_F_GEN_STATS\n", new_ops->type);
			new_ops->flags &= ~VPORT_F_GEN_STATS;
		}

		if (new_ops->init)
			err = new_ops->init();
		else
			err = 0;

		if (!err)
			vport_ops_list[n_vport_types++] = new_ops;
		else if (new_ops->flags & VPORT_F_REQUIRED) {
			vport_exit();
			goto error;
		}
	}

	return 0;

error_dev_table:
	kfree(dev_table);
error:
	return err;
}
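
/* Illustrative sketch (not part of the original file): what a minimal vport
 * implementation registered through base_vport_ops_list might look like.
 * Only fields that this file actually dereferences (type, flags, create,
 * destroy, ...) are shown; the full struct vport_ops layout and the exact
 * callback prototypes live in vport.h. The "example" names are hypothetical.
 */
#if 0
static struct vport *example_create(const char *name, const void __user *config)
{
	/* A real implementation would call vport_alloc() and parse @config. */
	return ERR_PTR(-EOPNOTSUPP);
}

static int example_destroy(struct vport *vport)
{
	vport_free(vport);
	return 0;
}

struct vport_ops example_vport_ops = {
	.type		= "example",
	.flags		= VPORT_F_GEN_STATS,	/* let this layer keep stats */
	.create		= example_create,
	.destroy	= example_destroy,
};
#endif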

static void
vport_del_all(void)
{
	int i;

	rtnl_lock();
	vport_lock();

	for (i = 0; i < VPORT_HASH_BUCKETS; i++) {
		struct hlist_head *bucket = &dev_table[i];
		struct vport *vport;
		struct hlist_node *node, *next;

		hlist_for_each_entry_safe(vport, node, next, bucket, hash_node)
			__vport_del(vport);
	}

	vport_unlock();
	rtnl_unlock();
}

/**
 * vport_exit - shutdown vport subsystem
 *
 * Called at module exit time to shutdown the vport subsystem and any
 * initialized vport types.
 */
void
vport_exit(void)
{
	int i;

	vport_del_all();

	for (i = 0; i < n_vport_types; i++) {
		if (vport_ops_list[i]->exit)
			vport_ops_list[i]->exit();
	}

	kfree(vport_ops_list);
	kfree(dev_table);
}

/**
 * vport_add - add vport device (for userspace callers)
 *
 * @uvport_config: New port configuration.
 *
 * Creates a new vport with the specified configuration (which is dependent
 * on device type). This function is for userspace callers and assumes no
 * locks are held.
 */
int
vport_add(const struct odp_vport_add __user *uvport_config)
{
	struct odp_vport_add vport_config;
	struct vport *vport;
	int err = 0;

	if (copy_from_user(&vport_config, uvport_config, sizeof(struct odp_vport_add)))
		return -EFAULT;

	vport_config.port_type[VPORT_TYPE_SIZE - 1] = '\0';
	vport_config.devname[IFNAMSIZ - 1] = '\0';

	rtnl_lock();

	vport = vport_locate(vport_config.devname);
	if (vport) {
		err = -EEXIST;
		goto out;
	}

	vport_lock();
	vport = __vport_add(vport_config.devname, vport_config.port_type,
			    vport_config.config);
	vport_unlock();

	if (IS_ERR(vport))
		err = PTR_ERR(vport);

out:
	rtnl_unlock();
	return err;
}

/**
 * vport_mod - modify existing vport device (for userspace callers)
 *
 * @uvport_config: New configuration for vport
 *
 * Modifies an existing device with the specified configuration (which is
 * dependent on device type). This function is for userspace callers and
 * assumes no locks are held.
 */
int
vport_mod(const struct odp_vport_mod __user *uvport_config)
{
	struct odp_vport_mod vport_config;
	struct vport *vport;
	int err;

	if (copy_from_user(&vport_config, uvport_config, sizeof(struct odp_vport_mod)))
		return -EFAULT;

	vport_config.devname[IFNAMSIZ - 1] = '\0';

	rtnl_lock();

	vport = vport_locate(vport_config.devname);
	if (!vport) {
		err = -ENODEV;
		goto out;
	}

	vport_lock();
	err = __vport_mod(vport, vport_config.config);
	vport_unlock();

out:
	rtnl_unlock();
	return err;
}

/**
 * vport_del - delete existing vport device (for userspace callers)
 *
 * @udevname: Name of device to delete
 *
 * Deletes the specified device. Detaches the device from a datapath first
 * if it is attached. Deleting the device will fail if it does not exist or it
 * is the datapath local port. It is also possible to fail for less obvious
 * reasons, such as lack of memory. This function is for userspace callers and
 * assumes no locks are held.
 */
int
vport_del(const char __user *udevname)
{
	char devname[IFNAMSIZ];
	struct vport *vport;
	struct dp_port *dp_port;
	int err = 0;

	if (strncpy_from_user(devname, udevname, IFNAMSIZ - 1) < 0)
		return -EFAULT;
	devname[IFNAMSIZ - 1] = '\0';

	rtnl_lock();

	vport = vport_locate(devname);
	if (!vport) {
		err = -ENODEV;
		goto out;
	}

	dp_port = vport_get_dp_port(vport);
	if (dp_port) {
		struct datapath *dp = dp_port->dp;

		mutex_lock(&dp->mutex);

		if (!strcmp(dp_name(dp), devname)) {
			err = -EINVAL;
			goto dp_port_out;
		}

		err = dp_detach_port(dp_port, 0);

dp_port_out:
		mutex_unlock(&dp->mutex);

		if (err)
			goto out;
	}

	vport_lock();
	err = __vport_del(vport);
	vport_unlock();

out:
	rtnl_unlock();
	return err;
}

/**
 * vport_stats_get - retrieve device stats (for userspace callers)
 *
 * @ustats_req: Stats request parameters.
 *
 * Retrieves transmit, receive, and error stats for the given device. This
 * function is for userspace callers and assumes no locks are held.
 */
int
vport_stats_get(struct odp_vport_stats_req __user *ustats_req)
{
	struct odp_vport_stats_req stats_req;
	struct vport *vport;
	int err;

	if (copy_from_user(&stats_req, ustats_req, sizeof(struct odp_vport_stats_req)))
		return -EFAULT;

	stats_req.devname[IFNAMSIZ - 1] = '\0';

	vport_lock();

	vport = vport_locate(stats_req.devname);
	if (!vport) {
		err = -ENODEV;
		goto out;
	}

	if (vport->ops->get_stats)
		err = vport->ops->get_stats(vport, &stats_req.stats);
	else if (vport->ops->flags & VPORT_F_GEN_STATS) {
		int i;

		memset(&stats_req.stats, 0, sizeof(struct odp_vport_stats));

		for_each_possible_cpu(i) {
			const struct vport_percpu_stats *percpu_stats;

			percpu_stats = per_cpu_ptr(vport->percpu_stats, i);
			stats_req.stats.rx_bytes += percpu_stats->rx_bytes;
			stats_req.stats.rx_packets += percpu_stats->rx_packets;
			stats_req.stats.tx_bytes += percpu_stats->tx_bytes;
			stats_req.stats.tx_packets += percpu_stats->tx_packets;
		}

		spin_lock_bh(&vport->err_stats.lock);

		stats_req.stats.rx_dropped = vport->err_stats.rx_dropped;
		stats_req.stats.rx_errors = vport->err_stats.rx_errors
				+ vport->err_stats.rx_frame_err
				+ vport->err_stats.rx_over_err
				+ vport->err_stats.rx_crc_err;
		stats_req.stats.rx_frame_err = vport->err_stats.rx_frame_err;
		stats_req.stats.rx_over_err = vport->err_stats.rx_over_err;
		stats_req.stats.rx_crc_err = vport->err_stats.rx_crc_err;
		stats_req.stats.tx_dropped = vport->err_stats.tx_dropped;
		stats_req.stats.tx_errors = vport->err_stats.tx_errors;
		stats_req.stats.collisions = vport->err_stats.collisions;

		spin_unlock_bh(&vport->err_stats.lock);

		err = 0;
	} else
		err = -EOPNOTSUPP;

out:
	vport_unlock();

	if (!err)
		if (copy_to_user(ustats_req, &stats_req, sizeof(struct odp_vport_stats_req)))
			err = -EFAULT;

	return err;
}

/**
 * vport_ether_get - retrieve device Ethernet address (for userspace callers)
 *
 * @uvport_ether: Ethernet address request parameters.
 *
 * Retrieves the Ethernet address of the given device. This function is for
 * userspace callers and assumes no locks are held.
 */
int
vport_ether_get(struct odp_vport_ether __user *uvport_ether)
{
	struct odp_vport_ether vport_ether;
	struct vport *vport;
	int err = 0;

	if (copy_from_user(&vport_ether, uvport_ether, sizeof(struct odp_vport_ether)))
		return -EFAULT;

	vport_ether.devname[IFNAMSIZ - 1] = '\0';

	vport_lock();

	vport = vport_locate(vport_ether.devname);
	if (!vport) {
		err = -ENODEV;
		goto out;
	}

	memcpy(vport_ether.ether_addr, vport_get_addr(vport), ETH_ALEN);

out:
	vport_unlock();

	if (!err)
		if (copy_to_user(uvport_ether, &vport_ether, sizeof(struct odp_vport_ether)))
			err = -EFAULT;

	return err;
}

/**
 * vport_ether_set - set device Ethernet address (for userspace callers)
 *
 * @uvport_ether: Ethernet address request parameters.
 *
 * Sets the Ethernet address of the given device. Some devices may not support
 * setting the Ethernet address, in which case the result will always be
 * -EOPNOTSUPP. This function is for userspace callers and assumes no locks
 * are held.
 */
int
vport_ether_set(struct odp_vport_ether __user *uvport_ether)
{
	struct odp_vport_ether vport_ether;
	struct vport *vport;
	int err;

	if (copy_from_user(&vport_ether, uvport_ether, sizeof(struct odp_vport_ether)))
		return -EFAULT;

	vport_ether.devname[IFNAMSIZ - 1] = '\0';

	rtnl_lock();
	vport_lock();

	vport = vport_locate(vport_ether.devname);
	if (!vport) {
		err = -ENODEV;
		goto out;
	}

	err = vport_set_addr(vport, vport_ether.ether_addr);

out:
	vport_unlock();
	rtnl_unlock();
	return err;
}

/**
 * vport_mtu_get - retrieve device MTU (for userspace callers)
 *
 * @uvport_mtu: MTU request parameters.
 *
 * Retrieves the MTU of the given device. This function is for userspace
 * callers and assumes no locks are held.
 */
int
vport_mtu_get(struct odp_vport_mtu __user *uvport_mtu)
{
	struct odp_vport_mtu vport_mtu;
	struct vport *vport;
	int err = 0;

	if (copy_from_user(&vport_mtu, uvport_mtu, sizeof(struct odp_vport_mtu)))
		return -EFAULT;

	vport_mtu.devname[IFNAMSIZ - 1] = '\0';

	vport_lock();

	vport = vport_locate(vport_mtu.devname);
	if (!vport) {
		err = -ENODEV;
		goto out;
	}

	vport_mtu.mtu = vport_get_mtu(vport);

out:
	vport_unlock();

	if (!err)
		if (copy_to_user(uvport_mtu, &vport_mtu, sizeof(struct odp_vport_mtu)))
			err = -EFAULT;

	return err;
}

/**
 * vport_mtu_set - set device MTU (for userspace callers)
 *
 * @uvport_mtu: MTU request parameters.
 *
 * Sets the MTU of the given device. Some devices may not support setting the
 * MTU, in which case the result will always be -EOPNOTSUPP. This function is
 * for userspace callers and assumes no locks are held.
 */
int
vport_mtu_set(struct odp_vport_mtu __user *uvport_mtu)
{
	struct odp_vport_mtu vport_mtu;
	struct vport *vport;
	int err;

	if (copy_from_user(&vport_mtu, uvport_mtu, sizeof(struct odp_vport_mtu)))
		return -EFAULT;

	vport_mtu.devname[IFNAMSIZ - 1] = '\0';

	rtnl_lock();
	vport_lock();

	vport = vport_locate(vport_mtu.devname);
	if (!vport) {
		err = -ENODEV;
		goto out;
	}

	err = vport_set_mtu(vport, vport_mtu.mtu);

out:
	vport_unlock();
	rtnl_unlock();
	return err;
}

static struct hlist_head *
hash_bucket(const char *name)
{
	unsigned int hash = full_name_hash(name, strlen(name));
	return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
}

/**
 * vport_locate - find a port that has already been created
 *
 * @name: name of port to find
 *
 * Either RTNL or vport lock must be acquired before calling this function
 * and held while using the found port. See the locking comments at the
 * top of the file.
 */
struct vport *
vport_locate(const char *name)
{
	struct hlist_head *bucket = hash_bucket(name);
	struct vport *vport;
	struct hlist_node *node;

	if (unlikely(!mutex_is_locked(&vport_mutex) && !rtnl_is_locked())) {
		printk(KERN_ERR "openvswitch: neither RTNL nor vport lock held in vport_locate\n");
		dump_stack();
	}

	hlist_for_each_entry(vport, node, bucket, hash_node)
		if (!strcmp(name, vport_get_name(vport)))
			return vport;

	return NULL;
}

static void
register_vport(struct vport *vport)
{
	hlist_add_head(&vport->hash_node, hash_bucket(vport_get_name(vport)));
}

static void
unregister_vport(struct vport *vport)
{
	hlist_del(&vport->hash_node);
}

/**
 * vport_alloc - allocate and initialize new vport
 *
 * @priv_size: Size of private data area to allocate.
 * @ops: vport device ops
 *
 * Allocate and initialize a new vport defined by @ops. The vport will contain
 * a private data area of size @priv_size that can be accessed using
 * vport_priv(). vports that are no longer needed should be released with
 * vport_free().
 */
struct vport *
vport_alloc(int priv_size, const struct vport_ops *ops)
{
	struct vport *vport;
	size_t alloc_size;

	alloc_size = sizeof(struct vport);
	if (priv_size) {
		alloc_size = ALIGN(alloc_size, VPORT_ALIGN);
		alloc_size += priv_size;
	}

	vport = kzalloc(alloc_size, GFP_KERNEL);
	if (!vport)
		return ERR_PTR(-ENOMEM);

	vport->ops = ops;

	if (vport->ops->flags & VPORT_F_GEN_STATS) {
		vport->percpu_stats = alloc_percpu(struct vport_percpu_stats);
		if (!vport->percpu_stats) {
			kfree(vport);	/* don't leak the vport on percpu allocation failure */
			return ERR_PTR(-ENOMEM);
		}

		spin_lock_init(&vport->err_stats.lock);
	}

	return vport;
}
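
/* Illustrative sketch (not part of the original file): the private area
 * reserved by @priv_size is reached through vport_priv(), the accessor
 * referred to in the comment above (defined in vport.h). The structure and
 * helper below are hypothetical; a real create() callback would do this work.
 */
#if 0
struct example_priv {
	unsigned char addr[ETH_ALEN];
	int mtu;
};

static struct vport *example_alloc_with_priv(const struct vport_ops *ops)
{
	struct vport *vport;
	struct example_priv *priv;

	/* Size the allocation so the private data sits behind the vport. */
	vport = vport_alloc(sizeof(struct example_priv), ops);
	if (IS_ERR(vport))
		return vport;

	priv = vport_priv(vport);
	priv->mtu = 1500;
	vport_gen_ether_addr(priv->addr);	/* no natural hardware address */

	return vport;
}
#endif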

/**
 * vport_free - uninitialize and free vport
 *
 * @vport: vport to free
 *
 * Frees a vport allocated with vport_alloc() when it is no longer needed.
 */
void
vport_free(struct vport *vport)
{
	if (vport->ops->flags & VPORT_F_GEN_STATS)
		free_percpu(vport->percpu_stats);

	kfree(vport);
}

/**
 * __vport_add - add vport device (for kernel callers)
 *
 * @name: Name of new device.
 * @type: Type of new device (to be matched against types in registered vport
 * ops).
 * @config: Device type specific configuration. Userspace pointer.
 *
 * Creates a new vport with the specified configuration (which is dependent
 * on device type). Both RTNL and vport locks must be held.
 */
struct vport *
__vport_add(const char *name, const char *type, const void __user *config)
{
	struct vport *vport;
	int err = 0;
	int i;

	ASSERT_RTNL();
	ASSERT_VPORT();

	for (i = 0; i < n_vport_types; i++) {
		if (!strcmp(vport_ops_list[i]->type, type)) {
			vport = vport_ops_list[i]->create(name, config);
			if (IS_ERR(vport)) {
				err = PTR_ERR(vport);
				goto out;
			}

			register_vport(vport);
			return vport;
		}
	}

	err = -EAFNOSUPPORT;

out:
	return ERR_PTR(err);
}

/**
 * __vport_mod - modify existing vport device (for kernel callers)
 *
 * @vport: vport to modify.
 * @config: Device type specific configuration. Userspace pointer.
 *
 * Modifies an existing device with the specified configuration (which is
 * dependent on device type). Both RTNL and vport locks must be held.
 */
int
__vport_mod(struct vport *vport, const void __user *config)
{
	ASSERT_RTNL();
	ASSERT_VPORT();

	if (vport->ops->modify)
		return vport->ops->modify(vport, config);
	else
		return -EOPNOTSUPP;
}

/**
 * __vport_del - delete existing vport device (for kernel callers)
 *
 * @vport: vport to delete.
 *
 * Deletes the specified device. The device must not be currently attached to
 * a datapath. It is possible to fail for reasons such as lack of memory.
 * Both RTNL and vport locks must be held.
 */
int
__vport_del(struct vport *vport)
{
	ASSERT_RTNL();
	ASSERT_VPORT();
	BUG_ON(vport_get_dp_port(vport));

	unregister_vport(vport);

	return vport->ops->destroy(vport);
}

/**
 * vport_attach - attach a vport to a datapath
 *
 * @vport: vport to attach.
 * @dp_port: Datapath port to attach the vport to.
 *
 * Attaches a vport to a specific datapath so that packets may be exchanged.
 * Both ports must be currently unattached. @dp_port must be successfully
 * attached to a vport before it is connected to a datapath and must not be
 * modified while connected. RTNL lock and the appropriate DP mutex must be held.
 */
int
vport_attach(struct vport *vport, struct dp_port *dp_port)
{
	ASSERT_RTNL();

	if (dp_port->vport)
		return -EBUSY;

	if (vport_get_dp_port(vport))
		return -EBUSY;

	if (vport->ops->attach) {
		int err;

		err = vport->ops->attach(vport);
		if (err)
			return err;
	}

	dp_port->vport = vport;
	rcu_assign_pointer(vport->dp_port, dp_port);

	return 0;
}

/**
 * vport_detach - detach a vport from a datapath
 *
 * @vport: vport to detach.
 *
 * Detaches a vport from a datapath. May fail for a variety of reasons,
 * including lack of memory. RTNL lock and the appropriate DP mutex must be held.
 */
int
vport_detach(struct vport *vport)
{
	struct dp_port *dp_port;

	ASSERT_RTNL();

	dp_port = vport_get_dp_port(vport);
	if (!dp_port)
		return -EINVAL;

	dp_port->vport = NULL;
	rcu_assign_pointer(vport->dp_port, NULL);

	if (vport->ops->detach)
		return vport->ops->detach(vport);
	else
		return 0;
}

/**
 * vport_set_mtu - set device MTU (for kernel callers)
 *
 * @vport: vport on which to set MTU.
 * @mtu: New MTU.
 *
 * Sets the MTU of the given device. Some devices may not support setting the
 * MTU, in which case the result will always be -EOPNOTSUPP. RTNL lock must
 * be held.
 */
int
vport_set_mtu(struct vport *vport, int mtu)
{
	ASSERT_RTNL();

	if (mtu < 68)
		return -EINVAL;

	if (vport->ops->set_mtu)
		return vport->ops->set_mtu(vport, mtu);
	else
		return -EOPNOTSUPP;
}

/**
 * vport_set_addr - set device Ethernet address (for kernel callers)
 *
 * @vport: vport on which to set Ethernet address.
 * @addr: New address.
 *
 * Sets the Ethernet address of the given device. Some devices may not support
 * setting the Ethernet address, in which case the result will always be
 * -EOPNOTSUPP. RTNL lock must be held.
 */
int
vport_set_addr(struct vport *vport, const unsigned char *addr)
{
	ASSERT_RTNL();

	if (!is_valid_ether_addr(addr))
		return -EADDRNOTAVAIL;

	if (vport->ops->set_addr)
		return vport->ops->set_addr(vport, addr);
	else
		return -EOPNOTSUPP;
}

/**
 * vport_get_name - retrieve device name
 *
 * @vport: vport from which to retrieve the name.
 *
 * Retrieves the name of the given device. Either RTNL lock or rcu_read_lock
 * must be held for the entire duration that the name is in use.
 */
const char *
vport_get_name(const struct vport *vport)
{
	return vport->ops->get_name(vport);
}

/**
 * vport_get_type - retrieve device type
 *
 * @vport: vport from which to retrieve the type.
 *
 * Retrieves the type of the given device. Either RTNL lock or rcu_read_lock
 * must be held for the entire duration that the type is in use.
 */
const char *
vport_get_type(const struct vport *vport)
{
	return vport->ops->type;
}

/**
 * vport_get_addr - retrieve device Ethernet address (for kernel callers)
 *
 * @vport: vport from which to retrieve the Ethernet address.
 *
 * Retrieves the Ethernet address of the given device. Either RTNL lock or
 * rcu_read_lock must be held for the entire duration that the Ethernet address
 * is in use.
 */
const unsigned char *
vport_get_addr(const struct vport *vport)
{
	return vport->ops->get_addr(vport);
}

/**
 * vport_get_dp_port - retrieve attached datapath port
 *
 * @vport: vport from which to retrieve the datapath port.
 *
 * Retrieves the attached datapath port or null if not attached. Either RTNL
 * lock or rcu_read_lock must be held for the entire duration that the datapath
 * port is being accessed.
 */
struct dp_port *
vport_get_dp_port(const struct vport *vport)
{
	return rcu_dereference(vport->dp_port);
}

/**
 * vport_get_kobj - retrieve associated kobj
 *
 * @vport: vport from which to retrieve the associated kobj
 *
 * Retrieves the associated kobj or null if no kobj. The returned kobj is
 * valid for as long as the vport exists.
 */
struct kobject *
vport_get_kobj(const struct vport *vport)
{
	if (vport->ops->get_kobj)
		return vport->ops->get_kobj(vport);
	else
		return NULL;
}

/**
 * vport_get_flags - retrieve device flags
 *
 * @vport: vport from which to retrieve the flags
 *
 * Retrieves the flags of the given device. Either RTNL lock or rcu_read_lock
 * must be held.
 */
unsigned
vport_get_flags(const struct vport *vport)
{
	return vport->ops->get_dev_flags(vport);
}

/**
 * vport_is_running - check whether device is running
 *
 * @vport: vport on which to check status.
 *
 * Checks whether the given device is running. Either RTNL lock or
 * rcu_read_lock must be held.
 */
int
vport_is_running(const struct vport *vport)
{
	return vport->ops->is_running(vport);
}

/**
 * vport_get_operstate - retrieve device operating state
 *
 * @vport: vport from which to check status
 *
 * Retrieves the RFC2863 operstate of the given device. Either RTNL lock or
 * rcu_read_lock must be held.
 */
unsigned char
vport_get_operstate(const struct vport *vport)
{
	return vport->ops->get_operstate(vport);
}

/**
 * vport_get_ifindex - retrieve device system interface index
 *
 * @vport: vport from which to retrieve index
 *
 * Retrieves the system interface index of the given device. Not all devices
 * will have system indexes, in which case the index of the datapath local
 * port is returned. Returns a negative index on error. Either RTNL lock or
 * rcu_read_lock must be held.
 */
int
vport_get_ifindex(const struct vport *vport)
{
	const struct dp_port *dp_port;

	if (vport->ops->get_ifindex)
		return vport->ops->get_ifindex(vport);

	/* If we don't actually have an ifindex, use the local port's.
	 * Userspace doesn't check it anyway. */
	dp_port = vport_get_dp_port(vport);
	if (!dp_port)
		return -EAGAIN;

	return vport_get_ifindex(dp_port->dp->ports[ODPP_LOCAL]->vport);
}

/**
 * vport_get_iflink - retrieve device system link index
 *
 * @vport: vport from which to retrieve index
 *
 * Retrieves the system link index of the given device. The link is the index
 * of the interface on which the packet will actually be sent. In most cases
 * this is the same as the ifindex but may be different for tunnel devices.
 * Returns a negative index on error. Either RTNL lock or rcu_read_lock must
 * be held.
 */
int
vport_get_iflink(const struct vport *vport)
{
	if (vport->ops->get_iflink)
		return vport->ops->get_iflink(vport);

	/* If we don't have an iflink, use the ifindex. In most cases they
	 * are the same. */
	return vport_get_ifindex(vport);
}

/**
 * vport_get_mtu - retrieve device MTU (for kernel callers)
 *
 * @vport: vport from which to retrieve MTU
 *
 * Retrieves the MTU of the given device. Either RTNL lock or rcu_read_lock
 * must be held.
 */
int
vport_get_mtu(const struct vport *vport)
{
	return vport->ops->get_mtu(vport);
}

/**
 * vport_receive - pass up received packet to the datapath for processing
 *
 * @vport: vport that received the packet
 * @skb: skb that was received
 *
 * Must be called with rcu_read_lock and bottom halves disabled. The packet
 * cannot be shared and skb->data should point to the Ethernet header.
 */
void
vport_receive(struct vport *vport, struct sk_buff *skb)
{
	struct dp_port *dp_port = vport_get_dp_port(vport);

	if (!dp_port)
		return;

	if (vport->ops->flags & VPORT_F_GEN_STATS) {
		struct vport_percpu_stats *stats;

		local_bh_disable();

		stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());
		stats->rx_packets++;
		stats->rx_bytes += skb->len;

		local_bh_enable();
	}

	if (!(vport->ops->flags & VPORT_F_TUN_ID))
		OVS_CB(skb)->tun_id = 0;

	dp_process_received_packet(dp_port, skb);
}
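
/* Illustrative sketch (not part of the original file): a vport
 * implementation's receive hook hands packets to the datapath through
 * vport_receive() under the conditions stated above -- rcu_read_lock held,
 * bottom halves disabled, skb unshared, skb->data at the Ethernet header.
 * The hook name below is hypothetical.
 */
#if 0
static void example_rx_handler(struct vport *vport, struct sk_buff *skb)
{
	if (unlikely(skb_shared(skb))) {
		struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);

		kfree_skb(skb);
		if (!nskb) {
			vport_record_error(vport, VPORT_E_RX_DROPPED);
			return;
		}
		skb = nskb;
	}

	/* Caller context (e.g. a netdev rx hook in softirq) already satisfies
	 * the rcu_read_lock + BH-disabled requirement. */
	vport_receive(vport, skb);
}
#endif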

/**
 * vport_send - send a packet on a device
 *
 * @vport: vport on which to send the packet
 * @skb: skb to send
 *
 * Sends the given packet and returns the length of data sent. Either RTNL
 * lock or rcu_read_lock must be held.
 */
int
vport_send(struct vport *vport, struct sk_buff *skb)
{
	int sent;

	sent = vport->ops->send(vport, skb);

	if (vport->ops->flags & VPORT_F_GEN_STATS && sent > 0) {
		struct vport_percpu_stats *stats;

		local_bh_disable();

		stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());
		stats->tx_packets++;
		stats->tx_bytes += sent;

		local_bh_enable();
	}

	return sent;
}

/**
 * vport_record_error - indicate device error to generic stats layer
 *
 * @vport: vport that encountered the error
 * @err_type: one of enum vport_err_type types to indicate the error type
 *
 * If using the vport generic stats layer indicate that an error of the given
 * type has occurred.
 */
void
vport_record_error(struct vport *vport, enum vport_err_type err_type)
{
	if (vport->ops->flags & VPORT_F_GEN_STATS) {

		spin_lock_bh(&vport->err_stats.lock);

		switch (err_type) {
		case VPORT_E_RX_DROPPED:
			vport->err_stats.rx_dropped++;
			break;

		case VPORT_E_RX_ERROR:
			vport->err_stats.rx_errors++;
			break;

		case VPORT_E_RX_FRAME:
			vport->err_stats.rx_frame_err++;
			break;

		case VPORT_E_RX_OVER:
			vport->err_stats.rx_over_err++;
			break;

		case VPORT_E_RX_CRC:
			vport->err_stats.rx_crc_err++;
			break;

		case VPORT_E_TX_DROPPED:
			vport->err_stats.tx_dropped++;
			break;

		case VPORT_E_TX_ERROR:
			vport->err_stats.tx_errors++;
			break;

		case VPORT_E_COLLISION:
			vport->err_stats.collisions++;
			break;
		}

		spin_unlock_bh(&vport->err_stats.lock);
	}
}
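
/* Illustrative sketch (not part of the original file): a send() callback that
 * cannot transmit a packet drops it and reports the drop through the generic
 * stats layer above, returning 0 so vport_send() does not count it as sent.
 * The callback name and the oversize check are hypothetical.
 */
#if 0
static int example_send(struct vport *vport, struct sk_buff *skb)
{
	int len = skb->len;

	if (unlikely(len > vport_get_mtu(vport))) {
		kfree_skb(skb);
		vport_record_error(vport, VPORT_E_TX_DROPPED);
		return 0;
	}

	/* ... hand the skb to the underlying device here ... */
	return len;
}
#endif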

/**
 * vport_gen_ether_addr - generate an Ethernet address
 *
 * @addr: location to store generated address
 *
 * Generates a random Ethernet address for use when creating a device that
 * has no natural address.
 */
void
vport_gen_ether_addr(u8 *addr)
{
	random_ether_addr(addr);

	/* Set the OUI to the Nicira one. */
	addr[0] = 0x00;
	addr[1] = 0x23;
	addr[2] = 0x20;

	/* Set the top bit to indicate random address. */
	addr[3] |= 0x80;
}