2 * Copyright (c) 2010 Nicira Networks.
3 * Distributed under the terms of the GNU GPL version 2.
5 * Significant portions of this file may be copied from parts of the Linux
6 * kernel, by Linus Torvalds and others.
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11 #include <linux/dcache.h>
12 #include <linux/etherdevice.h>
14 #include <linux/if_vlan.h>
15 #include <linux/kernel.h>
16 #include <linux/list.h>
17 #include <linux/mutex.h>
18 #include <linux/percpu.h>
19 #include <linux/rtnetlink.h>
20 #include <linux/compat.h>
21 #include <linux/version.h>
24 #include "vport-internal_dev.h"
26 /* List of statically compiled vport implementations. Don't forget to also
27 * add yours to the list at the bottom of vport.h. */
28 static const struct vport_ops
*base_vport_ops_list
[] = {
33 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
38 static const struct vport_ops
**vport_ops_list
;
39 static int n_vport_types
;
41 static struct hlist_head
*dev_table
;
42 #define VPORT_HASH_BUCKETS 1024
/* Both RTNL lock and vport_mutex need to be held when updating dev_table.
 *
 * If you use vport_locate and then perform some operations, you need to hold
 * one of these locks if you don't want the vport to be deleted out from under
 * you.
 *
 * If you get a reference to a vport through a datapath, it is protected
 * by RCU and you need to hold rcu_read_lock instead when reading.
 *
 * If multiple locks are taken, the hierarchy is:
 * 1. RTNL
 * 2. vport_mutex
 */
static DEFINE_MUTEX(vport_mutex);
61 * vport_lock - acquire vport lock
63 * Acquire global vport lock. See above comment about locking requirements
64 * and specific function definitions. May sleep.
68 mutex_lock(&vport_mutex
);
72 * vport_unlock - release vport lock
74 * Release lock acquired with vport_lock.
76 void vport_unlock(void)
78 mutex_unlock(&vport_mutex
);
/* Complain loudly (with a stack trace) if the vport lock is not held.
 * Diagnostic only: execution continues either way. */
#define ASSERT_VPORT()						\
do {								\
	if (unlikely(!mutex_is_locked(&vport_mutex))) {		\
		pr_err("vport lock not held at %s (%d)\n",	\
		       __FILE__, __LINE__);			\
		dump_stack();					\
	}							\
} while (0)
91 * vport_init - initialize vport subsystem
93 * Called at module load time to initialize the vport subsystem and any
94 * compiled in vport types.
101 dev_table
= kzalloc(VPORT_HASH_BUCKETS
* sizeof(struct hlist_head
),
108 vport_ops_list
= kmalloc(ARRAY_SIZE(base_vport_ops_list
) *
109 sizeof(struct vport_ops
*), GFP_KERNEL
);
110 if (!vport_ops_list
) {
112 goto error_dev_table
;
115 for (i
= 0; i
< ARRAY_SIZE(base_vport_ops_list
); i
++) {
116 const struct vport_ops
*new_ops
= base_vport_ops_list
[i
];
119 err
= new_ops
->init();
124 vport_ops_list
[n_vport_types
++] = new_ops
;
125 else if (new_ops
->flags
& VPORT_F_REQUIRED
) {
139 static void vport_del_all(void)
146 for (i
= 0; i
< VPORT_HASH_BUCKETS
; i
++) {
147 struct hlist_head
*bucket
= &dev_table
[i
];
149 struct hlist_node
*node
, *next
;
151 hlist_for_each_entry_safe(vport
, node
, next
, bucket
, hash_node
)
160 * vport_exit - shutdown vport subsystem
162 * Called at module exit time to shutdown the vport subsystem and any
163 * initialized vport types.
165 void vport_exit(void)
171 for (i
= 0; i
< n_vport_types
; i
++) {
172 if (vport_ops_list
[i
]->exit
)
173 vport_ops_list
[i
]->exit();
176 kfree(vport_ops_list
);
181 * vport_user_mod - modify existing vport device (for userspace callers)
183 * @uport: New configuration for vport
185 * Modifies an existing device with the specified configuration (which is
186 * dependent on device type). This function is for userspace callers and
187 * assumes no locks are held.
189 int vport_user_mod(const struct odp_port __user
*uport
)
191 struct odp_port port
;
195 if (copy_from_user(&port
, uport
, sizeof(port
)))
198 port
.devname
[IFNAMSIZ
- 1] = '\0';
202 vport
= vport_locate(port
.devname
);
209 err
= vport_mod(vport
, &port
);
218 * vport_user_stats_get - retrieve device stats (for userspace callers)
220 * @ustats_req: Stats request parameters.
222 * Retrieves transmit, receive, and error stats for the given device. This
223 * function is for userspace callers and assumes no locks are held.
225 int vport_user_stats_get(struct odp_vport_stats_req __user
*ustats_req
)
227 struct odp_vport_stats_req stats_req
;
231 if (copy_from_user(&stats_req
, ustats_req
, sizeof(struct odp_vport_stats_req
)))
234 stats_req
.devname
[IFNAMSIZ
- 1] = '\0';
238 vport
= vport_locate(stats_req
.devname
);
244 err
= vport_get_stats(vport
, &stats_req
.stats
);
250 if (copy_to_user(ustats_req
, &stats_req
, sizeof(struct odp_vport_stats_req
)))
257 * vport_user_stats_set - sets offset device stats (for userspace callers)
259 * @ustats_req: Stats set parameters.
261 * Provides a set of transmit, receive, and error stats to be added as an
262 * offset to the collect data when stats are retreived. Some devices may not
263 * support setting the stats, in which case the result will always be
264 * -EOPNOTSUPP. This function is for userspace callers and assumes no locks
267 int vport_user_stats_set(struct odp_vport_stats_req __user
*ustats_req
)
269 struct odp_vport_stats_req stats_req
;
273 if (copy_from_user(&stats_req
, ustats_req
, sizeof(struct odp_vport_stats_req
)))
276 stats_req
.devname
[IFNAMSIZ
- 1] = '\0';
281 vport
= vport_locate(stats_req
.devname
);
287 err
= vport_set_stats(vport
, &stats_req
.stats
);
297 * vport_user_ether_get - retrieve device Ethernet address (for userspace callers)
299 * @uvport_ether: Ethernet address request parameters.
301 * Retrieves the Ethernet address of the given device. This function is for
302 * userspace callers and assumes no locks are held.
304 int vport_user_ether_get(struct odp_vport_ether __user
*uvport_ether
)
306 struct odp_vport_ether vport_ether
;
310 if (copy_from_user(&vport_ether
, uvport_ether
, sizeof(struct odp_vport_ether
)))
313 vport_ether
.devname
[IFNAMSIZ
- 1] = '\0';
317 vport
= vport_locate(vport_ether
.devname
);
324 memcpy(vport_ether
.ether_addr
, vport_get_addr(vport
), ETH_ALEN
);
331 if (copy_to_user(uvport_ether
, &vport_ether
, sizeof(struct odp_vport_ether
)))
338 * vport_user_ether_set - set device Ethernet address (for userspace callers)
340 * @uvport_ether: Ethernet address request parameters.
342 * Sets the Ethernet address of the given device. Some devices may not support
343 * setting the Ethernet address, in which case the result will always be
344 * -EOPNOTSUPP. This function is for userspace callers and assumes no locks
347 int vport_user_ether_set(struct odp_vport_ether __user
*uvport_ether
)
349 struct odp_vport_ether vport_ether
;
353 if (copy_from_user(&vport_ether
, uvport_ether
, sizeof(struct odp_vport_ether
)))
356 vport_ether
.devname
[IFNAMSIZ
- 1] = '\0';
361 vport
= vport_locate(vport_ether
.devname
);
367 err
= vport_set_addr(vport
, vport_ether
.ether_addr
);
376 * vport_user_mtu_get - retrieve device MTU (for userspace callers)
378 * @uvport_mtu: MTU request parameters.
380 * Retrieves the MTU of the given device. This function is for userspace
381 * callers and assumes no locks are held.
383 int vport_user_mtu_get(struct odp_vport_mtu __user
*uvport_mtu
)
385 struct odp_vport_mtu vport_mtu
;
389 if (copy_from_user(&vport_mtu
, uvport_mtu
, sizeof(struct odp_vport_mtu
)))
392 vport_mtu
.devname
[IFNAMSIZ
- 1] = '\0';
396 vport
= vport_locate(vport_mtu
.devname
);
402 vport_mtu
.mtu
= vport_get_mtu(vport
);
408 if (copy_to_user(uvport_mtu
, &vport_mtu
, sizeof(struct odp_vport_mtu
)))
415 * vport_user_mtu_set - set device MTU (for userspace callers)
417 * @uvport_mtu: MTU request parameters.
419 * Sets the MTU of the given device. Some devices may not support setting the
420 * MTU, in which case the result will always be -EOPNOTSUPP. This function is
421 * for userspace callers and assumes no locks are held.
423 int vport_user_mtu_set(struct odp_vport_mtu __user
*uvport_mtu
)
425 struct odp_vport_mtu vport_mtu
;
429 if (copy_from_user(&vport_mtu
, uvport_mtu
, sizeof(struct odp_vport_mtu
)))
432 vport_mtu
.devname
[IFNAMSIZ
- 1] = '\0';
437 vport
= vport_locate(vport_mtu
.devname
);
443 err
= vport_set_mtu(vport
, vport_mtu
.mtu
);
451 static struct hlist_head
*hash_bucket(const char *name
)
453 unsigned int hash
= full_name_hash(name
, strlen(name
));
454 return &dev_table
[hash
& (VPORT_HASH_BUCKETS
- 1)];
458 * vport_locate - find a port that has already been created
460 * @name: name of port to find
462 * Either RTNL or vport lock must be acquired before calling this function
463 * and held while using the found port. See the locking comments at the
466 struct vport
*vport_locate(const char *name
)
468 struct hlist_head
*bucket
= hash_bucket(name
);
470 struct hlist_node
*node
;
472 if (unlikely(!mutex_is_locked(&vport_mutex
) && !rtnl_is_locked())) {
473 pr_err("neither RTNL nor vport lock held in vport_locate\n");
479 hlist_for_each_entry(vport
, node
, bucket
, hash_node
)
480 if (!strcmp(name
, vport_get_name(vport
)))
490 static void register_vport(struct vport
*vport
)
492 hlist_add_head(&vport
->hash_node
, hash_bucket(vport_get_name(vport
)));
495 static void unregister_vport(struct vport
*vport
)
497 hlist_del(&vport
->hash_node
);
500 static void release_vport(struct kobject
*kobj
)
502 struct vport
*p
= container_of(kobj
, struct vport
, kobj
);
506 static struct kobj_type brport_ktype
= {
508 .sysfs_ops
= &brport_sysfs_ops
,
510 .release
= release_vport
514 * vport_alloc - allocate and initialize new vport
516 * @priv_size: Size of private data area to allocate.
517 * @ops: vport device ops
519 * Allocate and initialize a new vport defined by @ops. The vport will contain
520 * a private data area of size @priv_size that can be accessed using
521 * vport_priv(). vports that are no longer needed should be released with
524 struct vport
*vport_alloc(int priv_size
, const struct vport_ops
*ops
, const struct vport_parms
*parms
)
529 alloc_size
= sizeof(struct vport
);
531 alloc_size
= ALIGN(alloc_size
, VPORT_ALIGN
);
532 alloc_size
+= priv_size
;
535 vport
= kzalloc(alloc_size
, GFP_KERNEL
);
537 return ERR_PTR(-ENOMEM
);
539 vport
->dp
= parms
->dp
;
540 vport
->port_no
= parms
->port_no
;
541 atomic_set(&vport
->sflow_pool
, 0);
544 /* Initialize kobject for bridge. This will be added as
545 * /sys/class/net/<devname>/brport later, if sysfs is enabled. */
546 vport
->kobj
.kset
= NULL
;
547 kobject_init(&vport
->kobj
, &brport_ktype
);
549 if (vport
->ops
->flags
& VPORT_F_GEN_STATS
) {
550 vport
->percpu_stats
= alloc_percpu(struct vport_percpu_stats
);
551 if (!vport
->percpu_stats
)
552 return ERR_PTR(-ENOMEM
);
554 spin_lock_init(&vport
->stats_lock
);
561 * vport_free - uninitialize and free vport
563 * @vport: vport to free
565 * Frees a vport allocated with vport_alloc() when it is no longer needed.
567 void vport_free(struct vport
*vport
)
569 if (vport
->ops
->flags
& VPORT_F_GEN_STATS
)
570 free_percpu(vport
->percpu_stats
);
572 kobject_put(&vport
->kobj
);
576 * vport_add - add vport device (for kernel callers)
578 * @parms: Information about new vport.
580 * Creates a new vport with the specified configuration (which is dependent on
581 * device type) and attaches it to a datapath. Both RTNL and vport locks must
584 struct vport
*vport_add(const struct vport_parms
*parms
)
593 for (i
= 0; i
< n_vport_types
; i
++) {
594 if (!strcmp(vport_ops_list
[i
]->type
, parms
->type
)) {
595 vport
= vport_ops_list
[i
]->create(parms
);
597 err
= PTR_ERR(vport
);
601 register_vport(vport
);
613 * vport_mod - modify existing vport device (for kernel callers)
615 * @vport: vport to modify.
616 * @port: New configuration.
618 * Modifies an existing device with the specified configuration (which is
619 * dependent on device type). Both RTNL and vport locks must be held.
621 int vport_mod(struct vport
*vport
, struct odp_port
*port
)
626 if (vport
->ops
->modify
)
627 return vport
->ops
->modify(vport
, port
);
633 * vport_del - delete existing vport device (for kernel callers)
635 * @vport: vport to delete.
637 * Detaches @vport from its datapath and destroys it. It is possible to fail
638 * for reasons such as lack of memory. Both RTNL and vport locks must be held.
640 int vport_del(struct vport
*vport
)
645 unregister_vport(vport
);
647 return vport
->ops
->destroy(vport
);
651 * vport_set_mtu - set device MTU (for kernel callers)
653 * @vport: vport on which to set MTU.
656 * Sets the MTU of the given device. Some devices may not support setting the
657 * MTU, in which case the result will always be -EOPNOTSUPP. RTNL lock must
660 int vport_set_mtu(struct vport
*vport
, int mtu
)
667 if (vport
->ops
->set_mtu
) {
670 ret
= vport
->ops
->set_mtu(vport
, mtu
);
672 if (!ret
&& !is_internal_vport(vport
))
673 set_internal_devs_mtu(vport
->dp
);
681 * vport_set_addr - set device Ethernet address (for kernel callers)
683 * @vport: vport on which to set Ethernet address.
684 * @addr: New address.
686 * Sets the Ethernet address of the given device. Some devices may not support
687 * setting the Ethernet address, in which case the result will always be
688 * -EOPNOTSUPP. RTNL lock must be held.
690 int vport_set_addr(struct vport
*vport
, const unsigned char *addr
)
694 if (!is_valid_ether_addr(addr
))
695 return -EADDRNOTAVAIL
;
697 if (vport
->ops
->set_addr
)
698 return vport
->ops
->set_addr(vport
, addr
);
704 * vport_set_stats - sets offset device stats (for kernel callers)
706 * @vport: vport on which to set stats
707 * @stats: stats to set
709 * Provides a set of transmit, receive, and error stats to be added as an
710 * offset to the collect data when stats are retreived. Some devices may not
711 * support setting the stats, in which case the result will always be
712 * -EOPNOTSUPP. RTNL lock must be held.
714 int vport_set_stats(struct vport
*vport
, struct rtnl_link_stats64
*stats
)
718 if (vport
->ops
->flags
& VPORT_F_GEN_STATS
) {
719 spin_lock_bh(&vport
->stats_lock
);
720 vport
->offset_stats
= *stats
;
721 spin_unlock_bh(&vport
->stats_lock
);
724 } else if (vport
->ops
->set_stats
)
725 return vport
->ops
->set_stats(vport
, stats
);
731 * vport_get_name - retrieve device name
733 * @vport: vport from which to retrieve the name.
735 * Retrieves the name of the given device. Either RTNL lock or rcu_read_lock
736 * must be held for the entire duration that the name is in use.
738 const char *vport_get_name(const struct vport
*vport
)
740 return vport
->ops
->get_name(vport
);
744 * vport_get_type - retrieve device type
746 * @vport: vport from which to retrieve the type.
748 * Retrieves the type of the given device. Either RTNL lock or rcu_read_lock
749 * must be held for the entire duration that the type is in use.
751 const char *vport_get_type(const struct vport
*vport
)
753 return vport
->ops
->type
;
757 * vport_get_addr - retrieve device Ethernet address (for kernel callers)
759 * @vport: vport from which to retrieve the Ethernet address.
761 * Retrieves the Ethernet address of the given device. Either RTNL lock or
762 * rcu_read_lock must be held for the entire duration that the Ethernet address
765 const unsigned char *vport_get_addr(const struct vport
*vport
)
767 return vport
->ops
->get_addr(vport
);
771 * vport_get_kobj - retrieve associated kobj
773 * @vport: vport from which to retrieve the associated kobj
775 * Retrieves the associated kobj or null if no kobj. The returned kobj is
776 * valid for as long as the vport exists.
778 struct kobject
*vport_get_kobj(const struct vport
*vport
)
780 if (vport
->ops
->get_kobj
)
781 return vport
->ops
->get_kobj(vport
);
787 * vport_get_stats - retrieve device stats (for kernel callers)
789 * @vport: vport from which to retrieve the stats
790 * @stats: location to store stats
792 * Retrieves transmit, receive, and error stats for the given device.
794 int vport_get_stats(struct vport
*vport
, struct rtnl_link_stats64
*stats
)
796 struct rtnl_link_stats64 dev_stats
;
797 struct rtnl_link_stats64
*dev_statsp
= NULL
;
800 if (vport
->ops
->get_stats
) {
801 if (vport
->ops
->flags
& VPORT_F_GEN_STATS
)
802 dev_statsp
= &dev_stats
;
807 err
= vport
->ops
->get_stats(vport
, dev_statsp
);
814 if (vport
->ops
->flags
& VPORT_F_GEN_STATS
) {
817 /* We potentially have 3 sources of stats that need to be
818 * combined: those we have collected (split into err_stats and
819 * percpu_stats), offset_stats from set_stats(), and device
820 * error stats from get_stats() (for errors that happen
821 * downstream and therefore aren't reported through our
822 * vport_record_error() function). */
824 spin_lock_bh(&vport
->stats_lock
);
826 *stats
= vport
->offset_stats
;
828 stats
->rx_errors
+= vport
->err_stats
.rx_errors
;
829 stats
->tx_errors
+= vport
->err_stats
.tx_errors
;
830 stats
->tx_dropped
+= vport
->err_stats
.tx_dropped
;
831 stats
->rx_dropped
+= vport
->err_stats
.rx_dropped
;
833 spin_unlock_bh(&vport
->stats_lock
);
836 stats
->rx_packets
+= dev_statsp
->rx_packets
;
837 stats
->tx_packets
+= dev_statsp
->tx_packets
;
838 stats
->rx_bytes
+= dev_statsp
->rx_bytes
;
839 stats
->tx_bytes
+= dev_statsp
->tx_bytes
;
840 stats
->rx_errors
+= dev_statsp
->rx_errors
;
841 stats
->tx_errors
+= dev_statsp
->tx_errors
;
842 stats
->rx_dropped
+= dev_statsp
->rx_dropped
;
843 stats
->tx_dropped
+= dev_statsp
->tx_dropped
;
844 stats
->multicast
+= dev_statsp
->multicast
;
845 stats
->collisions
+= dev_statsp
->collisions
;
846 stats
->rx_length_errors
+= dev_statsp
->rx_length_errors
;
847 stats
->rx_over_errors
+= dev_statsp
->rx_over_errors
;
848 stats
->rx_crc_errors
+= dev_statsp
->rx_crc_errors
;
849 stats
->rx_frame_errors
+= dev_statsp
->rx_frame_errors
;
850 stats
->rx_fifo_errors
+= dev_statsp
->rx_fifo_errors
;
851 stats
->rx_missed_errors
+= dev_statsp
->rx_missed_errors
;
852 stats
->tx_aborted_errors
+= dev_statsp
->tx_aborted_errors
;
853 stats
->tx_carrier_errors
+= dev_statsp
->tx_carrier_errors
;
854 stats
->tx_fifo_errors
+= dev_statsp
->tx_fifo_errors
;
855 stats
->tx_heartbeat_errors
+= dev_statsp
->tx_heartbeat_errors
;
856 stats
->tx_window_errors
+= dev_statsp
->tx_window_errors
;
857 stats
->rx_compressed
+= dev_statsp
->rx_compressed
;
858 stats
->tx_compressed
+= dev_statsp
->tx_compressed
;
861 for_each_possible_cpu(i
) {
862 const struct vport_percpu_stats
*percpu_stats
;
863 struct vport_percpu_stats local_stats
;
866 percpu_stats
= per_cpu_ptr(vport
->percpu_stats
, i
);
869 seqcount
= read_seqcount_begin(&percpu_stats
->seqlock
);
870 local_stats
= *percpu_stats
;
871 } while (read_seqcount_retry(&percpu_stats
->seqlock
, seqcount
));
873 stats
->rx_bytes
+= local_stats
.rx_bytes
;
874 stats
->rx_packets
+= local_stats
.rx_packets
;
875 stats
->tx_bytes
+= local_stats
.tx_bytes
;
876 stats
->tx_packets
+= local_stats
.tx_packets
;
888 * vport_get_flags - retrieve device flags
890 * @vport: vport from which to retrieve the flags
892 * Retrieves the flags of the given device. Either RTNL lock or rcu_read_lock
895 unsigned vport_get_flags(const struct vport
*vport
)
897 return vport
->ops
->get_dev_flags(vport
);
901 * vport_get_flags - check whether device is running
903 * @vport: vport on which to check status.
905 * Checks whether the given device is running. Either RTNL lock or
906 * rcu_read_lock must be held.
908 int vport_is_running(const struct vport
*vport
)
910 return vport
->ops
->is_running(vport
);
914 * vport_get_flags - retrieve device operating state
916 * @vport: vport from which to check status
918 * Retrieves the RFC2863 operstate of the given device. Either RTNL lock or
919 * rcu_read_lock must be held.
921 unsigned char vport_get_operstate(const struct vport
*vport
)
923 return vport
->ops
->get_operstate(vport
);
927 * vport_get_ifindex - retrieve device system interface index
929 * @vport: vport from which to retrieve index
931 * Retrieves the system interface index of the given device. Not all devices
932 * will have system indexes, in which case the index of the datapath local
933 * port is returned. Returns a negative index on error. Either RTNL lock or
934 * rcu_read_lock must be held.
936 int vport_get_ifindex(const struct vport
*vport
)
938 if (vport
->ops
->get_ifindex
)
939 return vport
->ops
->get_ifindex(vport
);
941 /* If we don't actually have an ifindex, use the local port's.
942 * Userspace doesn't check it anyways. */
943 return vport_get_ifindex(vport
->dp
->ports
[ODPP_LOCAL
]);
947 * vport_get_iflink - retrieve device system link index
949 * @vport: vport from which to retrieve index
951 * Retrieves the system link index of the given device. The link is the index
952 * of the interface on which the packet will actually be sent. In most cases
953 * this is the same as the ifindex but may be different for tunnel devices.
954 * Returns a negative index on error. Either RTNL lock or rcu_read_lock must
957 int vport_get_iflink(const struct vport
*vport
)
959 if (vport
->ops
->get_iflink
)
960 return vport
->ops
->get_iflink(vport
);
962 /* If we don't have an iflink, use the ifindex. In most cases they
964 return vport_get_ifindex(vport
);
968 * vport_get_mtu - retrieve device MTU (for kernel callers)
970 * @vport: vport from which to retrieve MTU
972 * Retrieves the MTU of the given device. Either RTNL lock or rcu_read_lock
975 int vport_get_mtu(const struct vport
*vport
)
977 return vport
->ops
->get_mtu(vport
);
981 * vport_get_config - retrieve device configuration
983 * @vport: vport from which to retrieve the configuration.
984 * @config: buffer to store config, which must be at least the length
985 * of VPORT_CONFIG_SIZE.
987 * Retrieves the configuration of the given device. Either RTNL lock or
988 * rcu_read_lock must be held.
990 void vport_get_config(const struct vport
*vport
, void *config
)
992 if (vport
->ops
->get_config
)
993 vport
->ops
->get_config(vport
, config
);
997 * vport_receive - pass up received packet to the datapath for processing
999 * @vport: vport that received the packet
1000 * @skb: skb that was received
1002 * Must be called with rcu_read_lock. The packet cannot be shared and
1003 * skb->data should point to the Ethernet header. The caller must have already
1004 * called compute_ip_summed() to initialize the checksumming fields.
1006 void vport_receive(struct vport
*vport
, struct sk_buff
*skb
)
1008 if (vport
->ops
->flags
& VPORT_F_GEN_STATS
) {
1009 struct vport_percpu_stats
*stats
;
1012 stats
= per_cpu_ptr(vport
->percpu_stats
, smp_processor_id());
1014 write_seqcount_begin(&stats
->seqlock
);
1015 stats
->rx_packets
++;
1016 stats
->rx_bytes
+= skb
->len
;
1017 write_seqcount_end(&stats
->seqlock
);
1022 if (!(vport
->ops
->flags
& VPORT_F_FLOW
))
1023 OVS_CB(skb
)->flow
= NULL
;
1025 if (!(vport
->ops
->flags
& VPORT_F_TUN_ID
))
1026 OVS_CB(skb
)->tun_id
= 0;
1028 dp_process_received_packet(vport
, skb
);
1031 static inline unsigned packet_length(const struct sk_buff
*skb
)
1033 unsigned length
= skb
->len
- ETH_HLEN
;
1035 if (skb
->protocol
== htons(ETH_P_8021Q
))
1036 length
-= VLAN_HLEN
;
1042 * vport_send - send a packet on a device
1044 * @vport: vport on which to send the packet
1047 * Sends the given packet and returns the length of data sent. Either RTNL
1048 * lock or rcu_read_lock must be held.
1050 int vport_send(struct vport
*vport
, struct sk_buff
*skb
)
1055 mtu
= vport_get_mtu(vport
);
1056 if (unlikely(packet_length(skb
) > mtu
&& !skb_is_gso(skb
))) {
1057 if (net_ratelimit())
1058 pr_warn("%s: dropped over-mtu packet: %d > %d\n",
1059 dp_name(vport
->dp
), packet_length(skb
), mtu
);
1063 sent
= vport
->ops
->send(vport
, skb
);
1065 if (vport
->ops
->flags
& VPORT_F_GEN_STATS
&& sent
> 0) {
1066 struct vport_percpu_stats
*stats
;
1069 stats
= per_cpu_ptr(vport
->percpu_stats
, smp_processor_id());
1071 write_seqcount_begin(&stats
->seqlock
);
1072 stats
->tx_packets
++;
1073 stats
->tx_bytes
+= sent
;
1074 write_seqcount_end(&stats
->seqlock
);
1083 vport_record_error(vport
, VPORT_E_TX_DROPPED
);
1088 * vport_record_error - indicate device error to generic stats layer
1090 * @vport: vport that encountered the error
1091 * @err_type: one of enum vport_err_type types to indicate the error type
1093 * If using the vport generic stats layer indicate that an error of the given
1096 void vport_record_error(struct vport
*vport
, enum vport_err_type err_type
)
1098 if (vport
->ops
->flags
& VPORT_F_GEN_STATS
) {
1100 spin_lock_bh(&vport
->stats_lock
);
1103 case VPORT_E_RX_DROPPED
:
1104 vport
->err_stats
.rx_dropped
++;
1107 case VPORT_E_RX_ERROR
:
1108 vport
->err_stats
.rx_errors
++;
1111 case VPORT_E_TX_DROPPED
:
1112 vport
->err_stats
.tx_dropped
++;
1115 case VPORT_E_TX_ERROR
:
1116 vport
->err_stats
.tx_errors
++;
1120 spin_unlock_bh(&vport
->stats_lock
);