/*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2015 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* 02110-1301, USA
*/
-#include <linux/dcache.h>
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
+#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
-#include <linux/version.h>
-
+#include <linux/module.h>
+#include <linux/if_link.h>
+#include <net/net_namespace.h>
+#include <net/lisp.h>
+#include <net/gre.h>
+#include <net/geneve.h>
+#include <net/stt.h>
+#include <net/vxlan.h>
+
+#include "datapath.h"
+#include "gso.h"
#include "vport.h"
#include "vport-internal_dev.h"
-/* List of statically compiled vport implementations. Don't forget to also
- * add yours to the list at the bottom of vport.h. */
-static const struct vport_ops *base_vport_ops_list[] = {
- &ovs_netdev_vport_ops,
- &ovs_internal_vport_ops,
- &ovs_patch_vport_ops,
- &ovs_gre_vport_ops,
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
- &ovs_capwap_vport_ops,
-#endif
-};
-
-static const struct vport_ops **vport_ops_list;
-static int n_vport_types;
-
-/* Protected by RCU read lock for reading, RTNL lock for writing. */
+static LIST_HEAD(vport_ops_list);
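+
+/* Set when the compat GRE / IPv6 tunnel rx hooks were successfully
+ * registered, so that ovs_vport_exit() only tears down what
+ * ovs_vport_init() actually brought up.
+ */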
+static bool compat_gre_loaded = false;
+static bool compat_ip6_tunnel_loaded = false;
+
+/* Protected by RCU read lock for reading, ovs_mutex for writing. */
static struct hlist_head *dev_table;
#define VPORT_HASH_BUCKETS 1024
/**
* ovs_vport_init - initialize vport subsystem
*
- * Called at module load time to initialize the vport subsystem and any
- * compiled in vport types.
+ * Called at module load time to initialize the vport subsystem.
*/
int ovs_vport_init(void)
{
int err;
- int i;
- dev_table = kzalloc(VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
+ dev_table = kcalloc(VPORT_HASH_BUCKETS, sizeof(struct hlist_head),
GFP_KERNEL);
- if (!dev_table) {
- err = -ENOMEM;
- goto error;
- }
+ if (!dev_table)
+ return -ENOMEM;
+
+ err = lisp_init_module();
+ if (err)
+ goto err_lisp;
+ err = gre_init();
+ if (err && err != -EEXIST) {
+ goto err_gre;
+ } else {
+ if (err == -EEXIST) {
+ pr_warn("Cannot take GRE protocol rx entry"\
+ "- The GRE/ERSPAN rx feature not supported\n");
+ /* continue GRE tx */
+ }
- vport_ops_list = kmalloc(ARRAY_SIZE(base_vport_ops_list) *
- sizeof(struct vport_ops *), GFP_KERNEL);
- if (!vport_ops_list) {
- err = -ENOMEM;
- goto error_dev_table;
+ err = ipgre_init();
+ if (err && err != -EEXIST)
+ goto err_ipgre;
+ compat_gre_loaded = true;
}
-
- for (i = 0; i < ARRAY_SIZE(base_vport_ops_list); i++) {
- const struct vport_ops *new_ops = base_vport_ops_list[i];
-
- if (new_ops->init)
- err = new_ops->init();
- else
- err = 0;
-
- if (!err)
- vport_ops_list[n_vport_types++] = new_ops;
- else if (new_ops->flags & VPORT_F_REQUIRED) {
- ovs_vport_exit();
- goto error;
+ err = ip6gre_init();
+ if (err && err != -EEXIST) {
+ goto err_ip6gre;
+ } else {
+ if (err == -EEXIST) {
+ pr_warn("IPv6 GRE/ERSPAN Rx mode is not supported\n");
+ goto skip_ip6_tunnel_init;
}
}
- return 0;
+ err = ip6_tunnel_init();
+ if (err)
+ goto err_ip6_tunnel;
+ else
+ compat_ip6_tunnel_loaded = true;
+
+skip_ip6_tunnel_init:
+ err = geneve_init_module();
+ if (err)
+ goto err_geneve;
+ err = vxlan_init_module();
+ if (err)
+ goto err_vxlan;
+ err = ovs_stt_init_module();
+ if (err)
+ goto err_stt;
-error_dev_table:
+ return 0;
+err_stt:
+ vxlan_cleanup_module();
+err_vxlan:
+ geneve_cleanup_module();
+err_geneve:
+ ip6_tunnel_cleanup();
+err_ip6_tunnel:
+ ip6gre_fini();
+err_ip6gre:
+ ipgre_fini();
+err_ipgre:
+ gre_exit();
+err_gre:
+ lisp_cleanup_module();
+err_lisp:
kfree(dev_table);
-error:
return err;
}
/**
* ovs_vport_exit - shutdown vport subsystem
*
- * Called at module exit time to shutdown the vport subsystem and any
- * initialized vport types.
+ * Called at module exit time to shutdown the vport subsystem.
*/
void ovs_vport_exit(void)
{
- int i;
-
- for (i = 0; i < n_vport_types; i++) {
- if (vport_ops_list[i]->exit)
- vport_ops_list[i]->exit();
+ if (compat_gre_loaded) {
+ gre_exit();
+ ipgre_fini();
}
-
- kfree(vport_ops_list);
+ ovs_stt_cleanup_module();
+ vxlan_cleanup_module();
+ geneve_cleanup_module();
+ if (compat_ip6_tunnel_loaded)
+ ip6_tunnel_cleanup();
+ ip6gre_fini();
+ lisp_cleanup_module();
kfree(dev_table);
}
-static struct hlist_head *hash_bucket(const char *name)
+static struct hlist_head *hash_bucket(const struct net *net, const char *name)
{
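+	/* Fold the owning netns into the hash so identically named
+	 * ports in different namespaces (usually) land in different
+	 * buckets; ovs_vport_locate() still checks net_eq().
+	 */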
- unsigned int hash = full_name_hash(name, strlen(name));
+ unsigned int hash = jhash(name, strlen(name), (unsigned long) net);
return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
}
+int __ovs_vport_ops_register(struct vport_ops *ops)
+{
+ int err = -EEXIST;
+ struct vport_ops *o;
+
+ ovs_lock();
+ list_for_each_entry(o, &vport_ops_list, list)
+ if (ops->type == o->type)
+ goto errout;
+
+ list_add_tail(&ops->list, &vport_ops_list);
+ err = 0;
+errout:
+ ovs_unlock();
+ return err;
+}
+EXPORT_SYMBOL_GPL(__ovs_vport_ops_register);
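+
+/* Typical usage from a vport implementation (a sketch; "foo" and
+ * OVS_VPORT_TYPE_FOO are hypothetical, and the ovs_vport_ops_register()
+ * wrapper in vport.h sets .owner = THIS_MODULE before calling here):
+ *
+ *	static struct vport_ops ovs_foo_vport_ops = {
+ *		.type	= OVS_VPORT_TYPE_FOO,
+ *		.create	= foo_create,
+ *		.send	= foo_xmit,
+ *	};
+ *
+ *	err = ovs_vport_ops_register(&ovs_foo_vport_ops);
+ */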
+
+void ovs_vport_ops_unregister(struct vport_ops *ops)
+{
+ ovs_lock();
+ list_del(&ops->list);
+ ovs_unlock();
+}
+EXPORT_SYMBOL_GPL(ovs_vport_ops_unregister);
+
/**
* ovs_vport_locate - find a port that has already been created
*
* @name: name of port to find
*
- * Must be called with RTNL or RCU read lock.
+ * Must be called with ovs_mutex or RCU read lock.
*/
-struct vport *ovs_vport_locate(const char *name)
+struct vport *ovs_vport_locate(const struct net *net, const char *name)
{
- struct hlist_head *bucket = hash_bucket(name);
+ struct hlist_head *bucket = hash_bucket(net, name);
struct vport *vport;
- struct hlist_node *node;
- hlist_for_each_entry_rcu(vport, node, bucket, hash_node)
- if (!strcmp(name, vport->ops->get_name(vport)))
+ hlist_for_each_entry_rcu(vport, bucket, hash_node)
+ if (!strcmp(name, ovs_vport_name(vport)) &&
+ net_eq(ovs_dp_get_net(vport->dp), net))
return vport;
return NULL;
}
-static void release_vport(struct kobject *kobj)
-{
- struct vport *p = container_of(kobj, struct vport, kobj);
- kfree(p);
-}
-
-static struct kobj_type brport_ktype = {
-#ifdef CONFIG_SYSFS
- .sysfs_ops = &ovs_brport_sysfs_ops,
-#endif
- .release = release_vport
-};
-
/**
* ovs_vport_alloc - allocate and initialize new vport
*
* Allocate and initialize a new vport defined by @ops. The vport will contain
* a private data area of size @priv_size that can be accessed using
* vport_priv(). vports that are no longer needed should be released with
 * ovs_vport_free().
*/
struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
- const struct vport_parms *parms)
+ const struct vport_parms *parms)
{
struct vport *vport;
size_t alloc_size;
vport->dp = parms->dp;
vport->port_no = parms->port_no;
- vport->upcall_pid = parms->upcall_pid;
vport->ops = ops;
+ INIT_HLIST_NODE(&vport->dp_hash_node);
- /* Initialize kobject for bridge. This will be added as
- * /sys/class/net/<devname>/brport later, if sysfs is enabled. */
- vport->kobj.kset = NULL;
- kobject_init(&vport->kobj, &brport_ktype);
-
- vport->percpu_stats = alloc_percpu(struct vport_percpu_stats);
- if (!vport->percpu_stats)
- return ERR_PTR(-ENOMEM);
-
- spin_lock_init(&vport->stats_lock);
+ if (ovs_vport_set_upcall_portids(vport, parms->upcall_portids)) {
+ kfree(vport);
+ return ERR_PTR(-EINVAL);
+ }
return vport;
}
+EXPORT_SYMBOL_GPL(ovs_vport_alloc);
/**
* ovs_vport_free - uninitialize and free vport
*
* @vport: vport to free
*
 * Frees a vport allocated with ovs_vport_alloc() when it is no longer needed.
*
* The caller must ensure that an RCU grace period has passed since the last
* time @vport was in a datapath.
*/
void ovs_vport_free(struct vport *vport)
{
- free_percpu(vport->percpu_stats);
+	/* vport is freed from an RCU callback or from an error path,
+	 * so it is safe to use a raw dereference here.
+	 */
+ kfree(rcu_dereference_raw(vport->upcall_portids));
+ kfree(vport);
+}
+EXPORT_SYMBOL_GPL(ovs_vport_free);
- kobject_put(&vport->kobj);
+static struct vport_ops *ovs_vport_lookup(const struct vport_parms *parms)
+{
+ struct vport_ops *ops;
+
+ list_for_each_entry(ops, &vport_ops_list, list)
+ if (ops->type == parms->type)
+ return ops;
+
+ return NULL;
}
/**
* @parms: Information about new vport.
*
* Creates a new vport with the specified configuration (which is dependent on
- * device type). RTNL lock must be held.
+ * device type). ovs_mutex must be held.
*/
struct vport *ovs_vport_add(const struct vport_parms *parms)
{
+ struct vport_ops *ops;
struct vport *vport;
- int err = 0;
- int i;
- ASSERT_RTNL();
+ ops = ovs_vport_lookup(parms);
+ if (ops) {
+ struct hlist_head *bucket;
- for (i = 0; i < n_vport_types; i++) {
- if (vport_ops_list[i]->type == parms->type) {
- vport = vport_ops_list[i]->create(parms);
- if (IS_ERR(vport)) {
- err = PTR_ERR(vport);
- goto out;
- }
+ if (!try_module_get(ops->owner))
+ return ERR_PTR(-EAFNOSUPPORT);
- hlist_add_head_rcu(&vport->hash_node,
- hash_bucket(vport->ops->get_name(vport)));
+ vport = ops->create(parms);
+ if (IS_ERR(vport)) {
+ module_put(ops->owner);
return vport;
}
+
+ bucket = hash_bucket(ovs_dp_get_net(vport->dp),
+ ovs_vport_name(vport));
+ hlist_add_head_rcu(&vport->hash_node, bucket);
+ return vport;
}
- err = -EAFNOSUPPORT;
+ if (parms->type == OVS_VPORT_TYPE_GRE && !compat_gre_loaded) {
+ pr_warn("GRE protocol already loaded!\n");
+ return ERR_PTR(-EAFNOSUPPORT);
+ }
+ /* Unlock to attempt module load and return -EAGAIN if load
+ * was successful as we need to restart the port addition
+ * workflow.
+ */
+ ovs_unlock();
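+	/* In-tree vport modules advertise a matching
+	 * MODULE_ALIAS("vport-type-<N>") so this request can load them.
+	 */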
+ request_module("vport-type-%d", parms->type);
+ ovs_lock();
-out:
- return ERR_PTR(err);
+ if (!ovs_vport_lookup(parms))
+ return ERR_PTR(-EAFNOSUPPORT);
+ else
+ return ERR_PTR(-EAGAIN);
}
/**
* ovs_vport_set_options - modify existing vport device (for kernel callers)
*
* @vport: vport to modify.
- * @port: New configuration.
+ * @options: New configuration.
*
* Modifies an existing device with the specified configuration (which is
- * dependent on device type). RTNL lock must be held.
+ * dependent on device type). ovs_mutex must be held.
*/
int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
{
- ASSERT_RTNL();
-
if (!vport->ops->set_options)
return -EOPNOTSUPP;
return vport->ops->set_options(vport, options);
*
* @vport: vport to delete.
*
- * Detaches @vport from its datapath and destroys it. It is possible to fail
- * for reasons such as lack of memory. RTNL lock must be held.
+ * Detaches @vport from its datapath and destroys it. ovs_mutex must be
+ * held.
*/
void ovs_vport_del(struct vport *vport)
{
- ASSERT_RTNL();
+ ASSERT_OVSL();
hlist_del_rcu(&vport->hash_node);
-
+ module_put(vport->ops->owner);
vport->ops->destroy(vport);
}
-/**
- * ovs_vport_set_addr - set device Ethernet address (for kernel callers)
- *
- * @vport: vport on which to set Ethernet address.
- * @addr: New address.
- *
- * Sets the Ethernet address of the given device. Some devices may not support
- * setting the Ethernet address, in which case the result will always be
- * -EOPNOTSUPP. RTNL lock must be held.
- */
-int ovs_vport_set_addr(struct vport *vport, const unsigned char *addr)
-{
- ASSERT_RTNL();
-
- if (!is_valid_ether_addr(addr))
- return -EADDRNOTAVAIL;
-
- if (vport->ops->set_addr)
- return vport->ops->set_addr(vport, addr);
- else
- return -EOPNOTSUPP;
-}
-
-/**
- * ovs_vport_set_stats - sets offset device stats
- *
- * @vport: vport on which to set stats
- * @stats: stats to set
- *
- * Provides a set of transmit, receive, and error stats to be added as an
- * offset to the collect data when stats are retreived. Some devices may not
- * support setting the stats, in which case the result will always be
- * -EOPNOTSUPP.
- *
- * Must be called with RTNL lock.
- */
-void ovs_vport_set_stats(struct vport *vport, struct ovs_vport_stats *stats)
-{
- ASSERT_RTNL();
-
- spin_lock_bh(&vport->stats_lock);
- vport->offset_stats = *stats;
- spin_unlock_bh(&vport->stats_lock);
-}
-
/**
* ovs_vport_get_stats - retrieve device stats
*
*
* Retrieves transmit, receive, and error stats for the given device.
*
- * Must be called with RTNL lock or rcu_read_lock.
+ * Must be called with ovs_mutex or rcu_read_lock.
*/
void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
{
- int i;
-
- /* We potentially have 3 sources of stats that need to be
- * combined: those we have collected (split into err_stats and
- * percpu_stats), offset_stats from set_stats(), and device
- * error stats from netdev->get_stats() (for errors that happen
- * downstream and therefore aren't reported through our
- * vport_record_error() function).
- * Stats from first two sources are merged and reported by ovs over
- * OVS_VPORT_ATTR_STATS.
- * netdev-stats can be directly read over netlink-ioctl.
- */
-
- spin_lock_bh(&vport->stats_lock);
-
- *stats = vport->offset_stats;
-
- stats->rx_errors += vport->err_stats.rx_errors;
- stats->tx_errors += vport->err_stats.tx_errors;
- stats->tx_dropped += vport->err_stats.tx_dropped;
- stats->rx_dropped += vport->err_stats.rx_dropped;
-
- spin_unlock_bh(&vport->stats_lock);
-
- for_each_possible_cpu(i) {
- const struct vport_percpu_stats *percpu_stats;
- struct vport_percpu_stats local_stats;
- unsigned int start;
-
- percpu_stats = per_cpu_ptr(vport->percpu_stats, i);
-
- do {
- start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
- local_stats = *percpu_stats;
- } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));
-
- stats->rx_bytes += local_stats.rx_bytes;
- stats->rx_packets += local_stats.rx_packets;
- stats->tx_bytes += local_stats.tx_bytes;
- stats->tx_packets += local_stats.tx_packets;
- }
+ const struct rtnl_link_stats64 *dev_stats;
+ struct rtnl_link_stats64 temp;
+
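+	/* Vports are now backed by real net_devices, so stats are read
+	 * straight from the device instead of OVS-private per-cpu
+	 * counters.
+	 */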
+ dev_stats = dev_get_stats(vport->dev, &temp);
+ stats->rx_errors = dev_stats->rx_errors;
+ stats->tx_errors = dev_stats->tx_errors;
+ stats->tx_dropped = dev_stats->tx_dropped;
+ stats->rx_dropped = dev_stats->rx_dropped;
+
+ stats->rx_bytes = dev_stats->rx_bytes;
+ stats->rx_packets = dev_stats->rx_packets;
+ stats->tx_bytes = dev_stats->tx_bytes;
+ stats->tx_packets = dev_stats->tx_packets;
}
/**
* negative error code if a real error occurred. If an error occurs, @skb is
* left unmodified.
*
- * Must be called with RTNL lock or rcu_read_lock.
+ * Must be called with ovs_mutex or rcu_read_lock.
*/
int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
{
struct nlattr *nla;
+ int err;
+
+ if (!vport->ops->get_options)
+ return 0;
- nla = nla_nest_start(skb, OVS_VPORT_ATTR_OPTIONS);
+ nla = nla_nest_start_noflag(skb, OVS_VPORT_ATTR_OPTIONS);
if (!nla)
return -EMSGSIZE;
- if (vport->ops->get_options) {
- int err = vport->ops->get_options(vport, skb);
- if (err) {
- nla_nest_cancel(skb, nla);
- return err;
- }
+ err = vport->ops->get_options(vport, skb);
+ if (err) {
+ nla_nest_cancel(skb, nla);
+ return err;
}
	nla_nest_end(skb, nla);

	return 0;
}
/**
- * ovs_vport_receive - pass up received packet to the datapath for processing
+ * ovs_vport_set_upcall_portids - set upcall portids of @vport.
*
- * @vport: vport that received the packet
- * @skb: skb that was received
+ * @vport: vport to modify.
+ * @ids: new configuration, an array of port ids.
*
- * Must be called with rcu_read_lock. The packet cannot be shared and
- * skb->data should point to the Ethernet header. The caller must have already
- * called compute_ip_summed() to initialize the checksumming fields.
+ * Sets the vport's upcall_portids to @ids.
+ *
+ * Returns 0 if successful, -EINVAL if @ids is zero length or cannot be parsed
+ * as an array of u32.
+ *
+ * Must be called with ovs_mutex.
*/
-void ovs_vport_receive(struct vport *vport, struct sk_buff *skb)
+int ovs_vport_set_upcall_portids(struct vport *vport, const struct nlattr *ids)
{
- struct vport_percpu_stats *stats;
+ struct vport_portids *old, *vport_portids;
+
+ if (!nla_len(ids) || nla_len(ids) % sizeof(u32))
+ return -EINVAL;
- stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());
+ old = ovsl_dereference(vport->upcall_portids);
- u64_stats_update_begin(&stats->sync);
- stats->rx_packets++;
- stats->rx_bytes += skb->len;
- u64_stats_update_end(&stats->sync);
+ vport_portids = kmalloc(sizeof(*vport_portids) + nla_len(ids),
+ GFP_KERNEL);
+ if (!vport_portids)
+ return -ENOMEM;
- if (!(vport->ops->flags & VPORT_F_FLOW))
- OVS_CB(skb)->flow = NULL;
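+	/* Cache the reciprocal of n_ids so the per-packet lookup in
+	 * ovs_vport_find_upcall_portid() can reduce hash % n_ids to a
+	 * multiplication instead of a division.
+	 */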
+ vport_portids->n_ids = nla_len(ids) / sizeof(u32);
+ vport_portids->rn_ids = reciprocal_value(vport_portids->n_ids);
+ nla_memcpy(vport_portids->ids, ids, nla_len(ids));
- if (!(vport->ops->flags & VPORT_F_TUN_ID))
- OVS_CB(skb)->tun_id = 0;
+ rcu_assign_pointer(vport->upcall_portids, vport_portids);
- ovs_dp_process_received_packet(vport, skb);
+ if (old)
+ kfree_rcu(old, rcu);
+ return 0;
}
/**
- * ovs_vport_send - send a packet on a device
+ * ovs_vport_get_upcall_portids - get the upcall_portids of @vport.
*
- * @vport: vport on which to send the packet
- * @skb: skb to send
+ * @vport: vport from which to retrieve the portids.
+ * @skb: sk_buff where portids should be appended.
*
- * Sends the given packet and returns the length of data sent. Either RTNL
- * lock or rcu_read_lock must be held.
+ * Retrieves the configuration of the given vport, appending the
+ * %OVS_VPORT_ATTR_UPCALL_PID attribute which is the array of upcall
+ * portids to @skb.
+ *
+ * Returns 0 if successful, -EMSGSIZE if @skb has insufficient room.
+ * If an error occurs, @skb is left unmodified. Must be called with
+ * ovs_mutex or rcu_read_lock.
*/
-int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
+int ovs_vport_get_upcall_portids(const struct vport *vport,
+ struct sk_buff *skb)
{
- int sent = vport->ops->send(vport, skb);
+ struct vport_portids *ids;
- if (likely(sent)) {
- struct vport_percpu_stats *stats;
+ ids = rcu_dereference_ovsl(vport->upcall_portids);
- stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());
+ if (vport->dp->user_features & OVS_DP_F_VPORT_PIDS)
+ return nla_put(skb, OVS_VPORT_ATTR_UPCALL_PID,
+ ids->n_ids * sizeof(u32), (void *)ids->ids);
+ else
+ return nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, ids->ids[0]);
+}
- u64_stats_update_begin(&stats->sync);
- stats->tx_packets++;
- stats->tx_bytes += sent;
- u64_stats_update_end(&stats->sync);
- }
- return sent;
+/**
+ * ovs_vport_find_upcall_portid - find the upcall portid to send upcall.
+ *
+ * @vport: vport from which the missed packet is received.
+ * @skb: skb on which the missed packet was received.
+ *
+ * Uses skb_get_hash() to select the upcall portid to send the
+ * upcall.
+ *
+ * Returns the portid of the target socket. Must be called with rcu_read_lock.
+ */
+u32 ovs_vport_find_upcall_portid(const struct vport *vport, struct sk_buff *skb)
+{
+ struct vport_portids *ids;
+ u32 ids_index;
+ u32 hash;
+
+ ids = rcu_dereference(vport->upcall_portids);
+
+ /* If there is only one portid, select it in the fast-path. */
+ if (ids->n_ids == 1)
+ return ids->ids[0];
+
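+	/* ids_index = hash % n_ids, computed as x - n * (x / n) with the
+	 * division done via the precomputed reciprocal.
+	 */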
+ hash = skb_get_hash(skb);
+ ids_index = hash - ids->n_ids * reciprocal_divide(hash, ids->rn_ids);
+ return ids->ids[ids_index];
}
/**
- * ovs_vport_record_error - indicate device error to generic stats layer
+ * ovs_vport_receive - pass up received packet to the datapath for processing
*
- * @vport: vport that encountered the error
- * @err_type: one of enum vport_err_type types to indicate the error type
+ * @vport: vport that received the packet
+ * @skb: skb that was received
+ * @tun_info: tunnel info (if any) that carried the packet
*
- * If using the vport generic stats layer indicate that an error of the given
- * type has occured.
+ * Must be called with rcu_read_lock. The packet cannot be shared and
+ * skb->data should point to the Ethernet header.
*/
-void ovs_vport_record_error(struct vport *vport, enum vport_err_type err_type)
+int ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
+ const struct ip_tunnel_info *tun_info)
{
- spin_lock(&vport->stats_lock);
+ struct sw_flow_key key;
+ int error;
+
+ OVS_CB(skb)->input_vport = vport;
+ OVS_CB(skb)->mru = 0;
+ OVS_CB(skb)->cutlen = 0;
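+	/* A packet crossing a netns boundary is scrubbed of
+	 * namespace-local state; skb->mark is saved across
+	 * skb_scrub_packet() (which would otherwise clear it), and any
+	 * tunnel metadata from the other namespace is dropped.
+	 */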
+ if (unlikely(dev_net(skb->dev) != ovs_dp_get_net(vport->dp))) {
+ u32 mark;
+
+ mark = skb->mark;
+ skb_scrub_packet(skb, true);
+ skb->mark = mark;
+ tun_info = NULL;
+ }
- switch (err_type) {
- case VPORT_E_RX_DROPPED:
- vport->err_stats.rx_dropped++;
- break;
+ ovs_skb_init_inner_protocol(skb);
+ skb_clear_ovs_gso_cb(skb);
+ /* Extract flow from 'skb' into 'key'. */
+ error = ovs_flow_key_extract(tun_info, skb, &key);
+ if (unlikely(error)) {
+ kfree_skb(skb);
+ return error;
+ }
+ ovs_dp_process_packet(skb, &key);
+ return 0;
+}
- case VPORT_E_RX_ERROR:
- vport->err_stats.rx_errors++;
- break;
+static int packet_length(const struct sk_buff *skb,
+ struct net_device *dev)
+{
+ int length = skb->len - dev->hard_header_len;
- case VPORT_E_TX_DROPPED:
- vport->err_stats.tx_dropped++;
- break;
+ if (!skb_vlan_tag_present(skb) &&
+ eth_type_vlan(skb->protocol))
+ length -= VLAN_HLEN;
+
+ /* Don't subtract for multiple VLAN tags. Most (all?) drivers allow
+ * (ETH_LEN + VLAN_HLEN) in addition to the mtu value, but almost none
+ * account for 802.1ad. e.g. is_skb_forwardable().
+ */
+
+	return length > 0 ? length : 0;
+}
- case VPORT_E_TX_ERROR:
- vport->err_stats.tx_errors++;
+void ovs_vport_send(struct vport *vport, struct sk_buff *skb, u8 mac_proto)
+{
+ int mtu = vport->dev->mtu;
+
+ switch (vport->dev->type) {
+ case ARPHRD_NONE:
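+		/* Layer-3 device with no link-layer header: a full
+		 * Ethernet frame handed to it is marked as transparent
+		 * Ethernet bridging (ETH_P_TEB); anything other than
+		 * Ethernet or bare L3 is a bug.
+		 */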
+ if (mac_proto == MAC_PROTO_ETHERNET) {
+ skb_reset_network_header(skb);
+ skb_reset_mac_len(skb);
+ skb->protocol = htons(ETH_P_TEB);
+ } else if (mac_proto != MAC_PROTO_NONE) {
+ WARN_ON_ONCE(1);
+ goto drop;
+ }
break;
- };
+ case ARPHRD_ETHER:
+ if (mac_proto != MAC_PROTO_ETHERNET)
+ goto drop;
+ break;
+ default:
+ goto drop;
+ }
+
+ if (unlikely(packet_length(skb, vport->dev) > mtu &&
+ !skb_is_gso(skb))) {
+ net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
+ vport->dev->name,
+ packet_length(skb, vport->dev), mtu);
+ vport->dev->stats.tx_errors++;
+ goto drop;
+ }
+
+ skb->dev = vport->dev;
+ vport->ops->send(skb);
+ return;
- spin_unlock(&vport->stats_lock);
+drop:
+ kfree_skb(skb);
}