#include "valgrind.h"
#include "openvswitch/ofp-errors.h"
#include "openvswitch/vlog.h"
+#include "lib/netdev-provider.h"
VLOG_DEFINE_THIS_MODULE(dpif);
int refcount;
};
static struct shash dpif_classes = SHASH_INITIALIZER(&dpif_classes);
-static struct sset dpif_blacklist = SSET_INITIALIZER(&dpif_blacklist);
+static struct sset dpif_disallowed = SSET_INITIALIZER(&dpif_disallowed);
-/* Protects 'dpif_classes', including the refcount, and 'dpif_blacklist'. */
+/* Protects 'dpif_classes', including the refcount, and 'dpif_disallowed'. */
static struct ovs_mutex dpif_mutex = OVS_MUTEX_INITIALIZER;
/* Rate limit for individual messages going to or from the datapath, output at
struct seq *tnl_conf_seq;
static bool
-dpif_is_internal_port(const char *type)
+dpif_is_tap_port(const char *type)
{
- /* For userspace datapath, tap devices are the equivalent
- * of internal devices in the kernel datapath, so both
- * these types are 'internal' devices. */
- return !strcmp(type, "internal") || !strcmp(type, "tap");
+ return !strcmp(type, "tap");
}
static void
struct registered_dpif_class *registered_class;
int error;
- if (sset_contains(&dpif_blacklist, new_class->type)) {
- VLOG_DBG("attempted to register blacklisted provider: %s",
+ if (sset_contains(&dpif_disallowed, new_class->type)) {
+ VLOG_DBG("attempted to register disallowed provider: %s",
new_class->type);
return EINVAL;
}
return error;
}
-/* Blacklists a provider. Causes future calls of dp_register_provider() with
+/* Disallows a provider. Causes future calls of dp_register_provider() with
* a dpif_class which implements 'type' to fail. */
void
-dp_blacklist_provider(const char *type)
+dp_disallow_provider(const char *type)
{
ovs_mutex_lock(&dpif_mutex);
- sset_add(&dpif_blacklist, type);
+ sset_add(&dpif_disallowed, type);
ovs_mutex_unlock(&dpif_mutex);
}
error = registered_class->dpif_class->open(registered_class->dpif_class,
name, create, &dpif);
if (!error) {
+ const char *dpif_type_str = dpif_normalize_type(dpif_type(dpif));
struct dpif_port_dump port_dump;
struct dpif_port dpif_port;
struct netdev *netdev;
int err;
- if (dpif_is_internal_port(dpif_port.type)) {
+ if (dpif_is_tap_port(dpif_port.type)) {
continue;
}
err = netdev_open(dpif_port.name, dpif_port.type, &netdev);
if (!err) {
- netdev_ports_insert(netdev, dpif->dpif_class, &dpif_port);
+ netdev_ports_insert(netdev, dpif_type_str, &dpif_port);
netdev_close(netdev);
} else {
VLOG_WARN("could not open netdev %s type %s: %s",
static void
dpif_remove_netdev_ports(struct dpif *dpif) {
- struct dpif_port_dump port_dump;
- struct dpif_port dpif_port;
+ const char *dpif_type_str = dpif_normalize_type(dpif_type(dpif));
+ struct dpif_port_dump port_dump;
+ struct dpif_port dpif_port;
- DPIF_PORT_FOR_EACH (&dpif_port, &port_dump, dpif) {
- if (!dpif_is_internal_port(dpif_port.type)) {
- netdev_ports_remove(dpif_port.port_no, dpif->dpif_class);
- }
+ DPIF_PORT_FOR_EACH (&dpif_port, &port_dump, dpif) {
+ if (!dpif_is_tap_port(dpif_port.type)) {
+ netdev_ports_remove(dpif_port.port_no, dpif_type_str);
}
+ }
}
/* Closes and frees the connection to 'dpif'. Does not destroy the datapath
return dpif->dpif_class->type;
}
+/* Checks if datapath 'dpif' requires cleanup. */
+bool
+dpif_cleanup_required(const struct dpif *dpif)
+{
+ return dpif->dpif_class->cleanup_required;
+}
+
/* Returns the fully spelled out name for the given datapath 'type'.
*
* Normalized type string can be compared with strcmp(). Unnormalized type
return error;
}
+int
+dpif_set_features(struct dpif *dpif, uint32_t new_features)
+{
+ int error = dpif->dpif_class->set_features(dpif, new_features);
+
+ log_operation(dpif, "set_features", error);
+ return error;
+}
+
const char *
dpif_port_open_type(const char *datapath_type, const char *port_type)
{
VLOG_DBG_RL(&dpmsg_rl, "%s: added %s as port %"PRIu32,
dpif_name(dpif), netdev_name, port_no);
- if (!dpif_is_internal_port(netdev_get_type(netdev))) {
+ if (!dpif_is_tap_port(netdev_get_type(netdev))) {
+ const char *dpif_type_str = dpif_normalize_type(dpif_type(dpif));
struct dpif_port dpif_port;
dpif_port.type = CONST_CAST(char *, netdev_get_type(netdev));
dpif_port.name = CONST_CAST(char *, netdev_name);
dpif_port.port_no = port_no;
- netdev_ports_insert(netdev, dpif->dpif_class, &dpif_port);
+ netdev_ports_insert(netdev, dpif_type_str, &dpif_port);
}
} else {
- if (error != EEXIST) {
- VLOG_WARN_RL(&error_rl, "%s: failed to add %s as port: %s",
- dpif_name(dpif), netdev_name, ovs_strerror(error));
- } else {
- /* It's fairly common for upper layers to try to add a duplicate
- * port, and they know how to handle it properly. */
- }
+ VLOG_WARN_RL(&error_rl, "%s: failed to add %s as port: %s",
+ dpif_name(dpif), netdev_name, ovs_strerror(error));
port_no = ODPP_NONE;
}
if (port_nop) {
}
}
- netdev_ports_remove(port_no, dpif->dpif_class);
+ netdev_ports_remove(port_no, dpif_normalize_type(dpif_type(dpif)));
return error;
}
/* Returns the Netlink PID value to supply in OVS_ACTION_ATTR_USERSPACE
* actions as the OVS_USERSPACE_ATTR_PID attribute's value, for use in
- * flows whose packets arrived on port 'port_no'. In the case where the
- * provider allocates multiple Netlink PIDs to a single port, it may use
- * 'hash' to spread load among them. The caller need not use a particular
- * hash function; a 5-tuple hash is suitable.
- *
- * (The datapath implementation might use some different hash function for
- * distributing packets received via flow misses among PIDs. This means
- * that packets received via flow misses might be reordered relative to
- * packets received via userspace actions. This is not ordinarily a
- * problem.)
+ * flows whose packets arrived on port 'port_no'.
*
* A 'port_no' of ODPP_NONE is a special case: it returns a reserved PID, not
* allocated to any port, that the client may use for special purposes.
* update all of the flows that it installed that contain
* OVS_ACTION_ATTR_USERSPACE actions. */
uint32_t
-dpif_port_get_pid(const struct dpif *dpif, odp_port_t port_no, uint32_t hash)
+dpif_port_get_pid(const struct dpif *dpif, odp_port_t port_no)
{
return (dpif->dpif_class->port_get_pid
- ? (dpif->dpif_class->port_get_pid)(dpif, port_no, hash)
+ ? (dpif->dpif_class->port_get_pid)(dpif, port_no)
: 0);
}
}
}
-/* Places the hash of the 'key_len' bytes starting at 'key' into '*hash'. */
-void
-dpif_flow_hash(const struct dpif *dpif OVS_UNUSED,
- const void *key, size_t key_len, ovs_u128 *hash)
-{
- static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
- static uint32_t secret;
-
- if (ovsthread_once_start(&once)) {
- secret = random_uint32();
- ovsthread_once_done(&once);
- }
- hash_bytes128(key, key_len, secret, hash);
- uuid_set_bits_v4((struct uuid *)hash);
-}
-
/* Deletes all flows from 'dpif'. Returns 0 if successful, otherwise a
* positive errno value. */
int
op.flow_get.flow->key_len = key_len;
opp = &op;
- dpif_operate(dpif, &opp, 1);
+ dpif_operate(dpif, &opp, 1, DPIF_OFFLOAD_AUTO);
return op.error;
}
op.flow_put.stats = stats;
opp = &op;
- dpif_operate(dpif, &opp, 1);
+ dpif_operate(dpif, &opp, 1, DPIF_OFFLOAD_AUTO);
return op.error;
}
op.flow_del.terse = false;
opp = &op;
- dpif_operate(dpif, &opp, 1);
+ dpif_operate(dpif, &opp, 1, DPIF_OFFLOAD_AUTO);
return op.error;
}
int type = nl_attr_type(action);
struct dp_packet *packet = packets_->packets[0];
- ovs_assert(packets_->count == 1);
+ ovs_assert(dp_packet_batch_size(packets_) == 1);
switch ((enum ovs_action_attr)type) {
case OVS_ACTION_ATTR_METER:
case OVS_ACTION_ATTR_CT:
case OVS_ACTION_ATTR_OUTPUT:
+ case OVS_ACTION_ATTR_LB_OUTPUT:
case OVS_ACTION_ATTR_TUNNEL_PUSH:
case OVS_ACTION_ATTR_TUNNEL_POP:
case OVS_ACTION_ATTR_USERSPACE:
struct dp_packet *clone = NULL;
uint32_t cutlen = dp_packet_get_cutlen(packet);
if (cutlen && (type == OVS_ACTION_ATTR_OUTPUT
+ || type == OVS_ACTION_ATTR_LB_OUTPUT
|| type == OVS_ACTION_ATTR_TUNNEL_PUSH
|| type == OVS_ACTION_ATTR_TUNNEL_POP
|| type == OVS_ACTION_ATTR_USERSPACE)) {
case OVS_ACTION_ATTR_POP_NSH:
case OVS_ACTION_ATTR_CT_CLEAR:
case OVS_ACTION_ATTR_UNSPEC:
+ case OVS_ACTION_ATTR_CHECK_PKT_LEN:
+ case OVS_ACTION_ATTR_DROP:
case __OVS_ACTION_ATTR_MAX:
OVS_NOT_REACHED();
}
op.execute = *execute;
opp = &op;
- dpif_operate(dpif, &opp, 1);
+ dpif_operate(dpif, &opp, 1, DPIF_OFFLOAD_AUTO);
return op.error;
} else {
/* Executes each of the 'n_ops' operations in 'ops' on 'dpif', in the order in
* which they are specified. Places each operation's results in the "output"
* members documented in comments, and 0 in the 'error' member on success or a
- * positive errno on failure. */
+ * positive errno on failure.
+ */
void
-dpif_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops)
-{
+dpif_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops,
+ enum dpif_offload_type offload_type)
+{
+ if (offload_type == DPIF_OFFLOAD_ALWAYS && !netdev_is_flow_api_enabled()) {
+ size_t i;
+ for (i = 0; i < n_ops; i++) {
+ struct dpif_op *op = ops[i];
+ op->error = EINVAL;
+ }
+ return;
+ }
+
while (n_ops > 0) {
size_t chunk;
* handle itself, without help. */
size_t i;
- dpif->dpif_class->operate(dpif, ops, chunk);
+ dpif->dpif_class->operate(dpif, ops, chunk, offload_type);
for (i = 0; i < chunk; i++) {
struct dpif_op *op = ops[i];
return dpif_is_netdev(dpif);
}
+bool
+dpif_supports_explicit_drop_action(const struct dpif *dpif)
+{
+ return dpif_is_netdev(dpif);
+}
+
+bool
+dpif_supports_lb_output_action(const struct dpif *dpif)
+{
+ /*
+ * Balance-tcp optimization is currently supported in netdev
+ * datapath only.
+ */
+ return dpif_is_netdev(dpif);
+}
+
/* Meters */
void
dpif_meter_get_features(const struct dpif *dpif,
}
return error;
}
+
+int
+dpif_bond_add(struct dpif *dpif, uint32_t bond_id, odp_port_t *member_map)
+{
+    return dpif->dpif_class->bond_add
+ ? dpif->dpif_class->bond_add(dpif, bond_id, member_map)
+ : EOPNOTSUPP;
+}
+
+int
+dpif_bond_del(struct dpif *dpif, uint32_t bond_id)
+{
+ return dpif->dpif_class->bond_del
+ ? dpif->dpif_class->bond_del(dpif, bond_id)
+ : EOPNOTSUPP;
+}
+
+int
+dpif_bond_stats_get(struct dpif *dpif, uint32_t bond_id,
+ uint64_t *n_bytes)
+{
+ memset(n_bytes, 0, BOND_BUCKETS * sizeof *n_bytes);
+
+ return dpif->dpif_class->bond_stats_get
+ ? dpif->dpif_class->bond_stats_get(dpif, bond_id, n_bytes)
+ : EOPNOTSUPP;
+}
+
+int
+dpif_get_n_offloaded_flows(struct dpif *dpif, uint64_t *n_flows)
+{
+ const char *dpif_type_str = dpif_normalize_type(dpif_type(dpif));
+ struct dpif_port_dump port_dump;
+ struct dpif_port dpif_port;
+ int ret, n_devs = 0;
+ uint64_t nflows;
+
+ *n_flows = 0;
+ DPIF_PORT_FOR_EACH (&dpif_port, &port_dump, dpif) {
+ ret = netdev_ports_get_n_flows(dpif_type_str, dpif_port.port_no,
+ &nflows);
+ if (!ret) {
+ *n_flows += nflows;
+ } else if (ret == EOPNOTSUPP) {
+ continue;
+ }
+ n_devs++;
+ }
+ return n_devs ? 0 : EOPNOTSUPP;
+}