int refcount;
};
static struct shash dpif_classes = SHASH_INITIALIZER(&dpif_classes);
-static struct sset dpif_blacklist = SSET_INITIALIZER(&dpif_blacklist);
+static struct sset dpif_disallowed = SSET_INITIALIZER(&dpif_disallowed);
-/* Protects 'dpif_classes', including the refcount, and 'dpif_blacklist'. */
+/* Protects 'dpif_classes', including the refcount, and 'dpif_disallowed'. */
static struct ovs_mutex dpif_mutex = OVS_MUTEX_INITIALIZER;
/* Rate limit for individual messages going to or from the datapath, output at
struct seq *tnl_conf_seq;
static bool
-dpif_is_internal_port(const char *type)
+dpif_is_tap_port(const char *type)
{
-    /* For userspace datapath, tap devices are the equivalent
-     * of internal devices in the kernel datapath, so both
-     * these types are 'internal' devices. */
-    return !strcmp(type, "internal") || !strcmp(type, "tap");
+    /* Only 'tap' ports are special-cased now; 'internal' ports are handled
+     * like any other netdev by the callers of this helper. */
+    return !strcmp(type, "tap");
}
static void
struct registered_dpif_class *registered_class;
int error;
- if (sset_contains(&dpif_blacklist, new_class->type)) {
- VLOG_DBG("attempted to register blacklisted provider: %s",
+ if (sset_contains(&dpif_disallowed, new_class->type)) {
+ VLOG_DBG("attempted to register disallowed provider: %s",
new_class->type);
return EINVAL;
}
return error;
}
-/* Blacklists a provider.  Causes future calls of dp_register_provider() with
+/* Disallows a provider.  Causes future calls of dp_register_provider() with
 * a dpif_class which implements 'type' to fail. */
void
-dp_blacklist_provider(const char *type)
+dp_disallow_provider(const char *type)
{
    ovs_mutex_lock(&dpif_mutex);
-    sset_add(&dpif_blacklist, type);
+    /* 'dpif_disallowed' is protected by 'dpif_mutex'. */
+    sset_add(&dpif_disallowed, type);
    ovs_mutex_unlock(&dpif_mutex);
}
error = registered_class->dpif_class->open(registered_class->dpif_class,
name, create, &dpif);
if (!error) {
+ const char *dpif_type_str = dpif_normalize_type(dpif_type(dpif));
struct dpif_port_dump port_dump;
struct dpif_port dpif_port;
struct netdev *netdev;
int err;
- if (dpif_is_internal_port(dpif_port.type)) {
+ if (dpif_is_tap_port(dpif_port.type)) {
continue;
}
err = netdev_open(dpif_port.name, dpif_port.type, &netdev);
if (!err) {
- netdev_ports_insert(netdev, dpif->dpif_class, &dpif_port);
+ netdev_ports_insert(netdev, dpif_type_str, &dpif_port);
netdev_close(netdev);
} else {
VLOG_WARN("could not open netdev %s type %s: %s",
+/* Drops every non-tap port of 'dpif' from the global netdev-ports mapping,
+ * keyed by the normalized datapath type string (not the class pointer). */
static void
dpif_remove_netdev_ports(struct dpif *dpif) {
-    struct dpif_port_dump port_dump;
-    struct dpif_port dpif_port;
+    const char *dpif_type_str = dpif_normalize_type(dpif_type(dpif));
+    struct dpif_port_dump port_dump;
+    struct dpif_port dpif_port;
-    DPIF_PORT_FOR_EACH (&dpif_port, &port_dump, dpif) {
-        if (!dpif_is_internal_port(dpif_port.type)) {
-            netdev_ports_remove(dpif_port.port_no, dpif->dpif_class);
-        }
+    DPIF_PORT_FOR_EACH (&dpif_port, &port_dump, dpif) {
+        if (!dpif_is_tap_port(dpif_port.type)) {
+            netdev_ports_remove(dpif_port.port_no, dpif_type_str);
        }
+    }
}
/* Closes and frees the connection to 'dpif'. Does not destroy the datapath
return dpif->dpif_class->type;
}
+/* Checks if datapath 'dpif' requires cleanup.  Simply reports the provider
+ * class's 'cleanup_required' flag; involves no datapath operation. */
+bool
+dpif_cleanup_required(const struct dpif *dpif)
+{
+    return dpif->dpif_class->cleanup_required;
+}
+
/* Returns the fully spelled out name for the given datapath 'type'.
*
* Normalized type string can be compared with strcmp(). Unnormalized type
return error;
}
+/* Attempts to set the datapath features of 'dpif' to 'new_features'.
+ * Returns 0 if successful, otherwise a positive errno value.
+ *
+ * NOTE(review): calls the provider's 'set_features' hook unconditionally,
+ * unlike the bond wrappers which probe for NULL first -- assumes callers
+ * only invoke this on providers that implement it; confirm. */
+int
+dpif_set_features(struct dpif *dpif, uint32_t new_features)
+{
+    int error = dpif->dpif_class->set_features(dpif, new_features);
+
+    log_operation(dpif, "set_features", error);
+    return error;
+}
+
const char *
dpif_port_open_type(const char *datapath_type, const char *port_type)
{
VLOG_DBG_RL(&dpmsg_rl, "%s: added %s as port %"PRIu32,
dpif_name(dpif), netdev_name, port_no);
- if (!dpif_is_internal_port(netdev_get_type(netdev))) {
+ if (!dpif_is_tap_port(netdev_get_type(netdev))) {
+ const char *dpif_type_str = dpif_normalize_type(dpif_type(dpif));
struct dpif_port dpif_port;
dpif_port.type = CONST_CAST(char *, netdev_get_type(netdev));
dpif_port.name = CONST_CAST(char *, netdev_name);
dpif_port.port_no = port_no;
- netdev_ports_insert(netdev, dpif->dpif_class, &dpif_port);
+ netdev_ports_insert(netdev, dpif_type_str, &dpif_port);
}
} else {
VLOG_WARN_RL(&error_rl, "%s: failed to add %s as port: %s",
}
}
- netdev_ports_remove(port_no, dpif->dpif_class);
+ netdev_ports_remove(port_no, dpif_normalize_type(dpif_type(dpif)));
return error;
}
}
}
-/* Places the hash of the 'key_len' bytes starting at 'key' into '*hash'. */
-void
-dpif_flow_hash(const struct dpif *dpif OVS_UNUSED,
- const void *key, size_t key_len, ovs_u128 *hash)
-{
- static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
- static uint32_t secret;
-
- if (ovsthread_once_start(&once)) {
- secret = random_uint32();
- ovsthread_once_done(&once);
- }
- hash_bytes128(key, key_len, secret, hash);
- uuid_set_bits_v4((struct uuid *)hash);
-}
-
/* Deletes all flows from 'dpif'. Returns 0 if successful, otherwise a
* positive errno value. */
int
case OVS_ACTION_ATTR_CT:
case OVS_ACTION_ATTR_OUTPUT:
+ case OVS_ACTION_ATTR_LB_OUTPUT:
case OVS_ACTION_ATTR_TUNNEL_PUSH:
case OVS_ACTION_ATTR_TUNNEL_POP:
case OVS_ACTION_ATTR_USERSPACE:
struct dp_packet *clone = NULL;
uint32_t cutlen = dp_packet_get_cutlen(packet);
if (cutlen && (type == OVS_ACTION_ATTR_OUTPUT
+ || type == OVS_ACTION_ATTR_LB_OUTPUT
|| type == OVS_ACTION_ATTR_TUNNEL_PUSH
|| type == OVS_ACTION_ATTR_TUNNEL_POP
|| type == OVS_ACTION_ATTR_USERSPACE)) {
case OVS_ACTION_ATTR_POP_NSH:
case OVS_ACTION_ATTR_CT_CLEAR:
case OVS_ACTION_ATTR_UNSPEC:
+ case OVS_ACTION_ATTR_CHECK_PKT_LEN:
+ case OVS_ACTION_ATTR_DROP:
case __OVS_ACTION_ATTR_MAX:
OVS_NOT_REACHED();
}
return dpif_is_netdev(dpif);
}
+/* Returns true if 'dpif' supports the explicit drop action.  Currently only
+ * the userspace (netdev) datapath implements it. */
+bool
+dpif_supports_explicit_drop_action(const struct dpif *dpif)
+{
+    return dpif_is_netdev(dpif);
+}
+
+/* Returns true if 'dpif' supports the load-balanced bond output action. */
+bool
+dpif_supports_lb_output_action(const struct dpif *dpif)
+{
+    /*
+     * Balance-tcp optimization is currently supported in netdev
+     * datapath only.
+     */
+    return dpif_is_netdev(dpif);
+}
+
/* Meters */
void
dpif_meter_get_features(const struct dpif *dpif,
}
return error;
}
+
+int
+dpif_bond_add(struct dpif *dpif, uint32_t bond_id, odp_port_t *member_map)
+{
+ return dpif->dpif_class->bond_del
+ ? dpif->dpif_class->bond_add(dpif, bond_id, member_map)
+ : EOPNOTSUPP;
+}
+
+/* Removes bond 'bond_id' from 'dpif'.  Returns 0 on success, otherwise a
+ * positive errno value; EOPNOTSUPP if the provider does not implement bond
+ * offload. */
+int
+dpif_bond_del(struct dpif *dpif, uint32_t bond_id)
+{
+    return dpif->dpif_class->bond_del
+           ? dpif->dpif_class->bond_del(dpif, bond_id)
+           : EOPNOTSUPP;
+}
+
+/* Retrieves per-bucket byte counters for bond 'bond_id' into 'n_bytes'.
+ * 'n_bytes' must point to an array of BOND_BUCKETS uint64_t elements; it is
+ * zeroed here even when the provider lacks support, so callers always see
+ * defined values.  Returns 0 on success, otherwise a positive errno value;
+ * EOPNOTSUPP if the provider does not implement bond stats. */
+int
+dpif_bond_stats_get(struct dpif *dpif, uint32_t bond_id,
+                    uint64_t *n_bytes)
+{
+    memset(n_bytes, 0, BOND_BUCKETS * sizeof *n_bytes);
+
+    return dpif->dpif_class->bond_stats_get
+           ? dpif->dpif_class->bond_stats_get(dpif, bond_id, n_bytes)
+           : EOPNOTSUPP;
+}
+
+/* Sums the number of hardware-offloaded flows across all ports of 'dpif'
+ * into '*n_flows'.  Returns 0 if at least one port device was counted,
+ * otherwise EOPNOTSUPP (e.g. no port supports offload queries). */
+int
+dpif_get_n_offloaded_flows(struct dpif *dpif, uint64_t *n_flows)
+{
+    const char *dpif_type_str = dpif_normalize_type(dpif_type(dpif));
+    struct dpif_port_dump port_dump;
+    struct dpif_port dpif_port;
+    int ret, n_devs = 0;
+    uint64_t nflows;
+
+    *n_flows = 0;
+    DPIF_PORT_FOR_EACH (&dpif_port, &port_dump, dpif) {
+        ret = netdev_ports_get_n_flows(dpif_type_str, dpif_port.port_no,
+                                       &nflows);
+        if (!ret) {
+            *n_flows += nflows;
+        } else if (ret == EOPNOTSUPP) {
+            /* Port cannot report offloaded flows; not counted as a device. */
+            continue;
+        }
+        /* NOTE(review): errors other than EOPNOTSUPP still bump 'n_devs'
+         * (device exists but the query failed) -- confirm intended. */
+        n_devs++;
+    }
+    return n_devs ? 0 : EOPNOTSUPP;
+}