/*
- * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
+ * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include <string.h>
#include "coverage.h"
-#include "dynamic-string.h"
+#include "dpctl.h"
+#include "dp-packet.h"
+#include "dpif-netdev.h"
+#include "openvswitch/dynamic-string.h"
#include "flow.h"
#include "netdev.h"
#include "netlink.h"
#include "odp-execute.h"
#include "odp-util.h"
-#include "ofp-errors.h"
-#include "ofp-print.h"
-#include "ofp-util.h"
-#include "ofpbuf.h"
-#include "packet-dpif.h"
+#include "openvswitch/ofp-print.h"
+#include "openvswitch/ofpbuf.h"
#include "packets.h"
-#include "poll-loop.h"
-#include "shash.h"
+#include "openvswitch/poll-loop.h"
+#include "route-table.h"
+#include "seq.h"
+#include "openvswitch/shash.h"
#include "sset.h"
#include "timeval.h"
+#include "tnl-neigh-cache.h"
+#include "tnl-ports.h"
#include "util.h"
+#include "uuid.h"
#include "valgrind.h"
-#include "vlog.h"
+#include "openvswitch/ofp-errors.h"
+#include "openvswitch/vlog.h"
VLOG_DEFINE_THIS_MODULE(dpif);
COVERAGE_DEFINE(dpif_execute);
COVERAGE_DEFINE(dpif_purge);
COVERAGE_DEFINE(dpif_execute_with_help);
+COVERAGE_DEFINE(dpif_meter_set);
+COVERAGE_DEFINE(dpif_meter_get);
+COVERAGE_DEFINE(dpif_meter_del);
static const struct dpif_class *base_dpif_classes[] = {
-#ifdef __linux__
- &dpif_linux_class,
+#if defined(__linux__) || defined(_WIN32)
+ &dpif_netlink_class,
#endif
&dpif_netdev_class,
};
/* Not really much point in logging many dpif errors. */
static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(60, 5);
-static void log_flow_message(const struct dpif *dpif, int error,
- const char *operation,
- const struct nlattr *key, size_t key_len,
- const struct nlattr *mask, size_t mask_len,
- const struct dpif_flow_stats *stats,
- const struct nlattr *actions, size_t actions_len);
static void log_operation(const struct dpif *, const char *operation,
int error);
-static bool should_log_flow_message(int error);
-static void log_flow_put_message(struct dpif *, const struct dpif_flow_put *,
- int error);
-static void log_flow_del_message(struct dpif *, const struct dpif_flow_del *,
- int error);
-static void log_execute_message(struct dpif *, const struct dpif_execute *,
- int error);
+static bool should_log_flow_message(const struct vlog_module *module,
+ int error);
+
+/* Incremented whenever tnl route, arp, etc changes. */
+struct seq *tnl_conf_seq;
+
+static bool
+dpif_is_internal_port(const char *type)
+{
+ /* For userspace datapath, tap devices are the equivalent
+ * of internal devices in the kernel datapath, so both
+ * these types are 'internal' devices. */
+ return !strcmp(type, "internal") || !strcmp(type, "tap");
+}
static void
dp_initialize(void)
if (ovsthread_once_start(&once)) {
int i;
+ tnl_conf_seq = seq_create();
+ dpctl_unixctl_register();
+ tnl_port_map_init();
+ tnl_neigh_cache_init();
+ route_table_init();
+
for (i = 0; i < ARRAY_SIZE(base_dpif_classes); i++) {
dp_register_provider(base_dpif_classes[i]);
}
+
ovsthread_once_done(&once);
}
}
dp_register_provider__(const struct dpif_class *new_class)
{
struct registered_dpif_class *registered_class;
+ int error;
if (sset_contains(&dpif_blacklist, new_class->type)) {
VLOG_DBG("attempted to register blacklisted provider: %s",
return EEXIST;
}
+ error = new_class->init ? new_class->init() : 0;
+ if (error) {
+ VLOG_WARN("failed to initialize %s datapath class: %s",
+ new_class->type, ovs_strerror(error));
+ return error;
+ }
+
registered_class = xmalloc(sizeof *registered_class);
registered_class->dpif_class = new_class;
registered_class->refcount = 0;
node = shash_find(&dpif_classes, type);
if (!node) {
- VLOG_WARN("attempted to unregister a datapath provider that is not "
- "registered: %s", type);
return EAFNOSUPPORT;
}
ovs_mutex_unlock(&dpif_mutex);
}
-/* Clears 'types' and enumerates the types of all currently registered datapath
- * providers into it. The caller must first initialize the sset. */
+/* Adds the types of all currently registered datapath providers to 'types'.
+ * The caller must first initialize the sset. */
void
dp_enumerate_types(struct sset *types)
{
struct shash_node *node;
dp_initialize();
- sset_clear(types);
ovs_mutex_lock(&dpif_mutex);
SHASH_FOR_EACH(node, &dpif_classes) {
error = registered_class->dpif_class->open(registered_class->dpif_class,
name, create, &dpif);
if (!error) {
+ struct dpif_port_dump port_dump;
+ struct dpif_port dpif_port;
+
ovs_assert(dpif->dpif_class == registered_class->dpif_class);
+
+ DPIF_PORT_FOR_EACH(&dpif_port, &port_dump, dpif) {
+ struct netdev *netdev;
+ int err;
+
+ if (dpif_is_internal_port(dpif_port.type)) {
+ continue;
+ }
+
+ err = netdev_open(dpif_port.name, dpif_port.type, &netdev);
+
+ if (!err) {
+ netdev_ports_insert(netdev, dpif->dpif_class, &dpif_port);
+ netdev_close(netdev);
+ } else {
+ VLOG_WARN("could not open netdev %s type %s: %s",
+ dpif_port.name, dpif_port.type, ovs_strerror(err));
+ }
+ }
} else {
dp_class_unref(registered_class);
}
return error;
}
+static void
+dpif_remove_netdev_ports(struct dpif *dpif) {
+ struct dpif_port_dump port_dump;
+ struct dpif_port dpif_port;
+
+ DPIF_PORT_FOR_EACH (&dpif_port, &port_dump, dpif) {
+ if (!dpif_is_internal_port(dpif_port.type)) {
+ netdev_ports_remove(dpif_port.port_no, dpif->dpif_class);
+ }
+ }
+}
+
/* Closes and frees the connection to 'dpif'. Does not destroy the datapath
* itself; call dpif_delete() first, instead, if that is desirable. */
void
struct registered_dpif_class *rc;
rc = shash_find_data(&dpif_classes, dpif->dpif_class->type);
+
+ if (rc->refcount == 1) {
+ dpif_remove_netdev_ports(dpif);
+ }
dpif_uninit(dpif, true);
dp_class_unref(rc);
}
}
/* Performs periodic work needed by 'dpif'. */
-void
+bool
dpif_run(struct dpif *dpif)
{
if (dpif->dpif_class->run) {
- dpif->dpif_class->run(dpif);
+ return dpif->dpif_class->run(dpif);
}
+ return false;
}
/* Arranges for poll_block() to wake up when dp_run() needs to be called for
if (!error) {
VLOG_DBG_RL(&dpmsg_rl, "%s: added %s as port %"PRIu32,
dpif_name(dpif), netdev_name, port_no);
+
+ if (!dpif_is_internal_port(netdev_get_type(netdev))) {
+
+ struct dpif_port dpif_port;
+
+ dpif_port.type = CONST_CAST(char *, netdev_get_type(netdev));
+ dpif_port.name = CONST_CAST(char *, netdev_name);
+ dpif_port.port_no = port_no;
+ netdev_ports_insert(netdev, dpif->dpif_class, &dpif_port);
+ }
} else {
VLOG_WARN_RL(&error_rl, "%s: failed to add %s as port: %s",
dpif_name(dpif), netdev_name, ovs_strerror(error));
/* Attempts to remove 'dpif''s port number 'port_no'. Returns 0 if successful,
* otherwise a positive errno value. */
int
-dpif_port_del(struct dpif *dpif, odp_port_t port_no)
+dpif_port_del(struct dpif *dpif, odp_port_t port_no, bool local_delete)
{
- int error;
+ int error = 0;
COVERAGE_INC(dpif_port_del);
- error = dpif->dpif_class->port_del(dpif, port_no);
- if (!error) {
- VLOG_DBG_RL(&dpmsg_rl, "%s: port_del(%"PRIu32")",
- dpif_name(dpif), port_no);
- } else {
- log_operation(dpif, "port_del", error);
+ if (!local_delete) {
+ error = dpif->dpif_class->port_del(dpif, port_no);
+ if (!error) {
+ VLOG_DBG_RL(&dpmsg_rl, "%s: port_del(%"PRIu32")",
+ dpif_name(dpif), port_no);
+ } else {
+ log_operation(dpif, "port_del", error);
+ }
}
+
+ netdev_ports_remove(port_no, dpif->dpif_class);
return error;
}
dpif_port_exists(const struct dpif *dpif, const char *devname)
{
int error = dpif->dpif_class->port_query_by_name(dpif, devname, NULL);
- if (error != 0 && error != ENOENT && error != ENODEV) {
+ if (error != 0 && error != ENODEV) {
VLOG_WARN_RL(&error_rl, "%s: failed to query port %s: %s",
dpif_name(dpif), devname, ovs_strerror(error));
}
return !error;
}
+/* Refreshes configuration of 'dpif's port. */
+int
+dpif_port_set_config(struct dpif *dpif, odp_port_t port_no,
+ const struct smap *cfg)
+{
+ int error = 0;
+
+ if (dpif->dpif_class->port_set_config) {
+ error = dpif->dpif_class->port_set_config(dpif, port_no, cfg);
+ if (error) {
+ log_operation(dpif, "port_set_config", error);
+ }
+ }
+
+ return error;
+}
+
/* Looks up port number 'port_no' in 'dpif'. On success, returns 0 and
* initializes '*port' appropriately; on failure, returns a positive errno
* value.
*
+ * Returns ENODEV if the port doesn't exist.
+ *
* The caller owns the data in 'port' and must free it with
* dpif_port_destroy() when it is no longer needed. */
int
* initializes '*port' appropriately; on failure, returns a positive errno
* value.
*
+ * Returns ENODEV if the port doesn't exist.
+ *
* The caller owns the data in 'port' and must free it with
* dpif_port_destroy() when it is no longer needed. */
int
} else {
memset(port, 0, sizeof *port);
- /* For ENOENT or ENODEV we use DBG level because the caller is probably
+ /* For ENODEV we use DBG level because the caller is probably
* interested in whether 'dpif' actually has a port 'devname', so that
* it's not an issue worth logging if it doesn't. Other errors are
* uncommon and more likely to indicate a real problem. */
- VLOG_RL(&error_rl,
- error == ENOENT || error == ENODEV ? VLL_DBG : VLL_WARN,
+ VLOG_RL(&error_rl, error == ENODEV ? VLL_DBG : VLL_WARN,
"%s: failed to query port %s: %s",
dpif_name(dpif), devname, ovs_strerror(error));
}
* arguments must have been initialized through a call to flow_extract().
* 'used' is stored into stats->used. */
void
-dpif_flow_stats_extract(const struct flow *flow, const struct ofpbuf *packet,
+dpif_flow_stats_extract(const struct flow *flow, const struct dp_packet *packet,
long long int used, struct dpif_flow_stats *stats)
{
stats->tcp_flags = ntohs(flow->tcp_flags);
- stats->n_bytes = ofpbuf_size(packet);
+ stats->n_bytes = dp_packet_size(packet);
stats->n_packets = 1;
stats->used = used;
}
}
}
+/* Places the hash of the 'key_len' bytes starting at 'key' into '*hash'. */
+void
+dpif_flow_hash(const struct dpif *dpif OVS_UNUSED,
+ const void *key, size_t key_len, ovs_u128 *hash)
+{
+ static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
+ static uint32_t secret;
+
+ if (ovsthread_once_start(&once)) {
+ secret = random_uint32();
+ ovsthread_once_done(&once);
+ }
+ hash_bytes128(key, key_len, secret, hash);
+ uuid_set_bits_v4((struct uuid *)hash);
+}
+
/* Deletes all flows from 'dpif'. Returns 0 if successful, otherwise a
* positive errno value. */
int
return error;
}
-/* Queries 'dpif' for a flow entry. The flow is specified by the Netlink
- * attributes with types OVS_KEY_ATTR_* in the 'key_len' bytes starting at
- * 'key'.
- *
- * Returns 0 if successful. If no flow matches, returns ENOENT. On other
- * failure, returns a positive errno value.
- *
- * If 'actionsp' is nonnull, then on success '*actionsp' will be set to an
- * ofpbuf owned by the caller that contains the Netlink attributes for the
- * flow's actions. The caller must free the ofpbuf (with ofpbuf_delete()) when
- * it is no longer needed.
- *
- * If 'stats' is nonnull, then on success it will be updated with the flow's
- * statistics. */
-int
-dpif_flow_get(const struct dpif *dpif,
- const struct nlattr *key, size_t key_len,
- struct ofpbuf **actionsp, struct dpif_flow_stats *stats)
-{
+/* Attempts to install 'key' into the datapath, fetches it, then deletes it.
+ * Returns true if the datapath supported installing 'flow', false otherwise.
+ */
+bool
+dpif_probe_feature(struct dpif *dpif, const char *name,
+ const struct ofpbuf *key, const struct ofpbuf *actions,
+ const ovs_u128 *ufid)
+{
+ struct dpif_flow flow;
+ struct ofpbuf reply;
+ uint64_t stub[DPIF_FLOW_BUFSIZE / 8];
+ bool enable_feature = false;
int error;
-
- COVERAGE_INC(dpif_flow_get);
-
- error = dpif->dpif_class->flow_get(dpif, key, key_len, actionsp, stats);
+ const struct nlattr *nl_actions = actions ? actions->data : NULL;
+ const size_t nl_actions_size = actions ? actions->size : 0;
+
+ /* Use DPIF_FP_MODIFY to cover the case where ovs-vswitchd is killed (and
+ * restarted) at just the right time such that feature probes from the
+ * previous run are still present in the datapath. */
+ error = dpif_flow_put(dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY | DPIF_FP_PROBE,
+ key->data, key->size, NULL, 0,
+ nl_actions, nl_actions_size,
+ ufid, NON_PMD_CORE_ID, NULL);
if (error) {
- if (actionsp) {
- *actionsp = NULL;
- }
- if (stats) {
- memset(stats, 0, sizeof *stats);
+ if (error != EINVAL && error != EOVERFLOW) {
+ VLOG_WARN("%s: %s flow probe failed (%s)",
+ dpif_name(dpif), name, ovs_strerror(error));
}
+ return false;
}
- if (should_log_flow_message(error)) {
- const struct nlattr *actions;
- size_t actions_len;
- if (!error && actionsp) {
- actions = ofpbuf_data(*actionsp);
- actions_len = ofpbuf_size(*actionsp);
- } else {
- actions = NULL;
- actions_len = 0;
- }
- log_flow_message(dpif, error, "flow_get", key, key_len,
- NULL, 0, stats, actions, actions_len);
+ ofpbuf_use_stack(&reply, &stub, sizeof stub);
+ error = dpif_flow_get(dpif, key->data, key->size, ufid,
+ NON_PMD_CORE_ID, &reply, &flow);
+ if (!error
+ && (!ufid || (flow.ufid_present
+ && ovs_u128_equals(*ufid, flow.ufid)))) {
+ enable_feature = true;
}
- return error;
+
+ error = dpif_flow_del(dpif, key->data, key->size, ufid,
+ NON_PMD_CORE_ID, NULL);
+ if (error) {
+ VLOG_WARN("%s: failed to delete %s feature probe flow",
+ dpif_name(dpif), name);
+ }
+
+ return enable_feature;
}
-static int
-dpif_flow_put__(struct dpif *dpif, const struct dpif_flow_put *put)
+/* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_GET. */
+int
+dpif_flow_get(struct dpif *dpif,
+ const struct nlattr *key, size_t key_len, const ovs_u128 *ufid,
+ const unsigned pmd_id, struct ofpbuf *buf, struct dpif_flow *flow)
{
- int error;
+ struct dpif_op *opp;
+ struct dpif_op op;
- COVERAGE_INC(dpif_flow_put);
- ovs_assert(!(put->flags & ~(DPIF_FP_CREATE | DPIF_FP_MODIFY
- | DPIF_FP_ZERO_STATS)));
+ op.type = DPIF_OP_FLOW_GET;
+ op.flow_get.key = key;
+ op.flow_get.key_len = key_len;
+ op.flow_get.ufid = ufid;
+ op.flow_get.pmd_id = pmd_id;
+ op.flow_get.buffer = buf;
- error = dpif->dpif_class->flow_put(dpif, put);
- if (error && put->stats) {
- memset(put->stats, 0, sizeof *put->stats);
- }
- log_flow_put_message(dpif, put, error);
- return error;
+ memset(flow, 0, sizeof *flow);
+ op.flow_get.flow = flow;
+ op.flow_get.flow->key = key;
+ op.flow_get.flow->key_len = key_len;
+
+ opp = &op;
+ dpif_operate(dpif, &opp, 1);
+
+ return op.error;
}
-/* Adds or modifies a flow in 'dpif'. The flow is specified by the Netlink
- * attribute OVS_FLOW_ATTR_KEY with types OVS_KEY_ATTR_* in the 'key_len' bytes
- * starting at 'key', and OVS_FLOW_ATTR_MASK with types of OVS_KEY_ATTR_* in
- * the 'mask_len' bytes starting at 'mask'. The associated actions are
- * specified by the Netlink attributes with types OVS_ACTION_ATTR_* in the
- * 'actions_len' bytes starting at 'actions'.
- *
- * - If the flow's key does not exist in 'dpif', then the flow will be added if
- * 'flags' includes DPIF_FP_CREATE. Otherwise the operation will fail with
- * ENOENT.
- *
- * The datapath may reject attempts to insert overlapping flows with EINVAL
- * or EEXIST, but clients should not rely on this: avoiding overlapping flows
- * is primarily the client's responsibility.
- *
- * If the operation succeeds, then 'stats', if nonnull, will be zeroed.
- *
- * - If the flow's key does exist in 'dpif', then the flow's actions will be
- * updated if 'flags' includes DPIF_FP_MODIFY. Otherwise the operation will
- * fail with EEXIST. If the flow's actions are updated, then its statistics
- * will be zeroed if 'flags' includes DPIF_FP_ZERO_STATS, and left as-is
- * otherwise.
- *
- * If the operation succeeds, then 'stats', if nonnull, will be set to the
- * flow's statistics before the update.
- */
+/* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_PUT. */
int
dpif_flow_put(struct dpif *dpif, enum dpif_flow_put_flags flags,
const struct nlattr *key, size_t key_len,
const struct nlattr *mask, size_t mask_len,
const struct nlattr *actions, size_t actions_len,
+ const ovs_u128 *ufid, const unsigned pmd_id,
struct dpif_flow_stats *stats)
{
- struct dpif_flow_put put;
-
- put.flags = flags;
- put.key = key;
- put.key_len = key_len;
- put.mask = mask;
- put.mask_len = mask_len;
- put.actions = actions;
- put.actions_len = actions_len;
- put.stats = stats;
- return dpif_flow_put__(dpif, &put);
-}
+ struct dpif_op *opp;
+ struct dpif_op op;
-static int
-dpif_flow_del__(struct dpif *dpif, struct dpif_flow_del *del)
-{
- int error;
+ op.type = DPIF_OP_FLOW_PUT;
+ op.flow_put.flags = flags;
+ op.flow_put.key = key;
+ op.flow_put.key_len = key_len;
+ op.flow_put.mask = mask;
+ op.flow_put.mask_len = mask_len;
+ op.flow_put.actions = actions;
+ op.flow_put.actions_len = actions_len;
+ op.flow_put.ufid = ufid;
+ op.flow_put.pmd_id = pmd_id;
+ op.flow_put.stats = stats;
- COVERAGE_INC(dpif_flow_del);
+ opp = &op;
+ dpif_operate(dpif, &opp, 1);
- error = dpif->dpif_class->flow_del(dpif, del);
- if (error && del->stats) {
- memset(del->stats, 0, sizeof *del->stats);
- }
- log_flow_del_message(dpif, del, error);
- return error;
+ return op.error;
}
-/* Deletes a flow from 'dpif' and returns 0, or returns ENOENT if 'dpif' does
- * not contain such a flow. The flow is specified by the Netlink attributes
- * with types OVS_KEY_ATTR_* in the 'key_len' bytes starting at 'key'.
- *
- * If the operation succeeds, then 'stats', if nonnull, will be set to the
- * flow's statistics before its deletion. */
+/* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_DEL. */
int
dpif_flow_del(struct dpif *dpif,
- const struct nlattr *key, size_t key_len,
- struct dpif_flow_stats *stats)
+ const struct nlattr *key, size_t key_len, const ovs_u128 *ufid,
+ const unsigned pmd_id, struct dpif_flow_stats *stats)
{
- struct dpif_flow_del del;
+ struct dpif_op *opp;
+ struct dpif_op op;
+
+ op.type = DPIF_OP_FLOW_DEL;
+ op.flow_del.key = key;
+ op.flow_del.key_len = key_len;
+ op.flow_del.ufid = ufid;
+ op.flow_del.pmd_id = pmd_id;
+ op.flow_del.stats = stats;
+ op.flow_del.terse = false;
- del.key = key;
- del.key_len = key_len;
- del.stats = stats;
- return dpif_flow_del__(dpif, &del);
+ opp = &op;
+ dpif_operate(dpif, &opp, 1);
+
+ return op.error;
}
/* Creates and returns a new 'struct dpif_flow_dump' for iterating through the
- * flows in 'dpif'.
+ * flows in 'dpif'. If 'terse' is true, then only UFID and statistics will
+ * be returned in the dump. Otherwise, all fields will be returned.
*
* This function always successfully returns a dpif_flow_dump. Error
* reporting is deferred to dpif_flow_dump_destroy(). */
struct dpif_flow_dump *
-dpif_flow_dump_create(const struct dpif *dpif)
+dpif_flow_dump_create(const struct dpif *dpif, bool terse, char *type)
{
- return dpif->dpif_class->flow_dump_create(dpif);
+ return dpif->dpif_class->flow_dump_create(dpif, terse, type);
}
/* Destroys 'dump', which must have been created with dpif_flow_dump_create().
*
* All of the data stored into 'flows' is owned by the datapath, not by the
* caller, and the caller must not modify or free it. The datapath guarantees
- * that it remains accessible and unchanged until at least the next call to
- * dpif_flow_dump_next() for 'thread'. */
+ * that it remains accessible and unchanged until the first of:
+ * - The next call to dpif_flow_dump_next() for 'thread', or
+ * - The next rcu quiescent period. */
int
dpif_flow_dump_next(struct dpif_flow_dump_thread *thread,
struct dpif_flow *flows, int max_flows)
if (n > 0) {
struct dpif_flow *f;
- for (f = flows; f < &flows[n] && should_log_flow_message(0); f++) {
- log_flow_message(dpif, 0, "flow_dump",
+ for (f = flows; f < &flows[n]
+ && should_log_flow_message(&this_module, 0); f++) {
+ log_flow_message(dpif, 0, &this_module, "flow_dump",
f->key, f->key_len, f->mask, f->mask_len,
- &f->stats, f->actions, f->actions_len);
+ &f->ufid, &f->stats, f->actions, f->actions_len);
}
} else {
VLOG_DBG_RL(&dpmsg_rl, "%s: dumped all flows", dpif_name(dpif));
struct dpif_execute_helper_aux {
struct dpif *dpif;
+ const struct flow *flow;
int error;
+ const struct nlattr *meter_action; /* Non-NULL, if have a meter action. */
};
/* This is called for actions that need the context of the datapath to be
* meaningful. */
static void
-dpif_execute_helper_cb(void *aux_, struct dpif_packet **packets, int cnt,
- struct pkt_metadata *md,
- const struct nlattr *action, bool may_steal OVS_UNUSED)
+dpif_execute_helper_cb(void *aux_, struct dp_packet_batch *packets_,
+ const struct nlattr *action, bool should_steal)
{
struct dpif_execute_helper_aux *aux = aux_;
int type = nl_attr_type(action);
- struct ofpbuf * packet = &packets[0]->ofpbuf;
+ struct dp_packet *packet = packets_->packets[0];
- ovs_assert(cnt == 1);
+ ovs_assert(packets_->count == 1);
switch ((enum ovs_action_attr)type) {
+ case OVS_ACTION_ATTR_METER:
+ /* Maintain a pointer to the first meter action seen. */
+ if (!aux->meter_action) {
+ aux->meter_action = action;
+ }
+ break;
+
+ case OVS_ACTION_ATTR_CT:
case OVS_ACTION_ATTR_OUTPUT:
+ case OVS_ACTION_ATTR_TUNNEL_PUSH:
+ case OVS_ACTION_ATTR_TUNNEL_POP:
case OVS_ACTION_ATTR_USERSPACE:
case OVS_ACTION_ATTR_RECIRC: {
struct dpif_execute execute;
struct ofpbuf execute_actions;
uint64_t stub[256 / 8];
+ struct pkt_metadata *md = &packet->md;
+
+ if (flow_tnl_dst_is_set(&md->tunnel) || aux->meter_action) {
+ ofpbuf_use_stub(&execute_actions, stub, sizeof stub);
+
+ if (aux->meter_action) {
+ const struct nlattr *a = aux->meter_action;
+
+ /* XXX: This code collects meter actions since the last action
+ * execution via the datapath to be executed right before the
+ * current action that needs to be executed by the datapath.
+ * This is only an approximation, but better than nothing.
+ * Fundamentally, we should have a mechanism by which the
+ * datapath could return the result of the meter action so that
+ * we could execute them in the right order. */
+ do {
+ ofpbuf_put(&execute_actions, a, NLA_ALIGN(a->nla_len));
+ /* Find next meter action before 'action', if any. */
+ do {
+ a = nl_attr_next(a);
+ } while (a != action &&
+ nl_attr_type(a) != OVS_ACTION_ATTR_METER);
+ } while (a != action);
+ }
- if (md->tunnel.ip_dst) {
/* The Linux kernel datapath throws away the tunnel information
* that we supply as metadata. We have to use a "set" action to
* supply it. */
- ofpbuf_use_stub(&execute_actions, stub, sizeof stub);
- odp_put_tunnel_action(&md->tunnel, &execute_actions);
+ if (md->tunnel.ip_dst) {
+ odp_put_tunnel_action(&md->tunnel, &execute_actions, NULL);
+ }
ofpbuf_put(&execute_actions, action, NLA_ALIGN(action->nla_len));
- execute.actions = ofpbuf_data(&execute_actions);
- execute.actions_len = ofpbuf_size(&execute_actions);
+ execute.actions = execute_actions.data;
+ execute.actions_len = execute_actions.size;
} else {
execute.actions = action;
execute.actions_len = NLA_ALIGN(action->nla_len);
}
+ struct dp_packet *clone = NULL;
+ uint32_t cutlen = dp_packet_get_cutlen(packet);
+ if (cutlen && (type == OVS_ACTION_ATTR_OUTPUT
+ || type == OVS_ACTION_ATTR_TUNNEL_PUSH
+ || type == OVS_ACTION_ATTR_TUNNEL_POP
+ || type == OVS_ACTION_ATTR_USERSPACE)) {
+ dp_packet_reset_cutlen(packet);
+ if (!should_steal) {
+ packet = clone = dp_packet_clone(packet);
+ }
+ dp_packet_set_size(packet, dp_packet_size(packet) - cutlen);
+ }
+
execute.packet = packet;
- execute.md = *md;
+ execute.flow = aux->flow;
execute.needs_help = false;
- aux->error = aux->dpif->dpif_class->execute(aux->dpif, &execute);
+ execute.probe = false;
+ execute.mtu = 0;
+ aux->error = dpif_execute(aux->dpif, &execute);
+ log_execute_message(aux->dpif, &this_module, &execute,
+ true, aux->error);
- if (md->tunnel.ip_dst) {
+ dp_packet_delete(clone);
+
+ if (flow_tnl_dst_is_set(&md->tunnel) || aux->meter_action) {
ofpbuf_uninit(&execute_actions);
+
+ /* Do not re-use the same meters for later output actions. */
+ aux->meter_action = NULL;
}
break;
}
case OVS_ACTION_ATTR_PUSH_MPLS:
case OVS_ACTION_ATTR_POP_MPLS:
case OVS_ACTION_ATTR_SET:
+ case OVS_ACTION_ATTR_SET_MASKED:
case OVS_ACTION_ATTR_SAMPLE:
+ case OVS_ACTION_ATTR_TRUNC:
+ case OVS_ACTION_ATTR_PUSH_ETH:
+ case OVS_ACTION_ATTR_POP_ETH:
+ case OVS_ACTION_ATTR_CLONE:
+ case OVS_ACTION_ATTR_PUSH_NSH:
+ case OVS_ACTION_ATTR_POP_NSH:
+ case OVS_ACTION_ATTR_CT_CLEAR:
case OVS_ACTION_ATTR_UNSPEC:
case __OVS_ACTION_ATTR_MAX:
OVS_NOT_REACHED();
}
+ dp_packet_delete_batch(packets_, should_steal);
}
/* Executes 'execute' by performing most of the actions in userspace and
static int
dpif_execute_with_help(struct dpif *dpif, struct dpif_execute *execute)
{
- struct dpif_execute_helper_aux aux = {dpif, 0};
- struct dpif_packet packet, *pp;
+ struct dpif_execute_helper_aux aux = {dpif, execute->flow, 0, NULL};
+ struct dp_packet_batch pb;
COVERAGE_INC(dpif_execute_with_help);
- packet.ofpbuf = *execute->packet;
- pp = &packet;
-
- odp_execute_actions(&aux, &pp, 1, false, &execute->md, execute->actions,
+ dp_packet_batch_init_packet(&pb, execute->packet);
+ odp_execute_actions(&aux, &pb, false, execute->actions,
execute->actions_len, dpif_execute_helper_cb);
-
- /* Even though may_steal is set to false, some actions could modify or
- * reallocate the ofpbuf memory. We need to pass those changes to the
- * caller */
- *execute->packet = packet.ofpbuf;
-
return aux.error;
}
-/* Causes 'dpif' to perform the 'execute->actions_len' bytes of actions in
- * 'execute->actions' on the Ethernet frame in 'execute->packet' and on packet
- * metadata in 'execute->md'. The implementation is allowed to modify both the
- * '*execute->packet' and 'execute->md'.
- *
- * Some dpif providers do not implement every action. The Linux kernel
- * datapath, in particular, does not implement ARP field modification. If
- * 'needs_help' is true, the dpif layer executes in userspace all of the
- * actions that it can, and for OVS_ACTION_ATTR_OUTPUT and
- * OVS_ACTION_ATTR_USERSPACE actions it passes the packet through to the dpif
- * implementation.
- *
- * This works even if 'execute->actions_len' is too long for a Netlink
- * attribute.
- *
- * Returns 0 if successful, otherwise a positive errno value. */
+/* Returns true if the datapath needs help executing 'execute'. */
+static bool
+dpif_execute_needs_help(const struct dpif_execute *execute)
+{
+ return execute->needs_help || nl_attr_oversized(execute->actions_len);
+}
+
+/* A dpif_operate() wrapper for performing a single DPIF_OP_EXECUTE. */
int
dpif_execute(struct dpif *dpif, struct dpif_execute *execute)
{
- int error;
+ if (execute->actions_len) {
+ struct dpif_op *opp;
+ struct dpif_op op;
- COVERAGE_INC(dpif_execute);
- if (execute->actions_len > 0) {
- error = (execute->needs_help || nl_attr_oversized(execute->actions_len)
- ? dpif_execute_with_help(dpif, execute)
- : dpif->dpif_class->execute(dpif, execute));
- } else {
- error = 0;
- }
+ op.type = DPIF_OP_EXECUTE;
+ op.execute = *execute;
- log_execute_message(dpif, execute, error);
+ opp = &op;
+ dpif_operate(dpif, &opp, 1);
- return error;
+ return op.error;
+ } else {
+ return 0;
+ }
}
/* Executes each of the 'n_ops' operations in 'ops' on 'dpif', in the order in
- * which they are specified, placing each operation's results in the "output"
- * members documented in comments.
- *
- * This function exists because some datapaths can perform batched operations
- * faster than individual operations. */
+ * which they are specified. Places each operation's results in the "output"
+ * members documented in comments, and 0 in the 'error' member on success or a
+ * positive errno on failure. */
void
dpif_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops)
{
- if (dpif->dpif_class->operate) {
- while (n_ops > 0) {
- size_t chunk;
+ while (n_ops > 0) {
+ size_t chunk;
- /* Count 'chunk', the number of ops that can be executed without
- * needing any help. Ops that need help should be rare, so we
- * expect this to ordinarily be 'n_ops', that is, all the ops. */
- for (chunk = 0; chunk < n_ops; chunk++) {
- struct dpif_op *op = ops[chunk];
+ /* Count 'chunk', the number of ops that can be executed without
+ * needing any help. Ops that need help should be rare, so we
+ * expect this to ordinarily be 'n_ops', that is, all the ops. */
+ for (chunk = 0; chunk < n_ops; chunk++) {
+ struct dpif_op *op = ops[chunk];
- if (op->type == DPIF_OP_EXECUTE && op->u.execute.needs_help) {
- break;
- }
+ if (op->type == DPIF_OP_EXECUTE
+ && dpif_execute_needs_help(&op->execute)) {
+ break;
}
+ }
- if (chunk) {
- /* Execute a chunk full of ops that the dpif provider can
- * handle itself, without help. */
- size_t i;
-
- dpif->dpif_class->operate(dpif, ops, chunk);
+ if (chunk) {
+ /* Execute a chunk full of ops that the dpif provider can
+ * handle itself, without help. */
+ size_t i;
- for (i = 0; i < chunk; i++) {
- struct dpif_op *op = ops[i];
+ dpif->dpif_class->operate(dpif, ops, chunk);
- switch (op->type) {
- case DPIF_OP_FLOW_PUT:
- log_flow_put_message(dpif, &op->u.flow_put, op->error);
- break;
+ for (i = 0; i < chunk; i++) {
+ struct dpif_op *op = ops[i];
+ int error = op->error;
- case DPIF_OP_FLOW_DEL:
- log_flow_del_message(dpif, &op->u.flow_del, op->error);
- break;
+ switch (op->type) {
+ case DPIF_OP_FLOW_PUT: {
+ struct dpif_flow_put *put = &op->flow_put;
- case DPIF_OP_EXECUTE:
- log_execute_message(dpif, &op->u.execute, op->error);
- break;
+ COVERAGE_INC(dpif_flow_put);
+ log_flow_put_message(dpif, &this_module, put, error);
+ if (error && put->stats) {
+ memset(put->stats, 0, sizeof *put->stats);
}
+ break;
}
- ops += chunk;
- n_ops -= chunk;
- } else {
- /* Help the dpif provider to execute one op. */
- struct dpif_op *op = ops[0];
+ case DPIF_OP_FLOW_GET: {
+ struct dpif_flow_get *get = &op->flow_get;
- op->error = dpif_execute(dpif, &op->u.execute);
- ops++;
- n_ops--;
- }
- }
- } else {
- size_t i;
-
- for (i = 0; i < n_ops; i++) {
- struct dpif_op *op = ops[i];
+ COVERAGE_INC(dpif_flow_get);
+ if (error) {
+ memset(get->flow, 0, sizeof *get->flow);
+ }
+ log_flow_get_message(dpif, &this_module, get, error);
- switch (op->type) {
- case DPIF_OP_FLOW_PUT:
- op->error = dpif_flow_put__(dpif, &op->u.flow_put);
- break;
+ break;
+ }
- case DPIF_OP_FLOW_DEL:
- op->error = dpif_flow_del__(dpif, &op->u.flow_del);
- break;
+ case DPIF_OP_FLOW_DEL: {
+ struct dpif_flow_del *del = &op->flow_del;
- case DPIF_OP_EXECUTE:
- op->error = dpif_execute(dpif, &op->u.execute);
- break;
+ COVERAGE_INC(dpif_flow_del);
+ log_flow_del_message(dpif, &this_module, del, error);
+ if (error && del->stats) {
+ memset(del->stats, 0, sizeof *del->stats);
+ }
+ break;
+ }
- default:
- OVS_NOT_REACHED();
+ case DPIF_OP_EXECUTE:
+ COVERAGE_INC(dpif_execute);
+ log_execute_message(dpif, &this_module, &op->execute,
+ false, error);
+ break;
+ }
}
+
+ ops += chunk;
+ n_ops -= chunk;
+ } else {
+ /* Help the dpif provider to execute one op. */
+ struct dpif_op *op = ops[0];
+
+ COVERAGE_INC(dpif_execute);
+ op->error = dpif_execute_with_help(dpif, &op->execute);
+ ops++;
+ n_ops--;
}
}
}
int
dpif_recv_set(struct dpif *dpif, bool enable)
{
- int error = dpif->dpif_class->recv_set(dpif, enable);
- log_operation(dpif, "recv_set", error);
+ int error = 0;
+
+ if (dpif->dpif_class->recv_set) {
+ error = dpif->dpif_class->recv_set(dpif, enable);
+ log_operation(dpif, "recv_set", error);
+ }
return error;
}
int
dpif_handlers_set(struct dpif *dpif, uint32_t n_handlers)
{
- int error = dpif->dpif_class->handlers_set(dpif, n_handlers);
- log_operation(dpif, "handlers_set", error);
+ int error = 0;
+
+ if (dpif->dpif_class->handlers_set) {
+ error = dpif->dpif_class->handlers_set(dpif, n_handlers);
+ log_operation(dpif, "handlers_set", error);
+ }
+ return error;
+}
+
+void
+dpif_register_dp_purge_cb(struct dpif *dpif, dp_purge_callback *cb, void *aux)
+{
+ if (dpif->dpif_class->register_dp_purge_cb) {
+ dpif->dpif_class->register_dp_purge_cb(dpif, cb, aux);
+ }
+}
+
+void
+dpif_register_upcall_cb(struct dpif *dpif, upcall_callback *cb, void *aux)
+{
+ if (dpif->dpif_class->register_upcall_cb) {
+ dpif->dpif_class->register_upcall_cb(dpif, cb, aux);
+ }
+}
+
+void
+dpif_enable_upcall(struct dpif *dpif)
+{
+ if (dpif->dpif_class->enable_upcall) {
+ dpif->dpif_class->enable_upcall(dpif);
+ }
+}
+
+void
+dpif_disable_upcall(struct dpif *dpif)
+{
+ if (dpif->dpif_class->disable_upcall) {
+ dpif->dpif_class->disable_upcall(dpif);
+ }
+}
+
+void
+dpif_print_packet(struct dpif *dpif, struct dpif_upcall *upcall)
+{
+ if (!VLOG_DROP_DBG(&dpmsg_rl)) {
+ struct ds flow;
+ char *packet;
+
+ packet = ofp_dp_packet_to_string(&upcall->packet);
+
+ ds_init(&flow);
+ odp_flow_key_format(upcall->key, upcall->key_len, &flow);
+
+ VLOG_DBG("%s: %s upcall:\n%s\n%s",
+ dpif_name(dpif), dpif_upcall_type_to_string(upcall->type),
+ ds_cstr(&flow), packet);
+
+ ds_destroy(&flow);
+ free(packet);
+ }
+}
+
+/* Pass custom configuration to the datapath implementation. Some of the
+ * changes can be postponed until dpif_run() is called. */
+int
+dpif_set_config(struct dpif *dpif, const struct smap *cfg)
+{
+ int error = 0;
+
+ if (dpif->dpif_class->set_config) {
+ error = dpif->dpif_class->set_config(dpif, cfg);
+ if (error) {
+ log_operation(dpif, "set_config", error);
+ }
+ }
+
return error;
}
-/* Polls for an upcall from 'dpif' for an upcall handler. Since there
- * there can be multiple poll loops, 'handler_id' is needed as index to
- * identify the corresponding poll loop. If successful, stores the upcall
- * into '*upcall', using 'buf' for storage. Should only be called if
- * 'recv_set' has been used to enable receiving packets from 'dpif'.
+/* Polls for an upcall from 'dpif' for an upcall handler. Since there can
+ * be multiple poll loops, 'handler_id' is needed as index to identify the
+ * corresponding poll loop. If successful, stores the upcall into '*upcall',
+ * using 'buf' for storage. Should only be called if 'recv_set' has been used
+ * to enable receiving packets from 'dpif'.
*
* 'upcall->key' and 'upcall->userdata' point into data in the caller-provided
* 'buf', so their memory cannot be freed separately from 'buf'.
dpif_recv(struct dpif *dpif, uint32_t handler_id, struct dpif_upcall *upcall,
struct ofpbuf *buf)
{
- int error = dpif->dpif_class->recv(dpif, handler_id, upcall, buf);
- if (!error && !VLOG_DROP_DBG(&dpmsg_rl)) {
- struct ds flow;
- char *packet;
+ int error = EAGAIN;
- packet = ofp_packet_to_string(ofpbuf_data(&upcall->packet),
- ofpbuf_size(&upcall->packet));
-
- ds_init(&flow);
- odp_flow_key_format(upcall->key, upcall->key_len, &flow);
-
- VLOG_DBG("%s: %s upcall:\n%s\n%s",
- dpif_name(dpif), dpif_upcall_type_to_string(upcall->type),
- ds_cstr(&flow), packet);
-
- ds_destroy(&flow);
- free(packet);
- } else if (error && error != EAGAIN) {
- log_operation(dpif, "recv", error);
+ if (dpif->dpif_class->recv) {
+ error = dpif->dpif_class->recv(dpif, handler_id, upcall, buf);
+ if (!error) {
+ dpif_print_packet(dpif, upcall);
+ } else if (error != EAGAIN) {
+ log_operation(dpif, "recv", error);
+ }
}
return error;
}
/* Arranges for the poll loop of upcall handler 'handler_id' to wake up when
 * 'dpif' has a message queued to be received with dpif_recv().  A no-op for
 * providers without a 'recv_wait' hook. */
void
dpif_recv_wait(struct dpif *dpif, uint32_t handler_id)
{
    void (*wait)(struct dpif *, uint32_t) = dpif->dpif_class->recv_wait;

    if (wait) {
        wait(dpif, handler_id);
    }
}
+
+/*
+ * Return the datapath version. Caller is responsible for freeing
+ * the string.
+ */
+char *
+dpif_get_dp_version(const struct dpif *dpif)
+{
+ char *version = NULL;
+
+ if (dpif->dpif_class->get_datapath_version) {
+ version = dpif->dpif_class->get_datapath_version();
+ }
+
+ return version;
}
/* Obtains the NetFlow engine type and engine ID for 'dpif' into '*engine_type'
}
static bool
-should_log_flow_message(int error)
+should_log_flow_message(const struct vlog_module *module, int error)
{
- return !vlog_should_drop(THIS_MODULE, flow_message_log_level(error),
+ return !vlog_should_drop(module, flow_message_log_level(error),
error ? &error_rl : &dpmsg_rl);
}
-static void
-log_flow_message(const struct dpif *dpif, int error, const char *operation,
+void
+log_flow_message(const struct dpif *dpif, int error,
+ const struct vlog_module *module,
+ const char *operation,
const struct nlattr *key, size_t key_len,
const struct nlattr *mask, size_t mask_len,
- const struct dpif_flow_stats *stats,
+ const ovs_u128 *ufid, const struct dpif_flow_stats *stats,
const struct nlattr *actions, size_t actions_len)
{
struct ds ds = DS_EMPTY_INITIALIZER;
if (error) {
ds_put_format(&ds, "(%s) ", ovs_strerror(error));
}
+ if (ufid) {
+ odp_format_ufid(ufid, &ds);
+ ds_put_cstr(&ds, " ");
+ }
odp_flow_format(key, key_len, mask, mask_len, NULL, &ds, true);
if (stats) {
ds_put_cstr(&ds, ", ");
}
if (actions || actions_len) {
ds_put_cstr(&ds, ", actions:");
- format_odp_actions(&ds, actions, actions_len);
+ format_odp_actions(&ds, actions, actions_len, NULL);
}
- vlog(THIS_MODULE, flow_message_log_level(error), "%s", ds_cstr(&ds));
+ vlog(module, flow_message_log_level(error), "%s", ds_cstr(&ds));
ds_destroy(&ds);
}
-static void
-log_flow_put_message(struct dpif *dpif, const struct dpif_flow_put *put,
+void
+log_flow_put_message(const struct dpif *dpif,
+ const struct vlog_module *module,
+ const struct dpif_flow_put *put,
int error)
{
- if (should_log_flow_message(error)) {
+ if (should_log_flow_message(module, error)
+ && !(put->flags & DPIF_FP_PROBE)) {
struct ds s;
ds_init(&s);
if (put->flags & DPIF_FP_ZERO_STATS) {
ds_put_cstr(&s, "[zero]");
}
- log_flow_message(dpif, error, ds_cstr(&s),
+ log_flow_message(dpif, error, module, ds_cstr(&s),
put->key, put->key_len, put->mask, put->mask_len,
- put->stats, put->actions, put->actions_len);
+ put->ufid, put->stats, put->actions,
+ put->actions_len);
ds_destroy(&s);
}
}
-static void
-log_flow_del_message(struct dpif *dpif, const struct dpif_flow_del *del,
+void
+log_flow_del_message(const struct dpif *dpif,
+ const struct vlog_module *module,
+ const struct dpif_flow_del *del,
int error)
{
- if (should_log_flow_message(error)) {
- log_flow_message(dpif, error, "flow_del", del->key, del->key_len,
- NULL, 0, !error ? del->stats : NULL, NULL, 0);
+ if (should_log_flow_message(module, error)) {
+ log_flow_message(dpif, error, module, "flow_del",
+ del->key, del->key_len,
+ NULL, 0, del->ufid, !error ? del->stats : NULL,
+ NULL, 0);
}
}
-static void
-log_execute_message(struct dpif *dpif, const struct dpif_execute *execute,
- int error)
+/* Logs that 'execute' was executed on 'dpif' and completed with errno 'error'
+ * (0 for success). 'subexecute' should be true if the execution is a result
+ * of breaking down a larger execution that needed help, false otherwise.
+ *
+ *
+ * XXX In theory, the log message could be deceptive because this function is
+ * called after the dpif_provider's '->execute' function, which is allowed to
+ * modify execute->packet and execute->md. In practice, though:
+ *
+ * - dpif-netlink doesn't modify execute->packet or execute->md.
+ *
+ * - dpif-netdev does modify them but it is less likely to have problems
+ * because it is built into ovs-vswitchd and cannot have version skew,
+ * etc.
+ *
+ * It would still be better to avoid the potential problem. I don't know of a
+ * good way to do that, though, that isn't expensive. */
+void
+log_execute_message(const struct dpif *dpif,
+ const struct vlog_module *module,
+ const struct dpif_execute *execute,
+ bool subexecute, int error)
{
- if (!(error ? VLOG_DROP_WARN(&error_rl) : VLOG_DROP_DBG(&dpmsg_rl))) {
+ if (!(error ? VLOG_DROP_WARN(&error_rl) : VLOG_DROP_DBG(&dpmsg_rl))
+ && !execute->probe) {
struct ds ds = DS_EMPTY_INITIALIZER;
char *packet;
-
- packet = ofp_packet_to_string(ofpbuf_data(execute->packet),
- ofpbuf_size(execute->packet));
- ds_put_format(&ds, "%s: execute ", dpif_name(dpif));
- format_odp_actions(&ds, execute->actions, execute->actions_len);
+ uint64_t stub[1024 / 8];
+ struct ofpbuf md = OFPBUF_STUB_INITIALIZER(stub);
+
+ packet = ofp_packet_to_string(dp_packet_data(execute->packet),
+ dp_packet_size(execute->packet),
+ execute->packet->packet_type);
+ odp_key_from_dp_packet(&md, execute->packet);
+ ds_put_format(&ds, "%s: %sexecute ",
+ dpif_name(dpif),
+ (subexecute ? "sub-"
+ : dpif_execute_needs_help(execute) ? "super-"
+ : ""));
+ format_odp_actions(&ds, execute->actions, execute->actions_len, NULL);
if (error) {
ds_put_format(&ds, " failed (%s)", ovs_strerror(error));
}
ds_put_format(&ds, " on packet %s", packet);
- vlog(THIS_MODULE, error ? VLL_WARN : VLL_DBG, "%s", ds_cstr(&ds));
+ ds_put_format(&ds, " with metadata ");
+ odp_flow_format(md.data, md.size, NULL, 0, NULL, &ds, true);
+ ds_put_format(&ds, " mtu %d", execute->mtu);
+ vlog(module, error ? VLL_WARN : VLL_DBG, "%s", ds_cstr(&ds));
ds_destroy(&ds);
free(packet);
+ ofpbuf_uninit(&md);
}
}
+
+void
+log_flow_get_message(const struct dpif *dpif,
+ const struct vlog_module *module,
+ const struct dpif_flow_get *get,
+ int error)
+{
+ if (should_log_flow_message(module, error)) {
+ log_flow_message(dpif, error, module, "flow_get",
+ get->key, get->key_len,
+ get->flow->mask, get->flow->mask_len,
+ get->ufid, &get->flow->stats,
+ get->flow->actions, get->flow->actions_len);
+ }
+}
+
/* Reports whether 'dpif' can execute the tunnel push and pop actions, which
 * is currently the case only for the userspace (netdev) datapath. */
bool
dpif_supports_tnl_push_pop(const struct dpif *dpif)
{
    return dpif_is_netdev(dpif);
}
+
+/* Meters */
+void
+dpif_meter_get_features(const struct dpif *dpif,
+ struct ofputil_meter_features *features)
+{
+ memset(features, 0, sizeof *features);
+ if (dpif->dpif_class->meter_get_features) {
+ dpif->dpif_class->meter_get_features(dpif, features);
+ }
+}
+
+/* Adds or modifies meter identified by 'meter_id' in 'dpif'. If '*meter_id'
+ * is UINT32_MAX, adds a new meter, otherwise modifies an existing meter.
+ *
+ * If meter is successfully added, sets '*meter_id' to the new meter's
+ * meter number. */
+int
+dpif_meter_set(struct dpif *dpif, ofproto_meter_id *meter_id,
+ struct ofputil_meter_config *config)
+{
+ int error;
+
+ COVERAGE_INC(dpif_meter_set);
+
+ error = dpif->dpif_class->meter_set(dpif, meter_id, config);
+ if (!error) {
+ VLOG_DBG_RL(&dpmsg_rl, "%s: DPIF meter %"PRIu32" set",
+ dpif_name(dpif), meter_id->uint32);
+ } else {
+ VLOG_WARN_RL(&error_rl, "%s: failed to set DPIF meter %"PRIu32": %s",
+ dpif_name(dpif), meter_id->uint32, ovs_strerror(error));
+ meter_id->uint32 = UINT32_MAX;
+ }
+ return error;
+}
+
+int
+dpif_meter_get(const struct dpif *dpif, ofproto_meter_id meter_id,
+ struct ofputil_meter_stats *stats, uint16_t n_bands)
+{
+ int error;
+
+ COVERAGE_INC(dpif_meter_get);
+
+ error = dpif->dpif_class->meter_get(dpif, meter_id, stats, n_bands);
+ if (!error) {
+ VLOG_DBG_RL(&dpmsg_rl, "%s: DPIF meter %"PRIu32" get stats",
+ dpif_name(dpif), meter_id.uint32);
+ } else {
+ VLOG_WARN_RL(&error_rl,
+ "%s: failed to get DPIF meter %"PRIu32" stats: %s",
+ dpif_name(dpif), meter_id.uint32, ovs_strerror(error));
+ stats->packet_in_count = ~0;
+ stats->byte_in_count = ~0;
+ stats->n_bands = 0;
+ }
+ return error;
+}
+
+int
+dpif_meter_del(struct dpif *dpif, ofproto_meter_id meter_id,
+ struct ofputil_meter_stats *stats, uint16_t n_bands)
+{
+ int error;
+
+ COVERAGE_INC(dpif_meter_del);
+
+ error = dpif->dpif_class->meter_del(dpif, meter_id, stats, n_bands);
+ if (!error) {
+ VLOG_DBG_RL(&dpmsg_rl, "%s: DPIF meter %"PRIu32" deleted",
+ dpif_name(dpif), meter_id.uint32);
+ } else {
+ VLOG_WARN_RL(&error_rl,
+ "%s: failed to delete DPIF meter %"PRIu32": %s",
+ dpif_name(dpif), meter_id.uint32, ovs_strerror(error));
+ if (stats) {
+ stats->packet_in_count = ~0;
+ stats->byte_in_count = ~0;
+ stats->n_bands = 0;
+ }
+ }
+ return error;
+}