/*
- * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include "lacp.h"
#include "learn.h"
#include "mac-learning.h"
+#include "math.h"
#include "mcast-snooping.h"
#include "multipath.h"
#include "netdev-vport.h"
#include "openvswitch/ofp-actions.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/meta-flow.h"
-#include "openvswitch/ofp-parse.h"
#include "openvswitch/ofp-print.h"
-#include "openvswitch/ofp-util.h"
#include "openvswitch/ofpbuf.h"
+#include "openvswitch/uuid.h"
#include "openvswitch/vlog.h"
#include "ovs-lldp.h"
#include "ovs-rcu.h"
#include "ovs-router.h"
-#include "poll-loop.h"
+#include "openvswitch/poll-loop.h"
#include "seq.h"
#include "simap.h"
#include "smap.h"
#include "unaligned.h"
#include "unixctl.h"
#include "util.h"
+#include "uuid.h"
#include "vlan-bitmap.h"
VLOG_DEFINE_THIS_MODULE(ofproto_dpif);
char *name; /* Identifier for log messages. */
/* Configuration. */
- struct ovs_list ports; /* Contains "struct ofport"s. */
+ struct ovs_list ports; /* Contains "struct ofport_dpif"s. */
enum port_vlan_mode vlan_mode; /* VLAN mode */
uint16_t qinq_ethtype;
int vlan; /* -1=trunk port, else a 12-bit VLAN ID. */
struct cfm *cfm; /* Connectivity Fault Management, if any. */
struct bfd *bfd; /* BFD, if any. */
struct lldp *lldp; /* lldp, if any. */
- bool may_enable; /* May be enabled in bonds. */
bool is_tunnel; /* This port is a tunnel. */
- bool is_layer3; /* This is a layer 3 port. */
long long int carrier_seq; /* Carrier status changes. */
struct ofport_dpif *peer; /* Peer if patch port. */
struct shash all_dpif_backers = SHASH_INITIALIZER(&all_dpif_backers);
/* All existing ofproto_dpif instances, indexed by ->up.name. */
-struct hmap all_ofproto_dpifs = HMAP_INITIALIZER(&all_ofproto_dpifs);
+static struct hmap all_ofproto_dpifs_by_name =
+ HMAP_INITIALIZER(&all_ofproto_dpifs_by_name);
+
+/* All existing ofproto_dpif instances, indexed by ->uuid. */
+static struct hmap all_ofproto_dpifs_by_uuid =
+ HMAP_INITIALIZER(&all_ofproto_dpifs_by_uuid);
static bool ofproto_use_tnl_push_pop = true;
static void ofproto_unixctl_init(void);
struct ofproto_dpif *ofproto;
sset_clear(names);
- HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_by_name_node,
+ &all_ofproto_dpifs_by_name) {
if (strcmp(type, ofproto->up.type)) {
continue;
}
{
struct ofproto_dpif *ofproto;
- HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_by_name_node,
+ &all_ofproto_dpifs_by_name) {
if (sset_contains(&ofproto->ports, name)) {
return ofproto;
}
simap_init(&tmp_backers);
simap_swap(&backer->tnl_backers, &tmp_backers);
- HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_by_name_node,
+ &all_ofproto_dpifs_by_name) {
struct ofport_dpif *iter;
if (backer != ofproto->backer) {
HMAP_FOR_EACH (iter, up.hmap_node, &ofproto->up.ports) {
char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
const char *dp_port;
+ odp_port_t old_odp_port;
if (!iter->is_tunnel) {
continue;
dp_port = netdev_vport_get_dpif_port(iter->up.netdev,
namebuf, sizeof namebuf);
+ old_odp_port = iter->odp_port;
node = simap_find(&tmp_backers, dp_port);
if (node) {
simap_put(&backer->tnl_backers, dp_port, node->data);
iter->odp_port = node ? u32_to_odp(node->data) : ODPP_NONE;
if (tnl_port_reconfigure(iter, iter->up.netdev,
- iter->odp_port,
+ iter->odp_port, old_odp_port,
ovs_native_tunneling_is_on(ofproto), dp_port)) {
backer->need_revalidate = REV_RECONFIGURE;
}
}
SIMAP_FOR_EACH (node, &tmp_backers) {
- dpif_port_del(backer->dpif, u32_to_odp(node->data));
+ dpif_port_del(backer->dpif, u32_to_odp(node->data), false);
}
simap_destroy(&tmp_backers);
backer->need_revalidate = 0;
xlate_txn_start();
- HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_by_name_node,
+ &all_ofproto_dpifs_by_name) {
struct ofport_dpif *ofport;
struct ofbundle *bundle;
ofproto->netflow,
ofproto->up.forward_bpdu,
connmgr_has_in_band(ofproto->up.connmgr),
- &ofproto->backer->support);
+ &ofproto->backer->rt_support);
HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
xlate_bundle_set(ofproto, bundle, bundle->name,
ofport->rstp_port, ofport->qdscp,
ofport->n_qdscp, ofport->up.pp.config,
ofport->up.pp.state, ofport->is_tunnel,
- ofport->may_enable);
+ ofport->up.may_enable);
}
}
xlate_txn_commit();
const char *devname;
sset_init(&devnames);
- HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_by_name_node,
+ &all_ofproto_dpifs_by_name) {
if (ofproto->backer == backer) {
struct ofport *ofport;
return;
}
- HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
- &all_ofproto_dpifs) {
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_by_name_node,
+ &all_ofproto_dpifs_by_name) {
if (simap_contains(&ofproto->backer->tnl_backers, devname)) {
return;
}
} else if (!ofproto) {
/* The port was added, but we don't know with which
* ofproto we should associate it. Delete it. */
- dpif_port_del(backer->dpif, port.port_no);
+ dpif_port_del(backer->dpif, port.port_no, false);
} else {
struct ofport_dpif *ofport;
{
struct ofproto_dpif *ofproto;
- HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_by_name_node,
+ &all_ofproto_dpifs_by_name) {
if (ofproto->backer == backer) {
sset_clear(&ofproto->port_poll_set);
ofproto->port_poll_errno = error;
static void
close_dpif_backer(struct dpif_backer *backer, bool del)
{
+ struct simap_node *node;
+
ovs_assert(backer->refcount > 0);
if (--backer->refcount) {
udpif_destroy(backer->udpif);
+ SIMAP_FOR_EACH (node, &backer->tnl_backers) {
+ dpif_port_del(backer->dpif, u32_to_odp(node->data), false);
+ }
simap_destroy(&backer->tnl_backers);
ovs_rwlock_destroy(&backer->odp_to_ofport_lock);
hmap_destroy(&backer->odp_to_ofport_map);
odp_port_t odp_port;
};
-static bool check_variable_length_userdata(struct dpif_backer *backer);
static void check_support(struct dpif_backer *backer);
static int
dpif_port_dump_done(&port_dump);
LIST_FOR_EACH_POP (garbage, list_node, &garbage_list) {
- dpif_port_del(backer->dpif, garbage->odp_port);
+ dpif_port_del(backer->dpif, garbage->odp_port, false);
free(garbage);
}
udpif_set_threads(backer->udpif, n_handlers, n_revalidators);
}
- /* This check fails if performed before udpif threads have been set,
- * as the kernel module checks that the 'pid' in userspace action
- * is non-zero. */
- backer->support.variable_length_userdata
- = check_variable_length_userdata(backer);
backer->dp_version_string = dpif_get_dp_version(backer->dpif);
/* Manage Datapath meter IDs if supported. */
backer->meter_ids = NULL;
}
+ /* Make a pristine snapshot of 'support' into 'boottime_support'.
+ * 'boottime_support' can be checked to prevent 'support' to be changed
+ * beyond the datapath capabilities. In case 'support' is changed by
+ * the user, 'boottime_support' can be used to restore it. */
+ backer->bt_support = backer->rt_support;
+
return error;
}
bool
ovs_native_tunneling_is_on(struct ofproto_dpif *ofproto)
{
- return ofproto_use_tnl_push_pop && ofproto->backer->support.tnl_push_pop &&
- atomic_count_get(&ofproto->backer->tnl_count);
+ return ofproto_use_tnl_push_pop
+ && ofproto->backer->rt_support.tnl_push_pop
+ && atomic_count_get(&ofproto->backer->tnl_count);
}
/* Tests whether 'backer''s datapath supports recirculation. Only newer
return enable_ufid;
}
-/* Tests whether 'backer''s datapath supports variable-length
- * OVS_USERSPACE_ATTR_USERDATA in OVS_ACTION_ATTR_USERSPACE actions. We need
- * to disable some features on older datapaths that don't support this
- * feature.
- *
- * Returns false if 'backer' definitely does not support variable-length
- * userdata, true if it seems to support them or if at least the error we get
- * is ambiguous. */
-static bool
-check_variable_length_userdata(struct dpif_backer *backer)
-{
- struct eth_header *eth;
- struct ofpbuf actions;
- struct dpif_execute execute;
- struct dp_packet packet;
- struct flow flow;
- size_t start;
- int error;
-
- /* Compose a userspace action that will cause an ERANGE error on older
- * datapaths that don't support variable-length userdata.
- *
- * We really test for using userdata longer than 8 bytes, but older
- * datapaths accepted these, silently truncating the userdata to 8 bytes.
- * The same older datapaths rejected userdata shorter than 8 bytes, so we
- * test for that instead as a proxy for longer userdata support. */
- ofpbuf_init(&actions, 64);
- start = nl_msg_start_nested(&actions, OVS_ACTION_ATTR_USERSPACE);
- nl_msg_put_u32(&actions, OVS_USERSPACE_ATTR_PID,
- dpif_port_get_pid(backer->dpif, ODPP_NONE, 0));
- nl_msg_put_unspec_zero(&actions, OVS_USERSPACE_ATTR_USERDATA, 4);
- nl_msg_end_nested(&actions, start);
-
- /* Compose a dummy ethernet packet. */
- dp_packet_init(&packet, ETH_HEADER_LEN);
- eth = dp_packet_put_zeros(&packet, ETH_HEADER_LEN);
- eth->eth_type = htons(0x1234);
-
- flow_extract(&packet, &flow);
-
- /* Execute the actions. On older datapaths this fails with ERANGE, on
- * newer datapaths it succeeds. */
- execute.actions = actions.data;
- execute.actions_len = actions.size;
- execute.packet = &packet;
- execute.flow = &flow;
- execute.needs_help = false;
- execute.probe = true;
- execute.mtu = 0;
-
- error = dpif_execute(backer->dpif, &execute);
-
- dp_packet_uninit(&packet);
- ofpbuf_uninit(&actions);
-
- switch (error) {
- case 0:
- return true;
-
- case ERANGE:
- /* Variable-length userdata is not supported. */
- VLOG_WARN("%s: datapath does not support variable-length userdata "
- "feature (needs Linux 3.10+ or kernel module from OVS "
- "1..11+). The NXAST_SAMPLE action will be ignored.",
- dpif_name(backer->dpif));
- return false;
-
- default:
- /* Something odd happened. We're not sure whether variable-length
- * userdata is supported. Default to "yes". */
- VLOG_WARN("%s: variable-length userdata feature probe failed (%s)",
- dpif_name(backer->dpif), ovs_strerror(error));
- return true;
- }
-}
-
/* Tests number of 802.1q VLAN headers supported by 'backer''s datapath.
*
* Returns the number of elements in a struct flow's vlan
/* Compose a dummy UDP packet. */
dp_packet_init(&packet, 0);
- flow_compose(&packet, &flow);
+ flow_compose(&packet, &flow, NULL, 64);
/* Execute the actions. On older datapaths this fails with EINVAL, on
* newer datapaths it succeeds. */
return !error;
}
+/* Tests whether 'backer''s datapath supports the OVS_ACTION_ATTR_CT_CLEAR
+ * action. */
+static bool
+check_ct_clear(struct dpif_backer *backer)
+{
+ struct odputil_keybuf keybuf;
+ uint8_t actbuf[NL_A_FLAG_SIZE];
+ struct ofpbuf actions;
+ struct ofpbuf key;
+ struct flow flow;
+ bool supported;
+
+ struct odp_flow_key_parms odp_parms = {
+ .flow = &flow,
+ .probe = true,
+ };
+
+ memset(&flow, 0, sizeof flow);
+ ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
+ odp_flow_key_from_flow(&odp_parms, &key);
+
+ ofpbuf_use_stack(&actions, &actbuf, sizeof actbuf);
+ nl_msg_put_flag(&actions, OVS_ACTION_ATTR_CT_CLEAR);
+
+ supported = dpif_probe_feature(backer->dpif, "ct_clear", &key,
+ &actions, NULL);
+
+ VLOG_INFO("%s: Datapath %s ct_clear action",
+ dpif_name(backer->dpif), (supported) ? "supports"
+ : "does not support");
+ return supported;
+}
+
+/* Probe the highest dp_hash algorithm supported by the datapath. */
+static size_t
+check_max_dp_hash_alg(struct dpif_backer *backer)
+{
+ struct odputil_keybuf keybuf;
+ struct ofpbuf key;
+ struct flow flow;
+ struct ovs_action_hash *hash;
+ int max_alg = 0;
+
+ struct odp_flow_key_parms odp_parms = {
+ .flow = &flow,
+ .probe = true,
+ };
+
+ memset(&flow, 0, sizeof flow);
+ ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
+ odp_flow_key_from_flow(&odp_parms, &key);
+
+ /* All datapaths support algortithm 0 (OVS_HASH_ALG_L4). */
+ for (int alg = 1; alg < __OVS_HASH_MAX; alg++) {
+ struct ofpbuf actions;
+ bool ok;
+
+ ofpbuf_init(&actions, 300);
+ hash = nl_msg_put_unspec_uninit(&actions,
+ OVS_ACTION_ATTR_HASH, sizeof *hash);
+ hash->hash_basis = 0;
+ hash->hash_alg = alg;
+ ok = dpif_probe_feature(backer->dpif, "Max dp_hash algorithm", &key,
+ &actions, NULL);
+ ofpbuf_uninit(&actions);
+ if (ok) {
+ max_alg = alg;
+ } else {
+ break;
+ }
+ }
+
+ VLOG_INFO("%s: Max dp_hash algorithm probed to be %d",
+ dpif_name(backer->dpif), max_alg);
+ return max_alg;
+}
+
#define CHECK_FEATURE__(NAME, SUPPORT, FIELD, VALUE, ETHTYPE) \
static bool \
check_##NAME(struct dpif_backer *backer) \
CHECK_FEATURE__(ct_state_nat, ct_state, ct_state, \
CS_TRACKED|CS_SRC_NAT, ETH_TYPE_IP)
CHECK_FEATURE__(ct_orig_tuple, ct_orig_tuple, ct_nw_proto, 1, ETH_TYPE_IP)
+CHECK_FEATURE__(ct_orig_tuple6, ct_orig_tuple6, ct_nw_proto, 1, ETH_TYPE_IPV6)
#undef CHECK_FEATURE
#undef CHECK_FEATURE__
static void
check_support(struct dpif_backer *backer)
{
- /* This feature needs to be tested after udpif threads are set. */
- backer->support.variable_length_userdata = false;
-
/* Actions. */
- backer->support.odp.recirc = check_recirc(backer);
- backer->support.odp.max_vlan_headers = check_max_vlan_headers(backer);
- backer->support.odp.max_mpls_depth = check_max_mpls_depth(backer);
- backer->support.masked_set_action = check_masked_set_action(backer);
- backer->support.trunc = check_trunc_action(backer);
- backer->support.ufid = check_ufid(backer);
- backer->support.tnl_push_pop = dpif_supports_tnl_push_pop(backer->dpif);
- backer->support.clone = check_clone(backer);
- backer->support.sample_nesting = check_max_sample_nesting(backer);
- backer->support.ct_eventmask = check_ct_eventmask(backer);
+ backer->rt_support.odp.recirc = check_recirc(backer);
+ backer->rt_support.odp.max_vlan_headers = check_max_vlan_headers(backer);
+ backer->rt_support.odp.max_mpls_depth = check_max_mpls_depth(backer);
+ backer->rt_support.masked_set_action = check_masked_set_action(backer);
+ backer->rt_support.trunc = check_trunc_action(backer);
+ backer->rt_support.ufid = check_ufid(backer);
+ backer->rt_support.tnl_push_pop = dpif_supports_tnl_push_pop(backer->dpif);
+ backer->rt_support.clone = check_clone(backer);
+ backer->rt_support.sample_nesting = check_max_sample_nesting(backer);
+ backer->rt_support.ct_eventmask = check_ct_eventmask(backer);
+ backer->rt_support.ct_clear = check_ct_clear(backer);
+ backer->rt_support.max_hash_alg = check_max_dp_hash_alg(backer);
/* Flow fields. */
- backer->support.odp.ct_state = check_ct_state(backer);
- backer->support.odp.ct_zone = check_ct_zone(backer);
- backer->support.odp.ct_mark = check_ct_mark(backer);
- backer->support.odp.ct_label = check_ct_label(backer);
+ backer->rt_support.odp.ct_state = check_ct_state(backer);
+ backer->rt_support.odp.ct_zone = check_ct_zone(backer);
+ backer->rt_support.odp.ct_mark = check_ct_mark(backer);
+ backer->rt_support.odp.ct_label = check_ct_label(backer);
- backer->support.odp.ct_state_nat = check_ct_state_nat(backer);
- backer->support.odp.ct_orig_tuple = check_ct_orig_tuple(backer);
+ backer->rt_support.odp.ct_state_nat = check_ct_state_nat(backer);
+ backer->rt_support.odp.ct_orig_tuple = check_ct_orig_tuple(backer);
+ backer->rt_support.odp.ct_orig_tuple6 = check_ct_orig_tuple6(backer);
}
static int
}
}
- hmap_insert(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node,
+ hmap_insert(&all_ofproto_dpifs_by_name,
+ &ofproto->all_ofproto_dpifs_by_name_node,
hash_string(ofproto->up.name, 0));
+ hmap_insert(&all_ofproto_dpifs_by_uuid,
+ &ofproto->all_ofproto_dpifs_by_uuid_node,
+ uuid_hash(&ofproto->uuid));
memset(&ofproto->stats, 0, sizeof ofproto->stats);
ofproto_init_tables(ofproto_, N_TABLES);
controller->max_len = UINT16_MAX;
controller->controller_id = 0;
controller->reason = OFPR_IMPLICIT_MISS;
+ controller->meter_id = NX_CTLR_NO_METER;
ofpact_finish_CONTROLLER(&ofpacts, &controller);
error = add_internal_miss_flow(ofproto, id++, &ofpacts,
* to the ofproto or anything in it. */
udpif_synchronize(ofproto->backer->udpif);
- hmap_remove(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node);
+ hmap_remove(&all_ofproto_dpifs_by_name,
+ &ofproto->all_ofproto_dpifs_by_name_node);
+ hmap_remove(&all_ofproto_dpifs_by_uuid,
+ &ofproto->all_ofproto_dpifs_by_uuid_node);
OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
CLS_FOR_EACH (rule, up.cr, &table->cls) {
static void
query_tables(struct ofproto *ofproto,
- struct ofputil_table_features *features,
+ struct ofputil_table_features *features OVS_UNUSED,
struct ofputil_table_stats *stats)
{
- strcpy(features->name, "classifier");
-
if (stats) {
int i;
port->cfm = NULL;
port->bfd = NULL;
port->lldp = NULL;
- port->may_enable = false;
port->stp_port = NULL;
port->stp_state = STP_DISABLED;
port->rstp_port = NULL;
port->qdscp = NULL;
port->n_qdscp = 0;
port->carrier_seq = netdev_get_carrier_resets(netdev);
- port->is_layer3 = netdev_vport_is_layer3(netdev);
if (netdev_vport_is_patch(netdev)) {
/* By bailing out here, we don't submit the port to the sFlow module
}
port->is_tunnel = true;
- if (ofproto->ipfix) {
- dpif_ipfix_add_tunnel_port(ofproto->ipfix, port_, port->odp_port);
- }
} else {
/* Sanity-check that a mapping doesn't already exist. This
* shouldn't happen for non-tunnel ports. */
if (ofproto->sflow) {
dpif_sflow_add_port(ofproto->sflow, port_, port->odp_port);
}
+ if (ofproto->ipfix) {
+ dpif_ipfix_add_port(ofproto->ipfix, port_, port->odp_port);
+ }
return 0;
}
* assumes that removal of attached ports will happen as part of
* destruction. */
if (!port->is_tunnel) {
- dpif_port_del(ofproto->backer->dpif, port->odp_port);
+ dpif_port_del(ofproto->backer->dpif, port->odp_port, false);
+ }
+ } else if (del) {
+ /* The underlying device is already deleted (e.g. tunctl -d).
+ * Calling dpif_port_remove to do local cleanup for the netdev */
+ if (!port->is_tunnel) {
+ dpif_port_del(ofproto->backer->dpif, port->odp_port, true);
}
}
atomic_count_dec(&ofproto->backer->tnl_count);
}
- if (port->is_tunnel && ofproto->ipfix) {
- dpif_ipfix_del_tunnel_port(ofproto->ipfix, port->odp_port);
- }
-
- tnl_port_del(port);
+ tnl_port_del(port, port->odp_port);
sset_find_and_delete(&ofproto->ports, devname);
sset_find_and_delete(&ofproto->ghost_ports, devname);
bundle_remove(port_);
if (ofproto->sflow) {
dpif_sflow_del_port(ofproto->sflow, port->odp_port);
}
+ if (ofproto->ipfix) {
+ dpif_ipfix_del_port(ofproto->ipfix, port->odp_port);
+ }
free(port->qdscp);
}
bfd_set_netdev(port->bfd, netdev);
}
- /* Set liveness, unless the link is administratively or
- * operationally down or link monitoring false */
- if (!(port->up.pp.config & OFPUTIL_PC_PORT_DOWN) &&
- !(port->up.pp.state & OFPUTIL_PS_LINK_DOWN) &&
- port->may_enable) {
- port->up.pp.state |= OFPUTIL_PS_LIVE;
- } else {
- port->up.pp.state &= ~OFPUTIL_PS_LIVE;
- }
-
ofproto_dpif_monitor_port_update(port, port->bfd, port->cfm,
port->lldp, &port->up.pp.hw_addr);
if (port->is_tunnel) {
struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
- if (tnl_port_reconfigure(port, netdev, port->odp_port,
+ if (tnl_port_reconfigure(port, netdev, port->odp_port, port->odp_port,
ovs_native_tunneling_is_on(ofproto),
dp_port_name)) {
ofproto->backer->need_revalidate = REV_RECONFIGURE;
bundle_update(port->bundle);
}
}
+ port_run(port);
}
static int
di, bridge_exporter_options, flow_exporters_options,
n_flow_exporters_options);
- /* Add tunnel ports only when a new ipfix created */
+ /* Add ports only when a new ipfix created */
if (new_di == true) {
struct ofport_dpif *ofport;
HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
- if (ofport->is_tunnel == true) {
- dpif_ipfix_add_tunnel_port(di, &ofport->up, ofport->odp_port);
- }
+ dpif_ipfix_add_port(di, &ofport->up, ofport->odp_port);
}
}
rstp_port_set(rp, s->port_num, s->priority, s->path_cost,
s->admin_edge_port, s->auto_edge,
s->admin_p2p_mac_state, s->admin_port_state, s->mcheck,
- ofport);
+ ofport, netdev_get_name(ofport->up.netdev));
update_rstp_port_state(ofport);
/* Synchronize operational status. */
- rstp_port_set_mac_operational(rp, ofport->may_enable);
+ rstp_port_set_mac_operational(rp, ofport->up.may_enable);
}
static void
if (all_ofprotos) {
struct ofproto_dpif *o;
- HMAP_FOR_EACH (o, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ HMAP_FOR_EACH (o, all_ofproto_dpifs_by_name_node,
+ &all_ofproto_dpifs_by_name) {
if (o != ofproto) {
struct mac_entry *e;
bundle->floodable = true;
LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
- || port->is_layer3
+ || netdev_get_pt_mode(port->up.netdev) == NETDEV_PT_LEGACY_L3
|| (bundle->ofproto->stp && !stp_forward_in_state(port->stp_state))
|| (bundle->ofproto->rstp && !rstp_forward_in_state(port->rstp_state))) {
bundle->floodable = false;
port->bundle = bundle;
ovs_list_push_back(&bundle->ports, &port->bundle_node);
if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
- || port->is_layer3
+ || netdev_get_pt_mode(port->up.netdev) == NETDEV_PT_LEGACY_L3
|| (bundle->ofproto->stp && !stp_forward_in_state(port->stp_state))
|| (bundle->ofproto->rstp && !rstp_forward_in_state(port->rstp_state))) {
bundle->floodable = false;
size_t i;
bool ok;
+ bundle = bundle_lookup(ofproto, aux);
+
if (!s) {
- bundle_destroy(bundle_lookup(ofproto, aux));
+ bundle_destroy(bundle);
return 0;
}
ovs_assert(s->n_slaves == 1 || s->bond != NULL);
ovs_assert((s->lacp != NULL) == (s->lacp_slaves != NULL));
- bundle = bundle_lookup(ofproto, aux);
if (!bundle) {
bundle = xmalloc(sizeof *bundle);
static void
send_pdu_cb(void *port_, const void *pdu, size_t pdu_size)
{
- static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 10);
struct ofport_dpif *port = port_;
struct eth_addr ea;
int error;
ofproto_dpif_send_packet(port, false, &packet);
dp_packet_uninit(&packet);
} else {
- VLOG_ERR_RL(&rl, "port %s: cannot obtain Ethernet address of iface "
+ static struct vlog_rate_limit rll = VLOG_RATE_LIMIT_INIT(1, 10);
+ VLOG_ERR_RL(&rll, "port %s: cannot obtain Ethernet address of iface "
"%s (%s)", port->bundle->name,
netdev_get_name(port->up.netdev), ovs_strerror(error));
}
}
if (n_errors) {
- static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
- VLOG_WARN_RL(&rl, "bond %s: %d errors sending %d gratuitous learning "
+ static struct vlog_rate_limit rll = VLOG_RATE_LIMIT_INIT(1, 5);
+ VLOG_WARN_RL(&rll, "bond %s: %d errors sending %d gratuitous learning "
"packets, last error was: %s",
bundle->name, n_errors, n_packets, ovs_strerror(error));
} else {
struct ofport_dpif *port;
LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
- bond_slave_set_may_enable(bundle->bond, port, port->may_enable);
+ bond_slave_set_may_enable(bundle->bond, port, port->up.may_enable);
}
if (bond_run(bundle->bond, lacp_status(bundle->lacp))) {
return;
}
- HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_by_name_node,
+ &all_ofproto_dpifs_by_name) {
struct ofport *peer_ofport;
struct ofport_dpif *peer;
char *peer_peer;
free(peer_name);
}
-static void
-port_run(struct ofport_dpif *ofport)
+static bool
+may_enable_port(struct ofport_dpif *ofport)
{
- long long int carrier_seq = netdev_get_carrier_resets(ofport->up.netdev);
- bool carrier_changed = carrier_seq != ofport->carrier_seq;
- bool enable = netdev_get_carrier(ofport->up.netdev);
- bool cfm_enable = false;
- bool bfd_enable = false;
-
- ofport->carrier_seq = carrier_seq;
-
- if (ofport->cfm) {
- int cfm_opup = cfm_get_opup(ofport->cfm);
-
- cfm_enable = !cfm_get_fault(ofport->cfm);
-
- if (cfm_opup >= 0) {
- cfm_enable = cfm_enable && cfm_opup;
- }
+ /* Carrier must be up. */
+ if (!netdev_get_carrier(ofport->up.netdev)) {
+ return false;
}
- if (ofport->bfd) {
- bfd_enable = bfd_forwarding(ofport->bfd);
+ /* If CFM or BFD is enabled, then at least one of them must report that the
+ * port is up. */
+ if ((ofport->bfd || ofport->cfm)
+ && !(ofport->cfm
+ && !cfm_get_fault(ofport->cfm)
+ && cfm_get_opup(ofport->cfm) != 0)
+ && !(ofport->bfd
+ && bfd_forwarding(ofport->bfd))) {
+ return false;
}
- if (ofport->bfd || ofport->cfm) {
- enable = enable && (cfm_enable || bfd_enable);
+ /* If LACP is enabled, it must report that the link is enabled. */
+ if (ofport->bundle
+ && !lacp_slave_may_enable(ofport->bundle->lacp, ofport)) {
+ return false;
}
- if (ofport->bundle) {
- enable = enable && lacp_slave_may_enable(ofport->bundle->lacp, ofport);
- if (carrier_changed) {
- lacp_slave_carrier_changed(ofport->bundle->lacp, ofport);
- }
+ return true;
+}
+
+static void
+port_run(struct ofport_dpif *ofport)
+{
+ long long int carrier_seq = netdev_get_carrier_resets(ofport->up.netdev);
+ bool carrier_changed = carrier_seq != ofport->carrier_seq;
+ ofport->carrier_seq = carrier_seq;
+ if (carrier_changed && ofport->bundle) {
+ lacp_slave_carrier_changed(ofport->bundle->lacp, ofport);
}
- if (ofport->may_enable != enable) {
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
+ bool enable = may_enable_port(ofport);
+ if (ofport->up.may_enable != enable) {
+ ofproto_port_set_enable(&ofport->up, enable);
+ struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
ofproto->backer->need_revalidate = REV_PORT_TOGGLED;
if (ofport->rstp_port) {
rstp_port_set_mac_operational(ofport->rstp_port, enable);
}
-
- /* Propagate liveness, unless the link is administratively or
- * operationally down. */
- if (!(ofport->up.pp.config & OFPUTIL_PC_PORT_DOWN) &&
- !(ofport->up.pp.state & OFPUTIL_PS_LINK_DOWN)) {
- enum ofputil_port_state of_state = ofport->up.pp.state;
- if (enable) {
- of_state |= OFPUTIL_PS_LIVE;
- } else {
- of_state &= ~OFPUTIL_PS_LIVE;
- }
- ofproto_port_set_state(&ofport->up, of_state);
- }
}
-
- ofport->may_enable = enable;
}
static int
}
dp_port_name = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
- if (!dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
- odp_port_t port_no = ODPP_NONE;
- int error;
- error = dpif_port_add(ofproto->backer->dpif, netdev, &port_no);
+ odp_port_t port_no = ODPP_NONE;
+ int error = dpif_port_add(ofproto->backer->dpif, netdev, &port_no);
+ if (error != EEXIST && error != EBUSY) {
if (error) {
return error;
}
netdev_get_name(ofport->up.netdev));
ofproto->backer->need_revalidate = REV_RECONFIGURE;
if (!ofport->is_tunnel && !netdev_vport_is_patch(ofport->up.netdev)) {
- error = dpif_port_del(ofproto->backer->dpif, ofport->odp_port);
+ error = dpif_port_del(ofproto->backer->dpif, ofport->odp_port, false);
if (!error) {
/* The caller is going to close ofport->up.netdev. If this is a
* bonded port, then the bond is using that netdev, so remove it
ovs_u128 ct_label;
uint32_t ct_mark;
- support = &ofproto->backer->support.odp;
+ support = &ofproto->backer->rt_support.odp;
ct_state = MINIFLOW_GET_U8(flow, ct_state);
+ if (ct_state & CS_UNSUPPORTED_MASK) {
+ return OFPERR_OFPBMC_BAD_MASK;
+ }
+
/* Do not bother dissecting the flow further if the datapath supports all
* the features we know of. */
if (support->ct_state && support->ct_zone && support->ct_mark
&& support->ct_label && support->ct_state_nat
- && support->ct_orig_tuple) {
- return ct_state & CS_UNSUPPORTED_MASK ? OFPERR_OFPBMC_BAD_MASK : 0;
+ && support->ct_orig_tuple && support->ct_orig_tuple6) {
+ return 0;
}
ct_zone = MINIFLOW_GET_U16(flow, ct_zone);
ct_label = MINIFLOW_GET_U128(flow, ct_label);
if ((ct_state && !support->ct_state)
- || (ct_state & CS_UNSUPPORTED_MASK)
|| ((ct_state & (CS_SRC_NAT | CS_DST_NAT)) && !support->ct_state_nat)
|| (ct_zone && !support->ct_zone)
|| (ct_mark && !support->ct_mark)
|| (!ovs_u128_is_zero(ct_label) && !support->ct_label)) {
- return OFPERR_OFPBMC_BAD_MASK;
+ return OFPERR_NXBMC_CT_DATAPATH_SUPPORT;
}
- if (!support->ct_orig_tuple &&
- (MINIFLOW_GET_U8(flow, ct_nw_proto) ||
- MINIFLOW_GET_U16(flow, ct_tp_src) ||
- MINIFLOW_GET_U16(flow, ct_tp_dst) ||
- MINIFLOW_GET_U32(flow, ct_nw_src) ||
- MINIFLOW_GET_U32(flow, ct_nw_dst) ||
- !ovs_u128_is_zero(MINIFLOW_GET_U128(flow, ct_ipv6_src)) ||
- !ovs_u128_is_zero(MINIFLOW_GET_U128(flow, ct_ipv6_dst)))) {
- return OFPERR_OFPBMC_BAD_MASK;
+ if (!support->ct_orig_tuple && !support->ct_orig_tuple6
+ && (MINIFLOW_GET_U8(flow, ct_nw_proto)
+ || MINIFLOW_GET_U16(flow, ct_tp_src)
+ || MINIFLOW_GET_U16(flow, ct_tp_dst))) {
+ return OFPERR_NXBMC_CT_DATAPATH_SUPPORT;
+ }
+
+ if (!support->ct_orig_tuple
+ && (MINIFLOW_GET_U32(flow, ct_nw_src)
+ || MINIFLOW_GET_U32(flow, ct_nw_dst))) {
+ return OFPERR_NXBMC_CT_DATAPATH_SUPPORT;
+ }
+
+ if (!support->ct_orig_tuple6
+ && (!ovs_u128_is_zero(MINIFLOW_GET_U128(flow, ct_ipv6_src))
+ || !ovs_u128_is_zero(MINIFLOW_GET_U128(flow, ct_ipv6_dst)))) {
+ return OFPERR_NXBMC_CT_DATAPATH_SUPPORT;
}
return 0;
static void
report_unsupported_act(const char *action, const char *detail)
{
- static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
- VLOG_WARN_RL(&rl, "Rejecting %s action because datapath does not support"
+ static struct vlog_rate_limit rll = VLOG_RATE_LIMIT_INIT(1, 5);
+ VLOG_WARN_RL(&rll, "Rejecting %s action because datapath does not support"
"%s%s (your kernel module may be out of date)",
action, detail ? " " : "", detail ? detail : "");
}
const struct rule_actions *const actions)
{
const struct ofpact *ofpact;
- const struct odp_support *support = &ofproto->backer->support.odp;
+ const struct odp_support *support = &ofproto->backer->rt_support.odp;
OFPACT_FOR_EACH (ofpact, actions->ofpacts, actions->ofpacts_len) {
if (ofpact->type == OFPACT_CT) {
if (!support->ct_state) {
report_unsupported_act("ct", "ct action");
- return OFPERR_OFPBAC_BAD_TYPE;
+ return OFPERR_NXBAC_CT_DATAPATH_SUPPORT;
}
if ((ct->zone_imm || ct->zone_src.field) && !support->ct_zone) {
report_unsupported_act("ct", "ct zones");
- return OFPERR_OFPBAC_BAD_ARGUMENT;
+ return OFPERR_NXBAC_CT_DATAPATH_SUPPORT;
}
/* So far the force commit feature is implemented together with the
* original direction tuple feature by all datapaths, so we use the
* force commit feature as well. */
if ((ct->flags & NX_CT_F_FORCE) && !support->ct_orig_tuple) {
report_unsupported_act("ct", "force commit");
- return OFPERR_OFPBAC_BAD_ARGUMENT;
+ return OFPERR_NXBAC_CT_DATAPATH_SUPPORT;
}
OFPACT_FOR_EACH(a, ct->actions, ofpact_ct_get_action_len(ct)) {
* 'ct_state': assume that it doesn't support the NAT
* action. */
report_unsupported_act("ct", "nat");
- return OFPERR_OFPBAC_BAD_TYPE;
+ return OFPERR_NXBAC_CT_DATAPATH_SUPPORT;
}
if (dst && ((dst->id == MFF_CT_MARK && !support->ct_mark) ||
(dst->id == MFF_CT_LABEL && !support->ct_label))) {
report_unsupported_act("ct", "setting mark and/or label");
- return OFPERR_OFPBAC_BAD_SET_ARGUMENT;
+ return OFPERR_NXBAC_CT_DATAPATH_SUPPORT;
}
}
} else if (ofpact->type == OFPACT_RESUBMIT) {
if (resubmit->with_ct_orig && !support->ct_orig_tuple) {
report_unsupported_act("resubmit",
"ct original direction tuple");
- return OFPERR_OFPBAC_BAD_TYPE;
+ return OFPERR_NXBAC_CT_DATAPATH_SUPPORT;
}
}
}
static void
ofproto_dpif_xcache_execute(struct ofproto_dpif *ofproto,
struct xlate_cache *xcache,
- const struct dpif_flow_stats *stats)
+ struct dpif_flow_stats *stats)
OVS_REQUIRES(ofproto_mutex)
{
struct xc_entry *entry;
case XC_NORMAL:
case XC_GROUP:
case XC_TNL_NEIGH:
- case XC_CONTROLLER:
+ case XC_TUNNEL_HEADER:
xlate_push_stats_entry(entry, stats);
break;
default:
bucket->stats.packet_count += stats->n_packets;
bucket->stats.byte_count += stats->n_bytes;
} else { /* Credit to all buckets */
- struct ofputil_bucket *bucket;
LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
bucket->stats.packet_count += stats->n_packets;
bucket->stats.byte_count += stats->n_bytes;
ovs_mutex_unlock(&group->stats_mutex);
}
+/* Calculate the dp_hash mask needed to provide the least weighted bucket
+ * with at least one hash value and construct a mapping table from masked
+ * dp_hash value to group bucket using the Webster method.
+ * If the caller specifies a non-zero max_hash value, abort and return false
+ * if more hash values would be required. The absolute maximum number of
+ * hash values supported is 256. */
+
+#define MAX_SELECT_GROUP_HASH_VALUES 256
+
+static bool
+group_setup_dp_hash_table(struct group_dpif *group, size_t max_hash)
+{
+    struct ofputil_bucket *bucket;
+    uint32_t n_buckets = group->up.n_buckets;
+    uint64_t total_weight = 0;
+    uint16_t min_weight = UINT16_MAX;
+    /* Per-bucket state for the Webster (Sainte-Lague) highest-averages
+     * apportionment: each hash value goes to the bucket with the highest
+     * current quotient 'value' = weight / divisor, divisor = 1, 3, 5, ... */
+    struct webster {
+        struct ofputil_bucket *bucket;
+        uint32_t divisor;
+        double value;
+        int hits;
+    } *webster;
+
+    if (n_buckets == 0) {
+        VLOG_DBG(" Don't apply dp_hash method without buckets.");
+        return false;
+    }
+
+    webster = xcalloc(n_buckets, sizeof(struct webster));
+    int i = 0;
+    LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
+        if (bucket->weight > 0 && bucket->weight < min_weight) {
+            min_weight = bucket->weight;
+        }
+        total_weight += bucket->weight;
+        webster[i].bucket = bucket;
+        webster[i].divisor = 1;
+        webster[i].value = bucket->weight;
+        webster[i].hits = 0;
+        i++;
+    }
+
+    if (total_weight == 0) {
+        VLOG_DBG(" Total weight is zero. No active buckets.");
+        free(webster);
+        return false;
+    }
+    VLOG_DBG(" Minimum weight: %d, total weight: %"PRIu64,
+             min_weight, total_weight);
+
+    /* Enough hash values so that even the least-weighted active bucket is
+     * apportioned at least one of them, rounded up to a power of two (the
+     * result must be usable as a bit mask), with a floor of 16. */
+    uint64_t min_slots = DIV_ROUND_UP(total_weight, min_weight);
+    uint64_t min_slots2 = ROUND_UP_POW2(min_slots);
+    uint64_t n_hash = MAX(16, min_slots2);
+    if (n_hash > MAX_SELECT_GROUP_HASH_VALUES ||
+        (max_hash != 0 && n_hash > max_hash)) {
+        VLOG_DBG(" Too many hash values required: %"PRIu64, n_hash);
+        free(webster);      /* Fix: 'webster' was leaked on this path. */
+        return false;
+    }
+
+    VLOG_DBG(" Using %"PRIu64" hash values:", n_hash);
+    group->hash_mask = n_hash - 1;
+    /* free(NULL) is a no-op, so no need to test 'hash_map' first. */
+    free(group->hash_map);
+    group->hash_map = xcalloc(n_hash, sizeof(struct ofputil_bucket *));
+
+    /* Use Webster method to distribute hash values over buckets. */
+    for (int hash = 0; hash < n_hash; hash++) {
+        struct webster *winner = &webster[0];
+        for (i = 1; i < n_buckets; i++) {
+            if (webster[i].value > winner->value) {
+                winner = &webster[i];
+            }
+        }
+        winner->hits++;
+        winner->divisor += 2;
+        winner->value = (double) winner->bucket->weight / winner->divisor;
+        group->hash_map[hash] = winner->bucket;
+    }
+
+    /* Log how close the integral apportionment came to the ideal
+     * proportional share of each bucket. */
+    i = 0;
+    LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
+        double target = (n_hash * bucket->weight) / (double) total_weight;
+        VLOG_DBG(" Bucket %d: weight=%d, target=%.2f hits=%d",
+                 bucket->bucket_id, bucket->weight,
+                 target, webster[i].hits);
+        i++;
+    }
+
+    free(webster);
+    return true;
+}
+
+/* Chooses and configures the bucket-selection method for select group
+ * 'group' based on the controller-supplied group properties.  Falls back
+ * to the default slow-path hash when dp_hash cannot be used. */
+static void
+group_set_selection_method(struct group_dpif *group)
+{
+    const struct ofputil_group_props *props = &group->up.props;
+    const char *selection_method = props->selection_method;
+    bool explicit_dp_hash = !strcmp(selection_method, "dp_hash");
+
+    VLOG_DBG("Constructing select group %"PRIu32, group->up.group_id);
+    if (selection_method[0] == '\0' || explicit_dp_hash) {
+        size_t max_hash;
+
+        if (!explicit_dp_hash) {
+            VLOG_DBG("No selection method specified. Trying dp_hash.");
+            /* The controller did not pick a method; only adopt dp_hash if
+             * the bucket configuration fits within 64 hash values. */
+            max_hash = 64;
+        } else {
+            VLOG_DBG("Selection method specified: dp_hash.");
+            /* Requested explicitly; accept any supported table size. */
+            max_hash = 0;
+        }
+
+        if (!group_setup_dp_hash_table(group, max_hash)) {
+            /* Fall back to original default hashing in slow path. */
+            VLOG_DBG("Falling back to default hash method.");
+            group->selection_method = SEL_METHOD_DEFAULT;
+            return;
+        }
+
+        group->selection_method = SEL_METHOD_DP_HASH;
+        if (!explicit_dp_hash) {
+            /* Use dp_hash selection method with symmetric L4 hash. */
+            group->hash_alg = OVS_HASH_ALG_SYM_L4;
+            group->hash_basis = 0;
+        } else {
+            /* Algorithm in the upper 32 bits, basis in the lower 32. */
+            group->hash_alg = props->selection_method_param >> 32;
+            if (group->hash_alg >= __OVS_HASH_MAX) {
+                VLOG_DBG("Invalid dp_hash algorithm %d. "
+                         "Defaulting to OVS_HASH_ALG_L4", group->hash_alg);
+                group->hash_alg = OVS_HASH_ALG_L4;
+            }
+            group->hash_basis = (uint32_t) props->selection_method_param;
+        }
+        VLOG_DBG("Use dp_hash with %d hash values using algorithm %d.",
+                 group->hash_mask + 1, group->hash_alg);
+    } else if (!strcmp(selection_method, "hash")) {
+        VLOG_DBG("Selection method specified: hash.");
+        if (props->fields.values_size > 0) {
+            /* Controller has specified hash fields. */
+            struct ds s = DS_EMPTY_INITIALIZER;
+            oxm_format_field_array(&s, &props->fields);
+            VLOG_DBG("Hash fields: %s", ds_cstr(&s));
+            ds_destroy(&s);
+            group->selection_method = SEL_METHOD_HASH;
+        } else {
+            /* No hash fields. Fall back to original default hashing. */
+            VLOG_DBG("No hash fields. Falling back to default hash method.");
+            group->selection_method = SEL_METHOD_DEFAULT;
+        }
+    } else {
+        /* Parsing of groups should ensure this never happens */
+        OVS_NOT_REACHED();
+    }
+}
+
static enum ofperr
group_construct(struct ofgroup *group_)
{
ovs_mutex_init_adaptive(&group->stats_mutex);
ovs_mutex_lock(&group->stats_mutex);
group_construct_stats(group);
+ group->hash_map = NULL;
+ if (group->up.type == OFPGT11_SELECT) {
+ group_set_selection_method(group);
+ }
ovs_mutex_unlock(&group->stats_mutex);
return 0;
}
{
struct group_dpif *group = group_dpif_cast(group_);
ovs_mutex_destroy(&group->stats_mutex);
+ if (group->hash_map) {
+ free(group->hash_map);
+ group->hash_map = NULL;
+ }
}
static enum ofperr
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- ct_dpif_flush(ofproto->backer->dpif, zone);
+ ct_dpif_flush(ofproto->backer->dpif, zone, NULL);
}
static bool
const struct ofputil_packet_in_private *pin)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
+ struct dpif_flow_stats stats;
+ struct xlate_cache xcache;
+ struct flow flow;
+ xlate_cache_init(&xcache);
/* Translate pin into datapath actions. */
uint64_t odp_actions_stub[1024 / 8];
struct ofpbuf odp_actions = OFPBUF_STUB_INITIALIZER(odp_actions_stub);
enum slow_path_reason slow;
- enum ofperr error = xlate_resume(ofproto, pin, &odp_actions, &slow);
+ enum ofperr error = xlate_resume(ofproto, pin, &odp_actions, &slow,
+ &flow, &xcache);
/* Steal 'pin->packet' and put it into a dp_packet. */
struct dp_packet packet;
- dp_packet_init(&packet, pin->public.packet_len);
- dp_packet_put(&packet, pin->public.packet, pin->public.packet_len);
+ dp_packet_init(&packet, pin->base.packet_len);
+ dp_packet_put(&packet, pin->base.packet, pin->base.packet_len);
- pkt_metadata_from_flow(&packet.md, &pin->public.flow_metadata.flow);
+ /* Run the side effects from the xcache. */
+ dpif_flow_stats_extract(&flow, &packet, time_msec(), &stats);
+ ovs_mutex_lock(&ofproto_mutex);
+ ofproto_dpif_xcache_execute(ofproto, &xcache, &stats);
+ ovs_mutex_unlock(&ofproto_mutex);
+
+ pkt_metadata_from_flow(&packet.md, &pin->base.flow_metadata.flow);
/* Fix up in_port. */
ofproto_dpif_set_packet_odp_port(ofproto,
- pin->public.flow_metadata.flow.in_port.ofp_port,
+ pin->base.flow_metadata.flow.in_port.ofp_port,
&packet);
struct flow headers;
}
\f
struct ofproto_dpif *
-ofproto_dpif_lookup(const char *name)
+ofproto_dpif_lookup_by_name(const char *name)
{
struct ofproto_dpif *ofproto;
- HMAP_FOR_EACH_WITH_HASH (ofproto, all_ofproto_dpifs_node,
- hash_string(name, 0), &all_ofproto_dpifs) {
+ HMAP_FOR_EACH_WITH_HASH (ofproto, all_ofproto_dpifs_by_name_node,
+ hash_string(name, 0),
+ &all_ofproto_dpifs_by_name) {
if (!strcmp(ofproto->up.name, name)) {
return ofproto;
}
return NULL;
}
+/* Returns the ofproto_dpif whose UUID is 'uuid', or NULL if there is none.
+ *
+ * Searches the global 'all_ofproto_dpifs_by_uuid' map, using uuid_hash()
+ * to pick the bucket and uuid_equals() to confirm an exact match. */
+struct ofproto_dpif *
+ofproto_dpif_lookup_by_uuid(const struct uuid *uuid)
+{
+    struct ofproto_dpif *ofproto;
+
+    HMAP_FOR_EACH_WITH_HASH (ofproto, all_ofproto_dpifs_by_uuid_node,
+                             uuid_hash(uuid), &all_ofproto_dpifs_by_uuid) {
+        if (uuid_equals(&ofproto->uuid, uuid)) {
+            return ofproto;
+        }
+    }
+    return NULL;
+}
+
static void
ofproto_unixctl_fdb_flush(struct unixctl_conn *conn, int argc,
const char *argv[], void *aux OVS_UNUSED)
struct ofproto_dpif *ofproto;
if (argc > 1) {
- ofproto = ofproto_dpif_lookup(argv[1]);
+ ofproto = ofproto_dpif_lookup_by_name(argv[1]);
if (!ofproto) {
unixctl_command_reply_error(conn, "no such bridge");
return;
mac_learning_flush(ofproto->ml);
ovs_rwlock_unlock(&ofproto->ml->rwlock);
} else {
- HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_by_name_node,
+ &all_ofproto_dpifs_by_name) {
ovs_rwlock_wrlock(&ofproto->ml->rwlock);
mac_learning_flush(ofproto->ml);
ovs_rwlock_unlock(&ofproto->ml->rwlock);
struct ofproto_dpif *ofproto;
if (argc > 1) {
- ofproto = ofproto_dpif_lookup(argv[1]);
+ ofproto = ofproto_dpif_lookup_by_name(argv[1]);
if (!ofproto) {
unixctl_command_reply_error(conn, "no such bridge");
return;
}
mcast_snooping_mdb_flush(ofproto->ms);
} else {
- HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_by_name_node,
+ &all_ofproto_dpifs_by_name) {
if (!mcast_snooping_enabled(ofproto->ms)) {
continue;
}
const struct ofproto_dpif *ofproto;
const struct mac_entry *e;
- ofproto = ofproto_dpif_lookup(argv[1]);
+ ofproto = ofproto_dpif_lookup_by_name(argv[1]);
if (!ofproto) {
unixctl_command_reply_error(conn, "no such bridge");
return;
char name[OFP10_MAX_PORT_NAME_LEN];
ofputil_port_to_string(ofbundle_get_a_port(bundle)->up.ofp_port,
- name, sizeof name);
+ NULL, name, sizeof name);
ds_put_format(&ds, "%5s %4d "ETH_ADDR_FMT" %3d\n",
name, e->vlan, ETH_ADDR_ARGS(e->mac),
mac_entry_age(ofproto->ml, e));
ds_destroy(&ds);
}
+/* unixctl handler for "fdb/stats-clear [bridge]": resets the MAC learning
+ * statistics counters of the named bridge, or of every bridge when no
+ * bridge argument is given. */
+static void
+ofproto_unixctl_fdb_stats_clear(struct unixctl_conn *conn, int argc,
+                                const char *argv[], void *aux OVS_UNUSED)
+{
+    struct ofproto_dpif *ofproto = NULL;
+
+    if (argc > 1) {
+        ofproto = ofproto_dpif_lookup_by_name(argv[1]);
+        if (!ofproto) {
+            unixctl_command_reply_error(conn, "no such bridge");
+            return;
+        }
+    }
+
+    if (ofproto) {
+        /* Clear only the requested bridge. */
+        ovs_rwlock_wrlock(&ofproto->ml->rwlock);
+        mac_learning_clear_statistics(ofproto->ml);
+        ovs_rwlock_unlock(&ofproto->ml->rwlock);
+    } else {
+        /* No bridge named: clear every bridge. */
+        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_by_name_node,
+                       &all_ofproto_dpifs_by_name) {
+            ovs_rwlock_wrlock(&ofproto->ml->rwlock);
+            mac_learning_clear_statistics(ofproto->ml);
+            ovs_rwlock_unlock(&ofproto->ml->rwlock);
+        }
+    }
+
+    unixctl_command_reply(conn, "statistics successfully cleared");
+}
+
+/* unixctl handler for "fdb/stats-show bridge": appends the MAC-learning
+ * table statistics of the named bridge to the command reply.
+ *
+ * argv[1] is always present: the command is registered with a minimum of
+ * one argument ("fdb/stats-show", "bridge", 1, 1). */
+static void
+ofproto_unixctl_fdb_stats_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
+                               const char *argv[], void *aux OVS_UNUSED)
+{
+    struct ds ds = DS_EMPTY_INITIALIZER;
+    const struct ofproto_dpif *ofproto;
+
+    ofproto = ofproto_dpif_lookup_by_name(argv[1]);
+    if (!ofproto) {
+        unixctl_command_reply_error(conn, "no such bridge");
+        return;
+    }
+
+    ds_put_format(&ds, "Statistics for bridge \"%s\":\n", argv[1]);
+    /* Take the table lock shared: we only read the counters. */
+    ovs_rwlock_rdlock(&ofproto->ml->rwlock);
+
+    ds_put_format(&ds, " Current/maximum MAC entries in the table: %"
+                  PRIuSIZE"/%"PRIuSIZE"\n",
+                  hmap_count(&ofproto->ml->table), ofproto->ml->max_entries);
+    ds_put_format(&ds,
+                  " Total number of learned MAC entries : %"PRIu64"\n",
+                  ofproto->ml->total_learned);
+    ds_put_format(&ds,
+                  " Total number of expired MAC entries : %"PRIu64"\n",
+                  ofproto->ml->total_expired);
+    ds_put_format(&ds,
+                  " Total number of evicted MAC entries : %"PRIu64"\n",
+                  ofproto->ml->total_evicted);
+    ds_put_format(&ds,
+                  " Total number of port moved MAC entries : %"PRIu64"\n",
+                  ofproto->ml->total_moved);
+
+    ovs_rwlock_unlock(&ofproto->ml->rwlock);
+    unixctl_command_reply(conn, ds_cstr(&ds));
+    ds_destroy(&ds);
+}
+
static void
ofproto_unixctl_mcast_snooping_show(struct unixctl_conn *conn,
int argc OVS_UNUSED,
struct mcast_group_bundle *b;
struct mcast_mrouter_bundle *mrouter;
- ofproto = ofproto_dpif_lookup(argv[1]);
+ ofproto = ofproto_dpif_lookup_by_name(argv[1]);
if (!ofproto) {
unixctl_command_reply_error(conn, "no such bridge");
return;
bundle = b->port;
ofputil_port_to_string(ofbundle_get_a_port(bundle)->up.ofp_port,
- name, sizeof name);
+ NULL, name, sizeof name);
ds_put_format(&ds, "%5s %4d ", name, grp->vlan);
ipv6_format_mapped(&grp->addr, &ds);
ds_put_format(&ds, " %3d\n",
bundle = mrouter->port;
ofputil_port_to_string(ofbundle_get_a_port(bundle)->up.ofp_port,
- name, sizeof name);
+ NULL, name, sizeof name);
ds_put_format(&ds, "%5s %4d querier %3d\n",
name, mrouter->vlan,
mcast_mrouter_age(ofproto->ms, mrouter));
{
const struct ofproto_dpif *ofproto;
- HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_by_name_node,
+ &all_ofproto_dpifs_by_name) {
char *name = xasprintf("%s@%s", ofproto->up.type, ofproto->up.name);
shash_add_nocopy(ofproto_shash, name, ofproto);
}
ds_put_format(ds, "%s: %"PRIuSIZE"\n", feature, s);
}
+/* Discriminates the C type behind a dpif_support_field's pointers.  The
+ * lower-case suffixes deliberately spell the type names so that the
+ * DPIF_SUPPORT_FIELD_##TYPE token pasting in the field-listing macros
+ * resolves to these enumerators. */
+enum dpif_support_field_type {
+    DPIF_SUPPORT_FIELD_bool,
+    DPIF_SUPPORT_FIELD_size_t,
+};
+
+/* One run-time-configurable datapath support field, pairing the mutable
+ * run-time value with its read-only boot-time (probed) counterpart. */
+struct dpif_support_field {
+    void *rt_ptr;       /* Points to the 'rt_support' field. */
+    const void *bt_ptr; /* Points to the 'bt_support' field. */
+    const char *title;  /* Human-readable description for display. */
+    enum dpif_support_field_type type;  /* Type of the pointed-to values. */
+};
+
+/* NOTE(review): "INTIALIZER" is a misspelling of "INITIALIZER"; renaming
+ * it would require updating every use site, so it is kept as is here. */
+#define DPIF_SUPPORT_FIELD_INTIALIZER(RT_PTR, BT_PTR, TITLE, TYPE) \
+    (struct dpif_support_field) {RT_PTR, BT_PTR, TITLE, TYPE}
+
static void
dpif_show_support(const struct dpif_backer_support *support, struct ds *ds)
{
#undef ODP_SUPPORT_FIELD
}
+/* Appends one line to 'ds' describing support field 'field' called 'name',
+ * showing both its run-time and boot-time values. */
+static void
+display_support_field(const char *name,
+                      const struct dpif_support_field *field,
+                      struct ds *ds)
+{
+    if (field->type == DPIF_SUPPORT_FIELD_bool) {
+        bool rt_val = *(bool *) field->rt_ptr;
+        bool bt_val = *(bool *) field->bt_ptr;
+
+        ds_put_format(ds, "%s (%s) : [run time]:%s, [boot time]:%s\n", name,
+                      field->title, rt_val ? "true" : "false",
+                      bt_val ? "true" : "false");
+    } else if (field->type == DPIF_SUPPORT_FIELD_size_t) {
+        ds_put_format(ds, "%s (%s) : [run time]:%"PRIuSIZE
+                      ", [boot time]:%"PRIuSIZE"\n", name,
+                      field->title, *(size_t *) field->rt_ptr,
+                      *(size_t *) field->bt_ptr);
+    } else {
+        OVS_NOT_REACHED();
+    }
+}
+
+/* Sets the datapath support field named 'name' in 'rt_support' to 'value',
+ * constrained by the boot-time probed capabilities in 'bt_support':
+ *
+ *   - With 'name' == NULL, appends a listing of all fields to 'ds' instead.
+ *   - With 'value' == NULL, appends just the named field to 'ds'.
+ *   - A boolean field may only be enabled if the datapath supports it;
+ *     disabling is always allowed.
+ *   - A size_t field may be set to any value in [0, boot-time value].
+ *
+ * Returns 'true' if the value is actually set. */
+static bool
+dpif_set_support(struct dpif_backer_support *rt_support,
+                 struct dpif_backer_support *bt_support,
+                 const char *name, const char *value, struct ds *ds)
+{
+    struct shash all_fields = SHASH_INITIALIZER(&all_fields);
+    struct dpif_support_field *field;
+    struct shash_node *node;
+    bool changed = false;
+
+    /* Build a name->dpif_support_field map covering both the backer-level
+     * fields and the nested ODP (flow key) support fields. */
+#define DPIF_SUPPORT_FIELD(TYPE, NAME, TITLE) \
+    {\
+        struct dpif_support_field *f = xmalloc(sizeof *f); \
+        *f = DPIF_SUPPORT_FIELD_INTIALIZER(&rt_support->NAME, \
+                                           &bt_support->NAME, \
+                                           TITLE, \
+                                           DPIF_SUPPORT_FIELD_##TYPE);\
+        shash_add_once(&all_fields, #NAME, f); \
+    }
+    DPIF_SUPPORT_FIELDS;
+#undef DPIF_SUPPORT_FIELD
+
+#define ODP_SUPPORT_FIELD(TYPE, NAME, TITLE) \
+    {\
+        struct dpif_support_field *f = xmalloc(sizeof *f); \
+        *f = DPIF_SUPPORT_FIELD_INTIALIZER(&rt_support->odp.NAME, \
+                                           &bt_support->odp.NAME, \
+                                           TITLE, \
+                                           DPIF_SUPPORT_FIELD_##TYPE);\
+        shash_add_once(&all_fields, #NAME, f); \
+    }
+    ODP_SUPPORT_FIELDS;
+#undef ODP_SUPPORT_FIELD
+
+    if (!name) {
+        SHASH_FOR_EACH (node, &all_fields) {
+            display_support_field(node->name, node->data, ds);
+        }
+        goto done;
+    }
+
+    node = shash_find(&all_fields, name);
+    if (!node) {
+        ds_put_cstr(ds, "Unexpected support field");
+        goto done;
+    }
+    field = node->data;
+
+    if (!value) {
+        display_support_field(node->name, field, ds);
+        goto done;
+    }
+
+    if (field->type == DPIF_SUPPORT_FIELD_bool) {
+        if (!strcasecmp(value, "true")) {
+            if (*(bool *) field->bt_ptr) {
+                *(bool *) field->rt_ptr = true;
+                changed = true;
+            } else {
+                /* Fixed typo in the reply: "datapth" -> "datapath". */
+                ds_put_cstr(ds, "Can not enable features not supported by "
+                                "the datapath");
+            }
+        } else if (!strcasecmp(value, "false")) {
+            /* Disabling a feature is always allowed. */
+            *(bool *) field->rt_ptr = false;
+            changed = true;
+        } else {
+            ds_put_cstr(ds, "Boolean value expected");
+        }
+    } else if (field->type == DPIF_SUPPORT_FIELD_size_t) {
+        int v;
+        if (str_to_int(value, 10, &v)) {
+            if (v >= 0) {
+                /* 'v' is non-negative here, so the conversion to size_t in
+                 * the comparison below is safe. */
+                if (v <= *(size_t *) field->bt_ptr) {
+                    *(size_t *) field->rt_ptr = v;
+                    changed = true;
+                } else {
+                    ds_put_cstr(ds, "Can not set value beyond the "
+                                    "datapath capability");
+                }
+            } else {
+                /* ds_put_cstr, not ds_put_format: no format arguments. */
+                ds_put_cstr(ds, "Negative number not expected");
+            }
+        } else {
+            ds_put_cstr(ds, "Integer number expected");
+        }
+    }
+
+done:
+    shash_destroy_free_data(&all_fields);
+    return changed;
+}
+
static void
dpif_show_backer(const struct dpif_backer *backer, struct ds *ds)
{
continue;
}
- ds_put_format(ds, "\t%s:\n", ofproto->up.name);
+ ds_put_format(ds, " %s:\n", ofproto->up.name);
ports = shash_sort(&ofproto->up.port_by_name);
for (j = 0; j < shash_count(&ofproto->up.port_by_name); j++) {
struct smap config;
odp_port_t odp_port;
- ds_put_format(ds, "\t\t%s %u/", netdev_get_name(ofport->netdev),
+ ds_put_format(ds, " %s %u/", netdev_get_name(ofport->netdev),
ofport->ofp_port);
odp_port = ofp_port_to_odp_port(ofproto, ofport->ofp_port);
smap_init(&config);
if (!netdev_get_config(ofport->netdev, &config)) {
- const struct smap_node **nodes;
- size_t i;
-
- nodes = smap_sort(&config);
- for (i = 0; i < smap_count(&config); i++) {
- const struct smap_node *node = nodes[i];
- ds_put_format(ds, "%c %s=%s", i ? ',' : ':',
- node->key, node->value);
+ const struct smap_node **nodes = smap_sort(&config);
+ for (size_t k = 0; k < smap_count(&config); k++) {
+ ds_put_format(ds, "%c %s=%s", k ? ',' : ':',
+ nodes[k]->key, nodes[k]->value);
}
free(nodes);
}
const struct ofproto_dpif *ofproto;
struct ds ds = DS_EMPTY_INITIALIZER;
- bool verbosity = false;
-
- struct dpif_port dpif_port;
- struct dpif_port_dump port_dump;
- struct hmap portno_names;
struct dpif_flow_dump *flow_dump;
struct dpif_flow_dump_thread *flow_dump_thread;
struct dpif_flow f;
int error;
- ofproto = ofproto_dpif_lookup(argv[argc - 1]);
+ ofproto = ofproto_dpif_lookup_by_name(argv[argc - 1]);
if (!ofproto) {
unixctl_command_reply_error(conn, "no such bridge");
return;
}
- if (argc > 2 && !strcmp(argv[1], "-m")) {
- verbosity = true;
+ bool verbosity = false;
+ bool names = false;
+ bool set_names = false;
+ for (int i = 1; i < argc - 1; i++) {
+ if (!strcmp(argv[i], "-m")) {
+ verbosity = true;
+ } else if (!strcmp(argv[i], "--names")) {
+ names = true;
+ set_names = true;
+ } else if (!strcmp(argv[i], "--no-names")) {
+ names = false;
+ set_names = true;
+ }
+ }
+ if (!set_names) {
+ names = verbosity;
}
- hmap_init(&portno_names);
- DPIF_PORT_FOR_EACH (&dpif_port, &port_dump, ofproto->backer->dpif) {
- odp_portno_names_set(&portno_names, dpif_port.port_no, dpif_port.name);
+ struct hmap *portno_names = NULL;
+ if (names) {
+ portno_names = xmalloc(sizeof *portno_names);
+ hmap_init(portno_names);
+
+ struct dpif_port dpif_port;
+ struct dpif_port_dump port_dump;
+ DPIF_PORT_FOR_EACH (&dpif_port, &port_dump, ofproto->backer->dpif) {
+ odp_portno_names_set(portno_names, dpif_port.port_no,
+ dpif_port.name);
+ }
}
ds_init(&ds);
- flow_dump = dpif_flow_dump_create(ofproto->backer->dpif, false);
+ flow_dump = dpif_flow_dump_create(ofproto->backer->dpif, false, NULL);
flow_dump_thread = dpif_flow_dump_thread_create(flow_dump);
while (dpif_flow_dump_next(flow_dump_thread, &f, 1)) {
struct flow flow;
ds_put_cstr(&ds, " ");
}
odp_flow_format(f.key, f.key_len, f.mask, f.mask_len,
- &portno_names, &ds, verbosity);
+ portno_names, &ds, verbosity);
ds_put_cstr(&ds, ", ");
dpif_flow_stats_format(&f.stats, &ds);
ds_put_cstr(&ds, ", actions:");
- format_odp_actions(&ds, f.actions, f.actions_len);
+ format_odp_actions(&ds, f.actions, f.actions_len, portno_names);
ds_put_char(&ds, '\n');
}
dpif_flow_dump_thread_destroy(flow_dump_thread);
} else {
unixctl_command_reply(conn, ds_cstr(&ds));
}
- odp_portno_names_destroy(&portno_names);
- hmap_destroy(&portno_names);
- ds_destroy(&ds);
-}
-
-static void
-ofproto_revalidate_all_backers(void)
-{
- const struct shash_node **backers;
- int i;
-
- backers = shash_sort(&all_dpif_backers);
- for (i = 0; i < shash_count(&all_dpif_backers); i++) {
- struct dpif_backer *backer = backers[i]->data;
- backer->need_revalidate = REV_RECONFIGURE;
- }
- free(backers);
-}
-
-static void
-disable_tnl_push_pop(struct unixctl_conn *conn OVS_UNUSED, int argc OVS_UNUSED,
- const char *argv[], void *aux OVS_UNUSED)
-{
- if (!strcasecmp(argv[1], "off")) {
- ofproto_use_tnl_push_pop = false;
- unixctl_command_reply(conn, "Tunnel push-pop off");
- ofproto_revalidate_all_backers();
- } else if (!strcasecmp(argv[1], "on")) {
- ofproto_use_tnl_push_pop = true;
- unixctl_command_reply(conn, "Tunnel push-pop on");
- ofproto_revalidate_all_backers();
- } else {
- unixctl_command_reply_error(conn, "Invalid argument");
- }
-}
-
-static void
-disable_datapath_truncate(struct unixctl_conn *conn OVS_UNUSED,
- int argc OVS_UNUSED,
- const char *argv[] OVS_UNUSED,
- void *aux OVS_UNUSED)
-{
- const struct shash_node **backers;
- int i;
-
- backers = shash_sort(&all_dpif_backers);
- for (i = 0; i < shash_count(&all_dpif_backers); i++) {
- struct dpif_backer *backer = backers[i]->data;
- backer->support.trunc = false;
+ if (portno_names) {
+ odp_portno_names_destroy(portno_names);
+ hmap_destroy(portno_names);
+ free(portno_names);
}
- free(backers);
- unixctl_command_reply(conn, "Datapath truncate action diabled");
+ ds_destroy(&ds);
}
static void
-disable_datapath_clone(struct unixctl_conn *conn OVS_UNUSED,
- int argc, const char *argv[],
- void *aux OVS_UNUSED)
+ofproto_unixctl_dpif_show_dp_features(struct unixctl_conn *conn,
+ int argc, const char *argv[],
+ void *aux OVS_UNUSED)
{
struct ds ds = DS_EMPTY_INITIALIZER;
const char *br = argv[argc -1];
- struct ofproto_dpif *ofproto;
+ struct ofproto_dpif *ofproto = ofproto_dpif_lookup_by_name(br);
- ofproto = ofproto_dpif_lookup(br);
if (!ofproto) {
unixctl_command_reply_error(conn, "no such bridge");
return;
}
- xlate_disable_dp_clone(ofproto);
- udpif_flush(ofproto->backer->udpif);
- ds_put_format(&ds, "Datapath clone action disabled for bridge %s", br);
+
+ dpif_show_support(&ofproto->backer->bt_support, &ds);
unixctl_command_reply(conn, ds_cstr(&ds));
- ds_destroy(&ds);
}
static void
-ofproto_unixctl_dpif_show_dp_features(struct unixctl_conn *conn,
- int argc, const char *argv[],
- void *aux OVS_UNUSED)
+ofproto_unixctl_dpif_set_dp_features(struct unixctl_conn *conn,
+ int argc, const char *argv[],
+ void *aux OVS_UNUSED)
{
struct ds ds = DS_EMPTY_INITIALIZER;
- const char *br = argv[argc -1];
- struct ofproto_dpif *ofproto = ofproto_dpif_lookup(br);
+ const char *br = argv[1];
+ const char *name, *value;
+ struct ofproto_dpif *ofproto = ofproto_dpif_lookup_by_name(br);
+ bool changed;
if (!ofproto) {
unixctl_command_reply_error(conn, "no such bridge");
return;
}
- dpif_show_support(&ofproto->backer->support, &ds);
+ name = argc > 2 ? argv[2] : NULL;
+ value = argc > 3 ? argv[3] : NULL;
+ changed = dpif_set_support(&ofproto->backer->rt_support,
+ &ofproto->backer->bt_support,
+ name, value, &ds);
+ if (changed) {
+ xlate_set_support(ofproto, &ofproto->backer->rt_support);
+ udpif_flush(ofproto->backer->udpif);
+ }
unixctl_command_reply(conn, ds_cstr(&ds));
+ ds_destroy(&ds);
}
static void
ofproto_unixctl_fdb_flush, NULL);
unixctl_command_register("fdb/show", "bridge", 1, 1,
ofproto_unixctl_fdb_show, NULL);
+ unixctl_command_register("fdb/stats-clear", "[bridge]", 0, 1,
+ ofproto_unixctl_fdb_stats_clear, NULL);
+ unixctl_command_register("fdb/stats-show", "bridge", 1, 1,
+ ofproto_unixctl_fdb_stats_show, NULL);
unixctl_command_register("mdb/flush", "[bridge]", 0, 1,
ofproto_unixctl_mcast_snooping_flush, NULL);
unixctl_command_register("mdb/show", "bridge", 1, 1,
NULL);
unixctl_command_register("dpif/show-dp-features", "bridge", 1, 1,
ofproto_unixctl_dpif_show_dp_features, NULL);
- unixctl_command_register("dpif/dump-flows", "[-m] bridge", 1, 2,
+ unixctl_command_register("dpif/dump-flows",
+ "[-m] [--names | --no-names] bridge", 1, INT_MAX,
ofproto_unixctl_dpif_dump_flows, NULL);
-
- unixctl_command_register("ofproto/tnl-push-pop", "[on]|[off]", 1, 1,
- disable_tnl_push_pop, NULL);
-
- unixctl_command_register("dpif/disable-truncate", "", 0, 0,
- disable_datapath_truncate, NULL);
-
- unixctl_command_register("dpif/disable-dp-clone", "bridge", 1, 1,
- disable_datapath_clone, NULL);
+ unixctl_command_register("dpif/set-dp-features", "bridge", 1, 3 ,
+ ofproto_unixctl_dpif_set_dp_features, NULL);
}
\f
static odp_port_t
}
}
+/* 'match' is non-const to allow for temporary modifications. Any changes are
+ * restored before returning. */
int
ofproto_dpif_add_internal_flow(struct ofproto_dpif *ofproto,
- const struct match *match, int priority,
+ struct match *match, int priority,
uint16_t idle_timeout,
const struct ofpbuf *ofpacts,
struct rule **rulep)
fm = (struct ofputil_flow_mod) {
.buffer_id = UINT32_MAX,
- .match = *match,
.priority = priority,
.table_id = TBL_INTERNAL,
.command = OFPFC_ADD,
.ofpacts = ofpacts->data,
.ofpacts_len = ofpacts->size,
};
-
+ minimatch_init(&fm.match, match);
error = ofproto_flow_mod(&ofproto->up, &fm);
+ minimatch_destroy(&fm.match);
+
if (error) {
VLOG_ERR_RL(&rl, "failed to add internal flow (%s)",
ofperr_to_string(error));
rule = rule_dpif_lookup_in_table(ofproto,
ofproto_dpif_get_tables_version(ofproto),
- TBL_INTERNAL, &fm.match.flow,
- &fm.match.wc);
+ TBL_INTERNAL, &match->flow, &match->wc);
if (rule) {
*rulep = &rule->up;
} else {
fm = (struct ofputil_flow_mod) {
.buffer_id = UINT32_MAX,
- .match = *match,
.priority = priority,
.table_id = TBL_INTERNAL,
.out_port = OFPP_ANY,
.flags = OFPUTIL_FF_HIDDEN_FIELDS | OFPUTIL_FF_NO_READONLY,
.command = OFPFC_DELETE_STRICT,
};
-
+ minimatch_init(&fm.match, match);
error = ofproto_flow_mod(&ofproto->up, &fm);
+ minimatch_destroy(&fm.match);
+
if (error) {
VLOG_ERR_RL(&rl, "failed to delete internal flow (%s)",
ofperr_to_string(error));
}
}
- switch (dpif_meter_set(ofproto->backer->dpif, meter_id, config)) {
+ switch (dpif_meter_set(ofproto->backer->dpif, *meter_id, config)) {
case 0:
return 0;
case EFBIG: /* meter_id out of range */
return OFPERR_OFPMMFC_OUT_OF_BANDS;
case ENODEV: /* Unsupported band type */
return OFPERR_OFPMMFC_BAD_BAND;
+ case EDOM: /* Rate must be non-zero */
+ return OFPERR_OFPMMFC_BAD_RATE;
default:
return OFPERR_OFPMMFC_UNKNOWN;
}