/*
- * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include "lacp.h"
#include "learn.h"
#include "mac-learning.h"
+#include "math.h"
#include "mcast-snooping.h"
#include "multipath.h"
#include "netdev-vport.h"
#include "openvswitch/ofp-actions.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/meta-flow.h"
-#include "openvswitch/ofp-parse.h"
#include "openvswitch/ofp-print.h"
-#include "openvswitch/ofp-util.h"
#include "openvswitch/ofpbuf.h"
+#include "openvswitch/uuid.h"
#include "openvswitch/vlog.h"
#include "ovs-lldp.h"
#include "ovs-rcu.h"
#include "unaligned.h"
#include "unixctl.h"
#include "util.h"
+#include "uuid.h"
#include "vlan-bitmap.h"
VLOG_DEFINE_THIS_MODULE(ofproto_dpif);
struct cfm *cfm; /* Connectivity Fault Management, if any. */
struct bfd *bfd; /* BFD, if any. */
struct lldp *lldp; /* lldp, if any. */
- bool may_enable; /* May be enabled in bonds. */
bool is_tunnel; /* This port is a tunnel. */
long long int carrier_seq; /* Carrier status changes. */
struct ofport_dpif *peer; /* Peer if patch port. */
struct shash all_dpif_backers = SHASH_INITIALIZER(&all_dpif_backers);
/* All existing ofproto_dpif instances, indexed by ->up.name. */
-struct hmap all_ofproto_dpifs = HMAP_INITIALIZER(&all_ofproto_dpifs);
+static struct hmap all_ofproto_dpifs_by_name =
+ HMAP_INITIALIZER(&all_ofproto_dpifs_by_name);
+
+/* All existing ofproto_dpif instances, indexed by ->uuid. */
+static struct hmap all_ofproto_dpifs_by_uuid =
+ HMAP_INITIALIZER(&all_ofproto_dpifs_by_uuid);
static bool ofproto_use_tnl_push_pop = true;
static void ofproto_unixctl_init(void);
struct ofproto_dpif *ofproto;
sset_clear(names);
- HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_by_name_node,
+ &all_ofproto_dpifs_by_name) {
if (strcmp(type, ofproto->up.type)) {
continue;
}
{
struct ofproto_dpif *ofproto;
- HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_by_name_node,
+ &all_ofproto_dpifs_by_name) {
if (sset_contains(&ofproto->ports, name)) {
return ofproto;
}
simap_init(&tmp_backers);
simap_swap(&backer->tnl_backers, &tmp_backers);
- HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_by_name_node,
+ &all_ofproto_dpifs_by_name) {
struct ofport_dpif *iter;
if (backer != ofproto->backer) {
backer->need_revalidate = 0;
xlate_txn_start();
- HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_by_name_node,
+ &all_ofproto_dpifs_by_name) {
struct ofport_dpif *ofport;
struct ofbundle *bundle;
ofport->rstp_port, ofport->qdscp,
ofport->n_qdscp, ofport->up.pp.config,
ofport->up.pp.state, ofport->is_tunnel,
- ofport->may_enable);
+ ofport->up.may_enable);
}
}
xlate_txn_commit();
const char *devname;
sset_init(&devnames);
- HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_by_name_node,
+ &all_ofproto_dpifs_by_name) {
if (ofproto->backer == backer) {
struct ofport *ofport;
return;
}
- HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
- &all_ofproto_dpifs) {
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_by_name_node,
+ &all_ofproto_dpifs_by_name) {
if (simap_contains(&ofproto->backer->tnl_backers, devname)) {
return;
}
{
struct ofproto_dpif *ofproto;
- HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_by_name_node,
+ &all_ofproto_dpifs_by_name) {
if (ofproto->backer == backer) {
sset_clear(&ofproto->port_poll_set);
ofproto->port_poll_errno = error;
static void
close_dpif_backer(struct dpif_backer *backer, bool del)
{
+ struct simap_node *node;
+
ovs_assert(backer->refcount > 0);
if (--backer->refcount) {
udpif_destroy(backer->udpif);
+ SIMAP_FOR_EACH (node, &backer->tnl_backers) {
+ dpif_port_del(backer->dpif, u32_to_odp(node->data), false);
+ }
simap_destroy(&backer->tnl_backers);
ovs_rwlock_destroy(&backer->odp_to_ofport_lock);
hmap_destroy(&backer->odp_to_ofport_map);
/* Compose a dummy UDP packet. */
dp_packet_init(&packet, 0);
- flow_compose(&packet, &flow, 0);
+ flow_compose(&packet, &flow, NULL, 64);
/* Execute the actions. On older datapaths this fails with EINVAL, on
* newer datapaths it succeeds. */
return !error;
}
+/* Tests whether 'backer''s datapath supports the OVS_ACTION_ATTR_CT_CLEAR
+ * action. */
+static bool
+check_ct_clear(struct dpif_backer *backer)
+{
+ struct odputil_keybuf keybuf;
+ uint8_t actbuf[NL_A_FLAG_SIZE];
+ struct ofpbuf actions;
+ struct ofpbuf key;
+ struct flow flow;
+ bool supported;
+
+ struct odp_flow_key_parms odp_parms = {
+ .flow = &flow,
+ .probe = true,
+ };
+
+ /* Build a flow key for an all-zero probe flow. */
+ memset(&flow, 0, sizeof flow);
+ ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
+ odp_flow_key_from_flow(&odp_parms, &key);
+
+ /* A lone CT_CLEAR flag attribute is the entire action list for the
+ * probe; an older datapath will reject the unknown action. */
+ ofpbuf_use_stack(&actions, &actbuf, sizeof actbuf);
+ nl_msg_put_flag(&actions, OVS_ACTION_ATTR_CT_CLEAR);
+
+ supported = dpif_probe_feature(backer->dpif, "ct_clear", &key,
+ &actions, NULL);
+
+ VLOG_INFO("%s: Datapath %s ct_clear action",
+ dpif_name(backer->dpif), (supported) ? "supports"
+ : "does not support");
+ return supported;
+}
+
+/* Probe the highest dp_hash algorithm supported by the datapath.
+ * Returns the largest algorithm number that the datapath accepted
+ * (0 if only the baseline OVS_HASH_ALG_L4 is available). */
+static size_t
+check_max_dp_hash_alg(struct dpif_backer *backer)
+{
+ struct odputil_keybuf keybuf;
+ struct ofpbuf key;
+ struct flow flow;
+ struct ovs_action_hash *hash;
+ int max_alg = 0;
+
+ struct odp_flow_key_parms odp_parms = {
+ .flow = &flow,
+ .probe = true,
+ };
+
+ /* Build a flow key for an all-zero probe flow. */
+ memset(&flow, 0, sizeof flow);
+ ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
+ odp_flow_key_from_flow(&odp_parms, &key);
+
+ /* All datapaths support algorithm 0 (OVS_HASH_ALG_L4). */
+ for (int alg = 1; alg < __OVS_HASH_MAX; alg++) {
+ struct ofpbuf actions;
+ bool ok;
+
+ ofpbuf_init(&actions, 300);
+ hash = nl_msg_put_unspec_uninit(&actions,
+ OVS_ACTION_ATTR_HASH, sizeof *hash);
+ hash->hash_basis = 0;
+ hash->hash_alg = alg;
+ ok = dpif_probe_feature(backer->dpif, "Max dp_hash algorithm", &key,
+ &actions, NULL);
+ ofpbuf_uninit(&actions);
+ if (ok) {
+ max_alg = alg;
+ } else {
+ /* Probing stops at the first unsupported algorithm number. */
+ break;
+ }
+ }
+
+ VLOG_INFO("%s: Max dp_hash algorithm probed to be %d",
+ dpif_name(backer->dpif), max_alg);
+ return max_alg;
+}
+
#define CHECK_FEATURE__(NAME, SUPPORT, FIELD, VALUE, ETHTYPE) \
static bool \
check_##NAME(struct dpif_backer *backer) \
backer->rt_support.clone = check_clone(backer);
backer->rt_support.sample_nesting = check_max_sample_nesting(backer);
backer->rt_support.ct_eventmask = check_ct_eventmask(backer);
+ backer->rt_support.ct_clear = check_ct_clear(backer);
+ backer->rt_support.max_hash_alg = check_max_dp_hash_alg(backer);
/* Flow fields. */
backer->rt_support.odp.ct_state = check_ct_state(backer);
}
}
- hmap_insert(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node,
+ hmap_insert(&all_ofproto_dpifs_by_name,
+ &ofproto->all_ofproto_dpifs_by_name_node,
hash_string(ofproto->up.name, 0));
+ hmap_insert(&all_ofproto_dpifs_by_uuid,
+ &ofproto->all_ofproto_dpifs_by_uuid_node,
+ uuid_hash(&ofproto->uuid));
memset(&ofproto->stats, 0, sizeof ofproto->stats);
ofproto_init_tables(ofproto_, N_TABLES);
controller->max_len = UINT16_MAX;
controller->controller_id = 0;
controller->reason = OFPR_IMPLICIT_MISS;
+ controller->meter_id = NX_CTLR_NO_METER;
ofpact_finish_CONTROLLER(&ofpacts, &controller);
error = add_internal_miss_flow(ofproto, id++, &ofpacts,
* to the ofproto or anything in it. */
udpif_synchronize(ofproto->backer->udpif);
- hmap_remove(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node);
+ hmap_remove(&all_ofproto_dpifs_by_name,
+ &ofproto->all_ofproto_dpifs_by_name_node);
+ hmap_remove(&all_ofproto_dpifs_by_uuid,
+ &ofproto->all_ofproto_dpifs_by_uuid_node);
OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
CLS_FOR_EACH (rule, up.cr, &table->cls) {
static void
query_tables(struct ofproto *ofproto,
- struct ofputil_table_features *features,
+ struct ofputil_table_features *features OVS_UNUSED,
struct ofputil_table_stats *stats)
{
- strcpy(features->name, "classifier");
-
if (stats) {
int i;
port->cfm = NULL;
port->bfd = NULL;
port->lldp = NULL;
- port->may_enable = false;
port->stp_port = NULL;
port->stp_state = STP_DISABLED;
port->rstp_port = NULL;
}
port->is_tunnel = true;
- if (ofproto->ipfix) {
- dpif_ipfix_add_tunnel_port(ofproto->ipfix, port_, port->odp_port);
- }
} else {
/* Sanity-check that a mapping doesn't already exist. This
* shouldn't happen for non-tunnel ports. */
if (ofproto->sflow) {
dpif_sflow_add_port(ofproto->sflow, port_, port->odp_port);
}
+ if (ofproto->ipfix) {
+ dpif_ipfix_add_port(ofproto->ipfix, port_, port->odp_port);
+ }
return 0;
}
atomic_count_dec(&ofproto->backer->tnl_count);
}
- if (port->is_tunnel && ofproto->ipfix) {
- dpif_ipfix_del_tunnel_port(ofproto->ipfix, port->odp_port);
- }
-
tnl_port_del(port, port->odp_port);
sset_find_and_delete(&ofproto->ports, devname);
sset_find_and_delete(&ofproto->ghost_ports, devname);
if (ofproto->sflow) {
dpif_sflow_del_port(ofproto->sflow, port->odp_port);
}
+ if (ofproto->ipfix) {
+ dpif_ipfix_del_port(ofproto->ipfix, port->odp_port);
+ }
free(port->qdscp);
}
bfd_set_netdev(port->bfd, netdev);
}
- /* Set liveness, unless the link is administratively or
- * operationally down or link monitoring false */
- if (!(port->up.pp.config & OFPUTIL_PC_PORT_DOWN) &&
- !(port->up.pp.state & OFPUTIL_PS_LINK_DOWN) &&
- port->may_enable) {
- port->up.pp.state |= OFPUTIL_PS_LIVE;
- } else {
- port->up.pp.state &= ~OFPUTIL_PS_LIVE;
- }
-
ofproto_dpif_monitor_port_update(port, port->bfd, port->cfm,
port->lldp, &port->up.pp.hw_addr);
bundle_update(port->bundle);
}
}
+ port_run(port);
}
static int
di, bridge_exporter_options, flow_exporters_options,
n_flow_exporters_options);
- /* Add tunnel ports only when a new ipfix created */
+ /* Add ports only when a new ipfix created */
if (new_di == true) {
struct ofport_dpif *ofport;
HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
- if (ofport->is_tunnel == true) {
- dpif_ipfix_add_tunnel_port(di, &ofport->up, ofport->odp_port);
- }
+ dpif_ipfix_add_port(di, &ofport->up, ofport->odp_port);
}
}
ofport, netdev_get_name(ofport->up.netdev));
update_rstp_port_state(ofport);
/* Synchronize operational status. */
- rstp_port_set_mac_operational(rp, ofport->may_enable);
+ rstp_port_set_mac_operational(rp, ofport->up.may_enable);
}
static void
if (all_ofprotos) {
struct ofproto_dpif *o;
- HMAP_FOR_EACH (o, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ HMAP_FOR_EACH (o, all_ofproto_dpifs_by_name_node,
+ &all_ofproto_dpifs_by_name) {
if (o != ofproto) {
struct mac_entry *e;
static void
send_pdu_cb(void *port_, const void *pdu, size_t pdu_size)
{
- static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 10);
struct ofport_dpif *port = port_;
struct eth_addr ea;
int error;
ofproto_dpif_send_packet(port, false, &packet);
dp_packet_uninit(&packet);
} else {
- VLOG_ERR_RL(&rl, "port %s: cannot obtain Ethernet address of iface "
+ static struct vlog_rate_limit rll = VLOG_RATE_LIMIT_INIT(1, 10);
+ VLOG_ERR_RL(&rll, "port %s: cannot obtain Ethernet address of iface "
"%s (%s)", port->bundle->name,
netdev_get_name(port->up.netdev), ovs_strerror(error));
}
}
if (n_errors) {
- static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
- VLOG_WARN_RL(&rl, "bond %s: %d errors sending %d gratuitous learning "
+ static struct vlog_rate_limit rll = VLOG_RATE_LIMIT_INIT(1, 5);
+ VLOG_WARN_RL(&rll, "bond %s: %d errors sending %d gratuitous learning "
"packets, last error was: %s",
bundle->name, n_errors, n_packets, ovs_strerror(error));
} else {
struct ofport_dpif *port;
LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
- bond_slave_set_may_enable(bundle->bond, port, port->may_enable);
+ bond_slave_set_may_enable(bundle->bond, port, port->up.may_enable);
}
if (bond_run(bundle->bond, lacp_status(bundle->lacp))) {
return;
}
- HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_by_name_node,
+ &all_ofproto_dpifs_by_name) {
struct ofport *peer_ofport;
struct ofport_dpif *peer;
char *peer_peer;
free(peer_name);
}
-static void
-port_run(struct ofport_dpif *ofport)
+static bool
+may_enable_port(struct ofport_dpif *ofport)
{
- long long int carrier_seq = netdev_get_carrier_resets(ofport->up.netdev);
- bool carrier_changed = carrier_seq != ofport->carrier_seq;
- bool enable = netdev_get_carrier(ofport->up.netdev);
- bool cfm_enable = false;
- bool bfd_enable = false;
-
- ofport->carrier_seq = carrier_seq;
-
- if (ofport->cfm) {
- int cfm_opup = cfm_get_opup(ofport->cfm);
-
- cfm_enable = !cfm_get_fault(ofport->cfm);
-
- if (cfm_opup >= 0) {
- cfm_enable = cfm_enable && cfm_opup;
- }
+ /* Carrier must be up. */
+ if (!netdev_get_carrier(ofport->up.netdev)) {
+ return false;
}
- if (ofport->bfd) {
- bfd_enable = bfd_forwarding(ofport->bfd);
+ /* If CFM or BFD is enabled, then at least one of them must report that the
+ * port is up. */
+ if ((ofport->bfd || ofport->cfm)
+ && !(ofport->cfm
+ && !cfm_get_fault(ofport->cfm)
+ && cfm_get_opup(ofport->cfm) != 0)
+ && !(ofport->bfd
+ && bfd_forwarding(ofport->bfd))) {
+ return false;
}
- if (ofport->bfd || ofport->cfm) {
- enable = enable && (cfm_enable || bfd_enable);
+ /* If LACP is enabled, it must report that the link is enabled. */
+ if (ofport->bundle
+ && !lacp_slave_may_enable(ofport->bundle->lacp, ofport)) {
+ return false;
}
- if (ofport->bundle) {
- enable = enable && lacp_slave_may_enable(ofport->bundle->lacp, ofport);
- if (carrier_changed) {
- lacp_slave_carrier_changed(ofport->bundle->lacp, ofport);
- }
+ return true;
+}
+
+static void
+port_run(struct ofport_dpif *ofport)
+{
+ long long int carrier_seq = netdev_get_carrier_resets(ofport->up.netdev);
+ bool carrier_changed = carrier_seq != ofport->carrier_seq;
+ ofport->carrier_seq = carrier_seq;
+ if (carrier_changed && ofport->bundle) {
+ lacp_slave_carrier_changed(ofport->bundle->lacp, ofport);
}
- if (ofport->may_enable != enable) {
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
+ bool enable = may_enable_port(ofport);
+ if (ofport->up.may_enable != enable) {
+ ofproto_port_set_enable(&ofport->up, enable);
+ struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
ofproto->backer->need_revalidate = REV_PORT_TOGGLED;
if (ofport->rstp_port) {
rstp_port_set_mac_operational(ofport->rstp_port, enable);
}
-
- /* Propagate liveness, unless the link is administratively or
- * operationally down. */
- if (!(ofport->up.pp.config & OFPUTIL_PC_PORT_DOWN) &&
- !(ofport->up.pp.state & OFPUTIL_PS_LINK_DOWN)) {
- enum ofputil_port_state of_state = ofport->up.pp.state;
- if (enable) {
- of_state |= OFPUTIL_PS_LIVE;
- } else {
- of_state &= ~OFPUTIL_PS_LIVE;
- }
- ofproto_port_set_state(&ofport->up, of_state);
- }
}
-
- ofport->may_enable = enable;
}
static int
}
dp_port_name = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
- if (!dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
- odp_port_t port_no = ODPP_NONE;
- int error;
- error = dpif_port_add(ofproto->backer->dpif, netdev, &port_no);
+ odp_port_t port_no = ODPP_NONE;
+ int error = dpif_port_add(ofproto->backer->dpif, netdev, &port_no);
+ if (error != EEXIST && error != EBUSY) {
if (error) {
return error;
}
static void
report_unsupported_act(const char *action, const char *detail)
{
- static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
- VLOG_WARN_RL(&rl, "Rejecting %s action because datapath does not support"
+ static struct vlog_rate_limit rll = VLOG_RATE_LIMIT_INIT(1, 5);
+ VLOG_WARN_RL(&rll, "Rejecting %s action because datapath does not support"
"%s%s (your kernel module may be out of date)",
action, detail ? " " : "", detail ? detail : "");
}
case XC_NORMAL:
case XC_GROUP:
case XC_TNL_NEIGH:
- case XC_CONTROLLER:
case XC_TUNNEL_HEADER:
xlate_push_stats_entry(entry, stats);
break;
ovs_mutex_unlock(&group->stats_mutex);
}
+/* Calculate the dp_hash mask needed to provide the least weighted bucket
+ * with at least one hash value and construct a mapping table from masked
+ * dp_hash value to group bucket using the Webster method.
+ * If the caller specifies a non-zero max_hash value, abort and return false
+ * if more hash values would be required. The absolute maximum number of
+ * hash values supported is 256. */
+
+#define MAX_SELECT_GROUP_HASH_VALUES 256
+
+static bool
+group_setup_dp_hash_table(struct group_dpif *group, size_t max_hash)
+{
+ struct ofputil_bucket *bucket;
+ uint32_t n_buckets = group->up.n_buckets;
+ uint64_t total_weight = 0;
+ uint16_t min_weight = UINT16_MAX;
+ /* One Webster apportionment slot per bucket. */
+ struct webster {
+ struct ofputil_bucket *bucket;
+ uint32_t divisor;
+ double value;
+ int hits;
+ } *webster;
+
+ if (n_buckets == 0) {
+ VLOG_DBG(" Don't apply dp_hash method without buckets.");
+ return false;
+ }
+
+ webster = xcalloc(n_buckets, sizeof(struct webster));
+ int i = 0;
+ LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
+ if (bucket->weight > 0 && bucket->weight < min_weight) {
+ min_weight = bucket->weight;
+ }
+ total_weight += bucket->weight;
+ webster[i].bucket = bucket;
+ webster[i].divisor = 1;
+ webster[i].value = bucket->weight;
+ webster[i].hits = 0;
+ i++;
+ }
+
+ if (total_weight == 0) {
+ VLOG_DBG(" Total weight is zero. No active buckets.");
+ free(webster);
+ return false;
+ }
+ VLOG_DBG(" Minimum weight: %d, total weight: %"PRIu64,
+ min_weight, total_weight);
+
+ uint64_t min_slots = DIV_ROUND_UP(total_weight, min_weight);
+ uint64_t min_slots2 = ROUND_UP_POW2(min_slots);
+ uint64_t n_hash = MAX(16, min_slots2);
+ if (n_hash > MAX_SELECT_GROUP_HASH_VALUES ||
+ (max_hash != 0 && n_hash > max_hash)) {
+ VLOG_DBG(" Too many hash values required: %"PRIu64, n_hash);
+ /* Fix: 'webster' was leaked on this early-return path; every other
+ * exit from this function frees it. */
+ free(webster);
+ return false;
+ }
+
+ VLOG_DBG(" Using %"PRIu64" hash values:", n_hash);
+ group->hash_mask = n_hash - 1;
+ if (group->hash_map) {
+ free(group->hash_map);
+ }
+ group->hash_map = xcalloc(n_hash, sizeof(struct ofputil_bucket *));
+
+ /* Use Webster method to distribute hash values over buckets. */
+ for (int hash = 0; hash < n_hash; hash++) {
+ struct webster *winner = &webster[0];
+ for (i = 1; i < n_buckets; i++) {
+ if (webster[i].value > winner->value) {
+ winner = &webster[i];
+ }
+ }
+ winner->hits++;
+ winner->divisor += 2;
+ winner->value = (double) winner->bucket->weight / winner->divisor;
+ group->hash_map[hash] = winner->bucket;
+ }
+
+ /* Log how close each bucket's hit count came to its ideal share. */
+ i = 0;
+ LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
+ double target = (n_hash * bucket->weight) / (double) total_weight;
+ VLOG_DBG(" Bucket %d: weight=%d, target=%.2f hits=%d",
+ bucket->bucket_id, bucket->weight,
+ target, webster[i].hits);
+ i++;
+ }
+
+ free(webster);
+ return true;
+}
+
+/* Chooses and configures the bucket selection method for select group
+ * 'group' based on the controller-supplied selection_method property:
+ * dp_hash when possible, the "hash" method when hash fields were given,
+ * otherwise the default slow-path hashing. */
+static void
+group_set_selection_method(struct group_dpif *group)
+{
+ const struct ofputil_group_props *props = &group->up.props;
+ const char *selection_method = props->selection_method;
+
+ VLOG_DBG("Constructing select group %"PRIu32, group->up.group_id);
+ if (selection_method[0] == '\0') {
+ VLOG_DBG("No selection method specified. Trying dp_hash.");
+ /* If the controller has not specified a selection method, check if
+ * the dp_hash selection method with max 64 hash values is appropriate
+ * for the given bucket configuration. */
+ if (group_setup_dp_hash_table(group, 64)) {
+ /* Use dp_hash selection method with symmetric L4 hash. */
+ group->selection_method = SEL_METHOD_DP_HASH;
+ group->hash_alg = OVS_HASH_ALG_SYM_L4;
+ group->hash_basis = 0;
+ VLOG_DBG("Use dp_hash with %d hash values using algorithm %d.",
+ group->hash_mask + 1, group->hash_alg);
+ } else {
+ /* Fall back to original default hashing in slow path. */
+ VLOG_DBG("Falling back to default hash method.");
+ group->selection_method = SEL_METHOD_DEFAULT;
+ }
+ } else if (!strcmp(selection_method, "dp_hash")) {
+ VLOG_DBG("Selection method specified: dp_hash.");
+ /* Try to use dp_hash if possible at all. */
+ if (group_setup_dp_hash_table(group, 0)) {
+ group->selection_method = SEL_METHOD_DP_HASH;
+ /* The hash algorithm is carried in the upper 32 bits of
+ * selection_method_param; the basis in the lower 32 bits. */
+ group->hash_alg = props->selection_method_param >> 32;
+ if (group->hash_alg >= __OVS_HASH_MAX) {
+ VLOG_DBG("Invalid dp_hash algorithm %d. "
+ "Defaulting to OVS_HASH_ALG_L4", group->hash_alg);
+ group->hash_alg = OVS_HASH_ALG_L4;
+ }
+ group->hash_basis = (uint32_t) props->selection_method_param;
+ VLOG_DBG("Use dp_hash with %d hash values using algorithm %d.",
+ group->hash_mask + 1, group->hash_alg);
+ } else {
+ /* Fall back to original default hashing in slow path. */
+ VLOG_DBG("Falling back to default hash method.");
+ group->selection_method = SEL_METHOD_DEFAULT;
+ }
+ } else if (!strcmp(selection_method, "hash")) {
+ VLOG_DBG("Selection method specified: hash.");
+ if (props->fields.values_size > 0) {
+ /* Controller has specified hash fields. */
+ struct ds s = DS_EMPTY_INITIALIZER;
+ oxm_format_field_array(&s, &props->fields);
+ VLOG_DBG("Hash fields: %s", ds_cstr(&s));
+ ds_destroy(&s);
+ group->selection_method = SEL_METHOD_HASH;
+ } else {
+ /* No hash fields. Fall back to original default hashing. */
+ VLOG_DBG("No hash fields. Falling back to default hash method.");
+ group->selection_method = SEL_METHOD_DEFAULT;
+ }
+ } else {
+ /* Parsing of groups should ensure this never happens */
+ OVS_NOT_REACHED();
+ }
+}
+
static enum ofperr
group_construct(struct ofgroup *group_)
{
ovs_mutex_init_adaptive(&group->stats_mutex);
ovs_mutex_lock(&group->stats_mutex);
group_construct_stats(group);
+ group->hash_map = NULL;
+ if (group->up.type == OFPGT11_SELECT) {
+ group_set_selection_method(group);
+ }
ovs_mutex_unlock(&group->stats_mutex);
return 0;
}
{
struct group_dpif *group = group_dpif_cast(group_);
ovs_mutex_destroy(&group->stats_mutex);
+ if (group->hash_map) {
+ free(group->hash_map);
+ group->hash_map = NULL;
+ }
}
static enum ofperr
const struct ofputil_packet_in_private *pin)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
+ struct dpif_flow_stats stats;
+ struct xlate_cache xcache;
+ struct flow flow;
+ xlate_cache_init(&xcache);
/* Translate pin into datapath actions. */
uint64_t odp_actions_stub[1024 / 8];
struct ofpbuf odp_actions = OFPBUF_STUB_INITIALIZER(odp_actions_stub);
enum slow_path_reason slow;
- enum ofperr error = xlate_resume(ofproto, pin, &odp_actions, &slow);
+ enum ofperr error = xlate_resume(ofproto, pin, &odp_actions, &slow,
+ &flow, &xcache);
/* Steal 'pin->packet' and put it into a dp_packet. */
struct dp_packet packet;
dp_packet_init(&packet, pin->base.packet_len);
dp_packet_put(&packet, pin->base.packet, pin->base.packet_len);
+ /* Run the side effects from the xcache. */
+ dpif_flow_stats_extract(&flow, &packet, time_msec(), &stats);
+ ovs_mutex_lock(&ofproto_mutex);
+ ofproto_dpif_xcache_execute(ofproto, &xcache, &stats);
+ ovs_mutex_unlock(&ofproto_mutex);
+
pkt_metadata_from_flow(&packet.md, &pin->base.flow_metadata.flow);
/* Fix up in_port. */
}
\f
struct ofproto_dpif *
-ofproto_dpif_lookup(const char *name)
+ofproto_dpif_lookup_by_name(const char *name)
{
struct ofproto_dpif *ofproto;
- HMAP_FOR_EACH_WITH_HASH (ofproto, all_ofproto_dpifs_node,
- hash_string(name, 0), &all_ofproto_dpifs) {
+ HMAP_FOR_EACH_WITH_HASH (ofproto, all_ofproto_dpifs_by_name_node,
+ hash_string(name, 0),
+ &all_ofproto_dpifs_by_name) {
if (!strcmp(ofproto->up.name, name)) {
return ofproto;
}
return NULL;
}
+/* Returns the ofproto_dpif whose ->uuid equals 'uuid', or NULL if there
+ * is none. Looks it up in the all_ofproto_dpifs_by_uuid hash map. */
+struct ofproto_dpif *
+ofproto_dpif_lookup_by_uuid(const struct uuid *uuid)
+{
+ struct ofproto_dpif *ofproto;
+
+ HMAP_FOR_EACH_WITH_HASH (ofproto, all_ofproto_dpifs_by_uuid_node,
+ uuid_hash(uuid), &all_ofproto_dpifs_by_uuid) {
+ if (uuid_equals(&ofproto->uuid, uuid)) {
+ return ofproto;
+ }
+ }
+ return NULL;
+}
+
static void
ofproto_unixctl_fdb_flush(struct unixctl_conn *conn, int argc,
const char *argv[], void *aux OVS_UNUSED)
struct ofproto_dpif *ofproto;
if (argc > 1) {
- ofproto = ofproto_dpif_lookup(argv[1]);
+ ofproto = ofproto_dpif_lookup_by_name(argv[1]);
if (!ofproto) {
unixctl_command_reply_error(conn, "no such bridge");
return;
mac_learning_flush(ofproto->ml);
ovs_rwlock_unlock(&ofproto->ml->rwlock);
} else {
- HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_by_name_node,
+ &all_ofproto_dpifs_by_name) {
ovs_rwlock_wrlock(&ofproto->ml->rwlock);
mac_learning_flush(ofproto->ml);
ovs_rwlock_unlock(&ofproto->ml->rwlock);
struct ofproto_dpif *ofproto;
if (argc > 1) {
- ofproto = ofproto_dpif_lookup(argv[1]);
+ ofproto = ofproto_dpif_lookup_by_name(argv[1]);
if (!ofproto) {
unixctl_command_reply_error(conn, "no such bridge");
return;
}
mcast_snooping_mdb_flush(ofproto->ms);
} else {
- HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_by_name_node,
+ &all_ofproto_dpifs_by_name) {
if (!mcast_snooping_enabled(ofproto->ms)) {
continue;
}
const struct ofproto_dpif *ofproto;
const struct mac_entry *e;
- ofproto = ofproto_dpif_lookup(argv[1]);
+ ofproto = ofproto_dpif_lookup_by_name(argv[1]);
if (!ofproto) {
unixctl_command_reply_error(conn, "no such bridge");
return;
ds_destroy(&ds);
}
+/* unixctl handler for "fdb/stats-clear [bridge]": resets the MAC learning
+ * statistics of the named bridge, or of every bridge when no argument is
+ * given. Takes each ml rwlock for writing around the reset. */
+static void
+ofproto_unixctl_fdb_stats_clear(struct unixctl_conn *conn, int argc,
+ const char *argv[], void *aux OVS_UNUSED)
+{
+ struct ofproto_dpif *ofproto;
+
+ if (argc > 1) {
+ ofproto = ofproto_dpif_lookup_by_name(argv[1]);
+ if (!ofproto) {
+ unixctl_command_reply_error(conn, "no such bridge");
+ return;
+ }
+ ovs_rwlock_wrlock(&ofproto->ml->rwlock);
+ mac_learning_clear_statistics(ofproto->ml);
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
+ } else {
+ /* No bridge specified: clear statistics on all bridges. */
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_by_name_node,
+ &all_ofproto_dpifs_by_name) {
+ ovs_rwlock_wrlock(&ofproto->ml->rwlock);
+ mac_learning_clear_statistics(ofproto->ml);
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
+ }
+ }
+
+ unixctl_command_reply(conn, "statistics successfully cleared");
+}
+
+/* unixctl handler for "fdb/stats-show bridge": replies with the MAC
+ * learning table counters (current/max entries, learned, expired,
+ * evicted, port-moved) of the named bridge. argv[1] is always present
+ * because the command is registered with exactly one argument. */
+static void
+ofproto_unixctl_fdb_stats_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
+ const char *argv[], void *aux OVS_UNUSED)
+{
+ struct ds ds = DS_EMPTY_INITIALIZER;
+ const struct ofproto_dpif *ofproto;
+ ofproto = ofproto_dpif_lookup_by_name(argv[1]);
+ if (!ofproto) {
+ unixctl_command_reply_error(conn, "no such bridge");
+ return;
+ }
+
+ ds_put_format(&ds, "Statistics for bridge \"%s\":\n", argv[1]);
+ /* Read lock suffices: the counters are only read here. */
+ ovs_rwlock_rdlock(&ofproto->ml->rwlock);
+
+ ds_put_format(&ds, " Current/maximum MAC entries in the table: %"
+ PRIuSIZE"/%"PRIuSIZE"\n",
+ hmap_count(&ofproto->ml->table), ofproto->ml->max_entries);
+ ds_put_format(&ds,
+ " Total number of learned MAC entries : %"PRIu64"\n",
+ ofproto->ml->total_learned);
+ ds_put_format(&ds,
+ " Total number of expired MAC entries : %"PRIu64"\n",
+ ofproto->ml->total_expired);
+ ds_put_format(&ds,
+ " Total number of evicted MAC entries : %"PRIu64"\n",
+ ofproto->ml->total_evicted);
+ ds_put_format(&ds,
+ " Total number of port moved MAC entries : %"PRIu64"\n",
+ ofproto->ml->total_moved);
+
+ ovs_rwlock_unlock(&ofproto->ml->rwlock);
+ unixctl_command_reply(conn, ds_cstr(&ds));
+ ds_destroy(&ds);
+}
+
static void
ofproto_unixctl_mcast_snooping_show(struct unixctl_conn *conn,
int argc OVS_UNUSED,
struct mcast_group_bundle *b;
struct mcast_mrouter_bundle *mrouter;
- ofproto = ofproto_dpif_lookup(argv[1]);
+ ofproto = ofproto_dpif_lookup_by_name(argv[1]);
if (!ofproto) {
unixctl_command_reply_error(conn, "no such bridge");
return;
{
const struct ofproto_dpif *ofproto;
- HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_by_name_node,
+ &all_ofproto_dpifs_by_name) {
char *name = xasprintf("%s@%s", ofproto->up.type, ofproto->up.name);
shash_add_nocopy(ofproto_shash, name, ofproto);
}
#undef ODP_SUPPORT_FIELD
if (!name) {
- struct shash_node *node;
-
SHASH_FOR_EACH (node, &all_fields) {
display_support_field(node->name, node->data, ds);
}
continue;
}
- ds_put_format(ds, "\t%s:\n", ofproto->up.name);
+ ds_put_format(ds, " %s:\n", ofproto->up.name);
ports = shash_sort(&ofproto->up.port_by_name);
for (j = 0; j < shash_count(&ofproto->up.port_by_name); j++) {
struct smap config;
odp_port_t odp_port;
- ds_put_format(ds, "\t\t%s %u/", netdev_get_name(ofport->netdev),
+ ds_put_format(ds, " %s %u/", netdev_get_name(ofport->netdev),
ofport->ofp_port);
odp_port = ofp_port_to_odp_port(ofproto, ofport->ofp_port);
struct dpif_flow f;
int error;
- ofproto = ofproto_dpif_lookup(argv[argc - 1]);
+ ofproto = ofproto_dpif_lookup_by_name(argv[argc - 1]);
if (!ofproto) {
unixctl_command_reply_error(conn, "no such bridge");
return;
{
struct ds ds = DS_EMPTY_INITIALIZER;
const char *br = argv[argc -1];
- struct ofproto_dpif *ofproto = ofproto_dpif_lookup(br);
+ struct ofproto_dpif *ofproto = ofproto_dpif_lookup_by_name(br);
if (!ofproto) {
unixctl_command_reply_error(conn, "no such bridge");
struct ds ds = DS_EMPTY_INITIALIZER;
const char *br = argv[1];
const char *name, *value;
- struct ofproto_dpif *ofproto = ofproto_dpif_lookup(br);
+ struct ofproto_dpif *ofproto = ofproto_dpif_lookup_by_name(br);
bool changed;
if (!ofproto) {
ofproto_unixctl_fdb_flush, NULL);
unixctl_command_register("fdb/show", "bridge", 1, 1,
ofproto_unixctl_fdb_show, NULL);
+ unixctl_command_register("fdb/stats-clear", "[bridge]", 0, 1,
+ ofproto_unixctl_fdb_stats_clear, NULL);
+ unixctl_command_register("fdb/stats-show", "bridge", 1, 1,
+ ofproto_unixctl_fdb_stats_show, NULL);
unixctl_command_register("mdb/flush", "[bridge]", 0, 1,
ofproto_unixctl_mcast_snooping_flush, NULL);
unixctl_command_register("mdb/show", "bridge", 1, 1,
NULL);
unixctl_command_register("dpif/show-dp-features", "bridge", 1, 1,
ofproto_unixctl_dpif_show_dp_features, NULL);
- unixctl_command_register("dpif/dump-flows", "[-m] [--names | --no-nmaes] bridge", 1, INT_MAX,
+ unixctl_command_register("dpif/dump-flows",
+ "[-m] [--names | --no-names] bridge", 1, INT_MAX,
ofproto_unixctl_dpif_dump_flows, NULL);
unixctl_command_register("dpif/set-dp-features", "bridge", 1, 3 ,
ofproto_unixctl_dpif_set_dp_features, NULL);
}
}
+/* 'match' is non-const to allow for temporary modifications. Any changes are
+ * restored before returning. */
int
ofproto_dpif_add_internal_flow(struct ofproto_dpif *ofproto,
- const struct match *match, int priority,
+ struct match *match, int priority,
uint16_t idle_timeout,
const struct ofpbuf *ofpacts,
struct rule **rulep)
fm = (struct ofputil_flow_mod) {
.buffer_id = UINT32_MAX,
- .match = *match,
.priority = priority,
.table_id = TBL_INTERNAL,
.command = OFPFC_ADD,
.ofpacts = ofpacts->data,
.ofpacts_len = ofpacts->size,
};
-
+ minimatch_init(&fm.match, match);
error = ofproto_flow_mod(&ofproto->up, &fm);
+ minimatch_destroy(&fm.match);
+
if (error) {
VLOG_ERR_RL(&rl, "failed to add internal flow (%s)",
ofperr_to_string(error));
rule = rule_dpif_lookup_in_table(ofproto,
ofproto_dpif_get_tables_version(ofproto),
- TBL_INTERNAL, &fm.match.flow,
- &fm.match.wc);
+ TBL_INTERNAL, &match->flow, &match->wc);
if (rule) {
*rulep = &rule->up;
} else {
fm = (struct ofputil_flow_mod) {
.buffer_id = UINT32_MAX,
- .match = *match,
.priority = priority,
.table_id = TBL_INTERNAL,
.out_port = OFPP_ANY,
.flags = OFPUTIL_FF_HIDDEN_FIELDS | OFPUTIL_FF_NO_READONLY,
.command = OFPFC_DELETE_STRICT,
};
-
+ minimatch_init(&fm.match, match);
error = ofproto_flow_mod(&ofproto->up, &fm);
+ minimatch_destroy(&fm.match);
+
if (error) {
VLOG_ERR_RL(&rl, "failed to delete internal flow (%s)",
ofperr_to_string(error));
}
}
- switch (dpif_meter_set(ofproto->backer->dpif, meter_id, config)) {
+ switch (dpif_meter_set(ofproto->backer->dpif, *meter_id, config)) {
case 0:
return 0;
case EFBIG: /* meter_id out of range */