#define EM_FLOW_HASH_MASK (EM_FLOW_HASH_ENTRIES - 1)
#define EM_FLOW_HASH_SEGS 2
+/* Default EMC insert probability is 1 / DEFAULT_EM_FLOW_INSERT_INV_PROB */
+#define DEFAULT_EM_FLOW_INSERT_INV_PROB 100
+/* Threshold form of the probability: a uint32_t sample is compared against
+ * this value, so insertions happen with probability ~(MIN / UINT32_MAX),
+ * i.e. 1 / DEFAULT_EM_FLOW_INSERT_INV_PROB (1% by default). */
+#define DEFAULT_EM_FLOW_INSERT_MIN (UINT32_MAX / \
+ DEFAULT_EM_FLOW_INSERT_INV_PROB)
+
struct emc_entry {
struct dp_netdev_flow *flow;
struct netdev_flow_key key; /* key.hash used for emc hash value. */
uint64_t last_tnl_conf_seq;
struct conntrack conntrack;
+
+ /* Minimum threshold a random uint32_t sample must not exceed for an EMC
+ * insertion to take place; 0 disables EMC insertion entirely.  Cache-line
+ * aligned (OVS_ALIGNED_VAR) to keep this atomically-read field on its own
+ * line.
+ * NOTE(review): every reader/writer in this patch accesses it as
+ * 'dp->emc_insert_min' / 'pmd->dp->emc_insert_min', i.e. as a member of
+ * struct dp_netdev -- confirm this hunk really targets emc_entry. */
+ OVS_ALIGNED_VAR(CACHE_LINE_SIZE) atomic_uint32_t emc_insert_min;
};
static struct dp_netdev_port *dp_netdev_lookup_port(const struct dp_netdev *dp,
conntrack_init(&dp->conntrack);
+ atomic_init(&dp->emc_insert_min, DEFAULT_EM_FLOW_INSERT_MIN);
+
cmap_init(&dp->poll_threads);
ovs_mutex_init_recursive(&dp->non_pmd_mutex);
ovsthread_key_create(&dp->per_pmd_key, NULL);
emc_change_entry(to_be_replaced, flow, key);
}
+/* Inserts 'flow' keyed by 'key' into this PMD thread's exact-match cache
+ * with a probability controlled by the datapath's 'emc_insert_min'
+ * threshold (set from other_config:emc-insert-inv-prob elsewhere in this
+ * patch).  A threshold of 0 short-circuits the comparison and disables
+ * EMC insertion altogether. */
+static inline void
+emc_probabilistic_insert(struct dp_netdev_pmd_thread *pmd,
+ const struct netdev_flow_key *key,
+ struct dp_netdev_flow *flow)
+{
+ /* Insert an entry into the EMC based on probability value 'min'. By
+ * default the value is UINT32_MAX / 100 which yields an insertion
+ * probability of 1/100, i.e. 1%. */
+
+ uint32_t min;
+ atomic_read_relaxed(&pmd->dp->emc_insert_min, &min);
+
+/* The uint32_t sample is key->hash whitened by XOR with a cheap entropy
+ * source: 'pmd->last_cycles' on DPDK builds (presumably a cycle counter --
+ * avoids a PRNG call on the fast path; confirm), random_uint32() otherwise. */
+#ifdef DPDK_NETDEV
+ if (min && (key->hash ^ (uint32_t) pmd->last_cycles) <= min) {
+#else
+ if (min && (key->hash ^ random_uint32()) <= min) {
+#endif
+ emc_insert(&pmd->flow_cache, key, flow);
+ }
+}
+
static inline struct dp_netdev_flow *
emc_lookup(struct emc_cache *cache, const struct netdev_flow_key *key)
{
{
struct dp_netdev *dp = get_dp_netdev(dpif);
const char *cmask = smap_get(other_config, "pmd-cpu-mask");
+ unsigned long long insert_prob =
+ smap_get_ullong(other_config, "emc-insert-inv-prob",
+ DEFAULT_EM_FLOW_INSERT_INV_PROB);
+ uint32_t insert_min, cur_min;
if (!nullable_string_is_equal(dp->pmd_cmask, cmask)) {
free(dp->pmd_cmask);
dp_netdev_request_reconfigure(dp);
}
+ atomic_read_relaxed(&dp->emc_insert_min, &cur_min);
+ if (insert_prob <= UINT32_MAX) {
+ insert_min = insert_prob == 0 ? 0 : UINT32_MAX / insert_prob;
+ } else {
+ insert_min = DEFAULT_EM_FLOW_INSERT_MIN;
+ insert_prob = DEFAULT_EM_FLOW_INSERT_INV_PROB;
+ }
+
+ if (insert_min != cur_min) {
+ atomic_store_relaxed(&dp->emc_insert_min, insert_min);
+ if (insert_min == 0) {
+ VLOG_INFO("EMC has been disabled");
+ } else {
+ VLOG_INFO("EMC insertion probability changed to 1/%llu (~%.2f%%)",
+ insert_prob, (100 / (float)insert_prob));
+ }
+ }
+
return 0;
}
add_actions->size);
}
ovs_mutex_unlock(&pmd->flow_mutex);
-
- emc_insert(&pmd->flow_cache, key, netdev_flow);
+ emc_probabilistic_insert(pmd, key, netdev_flow);
}
}
struct dpcls *cls;
struct dpcls_rule *rules[PKT_ARRAY_SIZE];
struct dp_netdev *dp = pmd->dp;
- struct emc_cache *flow_cache = &pmd->flow_cache;
int miss_cnt = 0, lost_cnt = 0;
int lookup_cnt = 0, add_lookup_cnt;
bool any_miss;
flow = dp_netdev_flow_cast(rules[i]);
- emc_insert(flow_cache, &keys[i], flow);
+ emc_probabilistic_insert(pmd, &keys[i], flow);
dp_netdev_queue_batches(packet, flow, &keys[i].mf, batches, n_batches);
}