/*
- * Copyright (c) 2014, 2015, 2016 Nicira, Inc.
+ * Copyright (c) 2014, 2015, 2016, 2017 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include <config.h>
-#include "ofpbuf.h"
+#include "openvswitch/ofpbuf.h"
#include "ofproto-dpif.h"
#include "ofproto-dpif-rid.h"
#include "ofproto-provider.h"
VLOG_DEFINE_THIS_MODULE(ofproto_dpif_rid);
-static struct ovs_mutex mutex;
+static struct ovs_mutex mutex = OVS_MUTEX_INITIALIZER;
-static struct cmap id_map;
-static struct cmap metadata_map;
+static struct cmap id_map = CMAP_INITIALIZER;
+static struct cmap metadata_map = CMAP_INITIALIZER;
-static struct ovs_list expiring OVS_GUARDED_BY(mutex);
-static struct ovs_list expired OVS_GUARDED_BY(mutex);
+static struct ovs_list expiring OVS_GUARDED_BY(mutex)
+ = OVS_LIST_INITIALIZER(&expiring);
+static struct ovs_list expired OVS_GUARDED_BY(mutex)
+ = OVS_LIST_INITIALIZER(&expired);
-static uint32_t next_id OVS_GUARDED_BY(mutex); /* Possible next free id. */
+static uint32_t next_id OVS_GUARDED_BY(mutex) = 1; /* Possible next free id. */
#define RECIRC_POOL_STATIC_IDS 1024
static void recirc_id_node_free(struct recirc_id_node *);
-void
-recirc_init(void)
-{
- static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
-
- if (ovsthread_once_start(&once)) {
- ovs_mutex_init(&mutex);
- ovs_mutex_lock(&mutex);
- next_id = 1; /* 0 is not a valid ID. */
- cmap_init(&id_map);
- cmap_init(&metadata_map);
- list_init(&expiring);
- list_init(&expired);
- ovs_mutex_unlock(&mutex);
-
- ovsthread_once_done(&once);
- }
-
-}
-
/* This should be called by the revalidator once at each round (every 500ms or
* more). */
void
ovsrcu_postpone(recirc_id_node_free, node);
}
- if (!list_is_empty(&expiring)) {
+ if (!ovs_list_is_empty(&expiring)) {
/* 'expired' is now empty, move nodes in 'expiring' to it. */
- list_splice(&expired, list_front(&expiring), &expiring);
+ ovs_list_splice(&expired, ovs_list_front(&expiring), &expiring);
}
}
ovs_mutex_unlock(&mutex);
: NULL;
}
+bool
+recirc_id_node_find_and_ref(uint32_t id)
+{
+ struct recirc_id_node *rid_node =
+ CONST_CAST(struct recirc_id_node *, recirc_id_node_find(id));
+
+ if (!rid_node) {
+ return false;
+ }
+
+ return ovs_refcount_try_ref_rcu(&rid_node->refcount);
+}
+
static uint32_t
-recirc_metadata_hash(const struct recirc_state *state)
+frozen_state_hash(const struct frozen_state *state)
{
uint32_t hash;
- hash = hash_pointer(state->ofproto, 0);
+ hash = uuid_hash(&state->ofproto_uuid);
hash = hash_int(state->table_id, hash);
- if (flow_tnl_dst_is_set(state->metadata.tunnel)) {
- /* We may leave remainder bytes unhashed, but that is unlikely as
- * the tunnel is not in the datapath format. */
- hash = hash_bytes64((const uint64_t *) state->metadata.tunnel,
- flow_tnl_size(state->metadata.tunnel), hash);
- }
+ hash = hash_bytes64((const uint64_t *) &state->metadata,
+ sizeof state->metadata, hash);
hash = hash_boolean(state->conntracked, hash);
- hash = hash_bytes64((const uint64_t *) &state->metadata.metadata,
- sizeof state->metadata - sizeof state->metadata.tunnel,
- hash);
- if (state->stack && state->n_stack) {
- hash = hash_bytes64((const uint64_t *) state->stack,
- state->n_stack * sizeof *state->stack, hash);
+ hash = hash_boolean(state->was_mpls, hash);
+ if (state->stack && state->stack_size) {
+ hash = hash_bytes(state->stack, state->stack_size, hash);
}
hash = hash_int(state->mirrors, hash);
- hash = hash_int(state->action_set_len, hash);
if (state->action_set_len) {
hash = hash_bytes64(ALIGNED_CAST(const uint64_t *, state->action_set),
state->action_set_len, hash);
hash = hash_bytes64(ALIGNED_CAST(const uint64_t *, state->ofpacts),
state->ofpacts_len, hash);
}
+ if (state->userdata && state->userdata_len) {
+ hash = hash_bytes(state->userdata, state->userdata_len, hash);
+ }
return hash;
}
static bool
-recirc_metadata_equal(const struct recirc_state *a,
- const struct recirc_state *b)
+frozen_state_equal(const struct frozen_state *a, const struct frozen_state *b)
{
return (a->table_id == b->table_id
- && a->ofproto == b->ofproto
- && flow_tnl_equal(a->metadata.tunnel, b->metadata.tunnel)
- && !memcmp(&a->metadata.metadata, &b->metadata.metadata,
- sizeof a->metadata - sizeof a->metadata.tunnel)
- && a->n_stack == b->n_stack
- && !memcmp(a->stack, b->stack, a->n_stack * sizeof *a->stack)
+ && uuid_equals(&a->ofproto_uuid, &b->ofproto_uuid)
+ && !memcmp(&a->metadata, &b->metadata, sizeof a->metadata)
+ && a->stack_size == b->stack_size
+ && !memcmp(a->stack, b->stack, a->stack_size)
&& a->mirrors == b->mirrors
&& a->conntracked == b->conntracked
+ && a->was_mpls == b->was_mpls
&& ofpacts_equal(a->ofpacts, a->ofpacts_len,
b->ofpacts, b->ofpacts_len)
&& ofpacts_equal(a->action_set, a->action_set_len,
- b->action_set, b->action_set_len));
+ b->action_set, b->action_set_len)
+ && !memcmp(a->userdata, b->userdata, a->userdata_len)
+ && uuid_equals(&a->xport_uuid, &b->xport_uuid));
}
/* Lockless RCU protected lookup. If node is needed accross RCU quiescent
* state, caller should take a reference. */
static struct recirc_id_node *
-recirc_find_equal(const struct recirc_state *target, uint32_t hash)
+recirc_find_equal(const struct frozen_state *target, uint32_t hash)
{
struct recirc_id_node *node;
CMAP_FOR_EACH_WITH_HASH (node, metadata_node, hash, &metadata_map) {
- if (recirc_metadata_equal(&node->state, target)) {
+ if (frozen_state_equal(&node->state, target)) {
return node;
}
}
}
static struct recirc_id_node *
-recirc_ref_equal(const struct recirc_state *target, uint32_t hash)
+recirc_ref_equal(const struct frozen_state *target, uint32_t hash)
{
struct recirc_id_node *node;
}
static void
-recirc_state_clone(struct recirc_state *new, const struct recirc_state *old,
- struct flow_tnl *tunnel)
+frozen_state_clone(struct frozen_state *new, const struct frozen_state *old)
{
*new = *old;
- flow_tnl_copy__(tunnel, old->metadata.tunnel);
- new->metadata.tunnel = tunnel;
-
- new->stack = (new->n_stack
- ? xmemdup(new->stack, new->n_stack * sizeof *new->stack)
+ new->stack = (new->stack_size
+ ? xmemdup(new->stack, new->stack_size)
: NULL);
new->ofpacts = (new->ofpacts_len
? xmemdup(new->ofpacts, new->ofpacts_len)
new->action_set = (new->action_set_len
? xmemdup(new->action_set, new->action_set_len)
: NULL);
+ new->userdata = (new->userdata_len
+ ? xmemdup(new->userdata, new->userdata_len)
+ : NULL);
}
static void
-recirc_state_free(struct recirc_state *state)
+frozen_state_free(struct frozen_state *state)
{
free(state->stack);
free(state->ofpacts);
free(state->action_set);
+ free(state->userdata);
}
/* Allocate a unique recirculation id for the given set of flow metadata.
* The ID space is 2^^32, so there should never be a situation in which all
- * the IDs are used up. We loop until we find a free one.
- * hash is recomputed if it is passed in as 0. */
+ * the IDs are used up. We loop until we find a free one. */
static struct recirc_id_node *
-recirc_alloc_id__(const struct recirc_state *state, uint32_t hash)
+recirc_alloc_id__(const struct frozen_state *state, uint32_t hash)
{
ovs_assert(state->action_set_len <= state->ofpacts_len);
node->hash = hash;
ovs_refcount_init(&node->refcount);
- recirc_state_clone(CONST_CAST(struct recirc_state *, &node->state), state,
- &node->state_metadata_tunnel);
+ frozen_state_clone(CONST_CAST(struct frozen_state *, &node->state), state);
ovs_mutex_lock(&mutex);
for (;;) {
/* Look up an existing ID for the given flow's metadata and optional actions.
*/
uint32_t
-recirc_find_id(const struct recirc_state *target)
+recirc_find_id(const struct frozen_state *target)
{
- uint32_t hash = recirc_metadata_hash(target);
+ uint32_t hash = frozen_state_hash(target);
struct recirc_id_node *node = recirc_find_equal(target, hash);
return node ? node->id : 0;
}
/* Allocate a unique recirculation id for the given set of flow metadata and
optional actions. */
uint32_t
-recirc_alloc_id_ctx(const struct recirc_state *state)
+recirc_alloc_id_ctx(const struct frozen_state *state)
{
- uint32_t hash = recirc_metadata_hash(state);
+ uint32_t hash = frozen_state_hash(state);
struct recirc_id_node *node = recirc_ref_equal(state, hash);
if (!node) {
node = recirc_alloc_id__(state, hash);
uint32_t
recirc_alloc_id(struct ofproto_dpif *ofproto)
{
- struct flow_tnl tunnel;
- tunnel.ip_dst = htonl(0);
- tunnel.ipv6_dst = in6addr_any;
- struct recirc_state state = {
+ struct frozen_state state = {
.table_id = TBL_INTERNAL,
- .ofproto = ofproto,
- .metadata = { .tunnel = &tunnel, .in_port = OFPP_NONE },
+ .ofproto_uuid = ofproto->uuid,
+ .metadata = {
+ .tunnel = {
+ .ip_dst = htonl(0),
+ .ipv6_dst = in6addr_any,
+ },
+ .in_port = OFPP_NONE },
};
- return recirc_alloc_id__(&state, recirc_metadata_hash(&state))->id;
+ /* In order to make sparse happy, xport_uuid needs to be set separately. */
+ state.xport_uuid = UUID_ZERO;
+ return recirc_alloc_id__(&state, frozen_state_hash(&state))->id;
}
/* Reclaims 'node' together with the frozen state it owns.  Invoked via
 * ovsrcu_postpone() once no RCU reader can still hold the node. */
static void
recirc_id_node_free(struct recirc_id_node *node)
{
    struct frozen_state *state
        = CONST_CAST(struct frozen_state *, &node->state);

    frozen_state_free(state);
    free(node);
}
cmap_remove(&metadata_map, &node->metadata_node, node->hash);
/* We keep the node in the 'id_map' so that it can be found as long
* as it lingers, and add it to the 'expiring' list. */
- list_insert(&expiring, &node->exp_node);
+ ovs_list_insert(&expiring, &node->exp_node);
ovs_mutex_unlock(&mutex);
}
}
struct recirc_id_node *n;
CMAP_FOR_EACH (n, metadata_node, &metadata_map) {
- if (n->state.ofproto == ofproto) {
+ if (uuid_equals(&n->state.ofproto_uuid, &ofproto->uuid)) {
VLOG_ERR("recirc_id %"PRIu32
" left allocated when ofproto (%s)"
" is destructed", n->id, ofproto_name);