rule_dpif_lookup_in_table(struct ofproto_dpif *ofproto,
const struct flow *flow, struct flow_wildcards *wc,
uint8_t table_id, struct rule_dpif **rule)
- OVS_TRY_RDLOCK(true, (*rule)->up.evict)
+ OVS_TRY_RDLOCK(true, (*rule)->up.rwlock)
{
struct cls_rule *cls_rule;
struct classifier *cls;
}
*rule = rule_dpif_cast(rule_from_cls_rule(cls_rule));
- if (*rule && ovs_rwlock_tryrdlock(&(*rule)->up.evict)) {
+ if (*rule && ovs_rwlock_tryrdlock(&(*rule)->up.rwlock)) {
/* The rule is in the process of being removed.  The best we can do is
 * pretend it isn't there. */
*rule = NULL;
OVS_NO_THREAD_SAFETY_ANALYSIS
{
*rule = config & OFPUTIL_PC_NO_PACKET_IN ? no_packet_in_rule : miss_rule;
- ovs_rwlock_rdlock(&(*rule)->up.evict);
+ ovs_rwlock_rdlock(&(*rule)->up.rwlock);
}
void
OVS_NO_THREAD_SAFETY_ANALYSIS
{
if (rule) {
- ovs_rwlock_unlock(&rule->up.evict);
+ ovs_rwlock_unlock(&rule->up.rwlock);
}
}
struct ofoperation *pending; /* Operation now in progress, if nonnull. */
- ovs_be64 flow_cookie; /* Controller-issued identifier. */
+ ovs_be64 flow_cookie; /* Controller-issued identifier. Guarded by
+ rwlock. */
struct hindex_node cookie_node; /* In owning ofproto's 'cookies' index. */
long long int created; /* Creation time. */
struct heap_node evg_node; /* In eviction_group's "rules" heap. */
struct eviction_group *eviction_group; /* NULL if not in any group. */
- /* The evict lock is used to prevent rules from being evicted while child
- * threads are using them to xlate flows. A read lock means the rule is
- * currently being used. A write lock means the rule is in the process of
- * being evicted and should be considered gone. A rule will not be evicted
- * unless both its own and its classifiers write locks are held.
- * Therefore, while holding a classifier readlock, one can be assured that
- * even write locked rules are safe. */
- struct ovs_rwlock evict;
+ /* The rwlock is used to protect those elements in struct rule which are
+ * accessed by multiple threads. While maintaining a pointer to struct
+ * rule, threads are required to hold a readlock. The main ofproto code is
* guaranteed not to evict the rule or to change any of the elements marked
* "Guarded by rwlock" without holding the writelock.
+ *
+ * A rule will not be evicted unless both its own and its classifier's
+ * write locks are held. Therefore, while holding a classifier readlock,
+ * one can be assured that write locked rules are safe to reference. */
+ struct ovs_rwlock rwlock;
+ /* Guarded by rwlock. */
struct ofpact *ofpacts; /* Sequence of "struct ofpacts". */
unsigned int ofpacts_len; /* Size of 'ofpacts', in bytes. */
const struct mf_subfield *fields,
size_t n_fields);
-static void oftable_remove_rule(struct rule *rule) OVS_RELEASES(rule->evict);
+static void oftable_remove_rule(struct rule *rule) OVS_RELEASES(rule->rwlock);
static void oftable_remove_rule__(struct ofproto *ofproto,
struct classifier *cls, struct rule *rule)
- OVS_REQ_WRLOCK(cls->rwlock) OVS_RELEASES(rule->evict);
+ OVS_REQ_WRLOCK(cls->rwlock) OVS_RELEASES(rule->rwlock);
static void oftable_insert_rule(struct rule *);
/* A set of rules within a single OpenFlow table (oftable) that have the same
};
static bool choose_rule_to_evict(struct oftable *table, struct rule **rulep)
- OVS_TRY_WRLOCK(true, (*rulep)->evict);
+ OVS_TRY_WRLOCK(true, (*rulep)->rwlock);
static void ofproto_evict(struct ofproto *);
static uint32_t rule_eviction_priority(struct rule *);
static void eviction_group_add_rule(struct rule *);
const struct ofp_header *, struct list *);
static void delete_flow__(struct rule *rule, struct ofopgroup *,
enum ofp_flow_removed_reason)
- OVS_RELEASES(rule->evict);
+ OVS_RELEASES(rule->rwlock);
static enum ofperr add_group(struct ofproto *, struct ofputil_group_mod *);
static bool handle_openflow(struct ofconn *, const struct ofpbuf *);
static enum ofperr handle_flow_mod__(struct ofproto *, struct ofconn *,
group = ofopgroup_create_unattached(ofproto);
ofoperation_create(group, rule, OFOPERATION_DELETE, OFPRR_DELETE);
- ovs_rwlock_wrlock(&rule->evict);
+ ovs_rwlock_wrlock(&rule->rwlock);
oftable_remove_rule__(ofproto, cls, rule);
ofproto->ofproto_class->rule_delete(rule);
ofopgroup_submit(group);
cls_rule_destroy(&rule->cr);
free(rule->ofpacts);
ovs_mutex_destroy(&rule->timeout_mutex);
- ovs_rwlock_destroy(&rule->evict);
+ ovs_rwlock_destroy(&rule->rwlock);
rule->ofproto->ofproto_class->rule_dealloc(rule);
}
}
if (new_cookie != rule->flow_cookie) {
cookies_remove(ofproto, rule);
+ ovs_rwlock_wrlock(&rule->rwlock);
rule->flow_cookie = new_cookie;
+ ovs_rwlock_unlock(&rule->rwlock);
cookies_insert(ofproto, rule);
}
} else if (!choose_rule_to_evict(table, &rule)) {
return OFPERR_OFPFMFC_TABLE_FULL;
} else if (rule->pending) {
- ovs_rwlock_unlock(&rule->evict);
+ ovs_rwlock_unlock(&rule->rwlock);
return OFPROTO_POSTPONE;
} else {
struct ofopgroup *group;
rule->monitor_flags = 0;
rule->add_seqno = 0;
rule->modify_seqno = 0;
- ovs_rwlock_init(&rule->evict);
+ ovs_rwlock_init(&rule->rwlock);
/* Construct rule, initializing derived state. */
error = ofproto->ofproto_class->rule_construct(rule);
op->ofpacts = rule->ofpacts;
op->ofpacts_len = rule->ofpacts_len;
op->meter_id = rule->meter_id;
+
+ ovs_rwlock_wrlock(&rule->rwlock);
rule->ofpacts = xmemdup(fm->ofpacts, fm->ofpacts_len);
rule->ofpacts_len = fm->ofpacts_len;
+ ovs_rwlock_unlock(&rule->rwlock);
+
rule->meter_id = find_meter(rule->ofpacts, rule->ofpacts_len);
rule->ofproto->ofproto_class->rule_modify_actions(rule,
reset_counters);
group = ofopgroup_create(ofproto, ofconn, request, UINT32_MAX);
LIST_FOR_EACH_SAFE (rule, next, ofproto_node, rules) {
- ovs_rwlock_wrlock(&rule->evict);
+ ovs_rwlock_wrlock(&rule->rwlock);
delete_flow__(rule, group, reason);
}
ofopgroup_submit(group);
}
}
} else {
- ovs_rwlock_wrlock(&rule->evict);
+ ovs_rwlock_wrlock(&rule->rwlock);
oftable_remove_rule(rule);
ofproto_rule_destroy__(rule);
}
ovs_mutex_unlock(&rule->timeout_mutex);
if (op->ofpacts) {
free(rule->ofpacts);
+
+ ovs_rwlock_wrlock(&rule->rwlock);
rule->ofpacts = op->ofpacts;
rule->ofpacts_len = op->ofpacts_len;
+ ovs_rwlock_unlock(&rule->rwlock);
+
op->ofpacts = NULL;
op->ofpacts_len = 0;
}
struct rule *rule;
HEAP_FOR_EACH (rule, evg_node, &evg->rules) {
- if (!ovs_rwlock_trywrlock(&rule->evict)) {
+ if (!ovs_rwlock_trywrlock(&rule->rwlock)) {
*rulep = rule;
return true;
}
}
if (rule->pending) {
- ovs_rwlock_unlock(&rule->evict);
+ ovs_rwlock_unlock(&rule->rwlock);
break;
}
static void
oftable_remove_rule__(struct ofproto *ofproto, struct classifier *cls,
struct rule *rule)
- OVS_REQ_WRLOCK(cls->rwlock) OVS_RELEASES(rule->evict)
+ OVS_REQ_WRLOCK(cls->rwlock) OVS_RELEASES(rule->rwlock)
{
classifier_remove(cls, &rule->cr);
cookies_remove(ofproto, rule);
list_remove(&rule->meter_list_node);
list_init(&rule->meter_list_node);
}
- ovs_rwlock_unlock(&rule->evict);
+ ovs_rwlock_unlock(&rule->rwlock);
}
static void