/* Looks up 'key' in 'dp''s classifier and returns the matching
 * dp_netdev_flow, or NULL (via dp_netdev_flow_cast of a NULL rule) if
 * nothing matches.
 *
 * NOTE(review): the lines below are a unified-diff hunk ('-'/'+' line
 * markers), not plain C.  The change removes the function's internal
 * fat_rwlock_rdlock/fat_rwlock_unlock pair and retags it from
 * OVS_EXCLUDED to OVS_REQ_RDLOCK, i.e. after this patch every caller
 * must hold dp->cls.rwlock (at least read-locked) across the call —
 * the later hunks add exactly those lock/unlock calls at the call
 * sites.  Verify all callers were updated; a missed one becomes an
 * unlocked classifier lookup. */
static struct dp_netdev_flow *
dp_netdev_lookup_flow(const struct dp_netdev *dp, const struct miniflow *key)
- OVS_EXCLUDED(dp->cls.rwlock)
+ OVS_REQ_RDLOCK(dp->cls.rwlock)
{
struct dp_netdev_flow *netdev_flow;
struct cls_rule *rule;
/* Lock acquisition deleted here: the caller now supplies the read lock. */
- fat_rwlock_rdlock(&dp->cls.rwlock);
rule = classifier_lookup_miniflow_first(&dp->cls, key);
netdev_flow = dp_netdev_flow_cast(rule);
- fat_rwlock_unlock(&dp->cls.rwlock);
return netdev_flow;
}
miniflow_init(&miniflow, &flow);
ovs_mutex_lock(&dp->flow_mutex);
+ fat_rwlock_rdlock(&dp->cls.rwlock);
netdev_flow = dp_netdev_lookup_flow(dp, &miniflow);
+ fat_rwlock_unlock(&dp->cls.rwlock);
if (!netdev_flow) {
if (put->flags & DPIF_FP_CREATE) {
if (hmap_count(&dp->flow_table) < MAX_FLOWS) {
miniflow_initialize(&key.flow, key.buf);
+ fat_rwlock_rdlock(&dp->cls.rwlock);
for (i = 0; i < cnt; i++) {
struct dp_netdev_flow *netdev_flow;
struct ofpbuf *buf = &packets[i]->ofpbuf;
}
}
}
+ fat_rwlock_unlock(&dp->cls.rwlock);
if (batch.flow) {
packet_batch_execute(&batch, dp);