stats.used = time_msec();
stats.tcp_flags = ntohs(upcall->flow->tcp_flags);
- xlate_in_init(&xin, upcall->ofproto, upcall->flow, upcall->in_port, NULL,
+ xlate_in_init(&xin, upcall->ofproto,
+ ofproto_dpif_get_tables_version(upcall->ofproto),
+ upcall->flow, upcall->in_port, NULL,
stats.tcp_flags, upcall->packet, wc, odp_actions);
if (upcall->type == DPIF_UC_MISS) {
ukey->xcache = xlate_cache_new();
}
- xlate_in_init(&xin, ofproto, &flow, ofp_in_port, NULL, push.tcp_flags,
+ xlate_in_init(&xin, ofproto, ofproto_dpif_get_tables_version(ofproto),
+ &flow, ofp_in_port, NULL, push.tcp_flags,
NULL, need_revalidate ? &wc : NULL, odp_actions);
if (push.n_packets) {
xin.resubmit_stats = &push;
if (!error) {
struct xlate_in xin;
- xlate_in_init(&xin, ofproto, &flow, ofp_in_port, NULL,
+ xlate_in_init(&xin, ofproto,
+ ofproto_dpif_get_tables_version(ofproto),
+ &flow, ofp_in_port, NULL,
push->tcp_flags, NULL, NULL, NULL);
xin.resubmit_stats = push->n_packets ? push : NULL;
xin.allow_side_effects = push->n_packets > 0;
#include "ofproto/ofproto-dpif-xlate-cache.h"
-#include <errno.h>
#include <arpa/inet.h>
+#include <errno.h>
#include <net/if.h>
-#include <sys/socket.h>
#include <netinet/in.h>
+#include <sys/socket.h>
#include "bfd.h"
#include "bitmap.h"
#include "mac-learning.h"
#include "netdev-vport.h"
#include "ofproto/ofproto-dpif-mirror.h"
-#include "ofproto/ofproto-dpif.h"
#include "ofproto/ofproto-dpif-xlate.h"
+#include "ofproto/ofproto-dpif.h"
#include "ofproto/ofproto-provider.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/vlog.h"
VLOG_DEFINE_THIS_MODULE(ofproto_xlate_cache);
+void
+xlate_cache_init(struct xlate_cache *xcache)
+{
+    ofpbuf_init(&xcache->entries, 512);
+}
+
struct xlate_cache *
xlate_cache_new(void)
{
struct xlate_cache *xcache = xmalloc(sizeof *xcache);
-
- ofpbuf_init(&xcache->entries, 512);
+ xlate_cache_init(xcache);
return xcache;
}
}
void
-xlate_cache_delete(struct xlate_cache *xcache)
+xlate_cache_uninit(struct xlate_cache *xcache)
{
xlate_cache_clear(xcache);
ofpbuf_uninit(&xcache->entries);
+}
+
+void
+xlate_cache_delete(struct xlate_cache *xcache)
+{
+ xlate_cache_uninit(xcache);
free(xcache);
}
struct ofpbuf entries;
};
+void xlate_cache_init(struct xlate_cache *);
struct xlate_cache *xlate_cache_new(void);
struct xc_entry *xlate_cache_add_entry(struct xlate_cache *, enum xc_type);
void xlate_push_stats_entry(struct xc_entry *, const struct dpif_flow_stats *);
void xlate_push_stats(struct xlate_cache *, const struct dpif_flow_stats *);
void xlate_cache_clear_entry(struct xc_entry *);
void xlate_cache_clear(struct xlate_cache *);
+void xlate_cache_uninit(struct xlate_cache *);
void xlate_cache_delete(struct xlate_cache *);
#endif /* ofproto-dpif-xlate-cache.h */
const struct xbridge *xbridge;
- /* Flow tables version at the beginning of the translation. */
- ovs_version_t tables_version;
-
/* Flow at the last commit. */
struct flow base_flow;
struct group_dpif *group;
group = group_dpif_lookup(ctx->xbridge->ofproto, group_id,
- ctx->tables_version, false);
+ ctx->xin->tables_version, false);
if (group) {
return group_first_live_bucket(ctx, group, depth) != NULL;
}
output.port = OFPP_TABLE;
output.max_len = 0;
- return ofproto_dpif_execute_actions__(xbridge->ofproto, &flow, NULL,
- &output.ofpact, sizeof output,
+ return ofproto_dpif_execute_actions__(xbridge->ofproto,
+ ctx->xin->tables_version, &flow,
+ NULL, &output.ofpact, sizeof output,
ctx->indentation, ctx->depth,
ctx->resubmits, packet);
}
struct flow old_flow = ctx->xin->flow;
bool old_conntrack = ctx->conntracked;
bool old_was_mpls = ctx->was_mpls;
- ovs_version_t old_version = ctx->tables_version;
+ ovs_version_t old_version = ctx->xin->tables_version;
struct ofpbuf old_stack = ctx->stack;
union mf_subvalue new_stack[1024 / sizeof(union mf_subvalue)];
struct ofpbuf old_action_set = ctx->action_set;
clear_conntrack(flow);
/* The bridge is now known so obtain its table version. */
- ctx->tables_version
+ ctx->xin->tables_version
= ofproto_dpif_get_tables_version(ctx->xbridge->ofproto);
if (!process_special(ctx, peer) && may_receive(peer, ctx)) {
ctx->stack = old_stack;
/* Restore calling bridge's lookup version. */
- ctx->tables_version = old_version;
+ ctx->xin->tables_version = old_version;
/* The peer bridge popping MPLS should have no effect on the original
* bridge. */
ctx->table_id = table_id;
rule = rule_dpif_lookup_from_table(ctx->xbridge->ofproto,
- ctx->tables_version,
+ ctx->xin->tables_version,
&ctx->xin->flow, ctx->wc,
ctx->xin->resubmit_stats,
&ctx->table_id, in_port,
/* Take ref only if xcache exists. */
group = group_dpif_lookup(ctx->xbridge->ofproto, group_id,
- ctx->tables_version, ctx->xin->xcache);
+ ctx->xin->tables_version, ctx->xin->xcache);
if (!group) {
/* XXX: Should set ctx->error ? */
return true;
void
xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
- const struct flow *flow, ofp_port_t in_port,
- struct rule_dpif *rule, uint16_t tcp_flags,
+ ovs_version_t version, const struct flow *flow,
+ ofp_port_t in_port, struct rule_dpif *rule, uint16_t tcp_flags,
const struct dp_packet *packet, struct flow_wildcards *wc,
struct ofpbuf *odp_actions)
{
xin->ofproto = ofproto;
+ xin->tables_version = version;
xin->flow = *flow;
xin->flow.in_port.ofp_port = in_port;
xin->flow.actset_output = OFPP_UNSET;
goto exit;
}
ctx.xbridge = new_bridge;
+ /* The bridge is now known so obtain its table version. */
+ ctx.xin->tables_version
+ = ofproto_dpif_get_tables_version(ctx.xbridge->ofproto);
}
/* Set the thawed table id. Note: A table lookup is done only if there
ctx.error = XLATE_NO_RECIRCULATION_CONTEXT;
goto exit;
}
- /* The bridge is now known so obtain its table version. */
- ctx.tables_version = ofproto_dpif_get_tables_version(ctx.xbridge->ofproto);
if (!xin->ofpacts && !ctx.rule) {
ctx.rule = rule_dpif_lookup_from_table(
- ctx.xbridge->ofproto, ctx.tables_version, flow, ctx.wc,
+ ctx.xbridge->ofproto, ctx.xin->tables_version, flow, ctx.wc,
ctx.xin->resubmit_stats, &ctx.table_id,
flow->in_port.ofp_port, true, true, ctx.xin->xcache);
if (ctx.xin->resubmit_stats) {
flow_extract(&packet, &flow);
struct xlate_in xin;
- xlate_in_init(&xin, ofproto, &flow, 0, NULL, ntohs(flow.tcp_flags),
+ xlate_in_init(&xin, ofproto, ofproto_dpif_get_tables_version(ofproto),
+ &flow, 0, NULL, ntohs(flow.tcp_flags),
&packet, NULL, odp_actions);
struct ofpact_note noop;
ofpact_put_OUTPUT(&ofpacts)->port = xport->ofp_port;
- return ofproto_dpif_execute_actions(xport->xbridge->ofproto, &flow, NULL,
+ /* Actions here are not referring to anything versionable (flow tables or
+ * groups) so we don't need to worry about the version here. */
+ return ofproto_dpif_execute_actions(xport->xbridge->ofproto,
+ OVS_VERSION_MAX, &flow, NULL,
ofpacts.data, ofpacts.size, packet);
}
struct xlate_in {
struct ofproto_dpif *ofproto;
+ ovs_version_t tables_version; /* Lookup in this version. */
/* Flow to which the OpenFlow actions apply. xlate_actions() will modify
* this flow when actions change header fields. */
enum xlate_error xlate_actions(struct xlate_in *, struct xlate_out *);
-void xlate_in_init(struct xlate_in *, struct ofproto_dpif *,
+void xlate_in_init(struct xlate_in *, struct ofproto_dpif *, ovs_version_t,
const struct flow *, ofp_port_t in_port, struct rule_dpif *,
uint16_t tcp_flags, const struct dp_packet *packet,
struct flow_wildcards *, struct ofpbuf *odp_actions);
int
ofproto_dpif_execute_actions__(struct ofproto_dpif *ofproto,
- const struct flow *flow,
+ ovs_version_t version, const struct flow *flow,
struct rule_dpif *rule,
const struct ofpact *ofpacts, size_t ofpacts_len,
int indentation, int depth, int resubmits,
uint64_t odp_actions_stub[1024 / 8];
struct ofpbuf odp_actions = OFPBUF_STUB_INITIALIZER(odp_actions_stub);
- xlate_in_init(&xin, ofproto, flow, flow->in_port.ofp_port, rule,
+ xlate_in_init(&xin, ofproto, version, flow, flow->in_port.ofp_port, rule,
stats.tcp_flags, packet, NULL, &odp_actions);
xin.ofpacts = ofpacts;
xin.ofpacts_len = ofpacts_len;
* 'flow' must reflect the data in 'packet'. */
int
ofproto_dpif_execute_actions(struct ofproto_dpif *ofproto,
- const struct flow *flow,
+ ovs_version_t version, const struct flow *flow,
struct rule_dpif *rule,
const struct ofpact *ofpacts, size_t ofpacts_len,
struct dp_packet *packet)
{
- return ofproto_dpif_execute_actions__(ofproto, flow, rule, ofpacts,
- ofpacts_len, 0, 0, 0, packet);
+ return ofproto_dpif_execute_actions__(ofproto, version, flow, rule,
+ ofpacts, ofpacts_len, 0, 0, 0,
+ packet);
}
static void
}
ovs_version_t
-ofproto_dpif_get_tables_version(struct ofproto_dpif *ofproto OVS_UNUSED)
+ofproto_dpif_get_tables_version(struct ofproto_dpif *ofproto)
{
ovs_version_t version;
ovs_mutex_unlock(&rule->stats_mutex);
}
+struct ofproto_dpif_packet_out {
+ struct xlate_cache xcache;
+ struct ofpbuf odp_actions;
+ struct recirc_refs rr;
+ bool needs_help;
+};
+
+
+static struct ofproto_dpif_packet_out *
+ofproto_dpif_packet_out_new(void)
+{
+ struct ofproto_dpif_packet_out *aux = xmalloc(sizeof *aux);
+ xlate_cache_init(&aux->xcache);
+ ofpbuf_init(&aux->odp_actions, 64);
+ aux->rr = RECIRC_REFS_EMPTY_INITIALIZER;
+ aux->needs_help = false;
+
+ return aux;
+}
+
+static void
+ofproto_dpif_packet_out_delete(struct ofproto_dpif_packet_out *aux)
+{
+ if (aux) {
+ xlate_cache_uninit(&aux->xcache);
+ ofpbuf_uninit(&aux->odp_actions);
+ recirc_refs_unref(&aux->rr);
+ free(aux);
+ }
+}
+
+static enum ofperr
+packet_xlate(struct ofproto *ofproto_, struct ofproto_packet_out *opo)
+ OVS_REQUIRES(ofproto_mutex)
+{
+ struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
+ struct xlate_out xout;
+ struct xlate_in xin;
+ enum ofperr error = 0;
+
+ struct ofproto_dpif_packet_out *aux = ofproto_dpif_packet_out_new();
+
+ xlate_in_init(&xin, ofproto, opo->version, opo->flow,
+ opo->flow->in_port.ofp_port, NULL, 0, opo->packet, NULL,
+ &aux->odp_actions);
+ xin.ofpacts = opo->ofpacts;
+ xin.ofpacts_len = opo->ofpacts_len;
+ /* No learning or stats, but collect side effects to xcache. */
+ xin.allow_side_effects = false;
+ xin.resubmit_stats = NULL;
+ xin.xcache = &aux->xcache;
+
+ if (xlate_actions(&xin, &xout) != XLATE_OK) {
+ error = OFPERR_OFPFMFC_UNKNOWN; /* Error processing actions. */
+ goto error_out;
+ } else {
+ /* Prepare learn actions. */
+ struct xc_entry *entry;
+ struct ofpbuf entries = aux->xcache.entries;
+
+ XC_ENTRY_FOR_EACH (entry, &entries) {
+ if (entry->type == XC_LEARN) {
+ struct ofproto_flow_mod *ofm = entry->learn.ofm;
+
+ error = ofproto_flow_mod_learn_refresh(ofm);
+ if (error) {
+ goto error_out;
+ }
+ struct rule *rule = ofm->temp_rule;
+ ofm->learn_adds_rule = (rule->state == RULE_INITIALIZED);
+ if (ofm->learn_adds_rule) {
+ /* If learning on a different bridge, must use its next
+ * version number. */
+ ofm->version = (rule->ofproto == ofproto_)
+ ? opo->version : rule->ofproto->tables_version + 1;
+ error = ofproto_flow_mod_learn_start(ofm);
+ if (error) {
+ goto error_out;
+ }
+ }
+ }
+ }
+
+ /* Success. */
+ aux->needs_help = (xout.slow & SLOW_ACTION) != 0;
+ recirc_refs_swap(&aux->rr, &xout.recircs); /* Hold recirc refs. */
+ }
+ xlate_out_uninit(&xout);
+ opo->aux = aux;
+ return 0;
+
+error_out:
+ xlate_out_uninit(&xout);
+ ofproto_dpif_packet_out_delete(aux);
+ opo->aux = NULL;
+ return error;
+}
+
+/* Push stats and perform side effects of flow translation. */
+static void
+ofproto_dpif_xcache_execute(struct ofproto_dpif *ofproto,
+ struct xlate_cache *xcache,
+ const struct dpif_flow_stats *stats)
+ OVS_REQUIRES(ofproto_mutex)
+{
+ struct xc_entry *entry;
+ struct ofpbuf entries = xcache->entries;
+
+ XC_ENTRY_FOR_EACH (entry, &entries) {
+ switch (entry->type) {
+ case XC_LEARN:
+ /* Finish the learned flows. */
+ if (entry->learn.ofm->learn_adds_rule) {
+ ofproto_flow_mod_learn_finish(entry->learn.ofm, &ofproto->up);
+ }
+ break;
+ case XC_FIN_TIMEOUT:
+ if (stats->tcp_flags & (TCP_FIN | TCP_RST)) {
+ /* 'ofproto_mutex' already held */
+ ofproto_rule_reduce_timeouts__(&entry->fin.rule->up,
+ entry->fin.idle,
+ entry->fin.hard);
+ }
+ break;
+ /* All the rest can be dealt with by the xlate layer. */
+ case XC_TABLE:
+ case XC_RULE:
+ case XC_BOND:
+ case XC_NETDEV:
+ case XC_NETFLOW:
+ case XC_MIRROR:
+ case XC_NORMAL:
+ case XC_GROUP:
+ case XC_TNL_NEIGH:
+ case XC_CONTROLLER:
+ xlate_push_stats_entry(entry, stats);
+ break;
+ default:
+ OVS_NOT_REACHED();
+ }
+ }
+}
+
+static void
+packet_execute(struct ofproto *ofproto_, struct ofproto_packet_out *opo)
+ OVS_REQUIRES(ofproto_mutex)
+{
+ struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
+ struct dpif_flow_stats stats;
+ struct dpif_execute execute;
+
+ struct ofproto_dpif_packet_out *aux = opo->aux;
+ ovs_assert(aux);
+
+ /* Run the side effects from the xcache. */
+ dpif_flow_stats_extract(opo->flow, opo->packet, time_msec(), &stats);
+ ofproto_dpif_xcache_execute(ofproto, &aux->xcache, &stats);
+
+ execute.actions = aux->odp_actions.data;
+ execute.actions_len = aux->odp_actions.size;
+
+ pkt_metadata_from_flow(&opo->packet->md, opo->flow);
+ execute.packet = opo->packet;
+ execute.flow = opo->flow;
+ execute.needs_help = aux->needs_help;
+ execute.probe = false;
+ execute.mtu = 0;
+
+ /* Fix up in_port. */
+ ofproto_dpif_set_packet_odp_port(ofproto, opo->flow->in_port.ofp_port,
+ opo->packet);
+
+ dpif_execute(ofproto->backer->dpif, &execute);
+ ofproto_dpif_packet_out_delete(aux);
+ opo->aux = NULL;
+}
+
static struct group_dpif *group_dpif_cast(const struct ofgroup *group)
{
return group ? CONTAINER_OF(group, struct group_dpif, up) : NULL;
}
}
-static enum ofperr
-packet_out(struct ofproto *ofproto_, struct dp_packet *packet,
- const struct flow *flow,
- const struct ofpact *ofpacts, size_t ofpacts_len)
-{
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
-
- ofproto_dpif_execute_actions(ofproto, flow, NULL, ofpacts,
- ofpacts_len, packet);
- return 0;
-}
-
static enum ofperr
nxt_resume(struct ofproto *ofproto_,
const struct ofputil_packet_in_private *pin)
trace.result = ds;
trace.key = flow; /* Original flow key, used for megaflow. */
trace.flow = *flow; /* May be modified by actions. */
- xlate_in_init(&trace.xin, ofproto, flow, flow->in_port.ofp_port, NULL,
- ntohs(flow->tcp_flags), packet, &trace.wc,
- &trace.odp_actions);
+ xlate_in_init(&trace.xin, ofproto,
+ ofproto_dpif_get_tables_version(ofproto), flow,
+ flow->in_port.ofp_port, NULL, ntohs(flow->tcp_flags),
+ packet, &trace.wc, &trace.odp_actions);
trace.xin.ofpacts = ofpacts;
trace.xin.ofpacts_len = ofpacts_len;
trace.xin.resubmit_hook = trace_resubmit;
rule_destruct,
rule_dealloc,
rule_get_stats,
+ packet_xlate,
+ packet_execute,
set_frag_handling,
- packet_out,
nxt_resume,
set_netflow,
get_netflow_ids,
uint64_t group_dpif_get_selection_method_param(const struct group_dpif *group);
const struct field_array *group_dpif_get_fields(const struct group_dpif *group);
-int ofproto_dpif_execute_actions(struct ofproto_dpif *, const struct flow *,
- struct rule_dpif *, const struct ofpact *,
- size_t ofpacts_len, struct dp_packet *);
-int ofproto_dpif_execute_actions__(struct ofproto_dpif *, const struct flow *,
- struct rule_dpif *, const struct ofpact *,
- size_t ofpacts_len, int indentation,
- int depth, int resubmits,
+int ofproto_dpif_execute_actions(struct ofproto_dpif *, ovs_version_t,
+ const struct flow *, struct rule_dpif *,
+ const struct ofpact *, size_t ofpacts_len,
+ struct dp_packet *);
+int ofproto_dpif_execute_actions__(struct ofproto_dpif *, ovs_version_t,
+ const struct flow *, struct rule_dpif *,
+ const struct ofpact *, size_t ofpacts_len,
+ int indentation, int depth, int resubmits,
struct dp_packet *);
void ofproto_dpif_send_async_msg(struct ofproto_dpif *,
struct ofproto_async_msg *);
struct bfd_cfg;
struct meter;
struct ofoperation;
+struct ofproto_packet_out;
extern struct ovs_mutex ofproto_mutex;
OVS_REQUIRES(ofproto_mutex);
void ofproto_rule_delete(struct ofproto *, struct rule *)
OVS_EXCLUDED(ofproto_mutex);
+void ofproto_rule_reduce_timeouts__(struct rule *rule, uint16_t idle_timeout,
+ uint16_t hard_timeout)
+ OVS_REQUIRES(ofproto_mutex) OVS_EXCLUDED(rule->mutex);
void ofproto_rule_reduce_timeouts(struct rule *rule, uint16_t idle_timeout,
uint16_t hard_timeout)
OVS_EXCLUDED(ofproto_mutex);
uint64_t *byte_count, long long int *used)
/* OVS_EXCLUDED(ofproto_mutex) */;
+ /* Translates actions in 'opo->ofpacts', for 'opo->packet' in flow tables
+ * in version 'opo->version'. This is useful for OpenFlow OFPT_PACKET_OUT.
+ *
+ * This function must validate that it can correctly translate
+ * 'opo->ofpacts'. If not, then it should return an OpenFlow error code.
+ *
+ * 'opo->flow' reflects the flow information for 'opo->packet'. All of the
+ * information in 'opo->flow' is extracted from 'opo->packet', except for
+ * 'in_port', which is assigned to the correct value for the incoming
+ * packet. 'tunnel' and register values should be zeroed. 'packet''s
+ * header pointers and offsets (e.g. packet->l3) are appropriately
+ * initialized. packet->l3 is aligned on a 32-bit boundary.
+ *
+ * Returns 0 if successful, otherwise an OpenFlow error code.
+ *
+ * This function may be called with 'ofproto_mutex' held. */
+ enum ofperr (*packet_xlate)(struct ofproto *,
+ struct ofproto_packet_out *opo);
+
+ /* Executes the datapath actions, translation side-effects, and stats as
+ * produced by ->packet_xlate(). The caller retains ownership of 'opo'.
+ */
+ void (*packet_execute)(struct ofproto *, struct ofproto_packet_out *opo);
+
/* Changes the OpenFlow IP fragment handling policy to 'frag_handling',
* which takes one of the following values, with the corresponding
* meanings:
bool (*set_frag_handling)(struct ofproto *ofproto,
enum ofputil_frag_handling frag_handling);
- /* Implements the OpenFlow OFPT_PACKET_OUT command. The datapath should
- * execute the 'ofpacts_len' bytes of "struct ofpacts" in 'ofpacts'.
- *
- * The caller retains ownership of 'packet' and of 'ofpacts', so
- * ->packet_out() should not modify or free them.
- *
- * This function must validate that it can correctly implement 'ofpacts'.
- * If not, then it should return an OpenFlow error code.
- *
- * 'flow' reflects the flow information for 'packet'. All of the
- * information in 'flow' is extracted from 'packet', except for
- * flow->in_port (see below). flow->tunnel and its register values are
- * zeroed.
- *
- * flow->in_port comes from the OpenFlow OFPT_PACKET_OUT message. The
- * implementation should reject invalid flow->in_port values by returning
- * OFPERR_OFPBRC_BAD_PORT. (If the implementation called
- * ofproto_init_max_ports(), then the client will reject these ports
- * itself.) For consistency, the implementation should consider valid for
- * flow->in_port any value that could possibly be seen in a packet that it
- * passes to connmgr_send_packet_in(). Ideally, even an implementation
- * that never generates packet-ins (e.g. due to hardware limitations)
- * should still allow flow->in_port values for every possible physical port
- * and OFPP_LOCAL. The only virtual ports (those above OFPP_MAX) that the
- * caller will ever pass in as flow->in_port, other than OFPP_LOCAL, are
- * OFPP_NONE and OFPP_CONTROLLER. The implementation should allow both of
- * these, treating each of them as packets generated by the controller as
- * opposed to packets originating from some switch port.
- *
- * (Ordinarily the only effect of flow->in_port is on output actions that
- * involve the input port, such as actions that output to OFPP_IN_PORT,
- * OFPP_FLOOD, or OFPP_ALL. flow->in_port can also affect Nicira extension
- * "resubmit" actions.)
- *
- * 'packet' is not matched against the OpenFlow flow table, so its
- * statistics should not be included in OpenFlow flow statistics.
- *
- * Returns 0 if successful, otherwise an OpenFlow error code. */
- enum ofperr (*packet_out)(struct ofproto *ofproto, struct dp_packet *packet,
- const struct flow *flow,
- const struct ofpact *ofpacts,
- size_t ofpacts_len);
-
enum ofperr (*nxt_resume)(struct ofproto *ofproto,
const struct ofputil_packet_in_private *);
* ofproto_flow_mod_uninit() does NOT clean these up. */
ovs_version_t version; /* Version in which changes take
* effect. */
+ bool learn_adds_rule; /* Learn execution adds a rule. */
struct rule_collection old_rules; /* Affected rules. */
struct rule_collection new_rules; /* Replacement rules. */
};
struct group_collection old_groups; /* Affected groups. */
};
+/* packet_out with execution context. */
+struct ofproto_packet_out {
+ ovs_version_t version;
+ struct dp_packet *packet;
+ struct flow *flow;
+ struct ofpact *ofpacts;
+ size_t ofpacts_len;
+
+ void *aux; /* Provider private. */
+};
+
+void ofproto_packet_out_uninit(struct ofproto_packet_out *);
+
enum ofperr ofproto_flow_mod(struct ofproto *, const struct ofputil_flow_mod *)
OVS_EXCLUDED(ofproto_mutex);
enum ofperr ofproto_flow_mod_init_for_learn(struct ofproto *,
OVS_EXCLUDED(ofproto_mutex);
enum ofperr ofproto_flow_mod_learn(struct ofproto_flow_mod *, bool keep_ref)
OVS_EXCLUDED(ofproto_mutex);
+enum ofperr ofproto_flow_mod_learn_refresh(struct ofproto_flow_mod *ofm);
+enum ofperr ofproto_flow_mod_learn_start(struct ofproto_flow_mod *ofm)
+ OVS_REQUIRES(ofproto_mutex);
+void ofproto_flow_mod_learn_finish(struct ofproto_flow_mod *ofm,
+ struct ofproto *orig_ofproto)
+ OVS_REQUIRES(ofproto_mutex);
void ofproto_add_flow(struct ofproto *, const struct match *, int priority,
const struct ofpact *ofpacts, size_t ofpacts_len)
OVS_EXCLUDED(ofproto_mutex);
*
* - If they use any groups, then 'ofproto' has that group configured.
*
- * Returns 0 if successful, otherwise an OpenFlow error. */
+ * Returns 0 if successful, otherwise an OpenFlow error. Caller must hold
+ * 'ofproto_mutex' for the result to be valid also after this function
+ * returns. */
enum ofperr
ofproto_check_ofpacts(struct ofproto *ofproto,
const struct ofpact ofpacts[], size_t ofpacts_len)
+ OVS_REQUIRES(ofproto_mutex)
{
uint32_t mid;
return 0;
}
+void
+ofproto_packet_out_uninit(struct ofproto_packet_out *opo)
+{
+ dp_packet_delete(opo->packet);
+ opo->packet = NULL;
+ free(opo->flow);
+ opo->flow = NULL;
+ free(opo->ofpacts);
+ opo->ofpacts = NULL;
+ opo->ofpacts_len = 0;
+ ovs_assert(!opo->aux);
+}
+
+/* Takes ownership of po->ofpacts, which must have been malloc'ed. */
+static enum ofperr
+ofproto_packet_out_init(struct ofproto *ofproto,
+ struct ofconn *ofconn,
+ struct ofproto_packet_out *opo,
+ const struct ofputil_packet_out *po)
+{
+ enum ofperr error;
+
+ if (ofp_to_u16(po->in_port) >= ofproto->max_ports
+ && ofp_to_u16(po->in_port) < ofp_to_u16(OFPP_MAX)) {
+ return OFPERR_OFPBRC_BAD_PORT;
+ }
+
+ /* Get payload. */
+ if (po->buffer_id != UINT32_MAX) {
+ return OFPERR_OFPBRC_BUFFER_UNKNOWN;
+ }
+
+ /* Ensure that the L3 header is 32-bit aligned. */
+ opo->packet = dp_packet_clone_data_with_headroom(po->packet,
+ po->packet_len, 2);
+ /* Store struct flow. */
+ opo->flow = xmalloc(sizeof *opo->flow);
+ flow_extract(opo->packet, opo->flow);
+ opo->flow->in_port.ofp_port = po->in_port;
+
+ /* Check actions like for flow mods. We pass a 'table_id' of 0 to
+ * ofproto_check_consistency(), which isn't strictly correct because these
+ * actions aren't in any table. This is OK as 'table_id' is only used to
+ * check instructions (e.g., goto-table), which can't appear on the action
+ * list of a packet-out. */
+ error = ofpacts_check_consistency(po->ofpacts, po->ofpacts_len,
+ opo->flow,
+ u16_to_ofp(ofproto->max_ports), 0,
+ ofproto->n_tables,
+ ofconn_get_protocol(ofconn));
+ if (error) {
+ dp_packet_delete(opo->packet);
+ free(opo->flow);
+ return error;
+ }
+
+ opo->ofpacts = po->ofpacts;
+ opo->ofpacts_len = po->ofpacts_len;
+
+ opo->aux = NULL;
+ return 0;
+}
+
+static enum ofperr
+ofproto_packet_out_start(struct ofproto *ofproto,
+ struct ofproto_packet_out *opo)
+ OVS_REQUIRES(ofproto_mutex)
+{
+ enum ofperr error;
+
+ error = ofproto_check_ofpacts(ofproto, opo->ofpacts, opo->ofpacts_len);
+ if (error) {
+ return error;
+ }
+
+ return ofproto->ofproto_class->packet_xlate(ofproto, opo);
+}
+
+static void
+ofproto_packet_out_finish(struct ofproto *ofproto,
+ struct ofproto_packet_out *opo)
+ OVS_REQUIRES(ofproto_mutex)
+{
+ ofproto->ofproto_class->packet_execute(ofproto, opo);
+}
+
static enum ofperr
handle_packet_out(struct ofconn *ofconn, const struct ofp_header *oh)
+ OVS_EXCLUDED(ofproto_mutex)
{
struct ofproto *p = ofconn_get_ofproto(ofconn);
struct ofputil_packet_out po;
- struct dp_packet *payload;
+ struct ofproto_packet_out opo;
uint64_t ofpacts_stub[1024 / 8];
struct ofpbuf ofpacts;
- struct flow flow;
enum ofperr error;
COVERAGE_INC(ofproto_packet_out);
error = reject_slave_controller(ofconn);
if (error) {
- goto exit;
+ return error;
}
/* Decode message. */
ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
error = ofputil_decode_packet_out(&po, oh, &ofpacts);
if (error) {
- goto exit_free_ofpacts;
- }
- if (ofp_to_u16(po.in_port) >= p->max_ports
- && ofp_to_u16(po.in_port) < ofp_to_u16(OFPP_MAX)) {
- error = OFPERR_OFPBRC_BAD_PORT;
- goto exit_free_ofpacts;
+ ofpbuf_uninit(&ofpacts);
+ return error;
}
- /* Get payload. */
- if (po.buffer_id != UINT32_MAX) {
- error = OFPERR_OFPBRC_BUFFER_UNKNOWN;
- goto exit_free_ofpacts;
+ po.ofpacts = ofpbuf_steal_data(&ofpacts); /* Move to heap. */
+ error = ofproto_packet_out_init(p, ofconn, &opo, &po);
+ if (error) {
+ free(po.ofpacts);
+ return error;
}
- /* Ensure that the L3 header is 32-bit aligned. */
- payload = dp_packet_clone_data_with_headroom(po.packet, po.packet_len, 2);
-
- /* Verify actions against packet, then send packet if successful. */
- flow_extract(payload, &flow);
- flow.in_port.ofp_port = po.in_port;
- /* Check actions like for flow mods. We pass a 'table_id' of 0 to
- * ofproto_check_consistency(), which isn't strictly correct because these
- * actions aren't in any table. This is OK as 'table_id' is only used to
- * check instructions (e.g., goto-table), which can't appear on the action
- * list of a packet-out. */
- error = ofpacts_check_consistency(po.ofpacts, po.ofpacts_len,
- &flow, u16_to_ofp(p->max_ports),
- 0, p->n_tables,
- ofconn_get_protocol(ofconn));
+ ovs_mutex_lock(&ofproto_mutex);
+ opo.version = p->tables_version;
+ error = ofproto_packet_out_start(p, &opo);
if (!error) {
- /* Should hold ofproto_mutex to guarantee state don't
- * change between the check and the execution. */
- error = ofproto_check_ofpacts(p, po.ofpacts, po.ofpacts_len);
- if (!error) {
- error = p->ofproto_class->packet_out(p, payload, &flow,
- po.ofpacts, po.ofpacts_len);
- }
+ ofproto_packet_out_finish(p, &opo);
}
- dp_packet_delete(payload);
+ ovs_mutex_unlock(&ofproto_mutex);
-exit_free_ofpacts:
- ofpbuf_uninit(&ofpacts);
-exit:
+ ofproto_packet_out_uninit(&opo);
return error;
}
return 0;
}
-/* Refresh 'ofm->temp_rule', for which the caller holds a reference, if already
- * in the classifier, insert it otherwise. If the rule has already been
- * removed from the classifier, a new rule is created using 'ofm->temp_rule' as
- * a template and the reference to the old 'ofm->temp_rule' is freed. If
- * 'keep_ref' is true, then a reference to the current rule is held, otherwise
- * it is released and 'ofm->temp_rule' is set to NULL.
- *
- * Caller needs to be the exclusive owner of 'ofm' as it is being manipulated
- * during the call. */
enum ofperr
-ofproto_flow_mod_learn(struct ofproto_flow_mod *ofm, bool keep_ref)
- OVS_EXCLUDED(ofproto_mutex)
+ofproto_flow_mod_learn_refresh(struct ofproto_flow_mod *ofm)
{
enum ofperr error = 0;
if (!rule) {
return OFPERR_OFPFMFC_UNKNOWN;
}
- struct ofproto *ofproto = rule->ofproto;
- enum rule_state state = rule->state;
/* Create a new rule if the current one has been removed from the
* classifier. We need to do this since RCU does not allow a current rule
* to be reinserted before all threads have quiesced.
*
* It is possible that the rule is removed asynchronously, e.g., right
- * after we have read the 'rule->state' above. In this case the next time
+ * after we have read the 'rule->state' below. In this case the next time
* this function is executed the rule will be reinstated. */
- if (state == RULE_REMOVED) {
+ if (rule->state == RULE_REMOVED) {
struct cls_rule cr;
cls_rule_clone(&cr, &rule->cr);
ovs_mutex_lock(&rule->mutex);
- error = ofproto_rule_create(ofproto, &cr, rule->table_id,
+ error = ofproto_rule_create(rule->ofproto, &cr, rule->table_id,
rule->flow_cookie,
rule->idle_timeout,
rule->hard_timeout, rule->flags,
rule->actions->ofpacts_len,
&ofm->temp_rule);
ovs_mutex_unlock(&rule->mutex);
- if (error) {
- goto error_out;
- }
- ofproto_rule_unref(rule);
- rule = ofm->temp_rule;
- state = rule->state;
- }
-
- /* Do we need to insert the rule? In this case no-one else has the
- * reference to this rule yet, but multiple threads may compete inserting
- * duplicate rules. This race is resolved by holding the ofproto_mutex and
- * any possible old rule we don't know of yet being removed by the flow
- * add. */
- if (state == RULE_INITIALIZED) {
- ovs_mutex_lock(&ofproto_mutex);
- ofm->version = ofproto->tables_version + 1;
- /* ofproto_flow_mod_start() consumes the reference, so we take a new
- * one. */
- ofproto_rule_ref(rule);
- error = ofproto_flow_mod_start(ofproto, ofm);
- ofm->temp_rule = rule;
if (!error) {
- ofproto_bump_tables_version(ofproto);
- ofproto_flow_mod_finish(ofproto, ofm, NULL);
- ofmonitor_flush(ofproto->connmgr);
+ ofproto_rule_unref(rule); /* Release old reference. */
}
- ovs_mutex_unlock(&ofproto_mutex);
} else {
/* Refresh the existing rule. */
ovs_mutex_lock(&rule->mutex);
rule->modified = time_msec();
ovs_mutex_unlock(&rule->mutex);
}
+ return error;
+}
+
+enum ofperr
+ofproto_flow_mod_learn_start(struct ofproto_flow_mod *ofm)
+ OVS_REQUIRES(ofproto_mutex)
+{
+ struct rule *rule = ofm->temp_rule;
+
+ /* ofproto_flow_mod_start() consumes the reference, so we
+ * take a new one. */
+ ofproto_rule_ref(rule);
+ enum ofperr error = ofproto_flow_mod_start(rule->ofproto, ofm);
+ ofm->temp_rule = rule;
+
+ return error;
+}
+
+void
+ofproto_flow_mod_learn_finish(struct ofproto_flow_mod *ofm,
+ struct ofproto *orig_ofproto)
+ OVS_REQUIRES(ofproto_mutex)
+{
+ struct rule *rule = rule_collection_rules(&ofm->new_rules)[0];
+
+ /* If learning on a different bridge, must bump its version
+ * number and flush connmgr afterwards. */
+ if (rule->ofproto != orig_ofproto) {
+ ofproto_bump_tables_version(rule->ofproto);
+ }
+ ofproto_flow_mod_finish(rule->ofproto, ofm, NULL);
+ if (rule->ofproto != orig_ofproto) {
+ ofmonitor_flush(rule->ofproto->connmgr);
+ }
+}
+
+/* Refresh 'ofm->temp_rule', for which the caller holds a reference, if already
+ * in the classifier, insert it otherwise. If the rule has already been
+ * removed from the classifier, a new rule is created using 'ofm->temp_rule' as
+ * a template and the reference to the old 'ofm->temp_rule' is freed. If
+ * 'keep_ref' is true, then a reference to the current rule is held, otherwise
+ * it is released and 'ofm->temp_rule' is set to NULL.
+ *
+ * Caller needs to be the exclusive owner of 'ofm' as it is being manipulated
+ * during the call. */
+enum ofperr
+ofproto_flow_mod_learn(struct ofproto_flow_mod *ofm, bool keep_ref)
+ OVS_EXCLUDED(ofproto_mutex)
+{
+ enum ofperr error = ofproto_flow_mod_learn_refresh(ofm);
+ struct rule *rule = ofm->temp_rule;
+
+ /* Do we need to insert the rule? */
+ if (!error && rule->state == RULE_INITIALIZED) {
+ ovs_mutex_lock(&ofproto_mutex);
+ ofm->version = rule->ofproto->tables_version + 1;
+ error = ofproto_flow_mod_learn_start(ofm);
+ if (!error) {
+ ofproto_flow_mod_learn_finish(ofm, NULL);
+ }
+ ovs_mutex_unlock(&ofproto_mutex);
+ }
-error_out:
if (!keep_ref) {
ofproto_rule_unref(rule);
ofm->temp_rule = NULL;
* 'idle_timeout' seconds. Similarly for 'hard_timeout'.
*
* Suitable for implementing OFPACT_FIN_TIMEOUT. */
+void
+ofproto_rule_reduce_timeouts__(struct rule *rule,
+ uint16_t idle_timeout, uint16_t hard_timeout)
+ OVS_REQUIRES(ofproto_mutex)
+ OVS_EXCLUDED(rule->mutex)
+{
+ if (!idle_timeout && !hard_timeout) {
+ return;
+ }
+
+ if (ovs_list_is_empty(&rule->expirable)) {
+ ovs_list_insert(&rule->ofproto->expirable, &rule->expirable);
+ }
+
+ ovs_mutex_lock(&rule->mutex);
+ reduce_timeout(idle_timeout, &rule->idle_timeout);
+ reduce_timeout(hard_timeout, &rule->hard_timeout);
+ ovs_mutex_unlock(&rule->mutex);
+}
+
void
ofproto_rule_reduce_timeouts(struct rule *rule,
uint16_t idle_timeout, uint16_t hard_timeout)