--- /dev/null
+/* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016 Nicira, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. */
+
+#include <config.h>
+
+#include "ofproto/ofproto-dpif-xlate-cache.h"
+
+#include <errno.h>
+#include <arpa/inet.h>
+#include <net/if.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+
+#include "bfd.h"
+#include "bitmap.h"
+#include "bond.h"
+#include "bundle.h"
+#include "byte-order.h"
+#include "coverage.h"
+#include "dp-packet.h"
+#include "dpif.h"
+#include "learn.h"
+#include "mac-learning.h"
+#include "netdev-vport.h"
+#include "ofproto/ofproto-dpif-mirror.h"
+#include "ofproto/ofproto-dpif.h"
+#include "ofproto/ofproto-dpif-xlate.h"
+#include "ofproto/ofproto-provider.h"
+#include "openvswitch/dynamic-string.h"
+#include "openvswitch/vlog.h"
+#include "ovs-router.h"
+#include "packets.h"
+#include "tnl-neigh-cache.h"
+#include "util.h"
+
+VLOG_DEFINE_THIS_MODULE(ofproto_xlate_cache);
+
+struct xlate_cache *
+xlate_cache_new(void)
+{
+ struct xlate_cache *xcache = xmalloc(sizeof *xcache);
+
+ ofpbuf_init(&xcache->entries, 512);
+ return xcache;
+}
+
+struct xc_entry *
+xlate_cache_add_entry(struct xlate_cache *xcache, enum xc_type type)
+{
+ struct xc_entry *entry;
+
+ entry = ofpbuf_put_zeros(&xcache->entries, sizeof *entry);
+ entry->type = type;
+
+ return entry;
+}
+
+static void
+xlate_cache_netdev(struct xc_entry *entry, const struct dpif_flow_stats *stats)
+{
+ if (entry->dev.tx) {
+ netdev_vport_inc_tx(entry->dev.tx, stats);
+ }
+ if (entry->dev.rx) {
+ netdev_vport_inc_rx(entry->dev.rx, stats);
+ }
+ if (entry->dev.bfd) {
+ bfd_account_rx(entry->dev.bfd, stats);
+ }
+}
+
+/* Push stats and perform side effects of flow translation. */
+void
+xlate_push_stats_entry(struct xc_entry *entry,
+ const struct dpif_flow_stats *stats)
+{
+ struct eth_addr dmac;
+
+ switch (entry->type) {
+ case XC_RULE:
+ rule_dpif_credit_stats(entry->rule, stats);
+ break;
+ case XC_BOND:
+ bond_account(entry->bond.bond, entry->bond.flow,
+ entry->bond.vid, stats->n_bytes);
+ break;
+ case XC_NETDEV:
+ xlate_cache_netdev(entry, stats);
+ break;
+ case XC_NETFLOW:
+ netflow_flow_update(entry->nf.netflow, entry->nf.flow,
+ entry->nf.iface, stats);
+ break;
+ case XC_MIRROR:
+ mirror_update_stats(entry->mirror.mbridge,
+ entry->mirror.mirrors,
+ stats->n_packets, stats->n_bytes);
+ break;
+ case XC_LEARN:
+ ofproto_dpif_flow_mod(entry->learn.ofproto, entry->learn.fm);
+ break;
+ case XC_NORMAL:
+ xlate_mac_learning_update(entry->normal.ofproto,
+ entry->normal.in_port,
+ entry->normal.dl_src,
+ entry->normal.vlan,
+ entry->normal.is_gratuitous_arp);
+ break;
+ case XC_FIN_TIMEOUT:
+ if (stats->tcp_flags & (TCP_FIN | TCP_RST)) {
+ rule_dpif_reduce_timeouts(entry->fin.rule, entry->fin.idle,
+ entry->fin.hard);
+ }
+ break;
+ case XC_GROUP:
+ group_dpif_credit_stats(entry->group.group, entry->group.bucket,
+ stats);
+ break;
+ case XC_TNL_NEIGH:
+ /* Lookup neighbor to avoid timeout. */
+ tnl_neigh_lookup(entry->tnl_neigh_cache.br_name,
+ &entry->tnl_neigh_cache.d_ipv6, &dmac);
+ break;
+ default:
+ OVS_NOT_REACHED();
+ }
+}
+
+void
+xlate_push_stats(struct xlate_cache *xcache,
+ const struct dpif_flow_stats *stats)
+{
+ if (!stats->n_packets) {
+ return;
+ }
+
+ struct xc_entry *entry;
+ struct ofpbuf entries = xcache->entries;
+ XC_ENTRY_FOR_EACH (entry, &entries) {
+ xlate_push_stats_entry(entry, stats);
+ }
+}
+
+static void
+xlate_dev_unref(struct xc_entry *entry)
+{
+ if (entry->dev.tx) {
+ netdev_close(entry->dev.tx);
+ }
+ if (entry->dev.rx) {
+ netdev_close(entry->dev.rx);
+ }
+ if (entry->dev.bfd) {
+ bfd_unref(entry->dev.bfd);
+ }
+}
+
+static void
+xlate_cache_clear_netflow(struct netflow *netflow, struct flow *flow)
+{
+ netflow_flow_clear(netflow, flow);
+ netflow_unref(netflow);
+ free(flow);
+}
+
+void
+xlate_cache_clear_entry(struct xc_entry *entry)
+{
+ switch (entry->type) {
+ case XC_RULE:
+ rule_dpif_unref(entry->rule);
+ break;
+ case XC_BOND:
+ free(entry->bond.flow);
+ bond_unref(entry->bond.bond);
+ break;
+ case XC_NETDEV:
+ xlate_dev_unref(entry);
+ break;
+ case XC_NETFLOW:
+ xlate_cache_clear_netflow(entry->nf.netflow, entry->nf.flow);
+ break;
+ case XC_MIRROR:
+ mbridge_unref(entry->mirror.mbridge);
+ break;
+ case XC_LEARN:
+ free(entry->learn.fm);
+ ofpbuf_delete(entry->learn.ofpacts);
+ break;
+ case XC_NORMAL:
+ break;
+ case XC_FIN_TIMEOUT:
+        /* 'fin.rule' is always already held as an XC_RULE, which
+         * has already released its reference above. */
+ break;
+ case XC_GROUP:
+ group_dpif_unref(entry->group.group);
+ break;
+ case XC_TNL_NEIGH:
+ break;
+ default:
+ OVS_NOT_REACHED();
+ }
+}
+
+void
+xlate_cache_clear(struct xlate_cache *xcache)
+{
+ if (!xcache) {
+ return;
+ }
+
+ struct xc_entry *entry;
+ struct ofpbuf entries = xcache->entries;
+ XC_ENTRY_FOR_EACH (entry, &entries) {
+ xlate_cache_clear_entry(entry);
+ }
+
+ ofpbuf_clear(&xcache->entries);
+}
+
+void
+xlate_cache_delete(struct xlate_cache *xcache)
+{
+ xlate_cache_clear(xcache);
+ ofpbuf_uninit(&xcache->entries);
+ free(xcache);
+}
--- /dev/null
+/* Copyright (c) 2016 Nicira, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. */
+
+#ifndef OFPROTO_DPIF_XLATE_CACHE_H
+#define OFPROTO_DPIF_XLATE_CACHE_H 1
+
+#include <net/if.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+
+#include "openvswitch/types.h"
+#include "dp-packet.h"
+#include "odp-util.h"
+#include "ofproto/ofproto-dpif-mirror.h"
+#include "openvswitch/ofpbuf.h"
+
+struct bfd;
+struct bond;
+struct dpif_flow_stats;
+struct flow;
+struct group_dpif;
+struct mbridge;
+struct netdev;
+struct netflow;
+struct ofpbuf;
+struct ofproto_dpif;
+struct ofputil_bucket;
+struct ofputil_flow_mod;
+struct rule_dpif;
+
+enum xc_type {
+ XC_RULE,
+ XC_BOND,
+ XC_NETDEV,
+ XC_NETFLOW,
+ XC_MIRROR,
+ XC_LEARN,
+ XC_NORMAL,
+ XC_FIN_TIMEOUT,
+ XC_GROUP,
+ XC_TNL_NEIGH,
+};
+
+/* xlate_cache entries hold enough information to perform the side effects of
+ * xlate_actions() for a rule, without needing to perform rule translation
+ * from scratch. The primary usage of these is to submit statistics to objects
+ * that a flow relates to, although they may be used for other effects as well
+ * (for instance, refreshing hard timeouts for learned flows).
+ *
+ * An explicit reference is taken to all pointers other than the ones for
+ * struct ofproto_dpif. ofproto_dpif pointers are explicitly protected by
+ * destroying all xlate caches before the ofproto is destroyed. */
+struct xc_entry {
+ enum xc_type type;
+ union {
+ struct rule_dpif *rule;
+ struct {
+ struct netdev *tx;
+ struct netdev *rx;
+ struct bfd *bfd;
+ } dev;
+ struct {
+ struct netflow *netflow;
+ struct flow *flow;
+ ofp_port_t iface;
+ } nf;
+ struct {
+ struct mbridge *mbridge;
+ mirror_mask_t mirrors;
+ } mirror;
+ struct {
+ struct bond *bond;
+ struct flow *flow;
+ uint16_t vid;
+ } bond;
+ struct {
+ struct ofproto_dpif *ofproto;
+ struct ofputil_flow_mod *fm;
+ struct ofpbuf *ofpacts;
+ } learn;
+ struct {
+ struct ofproto_dpif *ofproto;
+ ofp_port_t in_port;
+ struct eth_addr dl_src;
+ int vlan;
+ bool is_gratuitous_arp;
+ } normal;
+ struct {
+ struct rule_dpif *rule;
+ uint16_t idle;
+ uint16_t hard;
+ } fin;
+ struct {
+ struct group_dpif *group;
+ struct ofputil_bucket *bucket;
+ } group;
+ struct {
+ char br_name[IFNAMSIZ];
+ struct in6_addr d_ipv6;
+ } tnl_neigh_cache;
+ };
+};
+
+#define XC_ENTRY_FOR_EACH(ENTRY, ENTRIES) \
+ for (ENTRY = ofpbuf_try_pull(ENTRIES, sizeof *ENTRY); \
+ ENTRY; \
+ ENTRY = ofpbuf_try_pull(ENTRIES, sizeof *ENTRY))
+
+struct xlate_cache {
+ struct ofpbuf entries;
+};
+
+struct xlate_cache *xlate_cache_new(void);
+struct xc_entry *xlate_cache_add_entry(struct xlate_cache *, enum xc_type);
+void xlate_push_stats_entry(struct xc_entry *, const struct dpif_flow_stats *);
+void xlate_push_stats(struct xlate_cache *, const struct dpif_flow_stats *);
+void xlate_cache_clear_entry(struct xc_entry *);
+void xlate_cache_clear(struct xlate_cache *);
+void xlate_cache_delete(struct xlate_cache *);
+
+#endif /* ofproto-dpif-xlate-cache.h */
#include "ofproto/ofproto-dpif-mirror.h"
#include "ofproto/ofproto-dpif-monitor.h"
#include "ofproto/ofproto-dpif-sflow.h"
+#include "ofproto/ofproto-dpif-xlate-cache.h"
#include "ofproto/ofproto-dpif.h"
#include "ofproto/ofproto-provider.h"
#include "openvswitch/dynamic-string.h"
uint8_t dscp; /* DSCP bits to mark outgoing traffic with. */
};
-enum xc_type {
- XC_RULE,
- XC_BOND,
- XC_NETDEV,
- XC_NETFLOW,
- XC_MIRROR,
- XC_LEARN,
- XC_NORMAL,
- XC_FIN_TIMEOUT,
- XC_GROUP,
- XC_TNL_NEIGH,
-};
-
-/* xlate_cache entries hold enough information to perform the side effects of
- * xlate_actions() for a rule, without needing to perform rule translation
- * from scratch. The primary usage of these is to submit statistics to objects
- * that a flow relates to, although they may be used for other effects as well
- * (for instance, refreshing hard timeouts for learned flows). */
-struct xc_entry {
- enum xc_type type;
- union {
- struct rule_dpif *rule;
- struct {
- struct netdev *tx;
- struct netdev *rx;
- struct bfd *bfd;
- } dev;
- struct {
- struct netflow *netflow;
- struct flow *flow;
- ofp_port_t iface;
- } nf;
- struct {
- struct mbridge *mbridge;
- mirror_mask_t mirrors;
- } mirror;
- struct {
- struct bond *bond;
- struct flow *flow;
- uint16_t vid;
- } bond;
- struct {
- struct ofproto_dpif *ofproto;
- struct ofputil_flow_mod *fm;
- struct ofpbuf *ofpacts;
- } learn;
- struct {
- struct ofproto_dpif *ofproto;
- ofp_port_t in_port;
- struct eth_addr dl_src;
- int vlan;
- bool is_gratuitous_arp;
- } normal;
- struct {
- struct rule_dpif *rule;
- uint16_t idle;
- uint16_t hard;
- } fin;
- struct {
- struct group_dpif *group;
- struct ofputil_bucket *bucket;
- } group;
- struct {
- char br_name[IFNAMSIZ];
- struct in6_addr d_ipv6;
- } tnl_neigh_cache;
- } u;
-};
-
-#define XC_ENTRY_FOR_EACH(ENTRY, ENTRIES, XCACHE) \
- ENTRIES = XCACHE->entries; \
- for (ENTRY = ofpbuf_try_pull(&ENTRIES, sizeof *ENTRY); \
- ENTRY; \
- ENTRY = ofpbuf_try_pull(&ENTRIES, sizeof *ENTRY))
-
-struct xlate_cache {
- struct ofpbuf entries;
-};
-
/* Xlate config contains hash maps of all bridges, bundles and ports.
* Xcfgp contains the pointer to the current xlate configuration.
* When the main thread needs to change the configuration, it copies xcfgp to
static bool dscp_from_skb_priority(const struct xport *, uint32_t skb_priority,
uint8_t *dscp);
-static struct xc_entry *xlate_cache_add_entry(struct xlate_cache *xc,
- enum xc_type type);
static void xlate_xbridge_init(struct xlate_cfg *, struct xbridge *);
static void xlate_xbundle_init(struct xlate_cfg *, struct xbundle *);
static void xlate_xport_init(struct xlate_cfg *, struct xport *);
struct xc_entry *entry;
entry = xlate_cache_add_entry(ctx->xin->xcache, XC_MIRROR);
- entry->u.mirror.mbridge = mbridge_ref(xbridge->mbridge);
- entry->u.mirror.mirrors = mirrors;
+ entry->mirror.mbridge = mbridge_ref(xbridge->mbridge);
+ entry->mirror.mirrors = mirrors;
}
/* 'mirrors' is a bit-mask of candidates for mirroring. Iterate as long as
flow = &ctx->xin->flow;
entry = xlate_cache_add_entry(ctx->xin->xcache, XC_BOND);
- entry->u.bond.bond = bond_ref(out_xbundle->bond);
- entry->u.bond.flow = xmemdup(flow, sizeof *flow);
- entry->u.bond.vid = vid;
+ entry->bond.bond = bond_ref(out_xbundle->bond);
+ entry->bond.flow = xmemdup(flow, sizeof *flow);
+ entry->bond.vid = vid;
}
}
}
/* Save just enough info to update mac learning table later. */
entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NORMAL);
- entry->u.normal.ofproto = ctx->xbridge->ofproto;
- entry->u.normal.in_port = flow->in_port.ofp_port;
- entry->u.normal.dl_src = flow->dl_src;
- entry->u.normal.vlan = vlan;
- entry->u.normal.is_gratuitous_arp = is_grat_arp;
+ entry->normal.ofproto = ctx->xbridge->ofproto;
+ entry->normal.in_port = flow->in_port.ofp_port;
+ entry->normal.dl_src = flow->dl_src;
+ entry->normal.vlan = vlan;
+ entry->normal.is_gratuitous_arp = is_grat_arp;
}
/* Determine output bundle. */
struct xc_entry *entry;
entry = xlate_cache_add_entry(ctx->xin->xcache, XC_TNL_NEIGH);
- ovs_strlcpy(entry->u.tnl_neigh_cache.br_name, out_dev->xbridge->name,
- sizeof entry->u.tnl_neigh_cache.br_name);
- entry->u.tnl_neigh_cache.d_ipv6 = d_ip6;
+ ovs_strlcpy(entry->tnl_neigh_cache.br_name, out_dev->xbridge->name,
+ sizeof entry->tnl_neigh_cache.br_name);
+ entry->tnl_neigh_cache.d_ipv6 = d_ip6;
}
xlate_report(ctx, "tunneling from "ETH_ADDR_FMT" %s"
struct xc_entry *entry;
entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
- entry->u.dev.tx = netdev_ref(xport->netdev);
- entry->u.dev.rx = netdev_ref(peer->netdev);
- entry->u.dev.bfd = bfd_ref(peer->bfd);
+ entry->dev.tx = netdev_ref(xport->netdev);
+ entry->dev.rx = netdev_ref(peer->netdev);
+ entry->dev.bfd = bfd_ref(peer->bfd);
}
return;
}
struct xc_entry *entry;
entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
- entry->u.dev.tx = netdev_ref(xport->netdev);
+ entry->dev.tx = netdev_ref(xport->netdev);
}
out_port = odp_port;
if (ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
struct xc_entry *entry;
entry = xlate_cache_add_entry(ctx->xin->xcache, XC_RULE);
- entry->u.rule = rule;
+ entry->rule = rule;
rule_dpif_ref(rule);
}
xlate_recursively(ctx, rule, table_id <= old_table_id);
struct xc_entry *entry;
entry = xlate_cache_add_entry(ctx->xin->xcache, XC_GROUP);
- entry->u.group.group = group;
- entry->u.group.bucket = bucket;
+ entry->group.group = group;
+ entry->group.bucket = bucket;
}
}
struct xc_entry *entry;
entry = xlate_cache_add_entry(ctx->xin->xcache, XC_LEARN);
- entry->u.learn.ofproto = ctx->xbridge->ofproto;
- entry->u.learn.fm = xmalloc(sizeof *entry->u.learn.fm);
- entry->u.learn.ofpacts = ofpbuf_new(64);
- xlate_learn_action__(ctx, learn, entry->u.learn.fm,
- entry->u.learn.ofpacts);
+ entry->learn.ofproto = ctx->xbridge->ofproto;
+ entry->learn.fm = xmalloc(sizeof *entry->learn.fm);
+ entry->learn.ofpacts = ofpbuf_new(64);
+ xlate_learn_action__(ctx, learn, entry->learn.fm,
+ entry->learn.ofpacts);
} else if (ctx->xin->may_learn) {
uint64_t ofpacts_stub[1024 / 8];
struct ofputil_flow_mod fm;
entry = xlate_cache_add_entry(ctx->xin->xcache, XC_FIN_TIMEOUT);
/* XC_RULE already holds a reference on the rule, none is taken
* here. */
- entry->u.fin.rule = ctx->rule;
- entry->u.fin.idle = oft->fin_idle_timeout;
- entry->u.fin.hard = oft->fin_hard_timeout;
+ entry->fin.rule = ctx->rule;
+ entry->fin.idle = oft->fin_idle_timeout;
+ entry->fin.hard = oft->fin_hard_timeout;
}
}
}
struct xc_entry *entry;
entry = xlate_cache_add_entry(ctx.xin->xcache, XC_RULE);
- entry->u.rule = ctx.rule;
+ entry->rule = ctx.rule;
rule_dpif_ref(ctx.rule);
}
struct xc_entry *entry;
entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETDEV);
- entry->u.dev.rx = netdev_ref(in_port->netdev);
- entry->u.dev.bfd = bfd_ref(in_port->bfd);
+ entry->dev.rx = netdev_ref(in_port->netdev);
+ entry->dev.bfd = bfd_ref(in_port->bfd);
}
}
struct xc_entry *entry;
entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETFLOW);
- entry->u.nf.netflow = netflow_ref(xbridge->netflow);
- entry->u.nf.flow = xmemdup(flow, sizeof *flow);
- entry->u.nf.iface = ctx.nf_output_iface;
+ entry->nf.netflow = netflow_ref(xbridge->netflow);
+ entry->nf.flow = xmemdup(flow, sizeof *flow);
+ entry->nf.iface = ctx.nf_output_iface;
}
}
ofpacts.data, ofpacts.size, packet);
}
-struct xlate_cache *
-xlate_cache_new(void)
-{
- struct xlate_cache *xcache = xmalloc(sizeof *xcache);
-
- ofpbuf_init(&xcache->entries, 512);
- return xcache;
-}
-
-static struct xc_entry *
-xlate_cache_add_entry(struct xlate_cache *xcache, enum xc_type type)
-{
- struct xc_entry *entry;
-
- entry = ofpbuf_put_zeros(&xcache->entries, sizeof *entry);
- entry->type = type;
-
- return entry;
-}
-
-static void
-xlate_cache_netdev(struct xc_entry *entry, const struct dpif_flow_stats *stats)
-{
- if (entry->u.dev.tx) {
- netdev_vport_inc_tx(entry->u.dev.tx, stats);
- }
- if (entry->u.dev.rx) {
- netdev_vport_inc_rx(entry->u.dev.rx, stats);
- }
- if (entry->u.dev.bfd) {
- bfd_account_rx(entry->u.dev.bfd, stats);
- }
-}
-
-static void
+void
xlate_mac_learning_update(const struct ofproto_dpif *ofproto,
ofp_port_t in_port, struct eth_addr dl_src,
int vlan, bool is_grat_arp)
update_learning_table(xbridge, xbundle, dl_src, vlan, is_grat_arp);
}
-
-/* Push stats and perform side effects of flow translation. */
-void
-xlate_push_stats(struct xlate_cache *xcache,
- const struct dpif_flow_stats *stats)
-{
- struct xc_entry *entry;
- struct ofpbuf entries = xcache->entries;
- struct eth_addr dmac;
-
- if (!stats->n_packets) {
- return;
- }
-
- XC_ENTRY_FOR_EACH (entry, entries, xcache) {
- switch (entry->type) {
- case XC_RULE:
- rule_dpif_credit_stats(entry->u.rule, stats);
- break;
- case XC_BOND:
- bond_account(entry->u.bond.bond, entry->u.bond.flow,
- entry->u.bond.vid, stats->n_bytes);
- break;
- case XC_NETDEV:
- xlate_cache_netdev(entry, stats);
- break;
- case XC_NETFLOW:
- netflow_flow_update(entry->u.nf.netflow, entry->u.nf.flow,
- entry->u.nf.iface, stats);
- break;
- case XC_MIRROR:
- mirror_update_stats(entry->u.mirror.mbridge,
- entry->u.mirror.mirrors,
- stats->n_packets, stats->n_bytes);
- break;
- case XC_LEARN:
- ofproto_dpif_flow_mod(entry->u.learn.ofproto, entry->u.learn.fm);
- break;
- case XC_NORMAL:
- xlate_mac_learning_update(entry->u.normal.ofproto,
- entry->u.normal.in_port,
- entry->u.normal.dl_src,
- entry->u.normal.vlan,
- entry->u.normal.is_gratuitous_arp);
- break;
- case XC_FIN_TIMEOUT:
- xlate_fin_timeout__(entry->u.fin.rule, stats->tcp_flags,
- entry->u.fin.idle, entry->u.fin.hard);
- break;
- case XC_GROUP:
- group_dpif_credit_stats(entry->u.group.group, entry->u.group.bucket,
- stats);
- break;
- case XC_TNL_NEIGH:
- /* Lookup neighbor to avoid timeout. */
- tnl_neigh_lookup(entry->u.tnl_neigh_cache.br_name,
- &entry->u.tnl_neigh_cache.d_ipv6, &dmac);
- break;
- default:
- OVS_NOT_REACHED();
- }
- }
-}
-
-static void
-xlate_dev_unref(struct xc_entry *entry)
-{
- if (entry->u.dev.tx) {
- netdev_close(entry->u.dev.tx);
- }
- if (entry->u.dev.rx) {
- netdev_close(entry->u.dev.rx);
- }
- if (entry->u.dev.bfd) {
- bfd_unref(entry->u.dev.bfd);
- }
-}
-
-static void
-xlate_cache_clear_netflow(struct netflow *netflow, struct flow *flow)
-{
- netflow_flow_clear(netflow, flow);
- netflow_unref(netflow);
- free(flow);
-}
-
-void
-xlate_cache_clear(struct xlate_cache *xcache)
-{
- struct xc_entry *entry;
- struct ofpbuf entries;
-
- if (!xcache) {
- return;
- }
-
- XC_ENTRY_FOR_EACH (entry, entries, xcache) {
- switch (entry->type) {
- case XC_RULE:
- rule_dpif_unref(entry->u.rule);
- break;
- case XC_BOND:
- free(entry->u.bond.flow);
- bond_unref(entry->u.bond.bond);
- break;
- case XC_NETDEV:
- xlate_dev_unref(entry);
- break;
- case XC_NETFLOW:
- xlate_cache_clear_netflow(entry->u.nf.netflow, entry->u.nf.flow);
- break;
- case XC_MIRROR:
- mbridge_unref(entry->u.mirror.mbridge);
- break;
- case XC_LEARN:
- free(entry->u.learn.fm);
- ofpbuf_delete(entry->u.learn.ofpacts);
- break;
- case XC_NORMAL:
- break;
- case XC_FIN_TIMEOUT:
- /* 'u.fin.rule' is always already held as a XC_RULE, which
- * has already released it's reference above. */
- break;
- case XC_GROUP:
- group_dpif_unref(entry->u.group.group);
- break;
- case XC_TNL_NEIGH:
- break;
- default:
- OVS_NOT_REACHED();
- }
- }
-
- ofpbuf_clear(&xcache->entries);
-}
-
-void
-xlate_cache_delete(struct xlate_cache *xcache)
-{
- xlate_cache_clear(xcache);
- ofpbuf_uninit(&xcache->entries);
- free(xcache);
-}