New functions are implemented in the conntrack module to support this.
Signed-off-by: Daniele Di Proietto <diproiettod@vmware.com>
Acked-by: Flavio Leitner <fbl@sysclose.org>
{
return 0;
}
+
+/* Flushes the connection tracker 'ct': deletes every tracked connection,
+ * or, if 'zone' is nonnull, only the connections whose key matches '*zone'.
+ *
+ * Walks each of the CONNTRACK_BUCKETS buckets in turn, holding that
+ * bucket's lock while unlinking and freeing its matching entries, so
+ * lookups in other buckets can proceed concurrently.
+ *
+ * Always returns 0; the int return matches the dpif ct_flush callback
+ * signature. */
+int
+conntrack_flush(struct conntrack *ct, const uint16_t *zone)
+{
+ unsigned i;
+
+ for (i = 0; i < CONNTRACK_BUCKETS; i++) {
+ struct conn *conn, *next;
+
+ ct_lock_lock(&ct->buckets[i].lock);
+ HMAP_FOR_EACH_SAFE(conn, next, node, &ct->buckets[i].connections) {
+ if (!zone || *zone == conn->key.zone) {
+ /* Unlink from the expiration list and the bucket's hash map
+ * while the bucket lock is held, then decrement the global
+ * connection counter before freeing the entry. */
+ ovs_list_remove(&conn->exp_node);
+ hmap_remove(&ct->buckets[i].connections, &conn->node);
+ atomic_count_dec(&ct->n_conn);
+ delete_conn(conn);
+ }
+ }
+ ct_lock_unlock(&ct->buckets[i].lock);
+ }
+
+ return 0;
+}
const uint16_t *pzone);
int conntrack_dump_next(struct conntrack_dump *, struct ct_dpif_entry *);
int conntrack_dump_done(struct conntrack_dump *);
+
+/* Deletes all tracked connections, or only those in '*zone' if 'zone' is
+ * nonnull.  Returns 0. */
+int conntrack_flush(struct conntrack *, const uint16_t *zone);
\f
/* 'struct ct_lock' is a wrapper for an adaptive mutex. It's useful to try
* different types of locks (e.g. spinlocks) */
return err;
}
+/* dpif 'ct_flush' callback for the userspace (netdev) datapath: flushes
+ * the datapath's connection tracker, optionally restricted to '*zone',
+ * by delegating to conntrack_flush(). */
+static int
+dpif_netdev_ct_flush(struct dpif *dpif, const uint16_t *zone)
+{
+ struct dp_netdev *dp = get_dp_netdev(dpif);
+
+ return conntrack_flush(&dp->conntrack, zone);
+}
+
+
const struct dpif_class dpif_netdev_class = {
"netdev",
dpif_netdev_init,
dpif_netdev_ct_dump_start,
dpif_netdev_ct_dump_next,
dpif_netdev_ct_dump_done,
- NULL, /* ct_flush */
+ dpif_netdev_ct_flush,
};
static void