New functions are implemented in the conntrack module to support this.
Signed-off-by: Daniele Di Proietto <diproiettod@vmware.com>
Acked-by: Flavio Leitner <fbl@sysclose.org>
#include <netinet/ip6.h>
#include "conntrack.h"
+#include "ct-dpif.h"
#include "openvswitch/hmap.h"
#include "openvswitch/list.h"
#include "openvswitch/types.h"
struct conntrack_bucket *,
struct dp_packet *pkt, bool reply,
long long now);
+ void (*conn_get_protoinfo)(const struct conn *,
+ struct ct_dpif_protoinfo *);
};
extern struct ct_l4_proto ct_proto_tcp;
return &newconn->up;
}
+/* Converts the CT_WSCALE_* bits stored in 'peer->wscale' into the
+ * CT_DPIF_TCPF_* flags exported through the ct-dpif interface. */
+static uint8_t
+tcp_peer_to_protoinfo_flags(const struct tcp_peer *peer)
+{
+    uint8_t res = 0;
+
+    if (peer->wscale & CT_WSCALE_FLAG) {
+        res |= CT_DPIF_TCPF_WINDOW_SCALE;
+    }
+
+    if (peer->wscale & CT_WSCALE_UNKNOWN) {
+        res |= CT_DPIF_TCPF_BE_LIBERAL;
+    }
+
+    return res;
+}
+
+/* Fills 'protoinfo' with the TCP tracking state of connection 'conn_':
+ * per-direction state, window scale factor and CT_DPIF_TCPF_* flags. */
+static void
+tcp_conn_get_protoinfo(const struct conn *conn_,
+                       struct ct_dpif_protoinfo *protoinfo)
+{
+    const struct conn_tcp *conn = conn_tcp_cast(conn_);
+
+    protoinfo->proto = IPPROTO_TCP;
+    /* peer[0] holds the original direction, peer[1] the reply direction. */
+    protoinfo->tcp.state_orig = conn->peer[0].state;
+    protoinfo->tcp.state_reply = conn->peer[1].state;
+
+    protoinfo->tcp.wscale_orig = conn->peer[0].wscale & CT_WSCALE_MASK;
+    protoinfo->tcp.wscale_reply = conn->peer[1].wscale & CT_WSCALE_MASK;
+
+    protoinfo->tcp.flags_orig = tcp_peer_to_protoinfo_flags(&conn->peer[0]);
+    protoinfo->tcp.flags_reply = tcp_peer_to_protoinfo_flags(&conn->peer[1]);
+}
+
struct ct_l4_proto ct_proto_tcp = {
    .new_conn = tcp_new_conn,
    .valid_new = tcp_valid_new,
    .conn_update = tcp_conn_update,
+    /* Exports TCP-specific connection state through the ct-dpif dump. */
+    .conn_get_protoinfo = tcp_conn_get_protoinfo,
};
#include "conntrack-private.h"
#include "coverage.h"
#include "csum.h"
+#include "ct-dpif.h"
#include "dp-packet.h"
#include "flow.h"
#include "netdev.h"
{
free(conn);
}
+\f
+/* Copies the address of conntrack endpoint 'a' into the ct-dpif address
+ * union 'b'.  'dl_type' selects IPv4 vs IPv6; for any other value 'b' is
+ * left untouched. */
+static void
+ct_endpoint_to_ct_dpif_inet_addr(const struct ct_addr *a,
+                                 union ct_dpif_inet_addr *b,
+                                 ovs_be16 dl_type)
+{
+    if (dl_type == htons(ETH_TYPE_IP)) {
+        b->ip = a->ipv4_aligned;
+    } else if (dl_type == htons(ETH_TYPE_IPV6)){
+        b->in6 = a->ipv6_aligned;
+    }
+}
+
+/* Translates the internal connection key 'key' into the ct-dpif tuple
+ * representation 'tuple' used by the userspace dump interface. */
+static void
+conn_key_to_tuple(const struct conn_key *key, struct ct_dpif_tuple *tuple)
+{
+    if (key->dl_type == htons(ETH_TYPE_IP)) {
+        tuple->l3_type = AF_INET;
+    } else if (key->dl_type == htons(ETH_TYPE_IPV6)) {
+        tuple->l3_type = AF_INET6;
+    }
+    tuple->ip_proto = key->nw_proto;
+    ct_endpoint_to_ct_dpif_inet_addr(&key->src.addr, &tuple->src,
+                                     key->dl_type);
+    ct_endpoint_to_ct_dpif_inet_addr(&key->dst.addr, &tuple->dst,
+                                     key->dl_type);
+
+    if (key->nw_proto == IPPROTO_ICMP || key->nw_proto == IPPROTO_ICMPV6) {
+        /* The conntrack module keeps the ICMP id in the source port field. */
+        tuple->icmp_id = key->src.port;
+        /* ICMP type and code are not tracked. */
+        tuple->icmp_type = 0;
+        tuple->icmp_code = 0;
+    } else {
+        tuple->src_port = key->src.port;
+        tuple->dst_port = key->dst.port;
+    }
+}
+
+/* Fills 'entry' with a ct-dpif representation of connection 'conn'.  'now'
+ * (msec, same clock as 'conn->expiration') is used to compute the remaining
+ * timeout. */
+static void
+conn_to_ct_dpif_entry(const struct conn *conn, struct ct_dpif_entry *entry,
+                      long long now)
+{
+    struct ct_l4_proto *class;
+    long long expiration;
+    memset(entry, 0, sizeof *entry);
+    conn_key_to_tuple(&conn->key, &entry->tuple_orig);
+    conn_key_to_tuple(&conn->rev_key, &entry->tuple_reply);
+
+    entry->zone = conn->key.zone;
+    entry->mark = conn->mark;
+
+    memcpy(&entry->labels, &conn->label, sizeof(entry->labels));
+    /* Not implemented yet */
+    entry->timestamp.start = 0;
+    entry->timestamp.stop = 0;
+
+    /* Remaining lifetime in seconds, or 0 if the connection has expired. */
+    expiration = conn->expiration - now;
+    entry->timeout = (expiration > 0) ? expiration / 1000 : 0;
+
+    /* Let the L4 protocol module add protocol-specific details, if any. */
+    class = l4_protos[conn->key.nw_proto];
+    if (class->conn_get_protoinfo) {
+        class->conn_get_protoinfo(conn, &entry->protoinfo);
+    }
+}
+
+/* Initializes 'dump' to iterate over the connections in 'ct'.  If 'pzone'
+ * is nonnull, only connections in zone '*pzone' will be reported by
+ * conntrack_dump_next().  Always returns 0. */
+int
+conntrack_dump_start(struct conntrack *ct, struct conntrack_dump *dump,
+                     const uint16_t *pzone)
+{
+    memset(dump, 0, sizeof(*dump));
+    if (pzone) {
+        dump->zone = *pzone;
+        dump->filter_zone = true;
+    }
+    dump->ct = ct;
+
+    return 0;
+}
+
+/* Writes the next connection of the dump into 'entry'.  Returns 0 on
+ * success, EOF once every bucket has been exhausted.  'dump' records the
+ * resume position ('bucket' and 'bucket_pos') between calls. */
+int
+conntrack_dump_next(struct conntrack_dump *dump, struct ct_dpif_entry *entry)
+{
+    struct conntrack *ct = dump->ct;
+    long long now = time_msec();
+
+    while (dump->bucket < CONNTRACK_BUCKETS) {
+        struct hmap_node *node;
+
+        /* Each bucket is scanned under its own lock, which is dropped
+         * between calls, so the dump as a whole is not atomic. */
+        ct_lock_lock(&ct->buckets[dump->bucket].lock);
+        for (;;) {
+            struct conn *conn;
+
+            node = hmap_at_position(&ct->buckets[dump->bucket].connections,
+                                    &dump->bucket_pos);
+            if (!node) {
+                break;
+            }
+            INIT_CONTAINER(conn, node, node);
+            if (!dump->filter_zone || conn->key.zone == dump->zone) {
+                conn_to_ct_dpif_entry(conn, entry, now);
+                break;
+            }
+            /* Else continue, until we find an entry in the appropriate zone
+             * or the bucket has been scanned completely. */
+        }
+        ct_lock_unlock(&ct->buckets[dump->bucket].lock);
+
+        if (!node) {
+            /* Bucket exhausted: reset the position and move to the next. */
+            memset(&dump->bucket_pos, 0, sizeof dump->bucket_pos);
+            dump->bucket++;
+        } else {
+            return 0;
+        }
+    }
+    return EOF;
+}
+
+/* Finishes a dump started with conntrack_dump_start().  Nothing to release
+ * currently, so this always succeeds and returns 0. */
+int
+conntrack_dump_done(struct conntrack_dump *dump OVS_UNUSED)
+{
+    return 0;
+}
bool commit, uint16_t zone, const uint32_t *setmark,
const struct ovs_key_ct_labels *setlabel,
const char *helper);
+
+/* Iteration state for dumping the connection tracker.  Initialized by
+ * conntrack_dump_start(), advanced by conntrack_dump_next(), released by
+ * conntrack_dump_done(). */
+struct conntrack_dump {
+    struct conntrack *ct;
+    unsigned bucket;                /* Next bucket to visit. */
+    struct hmap_position bucket_pos; /* Resume position inside the bucket. */
+    bool filter_zone;               /* If true, dump only 'zone'. */
+    uint16_t zone;
+};
+
+struct ct_dpif_entry;
+
+int conntrack_dump_start(struct conntrack *, struct conntrack_dump *,
+                         const uint16_t *pzone);
+int conntrack_dump_next(struct conntrack_dump *, struct ct_dpif_entry *);
+int conntrack_dump_done(struct conntrack_dump *);
\f
/* 'struct ct_lock' is a wrapper for an adaptive mutex. It's useful to try
* different types of locks (e.g. spinlocks) */
#include "cmap.h"
#include "conntrack.h"
#include "coverage.h"
+#include "ct-dpif.h"
#include "csum.h"
#include "dp-packet.h"
#include "dpif.h"
actions_len, dp_execute_cb);
}
+/* dpif-netdev (userspace datapath) state for a ct-dpif dump: wraps the
+ * generic ct_dpif_dump_state with the underlying conntrack iterator. */
+struct dp_netdev_ct_dump {
+    struct ct_dpif_dump_state up;   /* Interface-level dump state. */
+    struct conntrack_dump dump;     /* Underlying conntrack iterator. */
+    struct conntrack *ct;
+    struct dp_netdev *dp;
+};
+
+/* ct_dump_start implementation for dpif-netdev: allocates the dump state
+ * and starts a dump of 'dpif''s connection tracker, optionally restricted
+ * to zone '*pzone'.  On return '*dump_' is the state to pass to
+ * dpif_netdev_ct_dump_next().  Always returns 0. */
+static int
+dpif_netdev_ct_dump_start(struct dpif *dpif, struct ct_dpif_dump_state **dump_,
+                          const uint16_t *pzone)
+{
+    struct dp_netdev *dp = get_dp_netdev(dpif);
+    struct dp_netdev_ct_dump *dump;
+
+    dump = xzalloc(sizeof *dump);
+    dump->dp = dp;
+    dump->ct = &dp->conntrack;
+
+    conntrack_dump_start(&dp->conntrack, &dump->dump, pzone);
+
+    *dump_ = &dump->up;
+
+    return 0;
+}
+
+/* ct_dump_next implementation: forwards to conntrack_dump_next().  Fills
+ * 'entry' and returns 0 on success, EOF when the dump is complete. */
+static int
+dpif_netdev_ct_dump_next(struct dpif *dpif OVS_UNUSED,
+                         struct ct_dpif_dump_state *dump_,
+                         struct ct_dpif_entry *entry)
+{
+    struct dp_netdev_ct_dump *dump;
+
+    INIT_CONTAINER(dump, dump_, up);
+
+    return conntrack_dump_next(&dump->dump, entry);
+}
+
+/* ct_dump_done implementation: finishes the underlying conntrack dump and
+ * frees the state allocated by dpif_netdev_ct_dump_start(), returning the
+ * result of conntrack_dump_done(). */
+static int
+dpif_netdev_ct_dump_done(struct dpif *dpif OVS_UNUSED,
+                         struct ct_dpif_dump_state *dump_)
+{
+    struct dp_netdev_ct_dump *dump;
+    int err;
+
+    INIT_CONTAINER(dump, dump_, up);
+
+    err = conntrack_dump_done(&dump->dump);
+
+    free(dump);
+
+    return err;
+}
+
const struct dpif_class dpif_netdev_class = {
"netdev",
dpif_netdev_init,
dpif_netdev_enable_upcall,
dpif_netdev_disable_upcall,
dpif_netdev_get_datapath_version,
- NULL, /* ct_dump_start */
- NULL, /* ct_dump_next */
- NULL, /* ct_dump_done */
+ dpif_netdev_ct_dump_start,
+ dpif_netdev_ct_dump_next,
+ dpif_netdev_ct_dump_done,
NULL, /* ct_flush */
};