/*
- * Copyright (c) 2010, 2011, 2012, 2013, 2015, 2016 Nicira, Inc.
+ * Copyright (c) 2010, 2011, 2012, 2013, 2015, 2016, 2017 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include "dp-packet.h"
#include "dpif-netdev.h"
#include "flow.h"
+#include "netdev-offload-provider.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "odp-util.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/list.h"
+#include "openvswitch/match.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/ofpbuf.h"
#include "openvswitch/vlog.h"
#include "ovs-atomic.h"
#include "packets.h"
#include "pcap-file.h"
-#include "poll-loop.h"
+#include "openvswitch/poll-loop.h"
#include "openvswitch/shash.h"
#include "sset.h"
#include "stream.h"
VLOG_DEFINE_THIS_MODULE(netdev_dummy);
+#define C_STATS_SIZE 2
+
struct reconnect;
+/* One bidirectional packet stream of a dummy connection: 'txq' holds
+ * packets queued for transmission; 'rxbuf' accumulates received bytes
+ * until a whole packet is available (see the rx path that pulls the
+ * 2-byte length prefix before queueing). */
struct dummy_packet_stream {
struct stream *stream;
- struct dp_packet rxbuf;
struct ovs_list txq;
+ struct dp_packet rxbuf;
};
enum dummy_packet_conn_type {
union {
struct dummy_packet_pconn pconn;
struct dummy_packet_rconn rconn;
- } u;
+ };
};
struct pkt_list_node {
struct ovs_list list_node;
};
+/* One flow accepted for "hardware" offload on a dummy netdev.  Stored in
+ * netdev_dummy's 'offloaded_flows' hmap, keyed by a hash of 'ufid'.
+ * 'mark' is the flow mark attached to packets matching 'match'. */
+struct offloaded_flow {
+ struct hmap_node node;
+ ovs_u128 ufid;
+ struct match match;
+ uint32_t mark;
+};
+
/* Protects 'dummy_list'. */
static struct ovs_mutex dummy_list_mutex = OVS_MUTEX_INITIALIZER;
struct eth_addr hwaddr OVS_GUARDED;
int mtu OVS_GUARDED;
struct netdev_stats stats OVS_GUARDED;
+ struct netdev_custom_counter custom_stats[C_STATS_SIZE] OVS_GUARDED;
enum netdev_flags flags OVS_GUARDED;
int ifindex OVS_GUARDED;
int numa_id OVS_GUARDED;
struct dummy_packet_conn conn OVS_GUARDED;
- FILE *tx_pcap, *rxq_pcap OVS_GUARDED;
+ struct pcap_file *tx_pcap, *rxq_pcap OVS_GUARDED;
struct in_addr address, netmask;
struct in6_addr ipv6, ipv6_mask;
struct ovs_list rxes OVS_GUARDED; /* List of child "netdev_rxq_dummy"s. */
+ struct hmap offloaded_flows OVS_GUARDED;
+
/* The following properties are for dummy-pmd and they cannot be changed
* when a device is running, so we remember the request and update them
* next time netdev_dummy_reconfigure() is called. */
static unixctl_cb_func netdev_dummy_set_admin_state;
static int netdev_dummy_construct(struct netdev *);
static void netdev_dummy_queue_packet(struct netdev_dummy *,
- struct dp_packet *, int);
+ struct dp_packet *, struct flow *, int);
static void dummy_packet_stream_close(struct dummy_packet_stream *);
if (retval == n && dp_packet_size(&s->rxbuf) > 2) {
dp_packet_pull(&s->rxbuf, 2);
netdev_dummy_queue_packet(dev,
- dp_packet_clone(&s->rxbuf), 0);
+ dp_packet_clone(&s->rxbuf), NULL, 0);
dp_packet_clear(&s->rxbuf);
}
} else if (retval != -EAGAIN) {
switch (conn->type) {
case PASSIVE:
- smap_add(args, "pstream", pstream_get_name(conn->u.pconn.pstream));
+ smap_add(args, "pstream", pstream_get_name(conn->pconn.pstream));
break;
case ACTIVE:
- smap_add(args, "stream", stream_get_name(conn->u.rconn.rstream->stream));
+ smap_add(args, "stream", stream_get_name(conn->rconn.rstream->stream));
break;
case NONE:
dummy_packet_conn_close(struct dummy_packet_conn *conn)
{
int i;
- struct dummy_packet_pconn *pconn = &conn->u.pconn;
- struct dummy_packet_rconn *rconn = &conn->u.rconn;
+ struct dummy_packet_pconn *pconn = &conn->pconn;
+ struct dummy_packet_rconn *rconn = &conn->rconn;
switch (conn->type) {
case PASSIVE:
switch (conn->type) {
case PASSIVE:
if (pstream &&
- !strcmp(pstream_get_name(conn->u.pconn.pstream), pstream)) {
+ !strcmp(pstream_get_name(conn->pconn.pstream), pstream)) {
return;
}
dummy_packet_conn_close(conn);
break;
case ACTIVE:
if (stream &&
- !strcmp(stream_get_name(conn->u.rconn.rstream->stream), stream)) {
+ !strcmp(stream_get_name(conn->rconn.rstream->stream), stream)) {
return;
}
dummy_packet_conn_close(conn);
if (pstream) {
int error;
- error = pstream_open(pstream, &conn->u.pconn.pstream, DSCP_DEFAULT);
+ error = pstream_open(pstream, &conn->pconn.pstream, DSCP_DEFAULT);
if (error) {
VLOG_WARN("%s: open failed (%s)", pstream, ovs_strerror(error));
} else {
reconnect_enable(reconnect, time_msec());
reconnect_set_backoff(reconnect, 100, INT_MAX);
reconnect_set_probe_interval(reconnect, 0);
- conn->u.rconn.reconnect = reconnect;
+ conn->rconn.reconnect = reconnect;
conn->type = ACTIVE;
error = stream_open(stream, &active_stream, DSCP_DEFAULT);
- conn->u.rconn.rstream = dummy_packet_stream_create(active_stream);
+ conn->rconn.rstream = dummy_packet_stream_create(active_stream);
switch (error) {
case 0:
default:
reconnect_connect_failed(reconnect, time_msec(), error);
stream_close(active_stream);
- conn->u.rconn.rstream->stream = NULL;
+ conn->rconn.rstream->stream = NULL;
break;
}
}
OVS_REQUIRES(dev->mutex)
{
struct stream *new_stream;
- struct dummy_packet_pconn *pconn = &dev->conn.u.pconn;
+ struct dummy_packet_pconn *pconn = &dev->conn.pconn;
int error;
size_t i;
dummy_rconn_run(struct netdev_dummy *dev)
OVS_REQUIRES(dev->mutex)
{
- struct dummy_packet_rconn *rconn = &dev->conn.u.rconn;
+ struct dummy_packet_rconn *rconn = &dev->conn.rconn;
switch (reconnect_run(rconn->reconnect, time_msec())) {
case RECONNECT_CONNECT:
int i;
switch (conn->type) {
case PASSIVE:
- pstream_wait(conn->u.pconn.pstream);
- for (i = 0; i < conn->u.pconn.n_streams; i++) {
- struct dummy_packet_stream *s = conn->u.pconn.streams[i];
+ pstream_wait(conn->pconn.pstream);
+ for (i = 0; i < conn->pconn.n_streams; i++) {
+ struct dummy_packet_stream *s = conn->pconn.streams[i];
dummy_packet_stream_wait(s);
}
break;
case ACTIVE:
- if (reconnect_is_connected(conn->u.rconn.reconnect)) {
- dummy_packet_stream_wait(conn->u.rconn.rstream);
+ if (reconnect_is_connected(conn->rconn.reconnect)) {
+ dummy_packet_stream_wait(conn->rconn.rstream);
}
break;
switch (conn->type) {
case PASSIVE:
- for (i = 0; i < conn->u.pconn.n_streams; i++) {
- struct dummy_packet_stream *s = conn->u.pconn.streams[i];
+ for (i = 0; i < conn->pconn.n_streams; i++) {
+ struct dummy_packet_stream *s = conn->pconn.streams[i];
dummy_packet_stream_send(s, buffer, size);
- pstream_wait(conn->u.pconn.pstream);
+ pstream_wait(conn->pconn.pstream);
}
break;
case ACTIVE:
- if (reconnect_is_connected(conn->u.rconn.reconnect)) {
- dummy_packet_stream_send(conn->u.rconn.rstream, buffer, size);
- dummy_packet_stream_wait(conn->u.rconn.rstream);
+ if (reconnect_is_connected(conn->rconn.reconnect)) {
+ dummy_packet_stream_send(conn->rconn.rstream, buffer, size);
+ dummy_packet_stream_wait(conn->rconn.rstream);
}
break;
enum dummy_netdev_conn_state state;
if (conn->type == ACTIVE) {
- if (reconnect_is_connected(conn->u.rconn.reconnect)) {
+ if (reconnect_is_connected(conn->rconn.reconnect)) {
state = CONN_STATE_CONNECTED;
} else {
state = CONN_STATE_NOT_CONNECTED;
netdev->hwaddr.ea[4] = n >> 8;
netdev->hwaddr.ea[5] = n;
netdev->mtu = 1500;
- netdev->flags = 0;
+ netdev->flags = NETDEV_UP;
netdev->ifindex = -EOPNOTSUPP;
netdev->requested_n_rxq = netdev_->n_rxq;
netdev->requested_n_txq = netdev_->n_txq;
netdev->numa_id = 0;
+ memset(&netdev->custom_stats, 0, sizeof(netdev->custom_stats));
+
+ ovs_strlcpy(netdev->custom_stats[0].name,
+ "rx_custom_packets_1", NETDEV_CUSTOM_STATS_NAME_SIZE);
+ ovs_strlcpy(netdev->custom_stats[1].name,
+ "rx_custom_packets_2", NETDEV_CUSTOM_STATS_NAME_SIZE);
+
dummy_packet_conn_init(&netdev->conn);
ovs_list_init(&netdev->rxes);
+ hmap_init(&netdev->offloaded_flows);
ovs_mutex_unlock(&netdev->mutex);
ovs_mutex_lock(&dummy_list_mutex);
netdev_dummy_destruct(struct netdev *netdev_)
{
struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
+ struct offloaded_flow *off_flow;
ovs_mutex_lock(&dummy_list_mutex);
ovs_list_remove(&netdev->list_node);
ovs_mutex_unlock(&dummy_list_mutex);
ovs_mutex_lock(&netdev->mutex);
+ if (netdev->rxq_pcap) {
+ ovs_pcap_close(netdev->rxq_pcap);
+ }
+ if (netdev->tx_pcap && netdev->tx_pcap != netdev->rxq_pcap) {
+ ovs_pcap_close(netdev->tx_pcap);
+ }
dummy_packet_conn_close(&netdev->conn);
netdev->conn.type = NONE;
+ HMAP_FOR_EACH_POP (off_flow, node, &netdev->offloaded_flows) {
+ free(off_flow);
+ }
+ hmap_destroy(&netdev->offloaded_flows);
+
ovs_mutex_unlock(&netdev->mutex);
ovs_mutex_destroy(&netdev->mutex);
}
return 0;
}
+#define DUMMY_MAX_QUEUES_PER_PORT 1024
+
static int
-netdev_dummy_set_config(struct netdev *netdev_, const struct smap *args)
+netdev_dummy_set_config(struct netdev *netdev_, const struct smap *args,
+ char **errp OVS_UNUSED)
{
struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
const char *pcap;
dummy_packet_conn_set_config(&netdev->conn, args);
if (netdev->rxq_pcap) {
- fclose(netdev->rxq_pcap);
+ ovs_pcap_close(netdev->rxq_pcap);
}
if (netdev->tx_pcap && netdev->tx_pcap != netdev->rxq_pcap) {
- fclose(netdev->tx_pcap);
+ ovs_pcap_close(netdev->tx_pcap);
}
netdev->rxq_pcap = netdev->tx_pcap = NULL;
pcap = smap_get(args, "pcap");
goto exit;
}
- new_n_rxq = MAX(smap_get_int(args, "n_rxq", netdev->requested_n_rxq), 1);
- new_n_txq = MAX(smap_get_int(args, "n_txq", netdev->requested_n_txq), 1);
+ new_n_rxq = MAX(smap_get_int(args, "n_rxq", NR_QUEUE), 1);
+ new_n_txq = MAX(smap_get_int(args, "n_txq", NR_QUEUE), 1);
+
+ if (new_n_rxq > DUMMY_MAX_QUEUES_PER_PORT ||
+ new_n_txq > DUMMY_MAX_QUEUES_PER_PORT) {
+ VLOG_WARN("The one or both of interface %s queues"
+ "(rxq: %d, txq: %d) exceed %d. Sets it %d.\n",
+ netdev_get_name(netdev_),
+ new_n_rxq,
+ new_n_txq,
+ DUMMY_MAX_QUEUES_PER_PORT,
+ DUMMY_MAX_QUEUES_PER_PORT);
+
+ new_n_rxq = MIN(DUMMY_MAX_QUEUES_PER_PORT, new_n_rxq);
+ new_n_txq = MIN(DUMMY_MAX_QUEUES_PER_PORT, new_n_txq);
+ }
+
new_numa_id = smap_get_int(args, "numa_id", 0);
if (new_n_rxq != netdev->requested_n_rxq
|| new_n_txq != netdev->requested_n_txq
}
static int
-netdev_dummy_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet_batch *batch)
+netdev_dummy_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet_batch *batch,
+ int *qfill)
{
struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
ovs_mutex_lock(&netdev->mutex);
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += dp_packet_size(packet);
+ netdev->custom_stats[0].value++;
+ netdev->custom_stats[1].value++;
ovs_mutex_unlock(&netdev->mutex);
- batch->packets[0] = packet;
- batch->count = 1;
+ dp_packet_batch_init_packet(batch, packet);
+
+ if (qfill) {
+ *qfill = -ENOTSUP;
+ }
+
return 0;
}
static int
netdev_dummy_send(struct netdev *netdev, int qid OVS_UNUSED,
- struct dp_packet_batch *batch, bool may_steal,
+ struct dp_packet_batch *batch,
bool concurrent_txq OVS_UNUSED)
{
struct netdev_dummy *dev = netdev_dummy_cast(netdev);
int error = 0;
- int i;
- for (i = 0; i < batch->count; i++) {
- const void *buffer = dp_packet_data(batch->packets[i]);
- size_t size = dp_packet_size(batch->packets[i]);
+ struct dp_packet *packet;
+ DP_PACKET_BATCH_FOR_EACH(i, packet, batch) {
+ const void *buffer = dp_packet_data(packet);
+ size_t size = dp_packet_size(packet);
- size -= dp_packet_get_cutlen(batch->packets[i]);
+ if (!dp_packet_is_eth(packet)) {
+ error = EPFNOSUPPORT;
+ break;
+ }
if (size < ETH_HEADER_LEN) {
error = EMSGSIZE;
/* Reply to ARP requests for 'dev''s assigned IP address. */
if (dev->address.s_addr) {
- struct dp_packet packet;
+ struct dp_packet dp;
struct flow flow;
- dp_packet_use_const(&packet, buffer, size);
- flow_extract(&packet, &flow);
+ dp_packet_use_const(&dp, buffer, size);
+ flow_extract(&dp, &flow);
if (flow.dl_type == htons(ETH_TYPE_ARP)
&& flow.nw_proto == ARP_OP_REQUEST
&& flow.nw_dst == dev->address.s_addr) {
struct dp_packet *reply = dp_packet_new(0);
compose_arp(reply, ARP_OP_REPLY, dev->hwaddr, flow.dl_src,
false, flow.nw_dst, flow.nw_src);
- netdev_dummy_queue_packet(dev, reply, 0);
+ netdev_dummy_queue_packet(dev, reply, NULL, 0);
}
}
if (dev->tx_pcap) {
- struct dp_packet packet;
+ struct dp_packet dp;
- dp_packet_use_const(&packet, buffer, size);
- ovs_pcap_write(dev->tx_pcap, &packet);
- fflush(dev->tx_pcap);
+ dp_packet_use_const(&dp, buffer, size);
+ ovs_pcap_write(dev->tx_pcap, &dp);
}
ovs_mutex_unlock(&dev->mutex);
}
- dp_packet_delete_batch(batch, may_steal);
+ dp_packet_delete_batch(batch, true);
return error;
}
return 0;
}
+#define DUMMY_MIN_MTU 68
+#define DUMMY_MAX_MTU 65535
+
static int
netdev_dummy_set_mtu(struct netdev *netdev, int mtu)
{
+ if (mtu < DUMMY_MIN_MTU || mtu > DUMMY_MAX_MTU) {
+ return EINVAL;
+ }
+
struct netdev_dummy *dev = netdev_dummy_cast(netdev);
ovs_mutex_lock(&dev->mutex);
return 0;
}
+/* Implements the netdev 'get_custom_stats' callback for dummy devices.
+ *
+ * Returns a snapshot of the C_STATS_SIZE per-device custom counters
+ * (named at construction time, incremented on the rx path).  The
+ * 'counters' array is heap-allocated; ownership passes to the caller,
+ * who must free it.  Always returns 0. */
+static int
+netdev_dummy_get_custom_stats(const struct netdev *netdev,
+                              struct netdev_custom_stats *custom_stats)
+{
+    int i;
+
+    struct netdev_dummy *dev = netdev_dummy_cast(netdev);
+
+    /* Use C_STATS_SIZE for the reported size as well as the allocation,
+     * so the two cannot drift apart.  (No cast on xcalloc(): unnecessary
+     * in C, and 'sizeof *ptr' ties the element size to the variable.) */
+    custom_stats->size = C_STATS_SIZE;
+    custom_stats->counters = xcalloc(C_STATS_SIZE,
+                                     sizeof *custom_stats->counters);
+
+    /* Copy under the device mutex; the rx path updates these values. */
+    ovs_mutex_lock(&dev->mutex);
+    for (i = 0; i < C_STATS_SIZE; i++) {
+        custom_stats->counters[i].value = dev->custom_stats[i].value;
+        ovs_strlcpy(custom_stats->counters[i].name,
+                    dev->custom_stats[i].name,
+                    NETDEV_CUSTOM_STATS_NAME_SIZE);
+    }
+    ovs_mutex_unlock(&dev->mutex);
+
+    return 0;
+}
+
static int
netdev_dummy_get_queue(const struct netdev *netdev OVS_UNUSED,
unsigned int queue_id, struct smap *details OVS_UNUSED)
return error;
}
+
+/* Flow offload API. */
+/* Hash function for the per-netdev 'offloaded_flows' hmap: simply the
+ * first 32-bit word of the 128-bit UFID. */
+static uint32_t
+netdev_dummy_flow_hash(const ovs_u128 *ufid)
+{
+ return ufid->u32[0];
+}
+
+/* Looks up the offloaded flow with exactly matching 'ufid' in
+ * 'offloaded_flows'.  Returns it, or NULL if there is none.
+ * Caller must hold the owning netdev's mutex. */
+static struct offloaded_flow *
+find_offloaded_flow(const struct hmap *offloaded_flows, const ovs_u128 *ufid)
+{
+ uint32_t hash = netdev_dummy_flow_hash(ufid);
+ struct offloaded_flow *data;
+
+ HMAP_FOR_EACH_WITH_HASH (data, node, hash, offloaded_flows) {
+ if (ovs_u128_equals(*ufid, data->ufid)) {
+ return data;
+ }
+ }
+
+ return NULL;
+}
+
+/* netdev_flow_api 'flow_put' implementation: creates (or, if 'ufid' is
+ * already present, modifies) an offloaded flow recording 'match' and the
+ * flow mark from 'info'.  'actions' are accepted but ignored by the dummy
+ * implementation.  Always succeeds; if 'stats' is nonnull it is zeroed,
+ * since no real counters exist. */
+static int
+netdev_dummy_flow_put(struct netdev *netdev, struct match *match,
+ struct nlattr *actions OVS_UNUSED,
+ size_t actions_len OVS_UNUSED,
+ const ovs_u128 *ufid, struct offload_info *info,
+ struct dpif_flow_stats *stats)
+{
+ struct netdev_dummy *dev = netdev_dummy_cast(netdev);
+ struct offloaded_flow *off_flow;
+ bool modify = true;
+
+ ovs_mutex_lock(&dev->mutex);
+
+ off_flow = find_offloaded_flow(&dev->offloaded_flows, ufid);
+ if (!off_flow) {
+ /* Create new offloaded flow. */
+ off_flow = xzalloc(sizeof *off_flow);
+ memcpy(&off_flow->ufid, ufid, sizeof *ufid);
+ hmap_insert(&dev->offloaded_flows, &off_flow->node,
+ netdev_dummy_flow_hash(ufid));
+ modify = false;
+ }
+
+ off_flow->mark = info->flow_mark;
+ memcpy(&off_flow->match, match, sizeof *match);
+
+ /* As we have per-netdev 'offloaded_flows', we don't need to match
+ * the 'in_port' for received packets. This will also allow offloading for
+ * packets passed to 'receive' command without specifying the 'in_port'. */
+ off_flow->match.wc.masks.in_port.odp_port = 0;
+
+ ovs_mutex_unlock(&dev->mutex);
+
+ /* Log the stored match outside the lock; 'match' is the caller's copy. */
+ if (VLOG_IS_DBG_ENABLED()) {
+ struct ds ds = DS_EMPTY_INITIALIZER;
+
+ ds_put_format(&ds, "%s: flow put[%s]: ", netdev_get_name(netdev),
+ modify ? "modify" : "create");
+ odp_format_ufid(ufid, &ds);
+ ds_put_cstr(&ds, " flow match: ");
+ match_format(match, NULL, &ds, OFP_DEFAULT_PRIORITY);
+ ds_put_format(&ds, ", mark: %"PRIu32, info->flow_mark);
+
+ VLOG_DBG("%s", ds_cstr(&ds));
+ ds_destroy(&ds);
+ }
+
+ if (stats) {
+ memset(stats, 0, sizeof *stats);
+ }
+ return 0;
+}
+
+/* netdev_flow_api 'flow_del' implementation: removes and frees the
+ * offloaded flow identified by 'ufid'.  Returns 0 on success or -1 if no
+ * such flow exists (logged at WARN level).  If 'stats' is nonnull it is
+ * zeroed, since the dummy offload keeps no counters. */
+static int
+netdev_dummy_flow_del(struct netdev *netdev, const ovs_u128 *ufid,
+ struct dpif_flow_stats *stats)
+{
+ struct netdev_dummy *dev = netdev_dummy_cast(netdev);
+ struct offloaded_flow *off_flow;
+ const char *error = NULL;
+ uint32_t mark;
+
+ ovs_mutex_lock(&dev->mutex);
+
+ off_flow = find_offloaded_flow(&dev->offloaded_flows, ufid);
+ if (!off_flow) {
+ error = "No such flow.";
+ goto exit;
+ }
+
+ /* Save 'mark' before freeing so it can still be logged below. */
+ mark = off_flow->mark;
+ hmap_remove(&dev->offloaded_flows, &off_flow->node);
+ free(off_flow);
+
+exit:
+ ovs_mutex_unlock(&dev->mutex);
+
+ /* Failures are always logged; successes only when debug is enabled. */
+ if (error || VLOG_IS_DBG_ENABLED()) {
+ struct ds ds = DS_EMPTY_INITIALIZER;
+
+ ds_put_format(&ds, "%s: ", netdev_get_name(netdev));
+ if (error) {
+ ds_put_cstr(&ds, "failed to ");
+ }
+ ds_put_cstr(&ds, "flow del: ");
+ odp_format_ufid(ufid, &ds);
+ if (error) {
+ ds_put_format(&ds, " error: %s", error);
+ } else {
+ ds_put_format(&ds, " mark: %"PRIu32, mark);
+ }
+ VLOG(error ? VLL_WARN : VLL_DBG, "%s", ds_cstr(&ds));
+ ds_destroy(&ds);
+ }
+
+ if (stats) {
+ memset(stats, 0, sizeof *stats);
+ }
+ return error ? -1 : 0;
+}
+
+/* Designated-initializer fields shared by all three dummy netdev classes
+ * below; each class adds its own '.type' (and PMD settings where needed). */
+#define NETDEV_DUMMY_CLASS_COMMON \
+ .run = netdev_dummy_run, \
+ .wait = netdev_dummy_wait, \
+ .alloc = netdev_dummy_alloc, \
+ .construct = netdev_dummy_construct, \
+ .destruct = netdev_dummy_destruct, \
+ .dealloc = netdev_dummy_dealloc, \
+ .get_config = netdev_dummy_get_config, \
+ .set_config = netdev_dummy_set_config, \
+ .get_numa_id = netdev_dummy_get_numa_id, \
+ .send = netdev_dummy_send, \
+ .set_etheraddr = netdev_dummy_set_etheraddr, \
+ .get_etheraddr = netdev_dummy_get_etheraddr, \
+ .get_mtu = netdev_dummy_get_mtu, \
+ .set_mtu = netdev_dummy_set_mtu, \
+ .get_ifindex = netdev_dummy_get_ifindex, \
+ .get_stats = netdev_dummy_get_stats, \
+ .get_custom_stats = netdev_dummy_get_custom_stats, \
+ .get_queue = netdev_dummy_get_queue, \
+ .get_queue_stats = netdev_dummy_get_queue_stats, \
+ .queue_dump_start = netdev_dummy_queue_dump_start, \
+ .queue_dump_next = netdev_dummy_queue_dump_next, \
+ .queue_dump_done = netdev_dummy_queue_dump_done, \
+ .dump_queue_stats = netdev_dummy_dump_queue_stats, \
+ .get_addr_list = netdev_dummy_get_addr_list, \
+ .update_flags = netdev_dummy_update_flags, \
+ .rxq_alloc = netdev_dummy_rxq_alloc, \
+ .rxq_construct = netdev_dummy_rxq_construct, \
+ .rxq_destruct = netdev_dummy_rxq_destruct, \
+ .rxq_dealloc = netdev_dummy_rxq_dealloc, \
+ .rxq_recv = netdev_dummy_rxq_recv, \
+ .rxq_wait = netdev_dummy_rxq_wait, \
+ .rxq_drain = netdev_dummy_rxq_drain
+
+/* The three dummy netdev classes.  Only "dummy-pmd" is marked 'is_pmd'
+ * and supports reconfiguration. */
+static const struct netdev_class dummy_class = {
+ NETDEV_DUMMY_CLASS_COMMON,
+ .type = "dummy"
+};
+
+static const struct netdev_class dummy_internal_class = {
+ NETDEV_DUMMY_CLASS_COMMON,
+ .type = "dummy-internal"
+};
+
+static const struct netdev_class dummy_pmd_class = {
+ NETDEV_DUMMY_CLASS_COMMON,
+ .type = "dummy-pmd",
+ .is_pmd = true,
+ .reconfigure = netdev_dummy_reconfigure
+};
+
+/* Flow API initialization hook: accepts only netdevs belonging to one of
+ * the dummy classes; others get EOPNOTSUPP so a different provider is
+ * tried. */
+static int
+netdev_dummy_offloads_init_flow_api(struct netdev *netdev)
+{
+ return is_dummy_class(netdev->netdev_class) ? 0 : EOPNOTSUPP;
+}
+
+/* The dummy flow offload provider, registered via
+ * netdev_register_flow_api_provider() in netdev_dummy_register(). */
+static const struct netdev_flow_api netdev_offload_dummy = {
+ .type = "dummy",
+ .flow_put = netdev_dummy_flow_put,
+ .flow_del = netdev_dummy_flow_del,
+ .init_flow_api = netdev_dummy_offloads_init_flow_api,
+};
+
\f
/* Helper functions. */
-#define NETDEV_DUMMY_CLASS(NAME, PMD, RECOFIGURE) \
-{ \
- NAME, \
- PMD, /* is_pmd */ \
- NULL, /* init */ \
- netdev_dummy_run, \
- netdev_dummy_wait, \
- \
- netdev_dummy_alloc, \
- netdev_dummy_construct, \
- netdev_dummy_destruct, \
- netdev_dummy_dealloc, \
- netdev_dummy_get_config, \
- netdev_dummy_set_config, \
- NULL, /* get_tunnel_config */ \
- NULL, /* build header */ \
- NULL, /* push header */ \
- NULL, /* pop header */ \
- netdev_dummy_get_numa_id, \
- NULL, /* set_tx_multiq */ \
- \
- netdev_dummy_send, /* send */ \
- NULL, /* send_wait */ \
- \
- netdev_dummy_set_etheraddr, \
- netdev_dummy_get_etheraddr, \
- netdev_dummy_get_mtu, \
- netdev_dummy_set_mtu, \
- netdev_dummy_get_ifindex, \
- NULL, /* get_carrier */ \
- NULL, /* get_carrier_resets */ \
- NULL, /* get_miimon */ \
- netdev_dummy_get_stats, \
- \
- NULL, /* get_features */ \
- NULL, /* set_advertisements */ \
- \
- NULL, /* set_policing */ \
- NULL, /* get_qos_types */ \
- NULL, /* get_qos_capabilities */ \
- NULL, /* get_qos */ \
- NULL, /* set_qos */ \
- netdev_dummy_get_queue, \
- NULL, /* set_queue */ \
- NULL, /* delete_queue */ \
- netdev_dummy_get_queue_stats, \
- netdev_dummy_queue_dump_start, \
- netdev_dummy_queue_dump_next, \
- netdev_dummy_queue_dump_done, \
- netdev_dummy_dump_queue_stats, \
- \
- NULL, /* set_in4 */ \
- netdev_dummy_get_addr_list, \
- NULL, /* add_router */ \
- NULL, /* get_next_hop */ \
- NULL, /* get_status */ \
- NULL, /* arp_lookup */ \
- \
- netdev_dummy_update_flags, \
- RECOFIGURE, \
- \
- netdev_dummy_rxq_alloc, \
- netdev_dummy_rxq_construct, \
- netdev_dummy_rxq_destruct, \
- netdev_dummy_rxq_dealloc, \
- netdev_dummy_rxq_recv, \
- netdev_dummy_rxq_wait, \
- netdev_dummy_rxq_drain, \
-}
-
-static const struct netdev_class dummy_class =
- NETDEV_DUMMY_CLASS("dummy", false, NULL);
-
-static const struct netdev_class dummy_internal_class =
- NETDEV_DUMMY_CLASS("dummy-internal", false, NULL);
-
-static const struct netdev_class dummy_pmd_class =
- NETDEV_DUMMY_CLASS("dummy-pmd", true,
- netdev_dummy_reconfigure);
-
static void
pkt_list_delete(struct ovs_list *l)
{
}
+/* Parses 's' as a hex-encoded Ethernet frame and returns the resulting
+ * packet.  NOTE(review): relies on eth_from_hex() leaving 'packet' NULL
+ * on parse failure, since its return value is ignored here — confirm
+ * against lib/packets.c. */
static struct dp_packet *
-eth_from_packet_or_flow(const char *s)
+eth_from_packet(const char *s)
{
+ struct dp_packet *packet;
+ eth_from_hex(s, &packet);
+ return packet;
+}
+
+static struct dp_packet *
+eth_from_flow_str(const char *s, size_t packet_size,
+ struct flow *flow, char **errorp)
+{
+ *errorp = NULL;
+
enum odp_key_fitness fitness;
struct dp_packet *packet;
struct ofpbuf odp_key;
- struct flow flow;
int error;
- if (!eth_from_hex(s, &packet)) {
- return packet;
- }
-
/* Convert string to datapath key.
*
* It would actually be nicer to parse an OpenFlow-like flow key here, but
* settle for parsing a datapath key for now.
*/
ofpbuf_init(&odp_key, 0);
- error = odp_flow_from_string(s, NULL, &odp_key, NULL);
+ error = odp_flow_from_string(s, NULL, &odp_key, NULL, errorp);
if (error) {
ofpbuf_uninit(&odp_key);
return NULL;
}
/* Convert odp_key to flow. */
- fitness = odp_flow_key_to_flow(odp_key.data, odp_key.size, &flow);
+ fitness = odp_flow_key_to_flow(odp_key.data, odp_key.size, flow, errorp);
if (fitness == ODP_FIT_ERROR) {
ofpbuf_uninit(&odp_key);
return NULL;
}
packet = dp_packet_new(0);
- flow_compose(packet, &flow);
+ if (packet_size) {
+ flow_compose(packet, flow, NULL, 0);
+ if (dp_packet_size(packet) < packet_size) {
+ packet_expand(packet, flow, packet_size);
+ } else if (dp_packet_size(packet) > packet_size){
+ dp_packet_delete(packet);
+ packet = NULL;
+ }
+ } else {
+ flow_compose(packet, flow, NULL, 64);
+ }
ofpbuf_uninit(&odp_key);
return packet;
static void
netdev_dummy_queue_packet(struct netdev_dummy *dummy, struct dp_packet *packet,
- int queue_id)
+ struct flow *flow, int queue_id)
OVS_REQUIRES(dummy->mutex)
{
struct netdev_rxq_dummy *rx, *prev;
+ struct offloaded_flow *data;
+ struct flow packet_flow;
if (dummy->rxq_pcap) {
ovs_pcap_write(dummy->rxq_pcap, packet);
- fflush(dummy->rxq_pcap);
}
+
+ if (!flow) {
+ flow = &packet_flow;
+ flow_extract(packet, flow);
+ }
+ HMAP_FOR_EACH (data, node, &dummy->offloaded_flows) {
+ if (flow_equal_except(flow, &data->match.flow, &data->match.wc)) {
+
+ dp_packet_set_flow_mark(packet, data->mark);
+
+ if (VLOG_IS_DBG_ENABLED()) {
+ struct ds ds = DS_EMPTY_INITIALIZER;
+
+ ds_put_format(&ds, "%s: packet: ",
+ netdev_get_name(&dummy->up));
+ /* 'flow' does not contain proper port number here.
+ * Let's just clear it as it wildcarded anyway. */
+ flow->in_port.ofp_port = 0;
+ flow_format(&ds, flow, NULL);
+
+ ds_put_cstr(&ds, " matches with flow: ");
+ odp_format_ufid(&data->ufid, &ds);
+ ds_put_cstr(&ds, " ");
+ match_format(&data->match, NULL, &ds, OFP_DEFAULT_PRIORITY);
+ ds_put_format(&ds, " with mark: %"PRIu32, data->mark);
+
+ VLOG_DBG("%s", ds_cstr(&ds));
+ ds_destroy(&ds);
+ }
+ break;
+ }
+ }
+
prev = NULL;
LIST_FOR_EACH (rx, node, &dummy->rxes) {
if (rx->up.queue_id == queue_id &&
for (i = k; i < argc; i++) {
struct dp_packet *packet;
+ struct flow flow;
+
+ /* Try to parse 'argv[i]' as packet in hex. */
+ packet = eth_from_packet(argv[i]);
- packet = eth_from_packet_or_flow(argv[i]);
if (!packet) {
- unixctl_command_reply_error(conn, "bad packet syntax");
- goto exit;
+ int packet_size = 0;
+ const char *flow_str = argv[i];
+
+ /* Parse optional --len argument immediately follows a 'flow'. */
+ if (argc >= i + 2 && !strcmp(argv[i + 1], "--len")) {
+ packet_size = strtol(argv[i + 2], NULL, 10);
+
+ if (packet_size < ETH_TOTAL_MIN) {
+ unixctl_command_reply_error(conn, "too small packet len");
+ goto exit;
+ }
+ i += 2;
+ }
+ /* Try parse 'argv[i]' as odp flow. */
+ char *error_s;
+ packet = eth_from_flow_str(flow_str, packet_size, &flow, &error_s);
+ if (!packet) {
+ unixctl_command_reply_error(conn, error_s);
+ free(error_s);
+ goto exit;
+ }
+ } else {
+ flow_extract(packet, &flow);
}
- netdev_dummy_queue_packet(dummy_dev, packet, rx_qid);
+ netdev_dummy_queue_packet(dummy_dev, packet, &flow, rx_qid);
}
unixctl_command_reply(conn, NULL);
unixctl_command_reply_error(conn, error);
free(error);
}
- netdev_close(netdev);
} else {
unixctl_command_reply_error(conn, "Unknown Dummy Interface");
}
netdev_dummy_register(enum dummy_level level)
{
unixctl_command_register("netdev-dummy/receive",
- "name [--qid queue_id] packet|flow...",
+ "name [--qid queue_id] packet|flow [--len packet_len]",
2, INT_MAX, netdev_dummy_receive, NULL);
unixctl_command_register("netdev-dummy/set-admin-state",
"[netdev] up|down", 1, 2,
netdev_register_provider(&dummy_internal_class);
netdev_register_provider(&dummy_pmd_class);
+ netdev_register_flow_api_provider(&netdev_offload_dummy);
+
netdev_vport_tunnel_register();
}