/*
- * Copyright (c) 2010, 2011, 2012, 2013 Nicira, Inc.
+ * Copyright (c) 2010, 2011, 2012, 2013, 2015, 2016, 2017 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include "dummy.h"
#include <errno.h>
+#include <unistd.h>
#include "dp-packet.h"
#include "dpif-netdev.h"
-#include "dynamic-string.h"
#include "flow.h"
-#include "list.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "odp-util.h"
-#include "ofp-print.h"
-#include "ofpbuf.h"
+#include "openvswitch/dynamic-string.h"
+#include "openvswitch/list.h"
+#include "openvswitch/ofp-print.h"
+#include "openvswitch/ofpbuf.h"
+#include "openvswitch/vlog.h"
+#include "ovs-atomic.h"
#include "packets.h"
#include "pcap-file.h"
-#include "poll-loop.h"
-#include "shash.h"
+#include "openvswitch/poll-loop.h"
+#include "openvswitch/shash.h"
#include "sset.h"
#include "stream.h"
#include "unaligned.h"
#include "timeval.h"
#include "unixctl.h"
#include "reconnect.h"
-#include "openvswitch/vlog.h"
VLOG_DEFINE_THIS_MODULE(netdev_dummy);
struct dummy_packet_stream {
struct stream *stream;
- struct dp_packet rxbuf;
struct ovs_list txq;
+ struct dp_packet rxbuf;
};
enum dummy_packet_conn_type {
struct dummy_packet_pconn {
struct pstream *pstream;
- struct dummy_packet_stream *streams;
+ struct dummy_packet_stream **streams;
size_t n_streams;
};
} u;
};
+struct pkt_list_node {
+ struct dp_packet *pkt;
+ struct ovs_list list_node;
+};
+
/* Protects 'dummy_list'. */
static struct ovs_mutex dummy_list_mutex = OVS_MUTEX_INITIALIZER;
/* Protects all members below. */
struct ovs_mutex mutex OVS_ACQ_AFTER(dummy_list_mutex);
- uint8_t hwaddr[ETH_ADDR_LEN] OVS_GUARDED;
+ struct eth_addr hwaddr OVS_GUARDED;
int mtu OVS_GUARDED;
struct netdev_stats stats OVS_GUARDED;
enum netdev_flags flags OVS_GUARDED;
int ifindex OVS_GUARDED;
+ int numa_id OVS_GUARDED;
struct dummy_packet_conn conn OVS_GUARDED;
FILE *tx_pcap, *rxq_pcap OVS_GUARDED;
struct in_addr address, netmask;
+ struct in6_addr ipv6, ipv6_mask;
struct ovs_list rxes OVS_GUARDED; /* List of child "netdev_rxq_dummy"s. */
+
+ /* The following properties are for dummy-pmd and they cannot be changed
+ * when a device is running, so we remember the request and update them
+ * next time netdev_dummy_reconfigure() is called. */
+ int requested_n_txq OVS_GUARDED;
+ int requested_n_rxq OVS_GUARDED;
+ int requested_numa_id OVS_GUARDED;
};
/* Max 'recv_queue_len' in struct netdev_dummy. */
struct netdev_rxq up;
struct ovs_list node; /* In netdev_dummy's "rxes" list. */
struct ovs_list recv_queue;
- int recv_queue_len; /* list_size(&recv_queue). */
+ int recv_queue_len; /* ovs_list_size(&recv_queue). */
struct seq *seq; /* Reports newly queued packets. */
};
static unixctl_cb_func netdev_dummy_set_admin_state;
static int netdev_dummy_construct(struct netdev *);
-static void netdev_dummy_queue_packet(struct netdev_dummy *, struct dp_packet *);
+static void netdev_dummy_queue_packet(struct netdev_dummy *,
+ struct dp_packet *, int);
static void dummy_packet_stream_close(struct dummy_packet_stream *);
+static void pkt_list_delete(struct ovs_list *);
+
static bool
is_dummy_class(const struct netdev_class *class)
{
int rxbuf_size = stream ? 2048 : 0;
s->stream = stream;
dp_packet_init(&s->rxbuf, rxbuf_size);
- list_init(&s->txq);
+ ovs_list_init(&s->txq);
}
static struct dummy_packet_stream *
dummy_packet_stream_wait(struct dummy_packet_stream *s)
{
stream_run_wait(s->stream);
- if (!list_is_empty(&s->txq)) {
+ if (!ovs_list_is_empty(&s->txq)) {
stream_send_wait(s->stream);
}
stream_recv_wait(s->stream);
static void
dummy_packet_stream_send(struct dummy_packet_stream *s, const void *buffer, size_t size)
{
- if (list_size(&s->txq) < NETDEV_DUMMY_MAX_QUEUE) {
+ if (ovs_list_size(&s->txq) < NETDEV_DUMMY_MAX_QUEUE) {
struct dp_packet *b;
+ struct pkt_list_node *node;
b = dp_packet_clone_data_with_headroom(buffer, size, 2);
put_unaligned_be16(dp_packet_push_uninit(b, 2), htons(size));
- list_push_back(&s->txq, &b->list_node);
+
+ node = xmalloc(sizeof *node);
+ node->pkt = b;
+ ovs_list_push_back(&s->txq, &node->list_node);
}
}
stream_run(s->stream);
- if (!list_is_empty(&s->txq)) {
+ if (!ovs_list_is_empty(&s->txq)) {
+ struct pkt_list_node *txbuf_node;
struct dp_packet *txbuf;
int retval;
- txbuf = dp_packet_from_list(list_front(&s->txq));
+ ASSIGN_CONTAINER(txbuf_node, ovs_list_front(&s->txq), list_node);
+ txbuf = txbuf_node->pkt;
retval = stream_send(s->stream, dp_packet_data(txbuf), dp_packet_size(txbuf));
if (retval > 0) {
dp_packet_pull(txbuf, retval);
if (!dp_packet_size(txbuf)) {
- list_remove(&txbuf->list_node);
+ ovs_list_remove(&txbuf_node->list_node);
+ free(txbuf_node);
dp_packet_delete(txbuf);
}
} else if (retval != -EAGAIN) {
if (retval == n && dp_packet_size(&s->rxbuf) > 2) {
dp_packet_pull(&s->rxbuf, 2);
netdev_dummy_queue_packet(dev,
- dp_packet_clone(&s->rxbuf));
+ dp_packet_clone(&s->rxbuf), 0);
dp_packet_clear(&s->rxbuf);
}
} else if (retval != -EAGAIN) {
{
stream_close(s->stream);
dp_packet_uninit(&s->rxbuf);
- dp_packet_list_delete(&s->txq);
+ pkt_list_delete(&s->txq);
}
static void
case PASSIVE:
pstream_close(pconn->pstream);
for (i = 0; i < pconn->n_streams; i++) {
- dummy_packet_stream_close(&pconn->streams[i]);
+ dummy_packet_stream_close(pconn->streams[i]);
+ free(pconn->streams[i]);
}
free(pconn->streams);
pconn->pstream = NULL;
switch (conn->type) {
case PASSIVE:
- if (!strcmp(pstream_get_name(conn->u.pconn.pstream), pstream)) {
+ if (pstream &&
+ !strcmp(pstream_get_name(conn->u.pconn.pstream), pstream)) {
return;
}
dummy_packet_conn_close(conn);
break;
case ACTIVE:
- if (!strcmp(stream_get_name(conn->u.rconn.rstream->stream), stream)) {
+ if (stream &&
+ !strcmp(stream_get_name(conn->u.rconn.rstream->stream), stream)) {
return;
}
dummy_packet_conn_close(conn);
if (stream) {
int error;
struct stream *active_stream;
- struct reconnect *reconnect;;
+ struct reconnect *reconnect;
reconnect = reconnect_create(time_msec());
reconnect_set_name(reconnect, stream);
pconn->streams = xrealloc(pconn->streams,
((pconn->n_streams + 1)
- * sizeof *s));
- s = &pconn->streams[pconn->n_streams++];
+ * sizeof s));
+ s = xmalloc(sizeof *s);
+ pconn->streams[pconn->n_streams++] = s;
dummy_packet_stream_init(s, new_stream);
} else if (error != EAGAIN) {
VLOG_WARN("%s: accept failed (%s)",
dev->conn.type = NONE;
}
- for (i = 0; i < pconn->n_streams; i++) {
- struct dummy_packet_stream *s = &pconn->streams[i];
+ for (i = 0; i < pconn->n_streams; ) {
+ struct dummy_packet_stream *s = pconn->streams[i];
error = dummy_packet_stream_run(dev, s);
if (error) {
stream_get_name(s->stream),
ovs_retval_to_string(error));
dummy_packet_stream_close(s);
+ free(s);
pconn->streams[i] = pconn->streams[--pconn->n_streams];
+ } else {
+ i++;
}
}
}
case PASSIVE:
pstream_wait(conn->u.pconn.pstream);
for (i = 0; i < conn->u.pconn.n_streams; i++) {
- struct dummy_packet_stream *s = &conn->u.pconn.streams[i];
+ struct dummy_packet_stream *s = conn->u.pconn.streams[i];
dummy_packet_stream_wait(s);
}
break;
switch (conn->type) {
case PASSIVE:
for (i = 0; i < conn->u.pconn.n_streams; i++) {
- struct dummy_packet_stream *s = &conn->u.pconn.streams[i];
+ struct dummy_packet_stream *s = conn->u.pconn.streams[i];
dummy_packet_stream_send(s, buffer, size);
pstream_wait(conn->u.pconn.pstream);
}
static void
-netdev_dummy_run(void)
+netdev_dummy_run(const struct netdev_class *netdev_class)
{
struct netdev_dummy *dev;
ovs_mutex_lock(&dummy_list_mutex);
LIST_FOR_EACH (dev, list_node, &dummy_list) {
+ if (netdev_get_class(&dev->up) != netdev_class) {
+ continue;
+ }
ovs_mutex_lock(&dev->mutex);
dummy_packet_conn_run(dev);
ovs_mutex_unlock(&dev->mutex);
}
static void
-netdev_dummy_wait(void)
+netdev_dummy_wait(const struct netdev_class *netdev_class)
{
struct netdev_dummy *dev;
ovs_mutex_lock(&dummy_list_mutex);
LIST_FOR_EACH (dev, list_node, &dummy_list) {
+ if (netdev_get_class(&dev->up) != netdev_class) {
+ continue;
+ }
ovs_mutex_lock(&dev->mutex);
dummy_packet_conn_wait(&dev->conn);
ovs_mutex_unlock(&dev->mutex);
ovs_mutex_init(&netdev->mutex);
ovs_mutex_lock(&netdev->mutex);
- netdev->hwaddr[0] = 0xaa;
- netdev->hwaddr[1] = 0x55;
- netdev->hwaddr[2] = n >> 24;
- netdev->hwaddr[3] = n >> 16;
- netdev->hwaddr[4] = n >> 8;
- netdev->hwaddr[5] = n;
+ netdev->hwaddr.ea[0] = 0xaa;
+ netdev->hwaddr.ea[1] = 0x55;
+ netdev->hwaddr.ea[2] = n >> 24;
+ netdev->hwaddr.ea[3] = n >> 16;
+ netdev->hwaddr.ea[4] = n >> 8;
+ netdev->hwaddr.ea[5] = n;
netdev->mtu = 1500;
netdev->flags = 0;
netdev->ifindex = -EOPNOTSUPP;
+ netdev->requested_n_rxq = netdev_->n_rxq;
+ netdev->requested_n_txq = netdev_->n_txq;
+ netdev->numa_id = 0;
dummy_packet_conn_init(&netdev->conn);
- list_init(&netdev->rxes);
+ ovs_list_init(&netdev->rxes);
ovs_mutex_unlock(&netdev->mutex);
ovs_mutex_lock(&dummy_list_mutex);
- list_push_back(&dummy_list, &netdev->list_node);
+ ovs_list_push_back(&dummy_list, &netdev->list_node);
ovs_mutex_unlock(&dummy_list_mutex);
return 0;
struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
ovs_mutex_lock(&dummy_list_mutex);
- list_remove(&netdev->list_node);
+ ovs_list_remove(&netdev->list_node);
ovs_mutex_unlock(&dummy_list_mutex);
ovs_mutex_lock(&netdev->mutex);
+ if (netdev->rxq_pcap) {
+ fclose(netdev->rxq_pcap);
+ }
+ if (netdev->tx_pcap && netdev->tx_pcap != netdev->rxq_pcap) {
+ fclose(netdev->tx_pcap);
+ }
dummy_packet_conn_close(&netdev->conn);
netdev->conn.type = NONE;
}
static int
-netdev_dummy_get_config(const struct netdev *netdev_, struct smap *args)
+netdev_dummy_get_config(const struct netdev *dev, struct smap *args)
{
- struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
+ struct netdev_dummy *netdev = netdev_dummy_cast(dev);
ovs_mutex_lock(&netdev->mutex);
dummy_packet_conn_get_config(&netdev->conn, args);
+ /* 'dummy-pmd' specific config. */
+ if (!netdev_is_pmd(dev)) {
+ goto exit;
+ }
+ smap_add_format(args, "requested_rx_queues", "%d", netdev->requested_n_rxq);
+ smap_add_format(args, "configured_rx_queues", "%d", dev->n_rxq);
+ smap_add_format(args, "requested_tx_queues", "%d", netdev->requested_n_txq);
+ smap_add_format(args, "configured_tx_queues", "%d", dev->n_txq);
+
+exit:
ovs_mutex_unlock(&netdev->mutex);
return 0;
}
+/* Reports the addresses assigned to 'netdev_': the IPv4 address (as an
+ * IPv4-mapped IPv6 address) and/or the IPv6 address, whichever are set.
+ * On success, stores xmalloc'd arrays in '*paddr'/'*pmask' (caller frees)
+ * and their common length in '*n_addr'; if 'paddr' is NULL the arrays are
+ * freed immediately.  Returns EADDRNOTAVAIL when no address is set. */
static int
-netdev_dummy_get_in4(const struct netdev *netdev_,
-                     struct in_addr *address, struct in_addr *netmask)
+netdev_dummy_get_addr_list(const struct netdev *netdev_, struct in6_addr **paddr,
+                           struct in6_addr **pmask, int *n_addr)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
+    int cnt = 0, i = 0, err = 0;
+    struct in6_addr *addr, *mask;
    ovs_mutex_lock(&netdev->mutex);
-    *address = netdev->address;
-    *netmask = netdev->netmask;
+    /* First pass: count configured addresses to size the arrays. */
+    if (netdev->address.s_addr != INADDR_ANY) {
+        cnt++;
+    }
+
+    if (ipv6_addr_is_set(&netdev->ipv6)) {
+        cnt++;
+    }
+    if (!cnt) {
+        err = EADDRNOTAVAIL;
+        goto out;
+    }
+    addr = xmalloc(sizeof *addr * cnt);
+    mask = xmalloc(sizeof *mask * cnt);
+    /* Second pass: fill in the entries in the same order as counted. */
+    if (netdev->address.s_addr != INADDR_ANY) {
+        in6_addr_set_mapped_ipv4(&addr[i], netdev->address.s_addr);
+        in6_addr_set_mapped_ipv4(&mask[i], netdev->netmask.s_addr);
+        i++;
+    }
+
+    if (ipv6_addr_is_set(&netdev->ipv6)) {
+        memcpy(&addr[i], &netdev->ipv6, sizeof *addr);
+        memcpy(&mask[i], &netdev->ipv6_mask, sizeof *mask);
+        i++;
+    }
+    if (paddr) {
+        *paddr = addr;
+        *pmask = mask;
+        *n_addr = cnt;
+    } else {
+        free(addr);
+        free(mask);
+    }
+out:
    ovs_mutex_unlock(&netdev->mutex);
-    return 0;
+
+    return err;
}
static int
ovs_mutex_lock(&netdev->mutex);
netdev->address = address;
netdev->netmask = netmask;
+ netdev_change_seq_changed(netdev_);
+ ovs_mutex_unlock(&netdev->mutex);
+
+ return 0;
+}
+
+/* Assigns IPv6 address 'in6' with netmask 'mask' to 'netdev_' and bumps the
+ * device's change sequence so watchers notice.  Always returns 0. */
+static int
+netdev_dummy_set_in6(struct netdev *netdev_, struct in6_addr *in6,
+                     struct in6_addr *mask)
+{
+    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
+
+    ovs_mutex_lock(&netdev->mutex);
+    netdev->ipv6 = *in6;
+    netdev->ipv6_mask = *mask;
+    netdev_change_seq_changed(netdev_);
    ovs_mutex_unlock(&netdev->mutex);
    return 0;
}
+#define DUMMY_MAX_QUEUES_PER_PORT 1024
+
static int
-netdev_dummy_set_config(struct netdev *netdev_, const struct smap *args)
+netdev_dummy_set_config(struct netdev *netdev_, const struct smap *args,
+ char **errp OVS_UNUSED)
{
struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
const char *pcap;
+ int new_n_rxq, new_n_txq, new_numa_id;
ovs_mutex_lock(&netdev->mutex);
netdev->ifindex = smap_get_int(args, "ifindex", -EOPNOTSUPP);
}
}
+ netdev_change_seq_changed(netdev_);
+
+ /* 'dummy-pmd' specific config. */
+ if (!netdev_->netdev_class->is_pmd) {
+ goto exit;
+ }
+
+ new_n_rxq = MAX(smap_get_int(args, "n_rxq", NR_QUEUE), 1);
+ new_n_txq = MAX(smap_get_int(args, "n_txq", NR_QUEUE), 1);
+
+ if (new_n_rxq > DUMMY_MAX_QUEUES_PER_PORT ||
+ new_n_txq > DUMMY_MAX_QUEUES_PER_PORT) {
+ VLOG_WARN("The one or both of interface %s queues"
+ "(rxq: %d, txq: %d) exceed %d. Sets it %d.\n",
+ netdev_get_name(netdev_),
+ new_n_rxq,
+ new_n_txq,
+ DUMMY_MAX_QUEUES_PER_PORT,
+ DUMMY_MAX_QUEUES_PER_PORT);
+
+ new_n_rxq = MIN(DUMMY_MAX_QUEUES_PER_PORT, new_n_rxq);
+ new_n_txq = MIN(DUMMY_MAX_QUEUES_PER_PORT, new_n_txq);
+ }
+
+ new_numa_id = smap_get_int(args, "numa_id", 0);
+ if (new_n_rxq != netdev->requested_n_rxq
+ || new_n_txq != netdev->requested_n_txq
+ || new_numa_id != netdev->requested_numa_id) {
+ netdev->requested_n_rxq = new_n_rxq;
+ netdev->requested_n_txq = new_n_txq;
+ netdev->requested_numa_id = new_numa_id;
+ netdev_request_reconfigure(netdev_);
+ }
+
+exit:
ovs_mutex_unlock(&netdev->mutex);
+ return 0;
+}
+
+/* Returns the NUMA node id of 'netdev_', read under the device mutex.
+ * It is 0 by default and, for dummy-pmd devices, is updated from the
+ * "numa_id" option by netdev_dummy_reconfigure(). */
+static int
+netdev_dummy_get_numa_id(const struct netdev *netdev_)
+{
+    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
+
+    ovs_mutex_lock(&netdev->mutex);
+    int numa_id = netdev->numa_id;
+    ovs_mutex_unlock(&netdev->mutex);
+
+    return numa_id;
+}
+/* Sets the number of tx queues and rx queues for the dummy PMD interface. */
+/* Applies the values requested via netdev_dummy_set_config(); those fields
+ * cannot change while the device is running, so they are deferred until
+ * this callback runs.  Always returns 0. */
+static int
+netdev_dummy_reconfigure(struct netdev *netdev_)
+{
+    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
+
+    ovs_mutex_lock(&netdev->mutex);
+
+    netdev_->n_txq = netdev->requested_n_txq;
+    netdev_->n_rxq = netdev->requested_n_rxq;
+    netdev->numa_id = netdev->requested_numa_id;
+
+    ovs_mutex_unlock(&netdev->mutex);
+    return 0;
+}
struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
ovs_mutex_lock(&netdev->mutex);
- list_push_back(&netdev->rxes, &rx->node);
- list_init(&rx->recv_queue);
+ ovs_list_push_back(&netdev->rxes, &rx->node);
+ ovs_list_init(&rx->recv_queue);
rx->recv_queue_len = 0;
rx->seq = seq_create();
ovs_mutex_unlock(&netdev->mutex);
struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
ovs_mutex_lock(&netdev->mutex);
- list_remove(&rx->node);
- dp_packet_list_delete(&rx->recv_queue);
+ ovs_list_remove(&rx->node);
+ pkt_list_delete(&rx->recv_queue);
ovs_mutex_unlock(&netdev->mutex);
seq_destroy(rx->seq);
}
}
static int
-netdev_dummy_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **arr,
- int *c)
+netdev_dummy_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet_batch *batch)
{
struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
struct dp_packet *packet;
ovs_mutex_lock(&netdev->mutex);
- if (!list_is_empty(&rx->recv_queue)) {
- packet = dp_packet_from_list(list_pop_front(&rx->recv_queue));
+ if (!ovs_list_is_empty(&rx->recv_queue)) {
+ struct pkt_list_node *pkt_node;
+
+ ASSIGN_CONTAINER(pkt_node, ovs_list_pop_front(&rx->recv_queue), list_node);
+ packet = pkt_node->pkt;
+ free(pkt_node);
rx->recv_queue_len--;
} else {
packet = NULL;
ovs_mutex_unlock(&netdev->mutex);
if (!packet) {
+ if (netdev_is_pmd(&netdev->up)) {
+ /* If 'netdev' is a PMD device, this is called as part of the PMD
+ * thread busy loop. We yield here (without quiescing) for two
+ * reasons:
+ *
+ * - To reduce the CPU utilization during the testsuite
+ * - To give valgrind a chance to switch thread. According
+ * to the valgrind documentation, there's a big lock that
+ * prevents multiple thread from being executed at the same
+ * time. On my system, without this sleep, the pmd threads
+ * testcases fail under valgrind, because ovs-vswitchd becomes
+ * unresponsive. */
+ sched_yield();
+ }
return EAGAIN;
}
ovs_mutex_lock(&netdev->mutex);
netdev->stats.rx_bytes += dp_packet_size(packet);
ovs_mutex_unlock(&netdev->mutex);
- dp_packet_pad(packet);
- dp_packet_set_dp_hash(packet, 0);
-
- arr[0] = packet;
- *c = 1;
+ batch->packets[0] = packet;
+ batch->count = 1;
return 0;
}
uint64_t seq = seq_read(rx->seq);
ovs_mutex_lock(&netdev->mutex);
- if (!list_is_empty(&rx->recv_queue)) {
+ if (!ovs_list_is_empty(&rx->recv_queue)) {
poll_immediate_wake();
} else {
seq_wait(rx->seq, seq);
struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
ovs_mutex_lock(&netdev->mutex);
- dp_packet_list_delete(&rx->recv_queue);
+ pkt_list_delete(&rx->recv_queue);
rx->recv_queue_len = 0;
ovs_mutex_unlock(&netdev->mutex);
static int
netdev_dummy_send(struct netdev *netdev, int qid OVS_UNUSED,
-                  struct dp_packet **pkts, int cnt, bool may_steal)
+                  struct dp_packet_batch *batch,
+                  bool concurrent_txq OVS_UNUSED)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);
    int error = 0;
-    int i;
-    for (i = 0; i < cnt; i++) {
-        const void *buffer = dp_packet_data(pkts[i]);
-        size_t size = dp_packet_size(pkts[i]);
+    struct dp_packet *packet;
+    DP_PACKET_BATCH_FOR_EACH(packet, batch) {
+        const void *buffer = dp_packet_data(packet);
+        size_t size = dp_packet_size(packet);
+
+        /* The dummy device handles plain Ethernet frames only.  Note: use
+         * the batch iteration variable here; the old index 'i' was removed
+         * together with the pkts/cnt interface. */
+        if (packet->packet_type != htonl(PT_ETH)) {
+            error = EPFNOSUPPORT;
+            break;
+        }
        if (size < ETH_HEADER_LEN) {
            error = EMSGSIZE;
        dummy_packet_conn_send(&dev->conn, buffer, size);
+        /* Reply to ARP requests for 'dev''s assigned IP address. */
+        if (dev->address.s_addr) {
+            struct dp_packet dp;
+            struct flow flow;
+
+            dp_packet_use_const(&dp, buffer, size);
+            flow_extract(&dp, &flow);
+            if (flow.dl_type == htons(ETH_TYPE_ARP)
+                && flow.nw_proto == ARP_OP_REQUEST
+                && flow.nw_dst == dev->address.s_addr) {
+                struct dp_packet *reply = dp_packet_new(0);
+                compose_arp(reply, ARP_OP_REPLY, dev->hwaddr, flow.dl_src,
+                            false, flow.nw_dst, flow.nw_src);
+                netdev_dummy_queue_packet(dev, reply, 0);
+            }
+        }
+
        if (dev->tx_pcap) {
-            struct dp_packet packet;
+            struct dp_packet dp;
-            dp_packet_use_const(&packet, buffer, size);
-            ovs_pcap_write(dev->tx_pcap, &packet);
+            dp_packet_use_const(&dp, buffer, size);
+            ovs_pcap_write(dev->tx_pcap, &dp);
            fflush(dev->tx_pcap);
        }
        ovs_mutex_unlock(&dev->mutex);
    }
-    if (may_steal) {
-        for (i = 0; i < cnt; i++) {
-            dp_packet_delete(pkts[i]);
-        }
-    }
+    dp_packet_delete_batch(batch, true);
    return error;
}
static int
-netdev_dummy_set_etheraddr(struct netdev *netdev,
- const uint8_t mac[ETH_ADDR_LEN])
+netdev_dummy_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
{
struct netdev_dummy *dev = netdev_dummy_cast(netdev);
ovs_mutex_lock(&dev->mutex);
if (!eth_addr_equals(dev->hwaddr, mac)) {
- memcpy(dev->hwaddr, mac, ETH_ADDR_LEN);
+ dev->hwaddr = mac;
netdev_change_seq_changed(netdev);
}
ovs_mutex_unlock(&dev->mutex);
}
static int
-netdev_dummy_get_etheraddr(const struct netdev *netdev,
- uint8_t mac[ETH_ADDR_LEN])
+netdev_dummy_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
{
struct netdev_dummy *dev = netdev_dummy_cast(netdev);
ovs_mutex_lock(&dev->mutex);
- memcpy(mac, dev->hwaddr, ETH_ADDR_LEN);
+ *mac = dev->hwaddr;
ovs_mutex_unlock(&dev->mutex);
return 0;
return 0;
}
+#define DUMMY_MIN_MTU 68
+#define DUMMY_MAX_MTU 65535
+
static int
-netdev_dummy_set_mtu(const struct netdev *netdev, int mtu)
+netdev_dummy_set_mtu(struct netdev *netdev, int mtu)
{
+ if (mtu < DUMMY_MIN_MTU || mtu > DUMMY_MAX_MTU) {
+ return EINVAL;
+ }
+
struct netdev_dummy *dev = netdev_dummy_cast(netdev);
ovs_mutex_lock(&dev->mutex);
- dev->mtu = mtu;
+ if (dev->mtu != mtu) {
+ dev->mtu = mtu;
+ netdev_change_seq_changed(netdev);
+ }
ovs_mutex_unlock(&dev->mutex);
return 0;
struct netdev_dummy *dev = netdev_dummy_cast(netdev);
ovs_mutex_lock(&dev->mutex);
- *stats = dev->stats;
+ /* Passing only collected counters */
+ stats->tx_packets = dev->stats.tx_packets;
+ stats->tx_bytes = dev->stats.tx_bytes;
+ stats->rx_packets = dev->stats.rx_packets;
+ stats->rx_bytes = dev->stats.rx_bytes;
ovs_mutex_unlock(&dev->mutex);
return 0;
}
+/* QoS/queue stubs: the dummy device pretends to have exactly one queue,
+ * id 0, with no configurable details. */
+static int
+netdev_dummy_get_queue(const struct netdev *netdev OVS_UNUSED,
+                       unsigned int queue_id, struct smap *details OVS_UNUSED)
+{
+    if (queue_id == 0) {
+        return 0;
+    } else {
+        return EINVAL;
+    }
+}
+
+/* Fills 'stats' with "unknown" sentinel values (all-ones counters,
+ * LLONG_MIN creation time). */
+static void
+netdev_dummy_init_queue_stats(struct netdev_queue_stats *stats)
+{
+    *stats = (struct netdev_queue_stats) {
+        .tx_bytes = UINT64_MAX,
+        .tx_packets = UINT64_MAX,
+        .tx_errors = UINT64_MAX,
+        .created = LLONG_MIN,
+    };
+}
+
+/* Reports sentinel stats for queue 0; EINVAL for any other queue id. */
+static int
+netdev_dummy_get_queue_stats(const struct netdev *netdev OVS_UNUSED,
+                             unsigned int queue_id,
+                             struct netdev_queue_stats *stats)
+{
+    if (queue_id == 0) {
+        netdev_dummy_init_queue_stats(stats);
+        return 0;
+    } else {
+        return EINVAL;
+    }
+}
+
+/* Cursor for dumping the single dummy queue. */
+struct netdev_dummy_queue_state {
+    unsigned int next_queue;
+};
+
+/* Starts a queue dump; state is a heap-allocated cursor at queue 0. */
+static int
+netdev_dummy_queue_dump_start(const struct netdev *netdev OVS_UNUSED,
+                              void **statep)
+{
+    struct netdev_dummy_queue_state *state = xmalloc(sizeof *state);
+    state->next_queue = 0;
+    *statep = state;
+    return 0;
+}
+
+/* Yields queue 0 exactly once, then signals end of dump with EOF. */
+static int
+netdev_dummy_queue_dump_next(const struct netdev *netdev OVS_UNUSED,
+                             void *state_,
+                             unsigned int *queue_id,
+                             struct smap *details OVS_UNUSED)
+{
+    struct netdev_dummy_queue_state *state = state_;
+    if (state->next_queue == 0) {
+        *queue_id = 0;
+        state->next_queue++;
+        return 0;
+    } else {
+        return EOF;
+    }
+}
+
+/* Releases the dump cursor allocated by queue_dump_start(). */
+static int
+netdev_dummy_queue_dump_done(const struct netdev *netdev OVS_UNUSED,
+                             void *state)
+{
+    free(state);
+    return 0;
+}
+
+/* Invokes 'cb' once, for queue 0, with sentinel stats. */
+static int
+netdev_dummy_dump_queue_stats(const struct netdev *netdev OVS_UNUSED,
+                              void (*cb)(unsigned int queue_id,
+                                         struct netdev_queue_stats *,
+                                         void *aux),
+                              void *aux)
+{
+    struct netdev_queue_stats stats;
+    netdev_dummy_init_queue_stats(&stats);
+    cb(0, &stats, aux);
+    return 0;
+}
+
+
static int
netdev_dummy_get_ifindex(const struct netdev *netdev)
{
\f
/* Helper functions. */
-static const struct netdev_class dummy_class = {
- "dummy",
- NULL, /* init */
- netdev_dummy_run,
- netdev_dummy_wait,
-
- netdev_dummy_alloc,
- netdev_dummy_construct,
- netdev_dummy_destruct,
- netdev_dummy_dealloc,
- netdev_dummy_get_config,
- netdev_dummy_set_config,
- NULL, /* get_tunnel_config */
- NULL, /* build header */
- NULL, /* push header */
- NULL, /* pop header */
- NULL, /* get_numa_id */
- NULL, /* set_multiq */
-
- netdev_dummy_send, /* send */
- NULL, /* send_wait */
-
- netdev_dummy_set_etheraddr,
- netdev_dummy_get_etheraddr,
- netdev_dummy_get_mtu,
- netdev_dummy_set_mtu,
- netdev_dummy_get_ifindex,
- NULL, /* get_carrier */
- NULL, /* get_carrier_resets */
- NULL, /* get_miimon */
- netdev_dummy_get_stats,
-
- NULL, /* get_features */
- NULL, /* set_advertisements */
-
- NULL, /* set_policing */
- NULL, /* get_qos_types */
- NULL, /* get_qos_capabilities */
- NULL, /* get_qos */
- NULL, /* set_qos */
- NULL, /* get_queue */
- NULL, /* set_queue */
- NULL, /* delete_queue */
- NULL, /* get_queue_stats */
- NULL, /* queue_dump_start */
- NULL, /* queue_dump_next */
- NULL, /* queue_dump_done */
- NULL, /* dump_queue_stats */
-
- netdev_dummy_get_in4, /* get_in4 */
- NULL, /* set_in4 */
- NULL, /* get_in6 */
- NULL, /* add_router */
- NULL, /* get_next_hop */
- NULL, /* get_status */
- NULL, /* arp_lookup */
-
- netdev_dummy_update_flags,
-
- netdev_dummy_rxq_alloc,
- netdev_dummy_rxq_construct,
- netdev_dummy_rxq_destruct,
- netdev_dummy_rxq_dealloc,
- netdev_dummy_rxq_recv,
- netdev_dummy_rxq_wait,
- netdev_dummy_rxq_drain,
-};
+/* Shared initializer for the dummy netdev class variants ("dummy",
+ * "dummy-internal", "dummy-pmd").  NAME is the type string, PMD sets
+ * 'is_pmd', and RECONFIGURE is the reconfigure callback (or NULL).
+ * (Parameter renamed from the misspelled "RECOFIGURE"; macro arguments are
+ * positional so callers are unaffected.) */
+#define NETDEV_DUMMY_CLASS(NAME, PMD, RECONFIGURE) \
+{ \
+    NAME, \
+    PMD, /* is_pmd */ \
+    NULL, /* init */ \
+    netdev_dummy_run, \
+    netdev_dummy_wait, \
+ \
+    netdev_dummy_alloc, \
+    netdev_dummy_construct, \
+    netdev_dummy_destruct, \
+    netdev_dummy_dealloc, \
+    netdev_dummy_get_config, \
+    netdev_dummy_set_config, \
+    NULL, /* get_tunnel_config */ \
+    NULL, /* build header */ \
+    NULL, /* push header */ \
+    NULL, /* pop header */ \
+    netdev_dummy_get_numa_id, \
+    NULL, /* set_tx_multiq */ \
+ \
+    netdev_dummy_send, /* send */ \
+    NULL, /* send_wait */ \
+ \
+    netdev_dummy_set_etheraddr, \
+    netdev_dummy_get_etheraddr, \
+    netdev_dummy_get_mtu, \
+    netdev_dummy_set_mtu, \
+    netdev_dummy_get_ifindex, \
+    NULL, /* get_carrier */ \
+    NULL, /* get_carrier_resets */ \
+    NULL, /* get_miimon */ \
+    netdev_dummy_get_stats, \
+ \
+    NULL, /* get_features */ \
+    NULL, /* set_advertisements */ \
+    NULL, /* get_pt_mode */ \
+ \
+    NULL, /* set_policing */ \
+    NULL, /* get_qos_types */ \
+    NULL, /* get_qos_capabilities */ \
+    NULL, /* get_qos */ \
+    NULL, /* set_qos */ \
+    netdev_dummy_get_queue, \
+    NULL, /* set_queue */ \
+    NULL, /* delete_queue */ \
+    netdev_dummy_get_queue_stats, \
+    netdev_dummy_queue_dump_start, \
+    netdev_dummy_queue_dump_next, \
+    netdev_dummy_queue_dump_done, \
+    netdev_dummy_dump_queue_stats, \
+ \
+    NULL, /* set_in4 */ \
+    netdev_dummy_get_addr_list, \
+    NULL, /* add_router */ \
+    NULL, /* get_next_hop */ \
+    NULL, /* get_status */ \
+    NULL, /* arp_lookup */ \
+ \
+    netdev_dummy_update_flags, \
+    RECONFIGURE, \
+ \
+    netdev_dummy_rxq_alloc, \
+    netdev_dummy_rxq_construct, \
+    netdev_dummy_rxq_destruct, \
+    netdev_dummy_rxq_dealloc, \
+    netdev_dummy_rxq_recv, \
+    netdev_dummy_rxq_wait, \
+    netdev_dummy_rxq_drain, \
+ \
+    NO_OFFLOAD_API \
+}
+
+static const struct netdev_class dummy_class =
+ NETDEV_DUMMY_CLASS("dummy", false, NULL);
+
+static const struct netdev_class dummy_internal_class =
+ NETDEV_DUMMY_CLASS("dummy-internal", false, NULL);
+
+static const struct netdev_class dummy_pmd_class =
+ NETDEV_DUMMY_CLASS("dummy-pmd", true,
+ netdev_dummy_reconfigure);
+
+/* Empties packet list 'l', freeing each queued dp_packet together with its
+ * pkt_list_node wrapper. */
+static void
+pkt_list_delete(struct ovs_list *l)
+{
+    struct pkt_list_node *pkt;
+
+    LIST_FOR_EACH_POP(pkt, list_node, l) {
+        dp_packet_delete(pkt->pkt);
+        free(pkt);
+    }
+}
+
+
+/* Tries to parse 's' as an Ethernet frame written in hex.  Returns the new
+ * packet on success, otherwise NULL.
+ * NOTE(review): assumes eth_from_hex() clears '*packet' on parse failure —
+ * confirm against its definition in packets.c. */
+static struct dp_packet *
+eth_from_packet(const char *s)
+{
+    struct dp_packet *packet;
+    eth_from_hex(s, &packet);
+    return packet;
+}
static struct dp_packet *
-eth_from_packet_or_flow(const char *s)
+eth_from_flow(const char *s, size_t packet_size)
{
enum odp_key_fitness fitness;
struct dp_packet *packet;
struct flow flow;
int error;
- if (!eth_from_hex(s, &packet)) {
- return packet;
- }
-
/* Convert string to datapath key.
*
* It would actually be nicer to parse an OpenFlow-like flow key here, but
}
/* Convert odp_key to flow. */
- fitness = odp_flow_key_to_flow(ofpbuf_data(&odp_key),
- ofpbuf_size(&odp_key), &flow);
+ fitness = odp_flow_key_to_flow(odp_key.data, odp_key.size, &flow);
if (fitness == ODP_FIT_ERROR) {
ofpbuf_uninit(&odp_key);
return NULL;
}
packet = dp_packet_new(0);
- flow_compose(packet, &flow);
+ if (!flow_compose(packet, &flow, packet_size)) {
+ dp_packet_delete(packet);
+ packet = NULL;
+ };
ofpbuf_uninit(&odp_key);
return packet;
static void
netdev_dummy_queue_packet__(struct netdev_rxq_dummy *rx, struct dp_packet *packet)
{
- list_push_back(&rx->recv_queue, &packet->list_node);
+ struct pkt_list_node *pkt_node = xmalloc(sizeof *pkt_node);
+
+ pkt_node->pkt = packet;
+ ovs_list_push_back(&rx->recv_queue, &pkt_node->list_node);
rx->recv_queue_len++;
seq_change(rx->seq);
}
static void
-netdev_dummy_queue_packet(struct netdev_dummy *dummy, struct dp_packet *packet)
+netdev_dummy_queue_packet(struct netdev_dummy *dummy, struct dp_packet *packet,
+ int queue_id)
OVS_REQUIRES(dummy->mutex)
{
struct netdev_rxq_dummy *rx, *prev;
}
prev = NULL;
LIST_FOR_EACH (rx, node, &dummy->rxes) {
- if (rx->recv_queue_len < NETDEV_DUMMY_MAX_QUEUE) {
+ if (rx->up.queue_id == queue_id &&
+ rx->recv_queue_len < NETDEV_DUMMY_MAX_QUEUE) {
if (prev) {
netdev_dummy_queue_packet__(prev, dp_packet_clone(packet));
}
{
struct netdev_dummy *dummy_dev;
struct netdev *netdev;
- int i;
+ int i, k = 1, rx_qid = 0;
- netdev = netdev_from_name(argv[1]);
+ netdev = netdev_from_name(argv[k++]);
if (!netdev || !is_dummy_class(netdev->netdev_class)) {
unixctl_command_reply_error(conn, "no such dummy netdev");
- goto exit;
+ goto exit_netdev;
}
dummy_dev = netdev_dummy_cast(netdev);
- for (i = 2; i < argc; i++) {
+ ovs_mutex_lock(&dummy_dev->mutex);
+
+ if (argc > k + 1 && !strcmp(argv[k], "--qid")) {
+ rx_qid = strtol(argv[k + 1], NULL, 10);
+ if (rx_qid < 0 || rx_qid >= netdev->n_rxq) {
+ unixctl_command_reply_error(conn, "bad rx queue id.");
+ goto exit;
+ }
+ k += 2;
+ }
+
+ for (i = k; i < argc; i++) {
struct dp_packet *packet;
- packet = eth_from_packet_or_flow(argv[i]);
+ /* Try to parse 'argv[i]' as packet in hex. */
+ packet = eth_from_packet(argv[i]);
+
if (!packet) {
- unixctl_command_reply_error(conn, "bad packet syntax");
- goto exit;
+ int packet_size = 0;
+ const char *flow_str = argv[i];
+
+ /* Parse optional --len argument immediately follows a 'flow'. */
+ if (argc >= i + 2 && !strcmp(argv[i + 1], "--len")) {
+ packet_size = strtol(argv[i + 2], NULL, 10);
+
+ if (packet_size < ETH_TOTAL_MIN) {
+ unixctl_command_reply_error(conn, "too small packet len");
+ goto exit;
+ }
+ i += 2;
+ }
+ /* Try parse 'argv[i]' as odp flow. */
+ packet = eth_from_flow(flow_str, packet_size);
+
+ if (!packet) {
+ unixctl_command_reply_error(conn, "bad packet or flow syntax");
+ goto exit;
+ }
}
- ovs_mutex_lock(&dummy_dev->mutex);
- netdev_dummy_queue_packet(dummy_dev, packet);
- ovs_mutex_unlock(&dummy_dev->mutex);
+ netdev_dummy_queue_packet(dummy_dev, packet, rx_qid);
}
unixctl_command_reply(conn, NULL);
exit:
+ ovs_mutex_unlock(&dummy_dev->mutex);
+exit_netdev:
netdev_close(netdev);
}
struct netdev *netdev = netdev_from_name(argv[1]);
if (netdev && is_dummy_class(netdev->netdev_class)) {
- struct in_addr ip;
- uint16_t plen;
-
- if (ovs_scan(argv[2], IP_SCAN_FMT"/%"SCNi16,
- IP_SCAN_ARGS(&ip.s_addr), &plen)) {
- struct in_addr mask;
+ struct in_addr ip, mask;
+ char *error;
- mask.s_addr = be32_prefix_mask(plen);
+ error = ip_parse_masked(argv[2], &ip.s_addr, &mask.s_addr);
+ if (!error) {
netdev_dummy_set_in4(netdev, ip, mask);
unixctl_command_reply(conn, "OK");
} else {
- unixctl_command_reply(conn, "Invalid parameters");
+ unixctl_command_reply_error(conn, error);
+ free(error);
}
+ } else {
+ unixctl_command_reply_error(conn, "Unknown Dummy Interface");
+ }
+
+ netdev_close(netdev);
+}
- netdev_close(netdev);
+/* unixctl handler for "netdev-dummy/ip6addr": parses argv[2] as
+ * "addr/prefix-len" CIDR notation and assigns the resulting IPv6
+ * address/mask to the dummy netdev named by argv[1]. */
+static void
+netdev_dummy_ip6addr(struct unixctl_conn *conn, int argc OVS_UNUSED,
+                     const char *argv[], void *aux OVS_UNUSED)
+{
+    struct netdev *netdev = netdev_from_name(argv[1]);
+
+    if (netdev && is_dummy_class(netdev->netdev_class)) {
+        struct in6_addr ip6;
+        char *error;
+        uint32_t plen;
+
+        error = ipv6_parse_cidr(argv[2], &ip6, &plen);
+        if (!error) {
+            struct in6_addr mask;
+
+            mask = ipv6_create_mask(plen);
+            netdev_dummy_set_in6(netdev, &ip6, &mask);
+            unixctl_command_reply(conn, "OK");
+        } else {
+            unixctl_command_reply_error(conn, error);
+            free(error);
+        }
    } else {
        unixctl_command_reply_error(conn, "Unknown Dummy Interface");
-        netdev_close(netdev);
-        return;
    }
+    netdev_close(netdev);
+}
+
+
+/* If a netdev provider named 'type' is currently registered, unregisters it
+ * and registers a copy of the dummy class under that same type name.  On
+ * registration failure, logs a warning and frees the copy. */
+static void
+netdev_dummy_override(const char *type)
+{
+    if (!netdev_unregister_provider(type)) {
+        struct netdev_class *class;
+        int error;
+
+        class = xmemdup(&dummy_class, sizeof dummy_class);
+        class->type = xstrdup(type);
+        error = netdev_register_provider(class);
+        if (error) {
+            VLOG_ERR("%s: failed to register netdev provider (%s)",
+                     type, ovs_strerror(error));
+            free(CONST_CAST(char *, class->type));
+            free(class);
+        }
+    }
}
void
-netdev_dummy_register(bool override)
+netdev_dummy_register(enum dummy_level level)
{
- unixctl_command_register("netdev-dummy/receive", "name packet|flow...",
+ unixctl_command_register("netdev-dummy/receive",
+ "name [--qid queue_id] packet|flow [--len packet_len]",
2, INT_MAX, netdev_dummy_receive, NULL);
unixctl_command_register("netdev-dummy/set-admin-state",
"[netdev] up|down", 1, 2,
unixctl_command_register("netdev-dummy/ip4addr",
"[netdev] ipaddr/mask-prefix-len", 2, 2,
netdev_dummy_ip4addr, NULL);
+ unixctl_command_register("netdev-dummy/ip6addr",
+ "[netdev] ip6addr", 2, 2,
+ netdev_dummy_ip6addr, NULL);
-
- if (override) {
+ if (level == DUMMY_OVERRIDE_ALL) {
struct sset types;
const char *type;
sset_init(&types);
netdev_enumerate_types(&types);
SSET_FOR_EACH (type, &types) {
- if (!strcmp(type, "patch")) {
- continue;
- }
- if (!netdev_unregister_provider(type)) {
- struct netdev_class *class;
- int error;
-
- class = xmemdup(&dummy_class, sizeof dummy_class);
- class->type = xstrdup(type);
- error = netdev_register_provider(class);
- if (error) {
- VLOG_ERR("%s: failed to register netdev provider (%s)",
- type, ovs_strerror(error));
- free(CONST_CAST(char *, class->type));
- free(class);
- }
+ if (strcmp(type, "patch")) {
+ netdev_dummy_override(type);
}
}
sset_destroy(&types);
+ } else if (level == DUMMY_OVERRIDE_SYSTEM) {
+ netdev_dummy_override("system");
}
netdev_register_provider(&dummy_class);
+ netdev_register_provider(&dummy_internal_class);
+ netdev_register_provider(&dummy_pmd_class);
netdev_vport_tunnel_register();
}