/*
 * Copyright (c) 2010, 2011, 2012, 2013, 2015, 2016, 2017 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <config.h>

#include <errno.h>
#include <limits.h>
#include <sched.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "dp-packet.h"
#include "dpif-netdev.h"
#include "flow.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "odp-util.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/list.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/ofpbuf.h"
#include "openvswitch/shash.h"
#include "openvswitch/vlog.h"
#include "ovs-atomic.h"
#include "packets.h"
#include "pcap-file.h"
#include "poll-loop.h"
#include "reconnect.h"
#include "seq.h"
#include "stream.h"
#include "timeval.h"
#include "unaligned.h"
#include "unixctl.h"
#include "util.h"
VLOG_DEFINE_THIS_MODULE(netdev_dummy);
51 struct dummy_packet_stream
{
52 struct stream
*stream
;
53 struct dp_packet rxbuf
;
/* Kind of stream connection a dummy netdev uses to exchange packets. */
enum dummy_packet_conn_type {
    NONE,       /* No connection is configured. */
    PASSIVE,    /* Listener. */
    ACTIVE      /* Connect to listener. */
};
/* Connection status reported by dummy_netdev_get_conn_state(). */
enum dummy_netdev_conn_state {
    CONN_STATE_CONNECTED,       /* Listener connected. */
    CONN_STATE_NOT_CONNECTED,   /* Listener not connected. */
    CONN_STATE_UNKNOWN,         /* No relevant information. */
};
69 struct dummy_packet_pconn
{
70 struct pstream
*pstream
;
71 struct dummy_packet_stream
**streams
;
/* Active (reconnecting client) side of a dummy packet connection. */
struct dummy_packet_rconn {
    struct dummy_packet_stream *rstream;
    struct reconnect *reconnect;
};
80 struct dummy_packet_conn
{
81 enum dummy_packet_conn_type type
;
83 struct dummy_packet_pconn pconn
;
84 struct dummy_packet_rconn rconn
;
88 struct pkt_list_node
{
89 struct dp_packet
*pkt
;
90 struct ovs_list list_node
;
93 /* Protects 'dummy_list'. */
94 static struct ovs_mutex dummy_list_mutex
= OVS_MUTEX_INITIALIZER
;
96 /* Contains all 'struct dummy_dev's. */
97 static struct ovs_list dummy_list
OVS_GUARDED_BY(dummy_list_mutex
)
98 = OVS_LIST_INITIALIZER(&dummy_list
);
100 struct netdev_dummy
{
104 struct ovs_list list_node
OVS_GUARDED_BY(dummy_list_mutex
);
106 /* Protects all members below. */
107 struct ovs_mutex mutex
OVS_ACQ_AFTER(dummy_list_mutex
);
109 struct eth_addr hwaddr OVS_GUARDED
;
111 struct netdev_stats stats OVS_GUARDED
;
112 enum netdev_flags flags OVS_GUARDED
;
113 int ifindex OVS_GUARDED
;
114 int numa_id OVS_GUARDED
;
116 struct dummy_packet_conn conn OVS_GUARDED
;
118 FILE *tx_pcap
, *rxq_pcap OVS_GUARDED
;
120 struct in_addr address
, netmask
;
121 struct in6_addr ipv6
, ipv6_mask
;
122 struct ovs_list rxes OVS_GUARDED
; /* List of child "netdev_rxq_dummy"s. */
124 /* The following properties are for dummy-pmd and they cannot be changed
125 * when a device is running, so we remember the request and update them
126 * next time netdev_dummy_reconfigure() is called. */
127 int requested_n_txq OVS_GUARDED
;
128 int requested_n_rxq OVS_GUARDED
;
129 int requested_numa_id OVS_GUARDED
;
/* Max 'recv_queue_len' in struct netdev_dummy. */
#define NETDEV_DUMMY_MAX_QUEUE 100
135 struct netdev_rxq_dummy
{
136 struct netdev_rxq up
;
137 struct ovs_list node
; /* In netdev_dummy's "rxes" list. */
138 struct ovs_list recv_queue
;
139 int recv_queue_len
; /* ovs_list_size(&recv_queue). */
140 struct seq
*seq
; /* Reports newly queued packets. */
143 static unixctl_cb_func netdev_dummy_set_admin_state
;
144 static int netdev_dummy_construct(struct netdev
*);
145 static void netdev_dummy_queue_packet(struct netdev_dummy
*,
146 struct dp_packet
*, int);
148 static void dummy_packet_stream_close(struct dummy_packet_stream
*);
150 static void pkt_list_delete(struct ovs_list
*);
153 is_dummy_class(const struct netdev_class
*class)
155 return class->construct
== netdev_dummy_construct
;
158 static struct netdev_dummy
*
159 netdev_dummy_cast(const struct netdev
*netdev
)
161 ovs_assert(is_dummy_class(netdev_get_class(netdev
)));
162 return CONTAINER_OF(netdev
, struct netdev_dummy
, up
);
165 static struct netdev_rxq_dummy
*
166 netdev_rxq_dummy_cast(const struct netdev_rxq
*rx
)
168 ovs_assert(is_dummy_class(netdev_get_class(rx
->netdev
)));
169 return CONTAINER_OF(rx
, struct netdev_rxq_dummy
, up
);
173 dummy_packet_stream_init(struct dummy_packet_stream
*s
, struct stream
*stream
)
175 int rxbuf_size
= stream
? 2048 : 0;
177 dp_packet_init(&s
->rxbuf
, rxbuf_size
);
178 ovs_list_init(&s
->txq
);
181 static struct dummy_packet_stream
*
182 dummy_packet_stream_create(struct stream
*stream
)
184 struct dummy_packet_stream
*s
;
186 s
= xzalloc(sizeof *s
);
187 dummy_packet_stream_init(s
, stream
);
193 dummy_packet_stream_wait(struct dummy_packet_stream
*s
)
195 stream_run_wait(s
->stream
);
196 if (!ovs_list_is_empty(&s
->txq
)) {
197 stream_send_wait(s
->stream
);
199 stream_recv_wait(s
->stream
);
203 dummy_packet_stream_send(struct dummy_packet_stream
*s
, const void *buffer
, size_t size
)
205 if (ovs_list_size(&s
->txq
) < NETDEV_DUMMY_MAX_QUEUE
) {
207 struct pkt_list_node
*node
;
209 b
= dp_packet_clone_data_with_headroom(buffer
, size
, 2);
210 put_unaligned_be16(dp_packet_push_uninit(b
, 2), htons(size
));
212 node
= xmalloc(sizeof *node
);
214 ovs_list_push_back(&s
->txq
, &node
->list_node
);
219 dummy_packet_stream_run(struct netdev_dummy
*dev
, struct dummy_packet_stream
*s
)
224 stream_run(s
->stream
);
226 if (!ovs_list_is_empty(&s
->txq
)) {
227 struct pkt_list_node
*txbuf_node
;
228 struct dp_packet
*txbuf
;
231 ASSIGN_CONTAINER(txbuf_node
, ovs_list_front(&s
->txq
), list_node
);
232 txbuf
= txbuf_node
->pkt
;
233 retval
= stream_send(s
->stream
, dp_packet_data(txbuf
), dp_packet_size(txbuf
));
236 dp_packet_pull(txbuf
, retval
);
237 if (!dp_packet_size(txbuf
)) {
238 ovs_list_remove(&txbuf_node
->list_node
);
240 dp_packet_delete(txbuf
);
242 } else if (retval
!= -EAGAIN
) {
248 if (dp_packet_size(&s
->rxbuf
) < 2) {
249 n
= 2 - dp_packet_size(&s
->rxbuf
);
253 frame_len
= ntohs(get_unaligned_be16(dp_packet_data(&s
->rxbuf
)));
254 if (frame_len
< ETH_HEADER_LEN
) {
258 n
= (2 + frame_len
) - dp_packet_size(&s
->rxbuf
);
265 dp_packet_prealloc_tailroom(&s
->rxbuf
, n
);
266 retval
= stream_recv(s
->stream
, dp_packet_tail(&s
->rxbuf
), n
);
269 dp_packet_set_size(&s
->rxbuf
, dp_packet_size(&s
->rxbuf
) + retval
);
270 if (retval
== n
&& dp_packet_size(&s
->rxbuf
) > 2) {
271 dp_packet_pull(&s
->rxbuf
, 2);
272 netdev_dummy_queue_packet(dev
,
273 dp_packet_clone(&s
->rxbuf
), 0);
274 dp_packet_clear(&s
->rxbuf
);
276 } else if (retval
!= -EAGAIN
) {
277 error
= (retval
< 0 ? -retval
278 : dp_packet_size(&s
->rxbuf
) ? EPROTO
287 dummy_packet_stream_close(struct dummy_packet_stream
*s
)
289 stream_close(s
->stream
);
290 dp_packet_uninit(&s
->rxbuf
);
291 pkt_list_delete(&s
->txq
);
295 dummy_packet_conn_init(struct dummy_packet_conn
*conn
)
297 memset(conn
, 0, sizeof *conn
);
302 dummy_packet_conn_get_config(struct dummy_packet_conn
*conn
, struct smap
*args
)
305 switch (conn
->type
) {
307 smap_add(args
, "pstream", pstream_get_name(conn
->u
.pconn
.pstream
));
311 smap_add(args
, "stream", stream_get_name(conn
->u
.rconn
.rstream
->stream
));
321 dummy_packet_conn_close(struct dummy_packet_conn
*conn
)
324 struct dummy_packet_pconn
*pconn
= &conn
->u
.pconn
;
325 struct dummy_packet_rconn
*rconn
= &conn
->u
.rconn
;
327 switch (conn
->type
) {
329 pstream_close(pconn
->pstream
);
330 for (i
= 0; i
< pconn
->n_streams
; i
++) {
331 dummy_packet_stream_close(pconn
->streams
[i
]);
332 free(pconn
->streams
[i
]);
334 free(pconn
->streams
);
335 pconn
->pstream
= NULL
;
336 pconn
->streams
= NULL
;
340 dummy_packet_stream_close(rconn
->rstream
);
341 free(rconn
->rstream
);
342 rconn
->rstream
= NULL
;
343 reconnect_destroy(rconn
->reconnect
);
344 rconn
->reconnect
= NULL
;
353 memset(conn
, 0, sizeof *conn
);
357 dummy_packet_conn_set_config(struct dummy_packet_conn
*conn
,
358 const struct smap
*args
)
360 const char *pstream
= smap_get(args
, "pstream");
361 const char *stream
= smap_get(args
, "stream");
363 if (pstream
&& stream
) {
364 VLOG_WARN("Open failed: both %s and %s are configured",
369 switch (conn
->type
) {
372 !strcmp(pstream_get_name(conn
->u
.pconn
.pstream
), pstream
)) {
375 dummy_packet_conn_close(conn
);
379 !strcmp(stream_get_name(conn
->u
.rconn
.rstream
->stream
), stream
)) {
382 dummy_packet_conn_close(conn
);
392 error
= pstream_open(pstream
, &conn
->u
.pconn
.pstream
, DSCP_DEFAULT
);
394 VLOG_WARN("%s: open failed (%s)", pstream
, ovs_strerror(error
));
396 conn
->type
= PASSIVE
;
402 struct stream
*active_stream
;
403 struct reconnect
*reconnect
;
405 reconnect
= reconnect_create(time_msec());
406 reconnect_set_name(reconnect
, stream
);
407 reconnect_set_passive(reconnect
, false, time_msec());
408 reconnect_enable(reconnect
, time_msec());
409 reconnect_set_backoff(reconnect
, 100, INT_MAX
);
410 reconnect_set_probe_interval(reconnect
, 0);
411 conn
->u
.rconn
.reconnect
= reconnect
;
414 error
= stream_open(stream
, &active_stream
, DSCP_DEFAULT
);
415 conn
->u
.rconn
.rstream
= dummy_packet_stream_create(active_stream
);
419 reconnect_connected(reconnect
, time_msec());
423 reconnect_connecting(reconnect
, time_msec());
427 reconnect_connect_failed(reconnect
, time_msec(), error
);
428 stream_close(active_stream
);
429 conn
->u
.rconn
.rstream
->stream
= NULL
;
436 dummy_pconn_run(struct netdev_dummy
*dev
)
437 OVS_REQUIRES(dev
->mutex
)
439 struct stream
*new_stream
;
440 struct dummy_packet_pconn
*pconn
= &dev
->conn
.u
.pconn
;
444 error
= pstream_accept(pconn
->pstream
, &new_stream
);
446 struct dummy_packet_stream
*s
;
448 pconn
->streams
= xrealloc(pconn
->streams
,
449 ((pconn
->n_streams
+ 1)
451 s
= xmalloc(sizeof *s
);
452 pconn
->streams
[pconn
->n_streams
++] = s
;
453 dummy_packet_stream_init(s
, new_stream
);
454 } else if (error
!= EAGAIN
) {
455 VLOG_WARN("%s: accept failed (%s)",
456 pstream_get_name(pconn
->pstream
), ovs_strerror(error
));
457 pstream_close(pconn
->pstream
);
458 pconn
->pstream
= NULL
;
459 dev
->conn
.type
= NONE
;
462 for (i
= 0; i
< pconn
->n_streams
; ) {
463 struct dummy_packet_stream
*s
= pconn
->streams
[i
];
465 error
= dummy_packet_stream_run(dev
, s
);
467 VLOG_DBG("%s: closing connection (%s)",
468 stream_get_name(s
->stream
),
469 ovs_retval_to_string(error
));
470 dummy_packet_stream_close(s
);
472 pconn
->streams
[i
] = pconn
->streams
[--pconn
->n_streams
];
480 dummy_rconn_run(struct netdev_dummy
*dev
)
481 OVS_REQUIRES(dev
->mutex
)
483 struct dummy_packet_rconn
*rconn
= &dev
->conn
.u
.rconn
;
485 switch (reconnect_run(rconn
->reconnect
, time_msec())) {
486 case RECONNECT_CONNECT
:
490 if (rconn
->rstream
->stream
) {
491 error
= stream_connect(rconn
->rstream
->stream
);
493 error
= stream_open(reconnect_get_name(rconn
->reconnect
),
494 &rconn
->rstream
->stream
, DSCP_DEFAULT
);
499 reconnect_connected(rconn
->reconnect
, time_msec());
503 reconnect_connecting(rconn
->reconnect
, time_msec());
507 reconnect_connect_failed(rconn
->reconnect
, time_msec(), error
);
508 stream_close(rconn
->rstream
->stream
);
509 rconn
->rstream
->stream
= NULL
;
515 case RECONNECT_DISCONNECT
:
516 case RECONNECT_PROBE
:
521 if (reconnect_is_connected(rconn
->reconnect
)) {
524 err
= dummy_packet_stream_run(dev
, rconn
->rstream
);
527 reconnect_disconnected(rconn
->reconnect
, time_msec(), err
);
528 stream_close(rconn
->rstream
->stream
);
529 rconn
->rstream
->stream
= NULL
;
535 dummy_packet_conn_run(struct netdev_dummy
*dev
)
536 OVS_REQUIRES(dev
->mutex
)
538 switch (dev
->conn
.type
) {
540 dummy_pconn_run(dev
);
544 dummy_rconn_run(dev
);
554 dummy_packet_conn_wait(struct dummy_packet_conn
*conn
)
557 switch (conn
->type
) {
559 pstream_wait(conn
->u
.pconn
.pstream
);
560 for (i
= 0; i
< conn
->u
.pconn
.n_streams
; i
++) {
561 struct dummy_packet_stream
*s
= conn
->u
.pconn
.streams
[i
];
562 dummy_packet_stream_wait(s
);
566 if (reconnect_is_connected(conn
->u
.rconn
.reconnect
)) {
567 dummy_packet_stream_wait(conn
->u
.rconn
.rstream
);
578 dummy_packet_conn_send(struct dummy_packet_conn
*conn
,
579 const void *buffer
, size_t size
)
583 switch (conn
->type
) {
585 for (i
= 0; i
< conn
->u
.pconn
.n_streams
; i
++) {
586 struct dummy_packet_stream
*s
= conn
->u
.pconn
.streams
[i
];
588 dummy_packet_stream_send(s
, buffer
, size
);
589 pstream_wait(conn
->u
.pconn
.pstream
);
594 if (reconnect_is_connected(conn
->u
.rconn
.reconnect
)) {
595 dummy_packet_stream_send(conn
->u
.rconn
.rstream
, buffer
, size
);
596 dummy_packet_stream_wait(conn
->u
.rconn
.rstream
);
606 static enum dummy_netdev_conn_state
607 dummy_netdev_get_conn_state(struct dummy_packet_conn
*conn
)
609 enum dummy_netdev_conn_state state
;
611 if (conn
->type
== ACTIVE
) {
612 if (reconnect_is_connected(conn
->u
.rconn
.reconnect
)) {
613 state
= CONN_STATE_CONNECTED
;
615 state
= CONN_STATE_NOT_CONNECTED
;
618 state
= CONN_STATE_UNKNOWN
;
625 netdev_dummy_run(const struct netdev_class
*netdev_class
)
627 struct netdev_dummy
*dev
;
629 ovs_mutex_lock(&dummy_list_mutex
);
630 LIST_FOR_EACH (dev
, list_node
, &dummy_list
) {
631 if (netdev_get_class(&dev
->up
) != netdev_class
) {
634 ovs_mutex_lock(&dev
->mutex
);
635 dummy_packet_conn_run(dev
);
636 ovs_mutex_unlock(&dev
->mutex
);
638 ovs_mutex_unlock(&dummy_list_mutex
);
642 netdev_dummy_wait(const struct netdev_class
*netdev_class
)
644 struct netdev_dummy
*dev
;
646 ovs_mutex_lock(&dummy_list_mutex
);
647 LIST_FOR_EACH (dev
, list_node
, &dummy_list
) {
648 if (netdev_get_class(&dev
->up
) != netdev_class
) {
651 ovs_mutex_lock(&dev
->mutex
);
652 dummy_packet_conn_wait(&dev
->conn
);
653 ovs_mutex_unlock(&dev
->mutex
);
655 ovs_mutex_unlock(&dummy_list_mutex
);
658 static struct netdev
*
659 netdev_dummy_alloc(void)
661 struct netdev_dummy
*netdev
= xzalloc(sizeof *netdev
);
666 netdev_dummy_construct(struct netdev
*netdev_
)
668 static atomic_count next_n
= ATOMIC_COUNT_INIT(0xaa550000);
669 struct netdev_dummy
*netdev
= netdev_dummy_cast(netdev_
);
672 n
= atomic_count_inc(&next_n
);
674 ovs_mutex_init(&netdev
->mutex
);
675 ovs_mutex_lock(&netdev
->mutex
);
676 netdev
->hwaddr
.ea
[0] = 0xaa;
677 netdev
->hwaddr
.ea
[1] = 0x55;
678 netdev
->hwaddr
.ea
[2] = n
>> 24;
679 netdev
->hwaddr
.ea
[3] = n
>> 16;
680 netdev
->hwaddr
.ea
[4] = n
>> 8;
681 netdev
->hwaddr
.ea
[5] = n
;
684 netdev
->ifindex
= -EOPNOTSUPP
;
685 netdev
->requested_n_rxq
= netdev_
->n_rxq
;
686 netdev
->requested_n_txq
= netdev_
->n_txq
;
689 dummy_packet_conn_init(&netdev
->conn
);
691 ovs_list_init(&netdev
->rxes
);
692 ovs_mutex_unlock(&netdev
->mutex
);
694 ovs_mutex_lock(&dummy_list_mutex
);
695 ovs_list_push_back(&dummy_list
, &netdev
->list_node
);
696 ovs_mutex_unlock(&dummy_list_mutex
);
702 netdev_dummy_destruct(struct netdev
*netdev_
)
704 struct netdev_dummy
*netdev
= netdev_dummy_cast(netdev_
);
706 ovs_mutex_lock(&dummy_list_mutex
);
707 ovs_list_remove(&netdev
->list_node
);
708 ovs_mutex_unlock(&dummy_list_mutex
);
710 ovs_mutex_lock(&netdev
->mutex
);
711 dummy_packet_conn_close(&netdev
->conn
);
712 netdev
->conn
.type
= NONE
;
714 ovs_mutex_unlock(&netdev
->mutex
);
715 ovs_mutex_destroy(&netdev
->mutex
);
/* netdev_class 'dealloc' callback. */
static void
netdev_dummy_dealloc(struct netdev *netdev_)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    free(netdev);
}
727 netdev_dummy_get_config(const struct netdev
*dev
, struct smap
*args
)
729 struct netdev_dummy
*netdev
= netdev_dummy_cast(dev
);
731 ovs_mutex_lock(&netdev
->mutex
);
733 if (netdev
->ifindex
>= 0) {
734 smap_add_format(args
, "ifindex", "%d", netdev
->ifindex
);
737 dummy_packet_conn_get_config(&netdev
->conn
, args
);
739 /* 'dummy-pmd' specific config. */
740 if (!netdev_is_pmd(dev
)) {
743 smap_add_format(args
, "requested_rx_queues", "%d", netdev
->requested_n_rxq
);
744 smap_add_format(args
, "configured_rx_queues", "%d", dev
->n_rxq
);
745 smap_add_format(args
, "requested_tx_queues", "%d", netdev
->requested_n_txq
);
746 smap_add_format(args
, "configured_tx_queues", "%d", dev
->n_txq
);
749 ovs_mutex_unlock(&netdev
->mutex
);
754 netdev_dummy_get_addr_list(const struct netdev
*netdev_
, struct in6_addr
**paddr
,
755 struct in6_addr
**pmask
, int *n_addr
)
757 struct netdev_dummy
*netdev
= netdev_dummy_cast(netdev_
);
758 int cnt
= 0, i
= 0, err
= 0;
759 struct in6_addr
*addr
, *mask
;
761 ovs_mutex_lock(&netdev
->mutex
);
762 if (netdev
->address
.s_addr
!= INADDR_ANY
) {
766 if (ipv6_addr_is_set(&netdev
->ipv6
)) {
773 addr
= xmalloc(sizeof *addr
* cnt
);
774 mask
= xmalloc(sizeof *mask
* cnt
);
775 if (netdev
->address
.s_addr
!= INADDR_ANY
) {
776 in6_addr_set_mapped_ipv4(&addr
[i
], netdev
->address
.s_addr
);
777 in6_addr_set_mapped_ipv4(&mask
[i
], netdev
->netmask
.s_addr
);
781 if (ipv6_addr_is_set(&netdev
->ipv6
)) {
782 memcpy(&addr
[i
], &netdev
->ipv6
, sizeof *addr
);
783 memcpy(&mask
[i
], &netdev
->ipv6_mask
, sizeof *mask
);
795 ovs_mutex_unlock(&netdev
->mutex
);
801 netdev_dummy_set_in4(struct netdev
*netdev_
, struct in_addr address
,
802 struct in_addr netmask
)
804 struct netdev_dummy
*netdev
= netdev_dummy_cast(netdev_
);
806 ovs_mutex_lock(&netdev
->mutex
);
807 netdev
->address
= address
;
808 netdev
->netmask
= netmask
;
809 netdev_change_seq_changed(netdev_
);
810 ovs_mutex_unlock(&netdev
->mutex
);
816 netdev_dummy_set_in6(struct netdev
*netdev_
, struct in6_addr
*in6
,
817 struct in6_addr
*mask
)
819 struct netdev_dummy
*netdev
= netdev_dummy_cast(netdev_
);
821 ovs_mutex_lock(&netdev
->mutex
);
823 netdev
->ipv6_mask
= *mask
;
824 netdev_change_seq_changed(netdev_
);
825 ovs_mutex_unlock(&netdev
->mutex
);
#define DUMMY_MAX_QUEUES_PER_PORT 1024
833 netdev_dummy_set_config(struct netdev
*netdev_
, const struct smap
*args
,
834 char **errp OVS_UNUSED
)
836 struct netdev_dummy
*netdev
= netdev_dummy_cast(netdev_
);
838 int new_n_rxq
, new_n_txq
, new_numa_id
;
840 ovs_mutex_lock(&netdev
->mutex
);
841 netdev
->ifindex
= smap_get_int(args
, "ifindex", -EOPNOTSUPP
);
843 dummy_packet_conn_set_config(&netdev
->conn
, args
);
845 if (netdev
->rxq_pcap
) {
846 fclose(netdev
->rxq_pcap
);
848 if (netdev
->tx_pcap
&& netdev
->tx_pcap
!= netdev
->rxq_pcap
) {
849 fclose(netdev
->tx_pcap
);
851 netdev
->rxq_pcap
= netdev
->tx_pcap
= NULL
;
852 pcap
= smap_get(args
, "pcap");
854 netdev
->rxq_pcap
= netdev
->tx_pcap
= ovs_pcap_open(pcap
, "ab");
856 const char *rxq_pcap
= smap_get(args
, "rxq_pcap");
857 const char *tx_pcap
= smap_get(args
, "tx_pcap");
860 netdev
->rxq_pcap
= ovs_pcap_open(rxq_pcap
, "ab");
863 netdev
->tx_pcap
= ovs_pcap_open(tx_pcap
, "ab");
867 netdev_change_seq_changed(netdev_
);
869 /* 'dummy-pmd' specific config. */
870 if (!netdev_
->netdev_class
->is_pmd
) {
874 new_n_rxq
= MAX(smap_get_int(args
, "n_rxq", NR_QUEUE
), 1);
875 new_n_txq
= MAX(smap_get_int(args
, "n_txq", NR_QUEUE
), 1);
877 if (new_n_rxq
> DUMMY_MAX_QUEUES_PER_PORT
||
878 new_n_txq
> DUMMY_MAX_QUEUES_PER_PORT
) {
879 VLOG_WARN("The one or both of interface %s queues"
880 "(rxq: %d, txq: %d) exceed %d. Sets it %d.\n",
881 netdev_get_name(netdev_
),
884 DUMMY_MAX_QUEUES_PER_PORT
,
885 DUMMY_MAX_QUEUES_PER_PORT
);
887 new_n_rxq
= MIN(DUMMY_MAX_QUEUES_PER_PORT
, new_n_rxq
);
888 new_n_txq
= MIN(DUMMY_MAX_QUEUES_PER_PORT
, new_n_txq
);
891 new_numa_id
= smap_get_int(args
, "numa_id", 0);
892 if (new_n_rxq
!= netdev
->requested_n_rxq
893 || new_n_txq
!= netdev
->requested_n_txq
894 || new_numa_id
!= netdev
->requested_numa_id
) {
895 netdev
->requested_n_rxq
= new_n_rxq
;
896 netdev
->requested_n_txq
= new_n_txq
;
897 netdev
->requested_numa_id
= new_numa_id
;
898 netdev_request_reconfigure(netdev_
);
902 ovs_mutex_unlock(&netdev
->mutex
);
907 netdev_dummy_get_numa_id(const struct netdev
*netdev_
)
909 struct netdev_dummy
*netdev
= netdev_dummy_cast(netdev_
);
911 ovs_mutex_lock(&netdev
->mutex
);
912 int numa_id
= netdev
->numa_id
;
913 ovs_mutex_unlock(&netdev
->mutex
);
918 /* Sets the number of tx queues and rx queues for the dummy PMD interface. */
920 netdev_dummy_reconfigure(struct netdev
*netdev_
)
922 struct netdev_dummy
*netdev
= netdev_dummy_cast(netdev_
);
924 ovs_mutex_lock(&netdev
->mutex
);
926 netdev_
->n_txq
= netdev
->requested_n_txq
;
927 netdev_
->n_rxq
= netdev
->requested_n_rxq
;
928 netdev
->numa_id
= netdev
->requested_numa_id
;
930 ovs_mutex_unlock(&netdev
->mutex
);
934 static struct netdev_rxq
*
935 netdev_dummy_rxq_alloc(void)
937 struct netdev_rxq_dummy
*rx
= xzalloc(sizeof *rx
);
942 netdev_dummy_rxq_construct(struct netdev_rxq
*rxq_
)
944 struct netdev_rxq_dummy
*rx
= netdev_rxq_dummy_cast(rxq_
);
945 struct netdev_dummy
*netdev
= netdev_dummy_cast(rx
->up
.netdev
);
947 ovs_mutex_lock(&netdev
->mutex
);
948 ovs_list_push_back(&netdev
->rxes
, &rx
->node
);
949 ovs_list_init(&rx
->recv_queue
);
950 rx
->recv_queue_len
= 0;
951 rx
->seq
= seq_create();
952 ovs_mutex_unlock(&netdev
->mutex
);
958 netdev_dummy_rxq_destruct(struct netdev_rxq
*rxq_
)
960 struct netdev_rxq_dummy
*rx
= netdev_rxq_dummy_cast(rxq_
);
961 struct netdev_dummy
*netdev
= netdev_dummy_cast(rx
->up
.netdev
);
963 ovs_mutex_lock(&netdev
->mutex
);
964 ovs_list_remove(&rx
->node
);
965 pkt_list_delete(&rx
->recv_queue
);
966 ovs_mutex_unlock(&netdev
->mutex
);
967 seq_destroy(rx
->seq
);
/* netdev_class 'rxq_dealloc' callback. */
static void
netdev_dummy_rxq_dealloc(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);

    free(rx);
}
979 netdev_dummy_rxq_recv(struct netdev_rxq
*rxq_
, struct dp_packet_batch
*batch
)
981 struct netdev_rxq_dummy
*rx
= netdev_rxq_dummy_cast(rxq_
);
982 struct netdev_dummy
*netdev
= netdev_dummy_cast(rx
->up
.netdev
);
983 struct dp_packet
*packet
;
985 ovs_mutex_lock(&netdev
->mutex
);
986 if (!ovs_list_is_empty(&rx
->recv_queue
)) {
987 struct pkt_list_node
*pkt_node
;
989 ASSIGN_CONTAINER(pkt_node
, ovs_list_pop_front(&rx
->recv_queue
), list_node
);
990 packet
= pkt_node
->pkt
;
992 rx
->recv_queue_len
--;
996 ovs_mutex_unlock(&netdev
->mutex
);
999 if (netdev_is_pmd(&netdev
->up
)) {
1000 /* If 'netdev' is a PMD device, this is called as part of the PMD
1001 * thread busy loop. We yield here (without quiescing) for two
1004 * - To reduce the CPU utilization during the testsuite
1005 * - To give valgrind a chance to switch thread. According
1006 * to the valgrind documentation, there's a big lock that
1007 * prevents multiple thread from being executed at the same
1008 * time. On my system, without this sleep, the pmd threads
1009 * testcases fail under valgrind, because ovs-vswitchd becomes
1015 ovs_mutex_lock(&netdev
->mutex
);
1016 netdev
->stats
.rx_packets
++;
1017 netdev
->stats
.rx_bytes
+= dp_packet_size(packet
);
1018 ovs_mutex_unlock(&netdev
->mutex
);
1020 batch
->packets
[0] = packet
;
1026 netdev_dummy_rxq_wait(struct netdev_rxq
*rxq_
)
1028 struct netdev_rxq_dummy
*rx
= netdev_rxq_dummy_cast(rxq_
);
1029 struct netdev_dummy
*netdev
= netdev_dummy_cast(rx
->up
.netdev
);
1030 uint64_t seq
= seq_read(rx
->seq
);
1032 ovs_mutex_lock(&netdev
->mutex
);
1033 if (!ovs_list_is_empty(&rx
->recv_queue
)) {
1034 poll_immediate_wake();
1036 seq_wait(rx
->seq
, seq
);
1038 ovs_mutex_unlock(&netdev
->mutex
);
1042 netdev_dummy_rxq_drain(struct netdev_rxq
*rxq_
)
1044 struct netdev_rxq_dummy
*rx
= netdev_rxq_dummy_cast(rxq_
);
1045 struct netdev_dummy
*netdev
= netdev_dummy_cast(rx
->up
.netdev
);
1047 ovs_mutex_lock(&netdev
->mutex
);
1048 pkt_list_delete(&rx
->recv_queue
);
1049 rx
->recv_queue_len
= 0;
1050 ovs_mutex_unlock(&netdev
->mutex
);
1052 seq_change(rx
->seq
);
1058 netdev_dummy_send(struct netdev
*netdev
, int qid OVS_UNUSED
,
1059 struct dp_packet_batch
*batch
, bool may_steal
,
1060 bool concurrent_txq OVS_UNUSED
)
1062 struct netdev_dummy
*dev
= netdev_dummy_cast(netdev
);
1065 struct dp_packet
*packet
;
1066 DP_PACKET_BATCH_FOR_EACH(packet
, batch
) {
1067 const void *buffer
= dp_packet_data(packet
);
1068 size_t size
= dp_packet_size(packet
);
1070 if (batch
->packets
[i
]->packet_type
!= htonl(PT_ETH
)) {
1071 error
= EPFNOSUPPORT
;
1075 size
-= dp_packet_get_cutlen(packet
);
1077 if (size
< ETH_HEADER_LEN
) {
1081 const struct eth_header
*eth
= buffer
;
1084 ovs_mutex_lock(&dev
->mutex
);
1085 max_size
= dev
->mtu
+ ETH_HEADER_LEN
;
1086 ovs_mutex_unlock(&dev
->mutex
);
1088 if (eth
->eth_type
== htons(ETH_TYPE_VLAN
)) {
1089 max_size
+= VLAN_HEADER_LEN
;
1091 if (size
> max_size
) {
1097 ovs_mutex_lock(&dev
->mutex
);
1098 dev
->stats
.tx_packets
++;
1099 dev
->stats
.tx_bytes
+= size
;
1101 dummy_packet_conn_send(&dev
->conn
, buffer
, size
);
1103 /* Reply to ARP requests for 'dev''s assigned IP address. */
1104 if (dev
->address
.s_addr
) {
1105 struct dp_packet packet
;
1108 dp_packet_use_const(&packet
, buffer
, size
);
1109 flow_extract(&packet
, &flow
);
1110 if (flow
.dl_type
== htons(ETH_TYPE_ARP
)
1111 && flow
.nw_proto
== ARP_OP_REQUEST
1112 && flow
.nw_dst
== dev
->address
.s_addr
) {
1113 struct dp_packet
*reply
= dp_packet_new(0);
1114 compose_arp(reply
, ARP_OP_REPLY
, dev
->hwaddr
, flow
.dl_src
,
1115 false, flow
.nw_dst
, flow
.nw_src
);
1116 netdev_dummy_queue_packet(dev
, reply
, 0);
1121 struct dp_packet packet
;
1123 dp_packet_use_const(&packet
, buffer
, size
);
1124 ovs_pcap_write(dev
->tx_pcap
, &packet
);
1125 fflush(dev
->tx_pcap
);
1128 ovs_mutex_unlock(&dev
->mutex
);
1131 dp_packet_delete_batch(batch
, may_steal
);
1137 netdev_dummy_set_etheraddr(struct netdev
*netdev
, const struct eth_addr mac
)
1139 struct netdev_dummy
*dev
= netdev_dummy_cast(netdev
);
1141 ovs_mutex_lock(&dev
->mutex
);
1142 if (!eth_addr_equals(dev
->hwaddr
, mac
)) {
1144 netdev_change_seq_changed(netdev
);
1146 ovs_mutex_unlock(&dev
->mutex
);
1152 netdev_dummy_get_etheraddr(const struct netdev
*netdev
, struct eth_addr
*mac
)
1154 struct netdev_dummy
*dev
= netdev_dummy_cast(netdev
);
1156 ovs_mutex_lock(&dev
->mutex
);
1158 ovs_mutex_unlock(&dev
->mutex
);
1164 netdev_dummy_get_mtu(const struct netdev
*netdev
, int *mtup
)
1166 struct netdev_dummy
*dev
= netdev_dummy_cast(netdev
);
1168 ovs_mutex_lock(&dev
->mutex
);
1170 ovs_mutex_unlock(&dev
->mutex
);
#define DUMMY_MIN_MTU 68
#define DUMMY_MAX_MTU 65535
1179 netdev_dummy_set_mtu(struct netdev
*netdev
, int mtu
)
1181 if (mtu
< DUMMY_MIN_MTU
|| mtu
> DUMMY_MAX_MTU
) {
1185 struct netdev_dummy
*dev
= netdev_dummy_cast(netdev
);
1187 ovs_mutex_lock(&dev
->mutex
);
1188 if (dev
->mtu
!= mtu
) {
1190 netdev_change_seq_changed(netdev
);
1192 ovs_mutex_unlock(&dev
->mutex
);
1198 netdev_dummy_get_stats(const struct netdev
*netdev
, struct netdev_stats
*stats
)
1200 struct netdev_dummy
*dev
= netdev_dummy_cast(netdev
);
1202 ovs_mutex_lock(&dev
->mutex
);
1203 /* Passing only collected counters */
1204 stats
->tx_packets
= dev
->stats
.tx_packets
;
1205 stats
->tx_bytes
= dev
->stats
.tx_bytes
;
1206 stats
->rx_packets
= dev
->stats
.rx_packets
;
1207 stats
->rx_bytes
= dev
->stats
.rx_bytes
;
1208 ovs_mutex_unlock(&dev
->mutex
);
1214 netdev_dummy_get_queue(const struct netdev
*netdev OVS_UNUSED
,
1215 unsigned int queue_id
, struct smap
*details OVS_UNUSED
)
1217 if (queue_id
== 0) {
1225 netdev_dummy_init_queue_stats(struct netdev_queue_stats
*stats
)
1227 *stats
= (struct netdev_queue_stats
) {
1228 .tx_bytes
= UINT64_MAX
,
1229 .tx_packets
= UINT64_MAX
,
1230 .tx_errors
= UINT64_MAX
,
1231 .created
= LLONG_MIN
,
1236 netdev_dummy_get_queue_stats(const struct netdev
*netdev OVS_UNUSED
,
1237 unsigned int queue_id
,
1238 struct netdev_queue_stats
*stats
)
1240 if (queue_id
== 0) {
1241 netdev_dummy_init_queue_stats(stats
);
/* Cursor state for the queue dump: the dummy device has one queue, so this
 * just remembers whether it was reported yet. */
struct netdev_dummy_queue_state {
    unsigned int next_queue;
};
1253 netdev_dummy_queue_dump_start(const struct netdev
*netdev OVS_UNUSED
,
1256 struct netdev_dummy_queue_state
*state
= xmalloc(sizeof *state
);
1257 state
->next_queue
= 0;
1263 netdev_dummy_queue_dump_next(const struct netdev
*netdev OVS_UNUSED
,
1265 unsigned int *queue_id
,
1266 struct smap
*details OVS_UNUSED
)
1268 struct netdev_dummy_queue_state
*state
= state_
;
1269 if (state
->next_queue
== 0) {
1271 state
->next_queue
++;
1279 netdev_dummy_queue_dump_done(const struct netdev
*netdev OVS_UNUSED
,
1287 netdev_dummy_dump_queue_stats(const struct netdev
*netdev OVS_UNUSED
,
1288 void (*cb
)(unsigned int queue_id
,
1289 struct netdev_queue_stats
*,
1293 struct netdev_queue_stats stats
;
1294 netdev_dummy_init_queue_stats(&stats
);
1300 netdev_dummy_get_ifindex(const struct netdev
*netdev
)
1302 struct netdev_dummy
*dev
= netdev_dummy_cast(netdev
);
1305 ovs_mutex_lock(&dev
->mutex
);
1306 ifindex
= dev
->ifindex
;
1307 ovs_mutex_unlock(&dev
->mutex
);
1313 netdev_dummy_update_flags__(struct netdev_dummy
*netdev
,
1314 enum netdev_flags off
, enum netdev_flags on
,
1315 enum netdev_flags
*old_flagsp
)
1316 OVS_REQUIRES(netdev
->mutex
)
1318 if ((off
| on
) & ~(NETDEV_UP
| NETDEV_PROMISC
)) {
1322 *old_flagsp
= netdev
->flags
;
1323 netdev
->flags
|= on
;
1324 netdev
->flags
&= ~off
;
1325 if (*old_flagsp
!= netdev
->flags
) {
1326 netdev_change_seq_changed(&netdev
->up
);
1333 netdev_dummy_update_flags(struct netdev
*netdev_
,
1334 enum netdev_flags off
, enum netdev_flags on
,
1335 enum netdev_flags
*old_flagsp
)
1337 struct netdev_dummy
*netdev
= netdev_dummy_cast(netdev_
);
1340 ovs_mutex_lock(&netdev
->mutex
);
1341 error
= netdev_dummy_update_flags__(netdev
, off
, on
, old_flagsp
);
1342 ovs_mutex_unlock(&netdev
->mutex
);
1347 /* Helper functions. */
/* Builds a struct netdev_class initializer for a dummy class named NAME,
 * with PMD flag PMD and reconfigure callback RECOFIGURE.
 *
 * NOTE(review): the initializer's dropped lines were reconstructed from the
 * surviving slot comments — verify field order against this tree's
 * netdev-provider.h. */
#define NETDEV_DUMMY_CLASS(NAME, PMD, RECOFIGURE)           \
{                                                           \
    NAME,                                                   \
    PMD,                        /* is_pmd */                \
    NULL,                       /* init */                  \
    netdev_dummy_run,                                       \
    netdev_dummy_wait,                                      \
                                                            \
    netdev_dummy_alloc,                                     \
    netdev_dummy_construct,                                 \
    netdev_dummy_destruct,                                  \
    netdev_dummy_dealloc,                                   \
    netdev_dummy_get_config,                                \
    netdev_dummy_set_config,                                \
    NULL,                       /* get_tunnel_config */     \
    NULL,                       /* build header */          \
    NULL,                       /* push header */           \
    NULL,                       /* pop header */            \
    netdev_dummy_get_numa_id,                               \
    NULL,                       /* set_tx_multiq */         \
                                                            \
    netdev_dummy_send,          /* send */                  \
    NULL,                       /* send_wait */             \
                                                            \
    netdev_dummy_set_etheraddr,                             \
    netdev_dummy_get_etheraddr,                             \
    netdev_dummy_get_mtu,                                   \
    netdev_dummy_set_mtu,                                   \
    netdev_dummy_get_ifindex,                               \
    NULL,                       /* get_carrier */           \
    NULL,                       /* get_carrier_resets */    \
    NULL,                       /* get_miimon */            \
    netdev_dummy_get_stats,                                 \
                                                            \
    NULL,                       /* get_features */          \
    NULL,                       /* set_advertisements */    \
                                                            \
    NULL,                       /* set_policing */          \
    NULL,                       /* get_qos_types */         \
    NULL,                       /* get_qos_capabilities */  \
    NULL,                       /* get_qos */               \
    NULL,                       /* set_qos */               \
    netdev_dummy_get_queue,                                 \
    NULL,                       /* set_queue */             \
    NULL,                       /* delete_queue */          \
    netdev_dummy_get_queue_stats,                           \
    netdev_dummy_queue_dump_start,                          \
    netdev_dummy_queue_dump_next,                           \
    netdev_dummy_queue_dump_done,                           \
    netdev_dummy_dump_queue_stats,                          \
                                                            \
    NULL,                       /* set_in4 */               \
    netdev_dummy_get_addr_list,                             \
    NULL,                       /* add_router */            \
    NULL,                       /* get_next_hop */          \
    NULL,                       /* get_status */            \
    NULL,                       /* arp_lookup */            \
                                                            \
    netdev_dummy_update_flags,                              \
    RECOFIGURE,                                             \
                                                            \
    netdev_dummy_rxq_alloc,                                 \
    netdev_dummy_rxq_construct,                             \
    netdev_dummy_rxq_destruct,                              \
    netdev_dummy_rxq_dealloc,                               \
    netdev_dummy_rxq_recv,                                  \
    netdev_dummy_rxq_wait,                                  \
    netdev_dummy_rxq_drain,                                 \
}
1421 static const struct netdev_class dummy_class
=
1422 NETDEV_DUMMY_CLASS("dummy", false, NULL
);
1424 static const struct netdev_class dummy_internal_class
=
1425 NETDEV_DUMMY_CLASS("dummy-internal", false, NULL
);
1427 static const struct netdev_class dummy_pmd_class
=
1428 NETDEV_DUMMY_CLASS("dummy-pmd", true,
1429 netdev_dummy_reconfigure
);
1432 pkt_list_delete(struct ovs_list
*l
)
1434 struct pkt_list_node
*pkt
;
1436 LIST_FOR_EACH_POP(pkt
, list_node
, l
) {
1437 dp_packet_delete(pkt
->pkt
);
/* Parses hex string 's' into a newly allocated Ethernet frame.  Caller owns
 * the returned packet.  NOTE(review): the error behavior of eth_from_hex()
 * on bad input is not visible here — presumably it yields NULL; confirm. */
static struct dp_packet *
eth_from_packet(const char *s)
{
    struct dp_packet *packet;
    eth_from_hex(s, &packet);
    return packet;
}
1450 static struct dp_packet
*
1451 eth_from_flow(const char *s
)
1453 enum odp_key_fitness fitness
;
1454 struct dp_packet
*packet
;
1455 struct ofpbuf odp_key
;
1459 /* Convert string to datapath key.
1461 * It would actually be nicer to parse an OpenFlow-like flow key here, but
1462 * the code for that currently calls exit() on parse error. We have to
1463 * settle for parsing a datapath key for now.
1465 ofpbuf_init(&odp_key
, 0);
1466 error
= odp_flow_from_string(s
, NULL
, &odp_key
, NULL
);
1468 ofpbuf_uninit(&odp_key
);
1472 /* Convert odp_key to flow. */
1473 fitness
= odp_flow_key_to_flow(odp_key
.data
, odp_key
.size
, &flow
);
1474 if (fitness
== ODP_FIT_ERROR
) {
1475 ofpbuf_uninit(&odp_key
);
1479 packet
= dp_packet_new(0);
1480 flow_compose(packet
, &flow
);
1482 ofpbuf_uninit(&odp_key
);
1487 netdev_dummy_queue_packet__(struct netdev_rxq_dummy
*rx
, struct dp_packet
*packet
)
1489 struct pkt_list_node
*pkt_node
= xmalloc(sizeof *pkt_node
);
1491 pkt_node
->pkt
= packet
;
1492 ovs_list_push_back(&rx
->recv_queue
, &pkt_node
->list_node
);
1493 rx
->recv_queue_len
++;
1494 seq_change(rx
->seq
);
1498 netdev_dummy_queue_packet(struct netdev_dummy
*dummy
, struct dp_packet
*packet
,
1500 OVS_REQUIRES(dummy
->mutex
)
1502 struct netdev_rxq_dummy
*rx
, *prev
;
1504 if (dummy
->rxq_pcap
) {
1505 ovs_pcap_write(dummy
->rxq_pcap
, packet
);
1506 fflush(dummy
->rxq_pcap
);
1509 LIST_FOR_EACH (rx
, node
, &dummy
->rxes
) {
1510 if (rx
->up
.queue_id
== queue_id
&&
1511 rx
->recv_queue_len
< NETDEV_DUMMY_MAX_QUEUE
) {
1513 netdev_dummy_queue_packet__(prev
, dp_packet_clone(packet
));
1519 netdev_dummy_queue_packet__(prev
, packet
);
1521 dp_packet_delete(packet
);
1526 netdev_dummy_receive(struct unixctl_conn
*conn
,
1527 int argc
, const char *argv
[], void *aux OVS_UNUSED
)
1529 struct netdev_dummy
*dummy_dev
;
1530 struct netdev
*netdev
;
1531 int i
, k
= 1, rx_qid
= 0;
1533 netdev
= netdev_from_name(argv
[k
++]);
1534 if (!netdev
|| !is_dummy_class(netdev
->netdev_class
)) {
1535 unixctl_command_reply_error(conn
, "no such dummy netdev");
1538 dummy_dev
= netdev_dummy_cast(netdev
);
1540 ovs_mutex_lock(&dummy_dev
->mutex
);
1542 if (argc
> k
+ 1 && !strcmp(argv
[k
], "--qid")) {
1543 rx_qid
= strtol(argv
[k
+ 1], NULL
, 10);
1544 if (rx_qid
< 0 || rx_qid
>= netdev
->n_rxq
) {
1545 unixctl_command_reply_error(conn
, "bad rx queue id.");
1551 for (i
= k
; i
< argc
; i
++) {
1552 struct dp_packet
*packet
;
1554 /* Try to parse 'argv[i]' as packet in hex. */
1555 packet
= eth_from_packet(argv
[i
]);
1558 /* Try parse 'argv[i]' as odp flow. */
1559 packet
= eth_from_flow(argv
[i
]);
1562 unixctl_command_reply_error(conn
, "bad packet or flow syntax");
1566 /* Parse optional --len argument immediately follows a 'flow'. */
1567 if (argc
>= i
+ 2 && !strcmp(argv
[i
+ 1], "--len")) {
1568 int packet_size
= strtol(argv
[i
+ 2], NULL
, 10);
1569 dp_packet_set_size(packet
, packet_size
);
1574 netdev_dummy_queue_packet(dummy_dev
, packet
, rx_qid
);
1577 unixctl_command_reply(conn
, NULL
);
1580 ovs_mutex_unlock(&dummy_dev
->mutex
);
1582 netdev_close(netdev
);
1586 netdev_dummy_set_admin_state__(struct netdev_dummy
*dev
, bool admin_state
)
1587 OVS_REQUIRES(dev
->mutex
)
1589 enum netdev_flags old_flags
;
1592 netdev_dummy_update_flags__(dev
, 0, NETDEV_UP
, &old_flags
);
1594 netdev_dummy_update_flags__(dev
, NETDEV_UP
, 0, &old_flags
);
1599 netdev_dummy_set_admin_state(struct unixctl_conn
*conn
, int argc
,
1600 const char *argv
[], void *aux OVS_UNUSED
)
1604 if (!strcasecmp(argv
[argc
- 1], "up")) {
1606 } else if ( !strcasecmp(argv
[argc
- 1], "down")) {
1609 unixctl_command_reply_error(conn
, "Invalid Admin State");
1614 struct netdev
*netdev
= netdev_from_name(argv
[1]);
1615 if (netdev
&& is_dummy_class(netdev
->netdev_class
)) {
1616 struct netdev_dummy
*dummy_dev
= netdev_dummy_cast(netdev
);
1618 ovs_mutex_lock(&dummy_dev
->mutex
);
1619 netdev_dummy_set_admin_state__(dummy_dev
, up
);
1620 ovs_mutex_unlock(&dummy_dev
->mutex
);
1622 netdev_close(netdev
);
1624 unixctl_command_reply_error(conn
, "Unknown Dummy Interface");
1625 netdev_close(netdev
);
1629 struct netdev_dummy
*netdev
;
1631 ovs_mutex_lock(&dummy_list_mutex
);
1632 LIST_FOR_EACH (netdev
, list_node
, &dummy_list
) {
1633 ovs_mutex_lock(&netdev
->mutex
);
1634 netdev_dummy_set_admin_state__(netdev
, up
);
1635 ovs_mutex_unlock(&netdev
->mutex
);
1637 ovs_mutex_unlock(&dummy_list_mutex
);
1639 unixctl_command_reply(conn
, "OK");
1643 display_conn_state__(struct ds
*s
, const char *name
,
1644 enum dummy_netdev_conn_state state
)
1646 ds_put_format(s
, "%s: ", name
);
1649 case CONN_STATE_CONNECTED
:
1650 ds_put_cstr(s
, "connected\n");
1653 case CONN_STATE_NOT_CONNECTED
:
1654 ds_put_cstr(s
, "disconnected\n");
1657 case CONN_STATE_UNKNOWN
:
1659 ds_put_cstr(s
, "unknown\n");
1665 netdev_dummy_conn_state(struct unixctl_conn
*conn
, int argc
,
1666 const char *argv
[], void *aux OVS_UNUSED
)
1668 enum dummy_netdev_conn_state state
= CONN_STATE_UNKNOWN
;
1674 const char *dev_name
= argv
[1];
1675 struct netdev
*netdev
= netdev_from_name(dev_name
);
1677 if (netdev
&& is_dummy_class(netdev
->netdev_class
)) {
1678 struct netdev_dummy
*dummy_dev
= netdev_dummy_cast(netdev
);
1680 ovs_mutex_lock(&dummy_dev
->mutex
);
1681 state
= dummy_netdev_get_conn_state(&dummy_dev
->conn
);
1682 ovs_mutex_unlock(&dummy_dev
->mutex
);
1684 netdev_close(netdev
);
1686 display_conn_state__(&s
, dev_name
, state
);
1688 struct netdev_dummy
*netdev
;
1690 ovs_mutex_lock(&dummy_list_mutex
);
1691 LIST_FOR_EACH (netdev
, list_node
, &dummy_list
) {
1692 ovs_mutex_lock(&netdev
->mutex
);
1693 state
= dummy_netdev_get_conn_state(&netdev
->conn
);
1694 ovs_mutex_unlock(&netdev
->mutex
);
1695 if (state
!= CONN_STATE_UNKNOWN
) {
1696 display_conn_state__(&s
, netdev
->up
.name
, state
);
1699 ovs_mutex_unlock(&dummy_list_mutex
);
1702 unixctl_command_reply(conn
, ds_cstr(&s
));
1707 netdev_dummy_ip4addr(struct unixctl_conn
*conn
, int argc OVS_UNUSED
,
1708 const char *argv
[], void *aux OVS_UNUSED
)
1710 struct netdev
*netdev
= netdev_from_name(argv
[1]);
1712 if (netdev
&& is_dummy_class(netdev
->netdev_class
)) {
1713 struct in_addr ip
, mask
;
1716 error
= ip_parse_masked(argv
[2], &ip
.s_addr
, &mask
.s_addr
);
1718 netdev_dummy_set_in4(netdev
, ip
, mask
);
1719 unixctl_command_reply(conn
, "OK");
1721 unixctl_command_reply_error(conn
, error
);
1725 unixctl_command_reply_error(conn
, "Unknown Dummy Interface");
1728 netdev_close(netdev
);
1732 netdev_dummy_ip6addr(struct unixctl_conn
*conn
, int argc OVS_UNUSED
,
1733 const char *argv
[], void *aux OVS_UNUSED
)
1735 struct netdev
*netdev
= netdev_from_name(argv
[1]);
1737 if (netdev
&& is_dummy_class(netdev
->netdev_class
)) {
1738 struct in6_addr ip6
;
1742 error
= ipv6_parse_cidr(argv
[2], &ip6
, &plen
);
1744 struct in6_addr mask
;
1746 mask
= ipv6_create_mask(plen
);
1747 netdev_dummy_set_in6(netdev
, &ip6
, &mask
);
1748 unixctl_command_reply(conn
, "OK");
1750 unixctl_command_reply_error(conn
, error
);
1753 netdev_close(netdev
);
1755 unixctl_command_reply_error(conn
, "Unknown Dummy Interface");
1758 netdev_close(netdev
);
1763 netdev_dummy_override(const char *type
)
1765 if (!netdev_unregister_provider(type
)) {
1766 struct netdev_class
*class;
1769 class = xmemdup(&dummy_class
, sizeof dummy_class
);
1770 class->type
= xstrdup(type
);
1771 error
= netdev_register_provider(class);
1773 VLOG_ERR("%s: failed to register netdev provider (%s)",
1774 type
, ovs_strerror(error
));
1775 free(CONST_CAST(char *, class->type
));
1782 netdev_dummy_register(enum dummy_level level
)
1784 unixctl_command_register("netdev-dummy/receive",
1785 "name [--qid queue_id] packet|flow [--len packet_len]",
1786 2, INT_MAX
, netdev_dummy_receive
, NULL
);
1787 unixctl_command_register("netdev-dummy/set-admin-state",
1788 "[netdev] up|down", 1, 2,
1789 netdev_dummy_set_admin_state
, NULL
);
1790 unixctl_command_register("netdev-dummy/conn-state",
1792 netdev_dummy_conn_state
, NULL
);
1793 unixctl_command_register("netdev-dummy/ip4addr",
1794 "[netdev] ipaddr/mask-prefix-len", 2, 2,
1795 netdev_dummy_ip4addr
, NULL
);
1796 unixctl_command_register("netdev-dummy/ip6addr",
1797 "[netdev] ip6addr", 2, 2,
1798 netdev_dummy_ip6addr
, NULL
);
1800 if (level
== DUMMY_OVERRIDE_ALL
) {
1805 netdev_enumerate_types(&types
);
1806 SSET_FOR_EACH (type
, &types
) {
1807 if (strcmp(type
, "patch")) {
1808 netdev_dummy_override(type
);
1811 sset_destroy(&types
);
1812 } else if (level
== DUMMY_OVERRIDE_SYSTEM
) {
1813 netdev_dummy_override("system");
1815 netdev_register_provider(&dummy_class
);
1816 netdev_register_provider(&dummy_internal_class
);
1817 netdev_register_provider(&dummy_pmd_class
);
1819 netdev_vport_tunnel_register();