/*
 * Copyright (c) 2010, 2011, 2012, 2013, 2015 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
23 #include "dp-packet.h"
24 #include "dpif-netdev.h"
25 #include "dynamic-string.h"
28 #include "netdev-provider.h"
29 #include "netdev-vport.h"
31 #include "ofp-print.h"
33 #include "ovs-atomic.h"
35 #include "pcap-file.h"
36 #include "poll-loop.h"
40 #include "unaligned.h"
43 #include "reconnect.h"
44 #include "openvswitch/vlog.h"
46 VLOG_DEFINE_THIS_MODULE(netdev_dummy
);
/* One framed byte stream used to tunnel dummy packets over a 'stream'.
 * Each frame on the wire is a 2-byte big-endian length followed by the
 * packet bytes (see dummy_packet_stream_send()/_run()). */
struct dummy_packet_stream {
    struct stream *stream;      /* Underlying byte stream (owned). */
    struct dp_packet rxbuf;     /* Partially received frame. */
    struct ovs_list txq;        /* Queue of "struct pkt_list_node" to send. */
};
/* Kind of packet connection configured on a dummy netdev. */
enum dummy_packet_conn_type {
    NONE,       /* No connection is configured. */
    PASSIVE,    /* Listener. */
    ACTIVE      /* Connect to listener. */
};
/* Reported connection state for the "netdev-dummy/conn-state" command. */
enum dummy_netdev_conn_state {
    CONN_STATE_CONNECTED,      /* Listener connected. */
    CONN_STATE_NOT_CONNECTED,  /* Listener not connected. */
    CONN_STATE_UNKNOWN,        /* No relevant information. */
};
/* Passive (listening) side of a dummy packet connection. */
struct dummy_packet_pconn {
    struct pstream *pstream;             /* Listener. */
    struct dummy_packet_stream *streams; /* Accepted streams (array). */
    size_t n_streams;                    /* Number of elements in 'streams'. */
};
/* Active (connecting) side of a dummy packet connection, with automatic
 * reconnection driven by the 'reconnect' FSM. */
struct dummy_packet_rconn {
    struct dummy_packet_stream *rstream; /* The single active stream. */
    struct reconnect *reconnect;         /* Reconnection state machine. */
};
/* A dummy netdev's packet connection: either a listener or an active
 * connection, discriminated by 'type'.  Later code accesses the members
 * as conn->u.pconn / conn->u.rconn, so they live in a union. */
struct dummy_packet_conn {
    enum dummy_packet_conn_type type;   /* Discriminator for 'u'. */
    union {
        struct dummy_packet_pconn pconn;  /* Valid when type == PASSIVE. */
        struct dummy_packet_rconn rconn;  /* Valid when type == ACTIVE. */
    } u;
};
/* A packet queued on an ovs_list (tx queues, rx queues). */
struct pkt_list_node {
    struct dp_packet *pkt;      /* The packet (owned by the list). */
    struct ovs_list list_node;  /* Link in the containing list. */
};
/* Protects 'dummy_list'. */
static struct ovs_mutex dummy_list_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dummy_dev's. */
static struct ovs_list dummy_list OVS_GUARDED_BY(dummy_list_mutex)
    = OVS_LIST_INITIALIZER(&dummy_list);
/* A dummy network device.  NOTE(review): the struct opener and 'up' member
 * were lost in extraction; reconstructed from how the fields are used below
 * (e.g. &netdev->up, dev->mtu) — confirm against upstream. */
struct netdev_dummy {
    struct netdev up;           /* Base netdev; must come first. */

    /* In 'dummy_list'. */
    struct ovs_list list_node OVS_GUARDED_BY(dummy_list_mutex);

    /* Protects all members below. */
    struct ovs_mutex mutex OVS_ACQ_AFTER(dummy_list_mutex);

    uint8_t hwaddr[ETH_ADDR_LEN] OVS_GUARDED;  /* Ethernet address. */
    int mtu OVS_GUARDED;                       /* Read by netdev_dummy_send(). */
    struct netdev_stats stats OVS_GUARDED;
    enum netdev_flags flags OVS_GUARDED;
    int ifindex OVS_GUARDED;                   /* -EOPNOTSUPP if unset. */

    struct dummy_packet_conn conn OVS_GUARDED; /* Stream/pstream connection. */

    FILE *tx_pcap, *rxq_pcap OVS_GUARDED;      /* Optional pcap dump files. */

    struct in_addr address, netmask;           /* Assigned IPv4 address. */
    struct ovs_list rxes OVS_GUARDED; /* List of child "netdev_rxq_dummy"s. */
};
/* Max 'recv_queue_len' in struct netdev_dummy. */
#define NETDEV_DUMMY_MAX_QUEUE 100

/* One receive queue of a dummy netdev. */
struct netdev_rxq_dummy {
    struct netdev_rxq up;        /* Base rxq; must come first. */
    struct ovs_list node;        /* In netdev_dummy's "rxes" list. */
    struct ovs_list recv_queue;  /* Queue of "struct pkt_list_node". */
    int recv_queue_len;          /* list_size(&recv_queue). */
    struct seq *seq;             /* Reports newly queued packets. */
};
/* Forward declarations for definitions that appear later in this file. */
static unixctl_cb_func netdev_dummy_set_admin_state;
static int netdev_dummy_construct(struct netdev *);
static void netdev_dummy_queue_packet(struct netdev_dummy *,
                                      struct dp_packet *);

static void dummy_packet_stream_close(struct dummy_packet_stream *);

static void pkt_list_delete(struct ovs_list *);
/* Returns true if 'class' is the dummy netdev class (or a clone of it made
 * by netdev_dummy_override(), which copies 'construct' too). */
static bool
is_dummy_class(const struct netdev_class *class)
{
    return class->construct == netdev_dummy_construct;
}
/* Downcasts 'netdev' to its containing struct netdev_dummy.  Asserts that
 * 'netdev' really is a dummy device. */
static struct netdev_dummy *
netdev_dummy_cast(const struct netdev *netdev)
{
    ovs_assert(is_dummy_class(netdev_get_class(netdev)));
    return CONTAINER_OF(netdev, struct netdev_dummy, up);
}
/* Downcasts 'rx' to its containing struct netdev_rxq_dummy.  Asserts that
 * the owning netdev is a dummy device. */
static struct netdev_rxq_dummy *
netdev_rxq_dummy_cast(const struct netdev_rxq *rx)
{
    ovs_assert(is_dummy_class(netdev_get_class(rx->netdev)));
    return CONTAINER_OF(rx, struct netdev_rxq_dummy, up);
}
/* Initializes 's' to wrap 'stream' (which may be NULL for a not-yet-open
 * slot; then the rx buffer is sized 0). */
static void
dummy_packet_stream_init(struct dummy_packet_stream *s, struct stream *stream)
{
    int rxbuf_size = stream ? 2048 : 0;
    s->stream = stream;
    dp_packet_init(&s->rxbuf, rxbuf_size);
    list_init(&s->txq);
}
/* Allocates and initializes a dummy_packet_stream wrapping 'stream'.
 * Caller owns the result (free with dummy_packet_stream_close() + free()). */
static struct dummy_packet_stream *
dummy_packet_stream_create(struct stream *stream)
{
    struct dummy_packet_stream *s;

    s = xzalloc(sizeof *s);
    dummy_packet_stream_init(s, stream);

    return s;
}
/* Registers poll-loop wakeups for 's': send progress only when there is
 * queued tx data, receive progress always. */
static void
dummy_packet_stream_wait(struct dummy_packet_stream *s)
{
    stream_run_wait(s->stream);
    if (!list_is_empty(&s->txq)) {
        stream_send_wait(s->stream);
    }
    stream_recv_wait(s->stream);
}
/* Queues 'buffer'/'size' for transmission on 's', framed with a 2-byte
 * big-endian length prefix.  Silently drops the packet if the tx queue is
 * already NETDEV_DUMMY_MAX_QUEUE deep. */
static void
dummy_packet_stream_send(struct dummy_packet_stream *s, const void *buffer,
                         size_t size)
{
    if (list_size(&s->txq) < NETDEV_DUMMY_MAX_QUEUE) {
        struct dp_packet *b;
        struct pkt_list_node *node;

        /* 2 bytes of headroom for the length prefix pushed below. */
        b = dp_packet_clone_data_with_headroom(buffer, size, 2);
        put_unaligned_be16(dp_packet_push_uninit(b, 2), htons(size));

        node = xmalloc(sizeof *node);
        node->pkt = b;
        list_push_back(&s->txq, &node->list_node);
    }
}
/* Runs one iteration of tx/rx on 's' for 'dev'.  Sends at most the frame at
 * the head of the tx queue, then tries to complete one incoming frame
 * (2-byte length prefix + payload); a completed frame is queued on 'dev's
 * receive queues.  Returns 0 on progress/EAGAIN-equivalent, otherwise a
 * positive errno (EPROTO for malformed frames, EOF at clean close). */
static int
dummy_packet_stream_run(struct netdev_dummy *dev, struct dummy_packet_stream *s)
{
    int error = 0;
    size_t n;

    stream_run(s->stream);

    if (!list_is_empty(&s->txq)) {
        struct pkt_list_node *txbuf_node;
        struct dp_packet *txbuf;
        int retval;

        ASSIGN_CONTAINER(txbuf_node, list_front(&s->txq), list_node);
        txbuf = txbuf_node->pkt;
        retval = stream_send(s->stream,
                             dp_packet_data(txbuf), dp_packet_size(txbuf));

        if (retval > 0) {
            /* Partial sends leave the remainder queued. */
            dp_packet_pull(txbuf, retval);
            if (!dp_packet_size(txbuf)) {
                list_remove(&txbuf_node->list_node);
                free(txbuf_node);
                dp_packet_delete(txbuf);
            }
        } else if (retval != -EAGAIN) {
            error = -retval;
        }
    }

    if (!error) {
        /* Decide how many bytes 'n' are still needed: first the 2-byte
         * length prefix, then the rest of the announced frame. */
        if (dp_packet_size(&s->rxbuf) < 2) {
            n = 2 - dp_packet_size(&s->rxbuf);
        } else {
            uint16_t frame_len;

            frame_len = ntohs(get_unaligned_be16(dp_packet_data(&s->rxbuf)));
            if (frame_len < ETH_HEADER_LEN) {
                error = EPROTO;
                n = 0;
            } else {
                n = (2 + frame_len) - dp_packet_size(&s->rxbuf);
            }
        }
    }

    if (!error) {
        int retval;

        dp_packet_prealloc_tailroom(&s->rxbuf, n);
        retval = stream_recv(s->stream, dp_packet_tail(&s->rxbuf), n);

        if (retval > 0) {
            dp_packet_set_size(&s->rxbuf,
                               dp_packet_size(&s->rxbuf) + retval);
            if (retval == n && dp_packet_size(&s->rxbuf) > 2) {
                /* Frame complete: strip the length prefix and hand a copy
                 * to the device's receive path. */
                dp_packet_pull(&s->rxbuf, 2);
                netdev_dummy_queue_packet(dev,
                                          dp_packet_clone(&s->rxbuf));
                dp_packet_clear(&s->rxbuf);
            }
        } else if (retval != -EAGAIN) {
            error = (retval < 0 ? -retval
                     : dp_packet_size(&s->rxbuf) ? EPROTO
                     : EOF);
        }
    }

    return error;
}
/* Releases everything owned by 's' (stream, rx buffer, queued tx packets).
 * Does not free 's' itself. */
static void
dummy_packet_stream_close(struct dummy_packet_stream *s)
{
    stream_close(s->stream);
    dp_packet_uninit(&s->rxbuf);
    pkt_list_delete(&s->txq);
}
/* Initializes 'conn' to the NONE state (all-zeros). */
static void
dummy_packet_conn_init(struct dummy_packet_conn *conn)
{
    memset(conn, 0, sizeof *conn);
    conn->type = NONE;
}
/* Reports 'conn's configuration into 'args' ("pstream" or "stream" key,
 * matching what dummy_packet_conn_set_config() accepts). */
static void
dummy_packet_conn_get_config(struct dummy_packet_conn *conn,
                             struct smap *args)
{
    switch (conn->type) {
    case PASSIVE:
        smap_add(args, "pstream", pstream_get_name(conn->u.pconn.pstream));
        break;

    case ACTIVE:
        smap_add(args, "stream",
                 stream_get_name(conn->u.rconn.rstream->stream));
        break;

    case NONE:
    default:
        break;
    }
}
/* Tears down whatever 'conn' currently holds (listener plus accepted
 * streams, or the active stream and its reconnect FSM) and resets it to
 * the NONE state. */
static void
dummy_packet_conn_close(struct dummy_packet_conn *conn)
{
    int i;
    struct dummy_packet_pconn *pconn = &conn->u.pconn;
    struct dummy_packet_rconn *rconn = &conn->u.rconn;

    switch (conn->type) {
    case PASSIVE:
        pstream_close(pconn->pstream);
        for (i = 0; i < pconn->n_streams; i++) {
            dummy_packet_stream_close(&pconn->streams[i]);
        }
        free(pconn->streams);
        pconn->pstream = NULL;
        pconn->streams = NULL;
        break;

    case ACTIVE:
        dummy_packet_stream_close(rconn->rstream);
        free(rconn->rstream);
        rconn->rstream = NULL;
        reconnect_destroy(rconn->reconnect);
        rconn->reconnect = NULL;
        break;

    case NONE:
    default:
        break;
    }

    conn->type = NONE;
    memset(conn, 0, sizeof *conn);
}
/* Reconfigures 'conn' from 'args' ("pstream" opens a listener, "stream"
 * opens an active connection with reconnection).  A no-op when the
 * requested name matches the current configuration; otherwise the old
 * connection is closed first.  Configuring both keys at once is rejected
 * with a warning. */
static void
dummy_packet_conn_set_config(struct dummy_packet_conn *conn,
                             const struct smap *args)
{
    const char *pstream = smap_get(args, "pstream");
    const char *stream = smap_get(args, "stream");

    if (pstream && stream) {
        VLOG_WARN("Open failed: both %s and %s are configured",
                  pstream, stream);
        return;
    }

    switch (conn->type) {
    case PASSIVE:
        if (pstream &&
            !strcmp(pstream_get_name(conn->u.pconn.pstream), pstream)) {
            return;  /* Unchanged. */
        }
        dummy_packet_conn_close(conn);
        break;
    case ACTIVE:
        if (stream &&
            !strcmp(stream_get_name(conn->u.rconn.rstream->stream), stream)) {
            return;  /* Unchanged. */
        }
        dummy_packet_conn_close(conn);
        break;
    case NONE:
    default:
        break;
    }

    if (pstream) {
        int error;

        error = pstream_open(pstream, &conn->u.pconn.pstream, DSCP_DEFAULT);
        if (error) {
            VLOG_WARN("%s: open failed (%s)", pstream, ovs_strerror(error));
        } else {
            conn->type = PASSIVE;
        }
    }

    if (stream) {
        int error;
        struct stream *active_stream;
        struct reconnect *reconnect;

        reconnect = reconnect_create(time_msec());
        reconnect_set_name(reconnect, stream);
        reconnect_set_passive(reconnect, false, time_msec());
        reconnect_enable(reconnect, time_msec());
        /* Bound retry backoff; probing disabled for the dummy device. */
        reconnect_set_backoff(reconnect, 100, INT_MAX);
        reconnect_set_probe_interval(reconnect, 0);
        conn->u.rconn.reconnect = reconnect;
        conn->type = ACTIVE;

        error = stream_open(stream, &active_stream, DSCP_DEFAULT);
        conn->u.rconn.rstream = dummy_packet_stream_create(active_stream);

        switch (error) {
        case 0:
            reconnect_connected(reconnect, time_msec());
            break;

        case EAGAIN:
            reconnect_connecting(reconnect, time_msec());
            break;

        default:
            reconnect_connect_failed(reconnect, time_msec(), error);
            stream_close(active_stream);
            conn->u.rconn.rstream->stream = NULL;
            break;
        }
    }
}
/* Runs the passive side of 'dev's connection: accepts at most one new
 * stream, then services every accepted stream, pruning dead ones by
 * swapping in the last array element. */
static void
dummy_pconn_run(struct netdev_dummy *dev)
    OVS_REQUIRES(dev->mutex)
{
    struct stream *new_stream;
    struct dummy_packet_pconn *pconn = &dev->conn.u.pconn;
    int error;
    size_t i;

    error = pstream_accept(pconn->pstream, &new_stream);
    if (!error) {
        struct dummy_packet_stream *s;

        pconn->streams = xrealloc(pconn->streams,
                                  ((pconn->n_streams + 1)
                                   * sizeof *s));
        s = &pconn->streams[pconn->n_streams++];
        dummy_packet_stream_init(s, new_stream);
    } else if (error != EAGAIN) {
        /* Fatal listener error: drop the listener entirely. */
        VLOG_WARN("%s: accept failed (%s)",
                  pstream_get_name(pconn->pstream), ovs_strerror(error));
        pstream_close(pconn->pstream);
        pconn->pstream = NULL;
        dev->conn.type = NONE;
    }

    for (i = 0; i < pconn->n_streams; i++) {
        struct dummy_packet_stream *s = &pconn->streams[i];

        error = dummy_packet_stream_run(dev, s);
        if (error) {
            VLOG_DBG("%s: closing connection (%s)",
                     stream_get_name(s->stream),
                     ovs_retval_to_string(error));
            dummy_packet_stream_close(s);
            /* Swap-remove; the moved element is revisited only on the next
             * call, which is fine for a dummy device. */
            pconn->streams[i] = pconn->streams[--pconn->n_streams];
        }
    }
}
/* Runs the active side of 'dev's connection: drives the reconnect FSM
 * (opening or re-opening the stream as it demands) and, while connected,
 * services the stream, reporting failures back to the FSM. */
static void
dummy_rconn_run(struct netdev_dummy *dev)
    OVS_REQUIRES(dev->mutex)
{
    struct dummy_packet_rconn *rconn = &dev->conn.u.rconn;

    switch (reconnect_run(rconn->reconnect, time_msec())) {
    case RECONNECT_CONNECT:
        {
            int error;

            if (rconn->rstream->stream) {
                /* Connection already in progress: poll it. */
                error = stream_connect(rconn->rstream->stream);
            } else {
                error = stream_open(reconnect_get_name(rconn->reconnect),
                                    &rconn->rstream->stream, DSCP_DEFAULT);
            }

            switch (error) {
            case 0:
                reconnect_connected(rconn->reconnect, time_msec());
                break;

            case EAGAIN:
                reconnect_connecting(rconn->reconnect, time_msec());
                break;

            default:
                reconnect_connect_failed(rconn->reconnect, time_msec(),
                                         error);
                stream_close(rconn->rstream->stream);
                rconn->rstream->stream = NULL;
                break;
            }
        }
        break;

    case RECONNECT_DISCONNECT:
    case RECONNECT_PROBE:
    default:
        break;
    }

    if (reconnect_is_connected(rconn->reconnect)) {
        int err;

        err = dummy_packet_stream_run(dev, rconn->rstream);
        if (err) {
            reconnect_disconnected(rconn->reconnect, time_msec(), err);
            stream_close(rconn->rstream->stream);
            rconn->rstream->stream = NULL;
        }
    }
}
/* Dispatches one run iteration to the configured connection kind. */
static void
dummy_packet_conn_run(struct netdev_dummy *dev)
    OVS_REQUIRES(dev->mutex)
{
    switch (dev->conn.type) {
    case PASSIVE:
        dummy_pconn_run(dev);
        break;

    case ACTIVE:
        dummy_rconn_run(dev);
        break;

    case NONE:
    default:
        break;
    }
}
/* Registers poll-loop wakeups for 'conn' matching dummy_packet_conn_run(). */
static void
dummy_packet_conn_wait(struct dummy_packet_conn *conn)
{
    int i;

    switch (conn->type) {
    case PASSIVE:
        pstream_wait(conn->u.pconn.pstream);
        for (i = 0; i < conn->u.pconn.n_streams; i++) {
            struct dummy_packet_stream *s = &conn->u.pconn.streams[i];
            dummy_packet_stream_wait(s);
        }
        break;

    case ACTIVE:
        if (reconnect_is_connected(conn->u.rconn.reconnect)) {
            dummy_packet_stream_wait(conn->u.rconn.rstream);
        }
        break;

    case NONE:
    default:
        break;
    }
}
/* Queues 'buffer'/'size' for transmission on every live stream of 'conn'.
 * (The pstream_wait() inside the PASSIVE loop mirrors the original code;
 * NOTE(review): it looks redundant per-iteration — confirm upstream.) */
static void
dummy_packet_conn_send(struct dummy_packet_conn *conn,
                       const void *buffer, size_t size)
{
    int i;

    switch (conn->type) {
    case PASSIVE:
        for (i = 0; i < conn->u.pconn.n_streams; i++) {
            struct dummy_packet_stream *s = &conn->u.pconn.streams[i];

            dummy_packet_stream_send(s, buffer, size);
            pstream_wait(conn->u.pconn.pstream);
        }
        break;

    case ACTIVE:
        if (reconnect_is_connected(conn->u.rconn.reconnect)) {
            dummy_packet_stream_send(conn->u.rconn.rstream, buffer, size);
            dummy_packet_stream_wait(conn->u.rconn.rstream);
        }
        break;

    case NONE:
    default:
        break;
    }
}
/* Returns the reportable connection state of 'conn': connected or not for
 * an ACTIVE connection, UNKNOWN otherwise. */
static enum dummy_netdev_conn_state
dummy_netdev_get_conn_state(struct dummy_packet_conn *conn)
{
    enum dummy_netdev_conn_state state;

    if (conn->type == ACTIVE) {
        if (reconnect_is_connected(conn->u.rconn.reconnect)) {
            state = CONN_STATE_CONNECTED;
        } else {
            state = CONN_STATE_NOT_CONNECTED;
        }
    } else {
        state = CONN_STATE_UNKNOWN;
    }

    return state;
}
/* Global run hook: services the packet connection of every dummy device. */
void
netdev_dummy_run(void)
{
    struct netdev_dummy *dev;

    ovs_mutex_lock(&dummy_list_mutex);
    LIST_FOR_EACH (dev, list_node, &dummy_list) {
        ovs_mutex_lock(&dev->mutex);
        dummy_packet_conn_run(dev);
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dummy_list_mutex);
}
/* Global wait hook matching netdev_dummy_run(). */
void
netdev_dummy_wait(void)
{
    struct netdev_dummy *dev;

    ovs_mutex_lock(&dummy_list_mutex);
    LIST_FOR_EACH (dev, list_node, &dummy_list) {
        ovs_mutex_lock(&dev->mutex);
        dummy_packet_conn_wait(&dev->conn);
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dummy_list_mutex);
}
/* netdev_class callback: allocates a zeroed dummy device and returns its
 * embedded base netdev. */
static struct netdev *
netdev_dummy_alloc(void)
{
    struct netdev_dummy *netdev = xzalloc(sizeof *netdev);
    return &netdev->up;
}
/* netdev_class callback: initializes a freshly allocated dummy device and
 * adds it to 'dummy_list'.  Each device gets a unique aa:55:xx:xx:xx:xx
 * Ethernet address derived from a global counter. */
static int
netdev_dummy_construct(struct netdev *netdev_)
{
    static atomic_count next_n = ATOMIC_COUNT_INIT(0xaa550000);
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
    unsigned int n;

    n = atomic_count_inc(&next_n);

    ovs_mutex_init(&netdev->mutex);
    ovs_mutex_lock(&netdev->mutex);
    netdev->hwaddr[0] = 0xaa;
    netdev->hwaddr[1] = 0x55;
    netdev->hwaddr[2] = n >> 24;
    netdev->hwaddr[3] = n >> 16;
    netdev->hwaddr[4] = n >> 8;
    netdev->hwaddr[5] = n;
    /* NOTE(review): mtu/flags initialization lines were lost in extraction;
     * upstream sets mtu = 1500 and flags = 0 here — confirm. */
    netdev->mtu = 1500;
    netdev->flags = 0;
    netdev->ifindex = -EOPNOTSUPP;  /* "no ifindex" sentinel. */

    dummy_packet_conn_init(&netdev->conn);

    list_init(&netdev->rxes);
    ovs_mutex_unlock(&netdev->mutex);

    ovs_mutex_lock(&dummy_list_mutex);
    list_push_back(&dummy_list, &netdev->list_node);
    ovs_mutex_unlock(&dummy_list_mutex);

    return 0;
}
/* netdev_class callback: removes the device from 'dummy_list' and tears
 * down its connection and mutex. */
static void
netdev_dummy_destruct(struct netdev *netdev_)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    ovs_mutex_lock(&dummy_list_mutex);
    list_remove(&netdev->list_node);
    ovs_mutex_unlock(&dummy_list_mutex);

    ovs_mutex_lock(&netdev->mutex);
    dummy_packet_conn_close(&netdev->conn);
    netdev->conn.type = NONE;
    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_destroy(&netdev->mutex);
}
/* netdev_class callback: frees the storage allocated by
 * netdev_dummy_alloc(). */
static void
netdev_dummy_dealloc(struct netdev *netdev_)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    free(netdev);
}
/* netdev_class callback: reports the device's configuration ("ifindex"
 * when set, plus connection config) into 'args'. */
static int
netdev_dummy_get_config(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    ovs_mutex_lock(&netdev->mutex);

    if (netdev->ifindex >= 0) {
        smap_add_format(args, "ifindex", "%d", netdev->ifindex);
    }

    dummy_packet_conn_get_config(&netdev->conn, args);

    ovs_mutex_unlock(&netdev->mutex);
    return 0;
}
/* netdev_class callback: reports the assigned IPv4 address and netmask.
 * Returns EADDRNOTAVAIL when no address has been assigned (s_addr == 0). */
static int
netdev_dummy_get_in4(const struct netdev *netdev_,
                     struct in_addr *address, struct in_addr *netmask)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    ovs_mutex_lock(&netdev->mutex);
    *address = netdev->address;
    *netmask = netdev->netmask;
    ovs_mutex_unlock(&netdev->mutex);

    return address->s_addr ? 0 : EADDRNOTAVAIL;
}
/* Assigns an IPv4 address and netmask to the dummy device (used by the
 * "netdev-dummy/ip4addr" unixctl command and ARP replying in send()). */
static int
netdev_dummy_set_in4(struct netdev *netdev_, struct in_addr address,
                     struct in_addr netmask)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    ovs_mutex_lock(&netdev->mutex);
    netdev->address = address;
    netdev->netmask = netmask;
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}
/* netdev_class callback: applies 'args' — "ifindex", stream/pstream
 * connection config, and pcap capture files ("pcap" for a shared rx+tx
 * file, or separate "rxq_pcap"/"tx_pcap").  Previously open pcap files
 * are closed first. */
static int
netdev_dummy_set_config(struct netdev *netdev_, const struct smap *args)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
    const char *pcap;

    ovs_mutex_lock(&netdev->mutex);
    netdev->ifindex = smap_get_int(args, "ifindex", -EOPNOTSUPP);

    dummy_packet_conn_set_config(&netdev->conn, args);

    if (netdev->rxq_pcap) {
        fclose(netdev->rxq_pcap);
    }
    /* Avoid double-fclose when both point at the same shared file. */
    if (netdev->tx_pcap && netdev->tx_pcap != netdev->rxq_pcap) {
        fclose(netdev->tx_pcap);
    }
    netdev->rxq_pcap = netdev->tx_pcap = NULL;
    pcap = smap_get(args, "pcap");
    if (pcap) {
        netdev->rxq_pcap = netdev->tx_pcap = ovs_pcap_open(pcap, "ab");
    } else {
        const char *rxq_pcap = smap_get(args, "rxq_pcap");
        const char *tx_pcap = smap_get(args, "tx_pcap");

        if (rxq_pcap) {
            netdev->rxq_pcap = ovs_pcap_open(rxq_pcap, "ab");
        }
        if (tx_pcap) {
            netdev->tx_pcap = ovs_pcap_open(tx_pcap, "ab");
        }
    }

    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}
/* netdev_class callback: allocates a zeroed receive queue. */
static struct netdev_rxq *
netdev_dummy_rxq_alloc(void)
{
    struct netdev_rxq_dummy *rx = xzalloc(sizeof *rx);
    return &rx->up;
}
/* netdev_class callback: initializes 'rxq_' and links it into the owning
 * device's 'rxes' list. */
static int
netdev_dummy_rxq_construct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    list_push_back(&netdev->rxes, &rx->node);
    list_init(&rx->recv_queue);
    rx->recv_queue_len = 0;
    rx->seq = seq_create();
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}
/* netdev_class callback: unlinks 'rxq_' from the device and frees all
 * queued packets and the seq object. */
static void
netdev_dummy_rxq_destruct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    list_remove(&rx->node);
    pkt_list_delete(&rx->recv_queue);
    ovs_mutex_unlock(&netdev->mutex);
    seq_destroy(rx->seq);
}
/* netdev_class callback: frees the storage allocated by
 * netdev_dummy_rxq_alloc(). */
static void
netdev_dummy_rxq_dealloc(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);

    free(rx);
}
/* netdev_class callback: pops one packet from the queue into 'arr'.
 * Returns EAGAIN when the queue is empty; otherwise pads the packet to
 * minimum Ethernet size, zeroes its RSS hash, and bumps rx stats.
 * NOTE(review): the count out-parameter's exact name/type was lost in
 * extraction; reconstructed as 'int *c' per upstream — confirm. */
static int
netdev_dummy_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **arr,
                      int *c)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
    struct dp_packet *packet;

    ovs_mutex_lock(&netdev->mutex);
    if (!list_is_empty(&rx->recv_queue)) {
        struct pkt_list_node *pkt_node;

        ASSIGN_CONTAINER(pkt_node, list_pop_front(&rx->recv_queue),
                         list_node);
        packet = pkt_node->pkt;
        free(pkt_node);
        rx->recv_queue_len--;
    } else {
        packet = NULL;
    }
    ovs_mutex_unlock(&netdev->mutex);
    if (!packet) {
        return EAGAIN;
    }
    ovs_mutex_lock(&netdev->mutex);
    netdev->stats.rx_packets++;
    netdev->stats.rx_bytes += dp_packet_size(packet);
    ovs_mutex_unlock(&netdev->mutex);

    dp_packet_pad(packet);
    dp_packet_set_rss_hash(packet, 0);

    arr[0] = packet;
    *c = 1;
    return 0;
}
/* netdev_class callback: wakes immediately if packets are already queued,
 * otherwise waits on the queue's seq for new arrivals. */
static void
netdev_dummy_rxq_wait(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
    uint64_t seq = seq_read(rx->seq);

    ovs_mutex_lock(&netdev->mutex);
    if (!list_is_empty(&rx->recv_queue)) {
        poll_immediate_wake();
    } else {
        seq_wait(rx->seq, seq);
    }
    ovs_mutex_unlock(&netdev->mutex);
}
/* netdev_class callback: discards everything in the receive queue. */
static int
netdev_dummy_rxq_drain(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    pkt_list_delete(&rx->recv_queue);
    rx->recv_queue_len = 0;
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}
/* netdev_class callback: "transmits" 'cnt' packets.  Each packet is
 * validated against ETH_HEADER_LEN and the device MTU (plus VLAN header if
 * tagged), forwarded onto the configured connection, answered if it is an
 * ARP request for the device's own address, and optionally recorded to the
 * tx pcap file.  Packets are freed when 'may_steal' is true. */
static int
netdev_dummy_send(struct netdev *netdev, int qid OVS_UNUSED,
                  struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);
    int error = 0;
    int i;

    for (i = 0; i < cnt; i++) {
        const void *buffer = dp_packet_data(pkts[i]);
        size_t size = dp_packet_size(pkts[i]);

        if (size < ETH_HEADER_LEN) {
            error = EMSGSIZE;
            break;
        } else {
            const struct eth_header *eth = buffer;
            int max_size;

            ovs_mutex_lock(&dev->mutex);
            max_size = dev->mtu + ETH_HEADER_LEN;
            ovs_mutex_unlock(&dev->mutex);

            if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
                max_size += VLAN_HEADER_LEN;
            }
            if (size > max_size) {
                error = EMSGSIZE;
                break;
            }
        }

        ovs_mutex_lock(&dev->mutex);
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += size;

        dummy_packet_conn_send(&dev->conn, buffer, size);

        /* Reply to ARP requests for 'dev''s assigned IP address. */
        if (dev->address.s_addr) {
            struct dp_packet packet;
            struct flow flow;

            dp_packet_use_const(&packet, buffer, size);
            flow_extract(&packet, &flow);
            if (flow.dl_type == htons(ETH_TYPE_ARP)
                && flow.nw_proto == ARP_OP_REQUEST
                && flow.nw_dst == dev->address.s_addr) {
                struct dp_packet *reply = dp_packet_new(0);

                compose_arp(reply, ARP_OP_REPLY, dev->hwaddr, flow.dl_src,
                            false, flow.nw_dst, flow.nw_src);
                netdev_dummy_queue_packet(dev, reply);
            }
        }

        if (dev->tx_pcap) {
            struct dp_packet packet;

            dp_packet_use_const(&packet, buffer, size);
            ovs_pcap_write(dev->tx_pcap, &packet);
            fflush(dev->tx_pcap);
        }

        ovs_mutex_unlock(&dev->mutex);
    }

    if (may_steal) {
        for (i = 0; i < cnt; i++) {
            dp_packet_delete(pkts[i]);
        }
    }

    return error;
}
/* netdev_class callback: sets the Ethernet address, signalling a change
 * only when the address actually differs. */
static int
netdev_dummy_set_etheraddr(struct netdev *netdev,
                           const uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        memcpy(dev->hwaddr, mac, ETH_ADDR_LEN);
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
/* netdev_class callback: copies out the Ethernet address. */
static int
netdev_dummy_get_etheraddr(const struct netdev *netdev,
                           uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memcpy(mac, dev->hwaddr, ETH_ADDR_LEN);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
/* netdev_class callback: reports the device MTU. */
static int
netdev_dummy_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
/* netdev_class callback: sets the device MTU. */
static int
netdev_dummy_set_mtu(const struct netdev *netdev, int mtu)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    dev->mtu = mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
/* netdev_class callback: copies out the whole stats structure. */
static int
netdev_dummy_get_stats(const struct netdev *netdev,
                       struct netdev_stats *stats)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *stats = dev->stats;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
/* netdev_class callback: returns the configured ifindex
 * (-EOPNOTSUPP when none was configured). */
static int
netdev_dummy_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->ifindex;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}
/* Applies flag changes with 'netdev->mutex' already held.  Only NETDEV_UP
 * and NETDEV_PROMISC are supported; anything else yields EINVAL.  Reports
 * the previous flags through '*old_flagsp' and signals a change when the
 * flags actually changed. */
static int
netdev_dummy_update_flags__(struct netdev_dummy *netdev,
                            enum netdev_flags off, enum netdev_flags on,
                            enum netdev_flags *old_flagsp)
    OVS_REQUIRES(netdev->mutex)
{
    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = netdev->flags;
    netdev->flags |= on;
    netdev->flags &= ~off;
    if (*old_flagsp != netdev->flags) {
        netdev_change_seq_changed(&netdev->up);
    }

    return 0;
}
/* netdev_class callback: locking wrapper around
 * netdev_dummy_update_flags__(). */
static int
netdev_dummy_update_flags(struct netdev *netdev_,
                          enum netdev_flags off, enum netdev_flags on,
                          enum netdev_flags *old_flagsp)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
    int error;

    ovs_mutex_lock(&netdev->mutex);
    error = netdev_dummy_update_flags__(netdev, off, on, old_flagsp);
    ovs_mutex_unlock(&netdev->mutex);

    return error;
}
1085 /* Helper functions. */
/* The dummy netdev provider.  Positional initializer: field order must
 * match struct netdev_class exactly.  NOTE(review): several NULL entries
 * and the "dummy" type name were lost in extraction; reconstructed from
 * the visible entries and the struct's upstream layout — confirm the
 * omitted slots against netdev-provider.h. */
static const struct netdev_class dummy_class = {
    "dummy",
    NULL,                       /* init */
    netdev_dummy_run,
    netdev_dummy_wait,

    netdev_dummy_alloc,
    netdev_dummy_construct,
    netdev_dummy_destruct,
    netdev_dummy_dealloc,
    netdev_dummy_get_config,
    netdev_dummy_set_config,
    NULL,                       /* get_tunnel_config */
    NULL,                       /* build header */
    NULL,                       /* push header */
    NULL,                       /* pop header */
    NULL,                       /* get_numa_id */
    NULL,                       /* set_multiq */

    netdev_dummy_send,          /* send */
    NULL,                       /* send_wait */

    netdev_dummy_set_etheraddr,
    netdev_dummy_get_etheraddr,
    netdev_dummy_get_mtu,
    netdev_dummy_set_mtu,
    netdev_dummy_get_ifindex,
    NULL,                       /* get_carrier */
    NULL,                       /* get_carrier_resets */
    NULL,                       /* get_miimon */
    netdev_dummy_get_stats,

    NULL,                       /* get_features */
    NULL,                       /* set_advertisements */

    NULL,                       /* set_policing */
    NULL,                       /* get_qos_types */
    NULL,                       /* get_qos_capabilities */
    NULL,                       /* get_qos */
    NULL,                       /* set_qos */
    NULL,                       /* get_queue */
    NULL,                       /* set_queue */
    NULL,                       /* delete_queue */
    NULL,                       /* get_queue_stats */
    NULL,                       /* queue_dump_start */
    NULL,                       /* queue_dump_next */
    NULL,                       /* queue_dump_done */
    NULL,                       /* dump_queue_stats */

    netdev_dummy_get_in4,       /* get_in4 */
    NULL,                       /* set_in4 */
    NULL,                       /* get_in6 */
    NULL,                       /* add_router */
    NULL,                       /* get_next_hop */
    NULL,                       /* get_status */
    NULL,                       /* arp_lookup */

    netdev_dummy_update_flags,

    netdev_dummy_rxq_alloc,
    netdev_dummy_rxq_construct,
    netdev_dummy_rxq_destruct,
    netdev_dummy_rxq_dealloc,
    netdev_dummy_rxq_recv,
    netdev_dummy_rxq_wait,
    netdev_dummy_rxq_drain,
};
/* Frees every packet and node on list 'l', leaving 'l' empty. */
static void
pkt_list_delete(struct ovs_list *l)
{
    struct pkt_list_node *pkt;

    LIST_FOR_EACH_POP (pkt, list_node, l) {
        dp_packet_delete(pkt->pkt);
        free(pkt);
    }
}
/* Parses 's' either as a hex-encoded Ethernet frame or, failing that, as a
 * datapath flow key from which a packet is composed.  Returns a freshly
 * allocated packet (caller frees) or NULL on parse failure. */
static struct dp_packet *
eth_from_packet_or_flow(const char *s)
{
    enum odp_key_fitness fitness;
    struct dp_packet *packet;
    struct ofpbuf odp_key;
    struct flow flow;
    int error;

    if (!eth_from_hex(s, &packet)) {
        return packet;
    }

    /* Convert string to datapath key.
     *
     * It would actually be nicer to parse an OpenFlow-like flow key here, but
     * the code for that currently calls exit() on parse error.  We have to
     * settle for parsing a datapath key for now.
     */
    ofpbuf_init(&odp_key, 0);
    error = odp_flow_from_string(s, NULL, &odp_key, NULL);
    if (error) {
        ofpbuf_uninit(&odp_key);
        return NULL;
    }

    /* Convert odp_key to flow. */
    fitness = odp_flow_key_to_flow(odp_key.data, odp_key.size, &flow);
    if (fitness == ODP_FIT_ERROR) {
        ofpbuf_uninit(&odp_key);
        return NULL;
    }

    packet = dp_packet_new(0);
    flow_compose(packet, &flow);

    ofpbuf_uninit(&odp_key);
    return packet;
}
/* Appends 'packet' (ownership transferred) to 'rx's receive queue and
 * signals the queue's seq so waiters wake up. */
static void
netdev_dummy_queue_packet__(struct netdev_rxq_dummy *rx,
                            struct dp_packet *packet)
{
    struct pkt_list_node *pkt_node = xmalloc(sizeof *pkt_node);

    pkt_node->pkt = packet;
    list_push_back(&rx->recv_queue, &pkt_node->list_node);
    rx->recv_queue_len++;
    seq_change(rx->seq);
}
/* Delivers 'packet' (ownership transferred) to every non-full rx queue of
 * 'dummy', cloning for all but the last recipient; records it to the rx
 * pcap file if one is open.  Drops the packet if no queue can take it. */
static void
netdev_dummy_queue_packet(struct netdev_dummy *dummy, struct dp_packet *packet)
    OVS_REQUIRES(dummy->mutex)
{
    struct netdev_rxq_dummy *rx, *prev;

    if (dummy->rxq_pcap) {
        ovs_pcap_write(dummy->rxq_pcap, packet);
        fflush(dummy->rxq_pcap);
    }
    prev = NULL;
    LIST_FOR_EACH (rx, node, &dummy->rxes) {
        if (rx->recv_queue_len < NETDEV_DUMMY_MAX_QUEUE) {
            if (prev) {
                /* Earlier recipients get clones; the final one gets the
                 * original, avoiding one copy. */
                netdev_dummy_queue_packet__(prev, dp_packet_clone(packet));
            }
            prev = rx;
        }
    }
    if (prev) {
        netdev_dummy_queue_packet__(prev, packet);
    } else {
        dp_packet_delete(packet);
    }
}
/* "netdev-dummy/receive" unixctl handler: injects each argv[2..] packet
 * (hex frame or flow spec) into the named dummy device's receive path. */
static void
netdev_dummy_receive(struct unixctl_conn *conn,
                     int argc, const char *argv[], void *aux OVS_UNUSED)
{
    struct netdev_dummy *dummy_dev;
    struct netdev *netdev;
    int i;

    netdev = netdev_from_name(argv[1]);
    if (!netdev || !is_dummy_class(netdev->netdev_class)) {
        unixctl_command_reply_error(conn, "no such dummy netdev");
        goto exit;
    }
    dummy_dev = netdev_dummy_cast(netdev);

    for (i = 2; i < argc; i++) {
        struct dp_packet *packet;

        packet = eth_from_packet_or_flow(argv[i]);
        if (!packet) {
            unixctl_command_reply_error(conn, "bad packet syntax");
            goto exit;
        }

        ovs_mutex_lock(&dummy_dev->mutex);
        netdev_dummy_queue_packet(dummy_dev, packet);
        ovs_mutex_unlock(&dummy_dev->mutex);
    }

    unixctl_command_reply(conn, NULL);

exit:
    netdev_close(netdev);  /* netdev_close(NULL) is safe. */
}
/* Raises or clears NETDEV_UP on 'dev' according to 'admin_state'; the
 * caller holds 'dev->mutex'. */
static void
netdev_dummy_set_admin_state__(struct netdev_dummy *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dummy_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dummy_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}
/* "netdev-dummy/set-admin-state" unixctl handler: "up"/"down" is always the
 * last argument; with a device name it targets that device, otherwise every
 * dummy device. */
static void
netdev_dummy_set_admin_state(struct unixctl_conn *conn, int argc,
                             const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if ( !strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dummy_class(netdev->netdev_class)) {
            struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);

            ovs_mutex_lock(&dummy_dev->mutex);
            netdev_dummy_set_admin_state__(dummy_dev, up);
            ovs_mutex_unlock(&dummy_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Unknown Dummy Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dummy *netdev;

        ovs_mutex_lock(&dummy_list_mutex);
        LIST_FOR_EACH (netdev, list_node, &dummy_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dummy_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dummy_list_mutex);
    }
    unixctl_command_reply(conn, "OK");
}
/* Appends "name: <state>\n" to 's' for the conn-state command output. */
static void
display_conn_state__(struct ds *s, const char *name,
                     enum dummy_netdev_conn_state state)
{
    ds_put_format(s, "%s: ", name);

    switch (state) {
    case CONN_STATE_CONNECTED:
        ds_put_cstr(s, "connected\n");
        break;

    case CONN_STATE_NOT_CONNECTED:
        ds_put_cstr(s, "disconnected\n");
        break;

    case CONN_STATE_UNKNOWN:
    default:
        ds_put_cstr(s, "unknown\n");
        break;
    }
}
/* "netdev-dummy/conn-state" unixctl handler: reports connection state for
 * the named device (always shown, even if unknown) or for every dummy
 * device with a known state. */
static void
netdev_dummy_conn_state(struct unixctl_conn *conn, int argc,
                        const char *argv[], void *aux OVS_UNUSED)
{
    enum dummy_netdev_conn_state state = CONN_STATE_UNKNOWN;
    struct ds s;

    ds_init(&s);

    if (argc > 1) {
        const char *dev_name = argv[1];
        struct netdev *netdev = netdev_from_name(dev_name);

        if (netdev && is_dummy_class(netdev->netdev_class)) {
            struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);

            ovs_mutex_lock(&dummy_dev->mutex);
            state = dummy_netdev_get_conn_state(&dummy_dev->conn);
            ovs_mutex_unlock(&dummy_dev->mutex);

            netdev_close(netdev);
        }
        display_conn_state__(&s, dev_name, state);
    } else {
        struct netdev_dummy *netdev;

        ovs_mutex_lock(&dummy_list_mutex);
        LIST_FOR_EACH (netdev, list_node, &dummy_list) {
            ovs_mutex_lock(&netdev->mutex);
            state = dummy_netdev_get_conn_state(&netdev->conn);
            ovs_mutex_unlock(&netdev->mutex);
            if (state != CONN_STATE_UNKNOWN) {
                display_conn_state__(&s, netdev->up.name, state);
            }
        }
        ovs_mutex_unlock(&dummy_list_mutex);
    }

    unixctl_command_reply(conn, ds_cstr(&s));
    ds_destroy(&s);
}
/* "netdev-dummy/ip4addr" unixctl handler: parses "addr/prefix-len" from
 * argv[2] and assigns it to the named dummy device. */
static void
netdev_dummy_ip4addr(struct unixctl_conn *conn, int argc OVS_UNUSED,
                     const char *argv[], void *aux OVS_UNUSED)
{
    struct netdev *netdev = netdev_from_name(argv[1]);

    if (netdev && is_dummy_class(netdev->netdev_class)) {
        struct in_addr ip;
        uint16_t plen;

        if (ovs_scan(argv[2], IP_SCAN_FMT"/%"SCNi16,
                     IP_SCAN_ARGS(&ip.s_addr), &plen)) {
            struct in_addr mask;

            mask.s_addr = be32_prefix_mask(plen);
            netdev_dummy_set_in4(netdev, ip, mask);
            unixctl_command_reply(conn, "OK");
        } else {
            unixctl_command_reply(conn, "Invalid parameters");
        }

        netdev_close(netdev);
    } else {
        unixctl_command_reply_error(conn, "Unknown Dummy Interface");
        netdev_close(netdev);
    }
}
/* Replaces the existing netdev provider named 'type' with a clone of the
 * dummy class carrying that type name.  No-op if 'type' cannot be
 * unregistered (e.g. devices of that type still exist). */
static void
netdev_dummy_override(const char *type)
{
    if (!netdev_unregister_provider(type)) {
        struct netdev_class *class;
        int error;

        class = xmemdup(&dummy_class, sizeof dummy_class);
        class->type = xstrdup(type);
        error = netdev_register_provider(class);
        if (error) {
            VLOG_ERR("%s: failed to register netdev provider (%s)",
                     type, ovs_strerror(error));
            free(CONST_CAST(char *, class->type));
            free(class);
        }
    }
}
/* Registers the dummy provider and its unixctl commands.  Depending on
 * 'level', also overrides every existing provider except "patch"
 * (DUMMY_OVERRIDE_ALL) or just "system" (DUMMY_OVERRIDE_SYSTEM). */
void
netdev_dummy_register(enum dummy_level level)
{
    unixctl_command_register("netdev-dummy/receive", "name packet|flow...",
                             2, INT_MAX, netdev_dummy_receive, NULL);
    unixctl_command_register("netdev-dummy/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dummy_set_admin_state, NULL);
    unixctl_command_register("netdev-dummy/conn-state",
                             "[netdev]", 0, 1,
                             netdev_dummy_conn_state, NULL);
    unixctl_command_register("netdev-dummy/ip4addr",
                             "[netdev] ipaddr/mask-prefix-len", 2, 2,
                             netdev_dummy_ip4addr, NULL);

    if (level == DUMMY_OVERRIDE_ALL) {
        struct sset types;
        const char *type;

        sset_init(&types);
        netdev_enumerate_types(&types);
        SSET_FOR_EACH (type, &types) {
            if (strcmp(type, "patch")) {
                netdev_dummy_override(type);
            }
        }
        sset_destroy(&types);
    } else if (level == DUMMY_OVERRIDE_SYSTEM) {
        netdev_dummy_override("system");
    }
    netdev_register_provider(&dummy_class);

    netdev_vport_tunnel_register();
}