2 * Copyright (c) 2010, 2011, 2012, 2013, 2015, 2016, 2017 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
24 #include "dp-packet.h"
25 #include "dpif-netdev.h"
27 #include "netdev-offload-provider.h"
28 #include "netdev-provider.h"
29 #include "netdev-vport.h"
31 #include "openvswitch/dynamic-string.h"
32 #include "openvswitch/list.h"
33 #include "openvswitch/match.h"
34 #include "openvswitch/ofp-print.h"
35 #include "openvswitch/ofpbuf.h"
36 #include "openvswitch/vlog.h"
37 #include "ovs-atomic.h"
39 #include "pcap-file.h"
40 #include "openvswitch/poll-loop.h"
41 #include "openvswitch/shash.h"
44 #include "unaligned.h"
47 #include "reconnect.h"
49 VLOG_DEFINE_THIS_MODULE(netdev_dummy
);
51 #define C_STATS_SIZE 2
55 struct dummy_packet_stream
{
56 struct stream
*stream
;
58 struct dp_packet rxbuf
;
61 enum dummy_packet_conn_type
{
62 NONE
, /* No connection is configured. */
63 PASSIVE
, /* Listener. */
64 ACTIVE
/* Connect to listener. */
67 enum dummy_netdev_conn_state
{
68 CONN_STATE_CONNECTED
, /* Listener connected. */
69 CONN_STATE_NOT_CONNECTED
, /* Listener not connected. */
70 CONN_STATE_UNKNOWN
, /* No relavent information. */
73 struct dummy_packet_pconn
{
74 struct pstream
*pstream
;
75 struct dummy_packet_stream
**streams
;
79 struct dummy_packet_rconn
{
80 struct dummy_packet_stream
*rstream
;
81 struct reconnect
*reconnect
;
84 struct dummy_packet_conn
{
85 enum dummy_packet_conn_type type
;
87 struct dummy_packet_pconn pconn
;
88 struct dummy_packet_rconn rconn
;
92 struct pkt_list_node
{
93 struct dp_packet
*pkt
;
94 struct ovs_list list_node
;
97 struct offloaded_flow
{
98 struct hmap_node node
;
104 /* Protects 'dummy_list'. */
105 static struct ovs_mutex dummy_list_mutex
= OVS_MUTEX_INITIALIZER
;
107 /* Contains all 'struct dummy_dev's. */
108 static struct ovs_list dummy_list
OVS_GUARDED_BY(dummy_list_mutex
)
109 = OVS_LIST_INITIALIZER(&dummy_list
);
111 struct netdev_dummy
{
115 struct ovs_list list_node
OVS_GUARDED_BY(dummy_list_mutex
);
117 /* Protects all members below. */
118 struct ovs_mutex mutex
OVS_ACQ_AFTER(dummy_list_mutex
);
120 struct eth_addr hwaddr OVS_GUARDED
;
122 struct netdev_stats stats OVS_GUARDED
;
123 struct netdev_custom_counter custom_stats
[C_STATS_SIZE
] OVS_GUARDED
;
124 enum netdev_flags flags OVS_GUARDED
;
125 int ifindex OVS_GUARDED
;
126 int numa_id OVS_GUARDED
;
128 struct dummy_packet_conn conn OVS_GUARDED
;
130 struct pcap_file
*tx_pcap
, *rxq_pcap OVS_GUARDED
;
132 struct in_addr address
, netmask
;
133 struct in6_addr ipv6
, ipv6_mask
;
134 struct ovs_list rxes OVS_GUARDED
; /* List of child "netdev_rxq_dummy"s. */
136 struct hmap offloaded_flows OVS_GUARDED
;
138 /* The following properties are for dummy-pmd and they cannot be changed
139 * when a device is running, so we remember the request and update them
140 * next time netdev_dummy_reconfigure() is called. */
141 int requested_n_txq OVS_GUARDED
;
142 int requested_n_rxq OVS_GUARDED
;
143 int requested_numa_id OVS_GUARDED
;
146 /* Max 'recv_queue_len' in struct netdev_dummy. */
147 #define NETDEV_DUMMY_MAX_QUEUE 100
149 struct netdev_rxq_dummy
{
150 struct netdev_rxq up
;
151 struct ovs_list node
; /* In netdev_dummy's "rxes" list. */
152 struct ovs_list recv_queue
;
153 int recv_queue_len
; /* ovs_list_size(&recv_queue). */
154 struct seq
*seq
; /* Reports newly queued packets. */
157 static unixctl_cb_func netdev_dummy_set_admin_state
;
158 static int netdev_dummy_construct(struct netdev
*);
159 static void netdev_dummy_queue_packet(struct netdev_dummy
*,
160 struct dp_packet
*, struct flow
*, int);
162 static void dummy_packet_stream_close(struct dummy_packet_stream
*);
164 static void pkt_list_delete(struct ovs_list
*);
167 is_dummy_class(const struct netdev_class
*class)
169 return class->construct
== netdev_dummy_construct
;
172 static struct netdev_dummy
*
173 netdev_dummy_cast(const struct netdev
*netdev
)
175 ovs_assert(is_dummy_class(netdev_get_class(netdev
)));
176 return CONTAINER_OF(netdev
, struct netdev_dummy
, up
);
179 static struct netdev_rxq_dummy
*
180 netdev_rxq_dummy_cast(const struct netdev_rxq
*rx
)
182 ovs_assert(is_dummy_class(netdev_get_class(rx
->netdev
)));
183 return CONTAINER_OF(rx
, struct netdev_rxq_dummy
, up
);
187 dummy_packet_stream_init(struct dummy_packet_stream
*s
, struct stream
*stream
)
189 int rxbuf_size
= stream
? 2048 : 0;
191 dp_packet_init(&s
->rxbuf
, rxbuf_size
);
192 ovs_list_init(&s
->txq
);
195 static struct dummy_packet_stream
*
196 dummy_packet_stream_create(struct stream
*stream
)
198 struct dummy_packet_stream
*s
;
200 s
= xzalloc(sizeof *s
);
201 dummy_packet_stream_init(s
, stream
);
207 dummy_packet_stream_wait(struct dummy_packet_stream
*s
)
209 stream_run_wait(s
->stream
);
210 if (!ovs_list_is_empty(&s
->txq
)) {
211 stream_send_wait(s
->stream
);
213 stream_recv_wait(s
->stream
);
217 dummy_packet_stream_send(struct dummy_packet_stream
*s
, const void *buffer
, size_t size
)
219 if (ovs_list_size(&s
->txq
) < NETDEV_DUMMY_MAX_QUEUE
) {
221 struct pkt_list_node
*node
;
223 b
= dp_packet_clone_data_with_headroom(buffer
, size
, 2);
224 put_unaligned_be16(dp_packet_push_uninit(b
, 2), htons(size
));
226 node
= xmalloc(sizeof *node
);
228 ovs_list_push_back(&s
->txq
, &node
->list_node
);
233 dummy_packet_stream_run(struct netdev_dummy
*dev
, struct dummy_packet_stream
*s
)
238 stream_run(s
->stream
);
240 if (!ovs_list_is_empty(&s
->txq
)) {
241 struct pkt_list_node
*txbuf_node
;
242 struct dp_packet
*txbuf
;
245 ASSIGN_CONTAINER(txbuf_node
, ovs_list_front(&s
->txq
), list_node
);
246 txbuf
= txbuf_node
->pkt
;
247 retval
= stream_send(s
->stream
, dp_packet_data(txbuf
), dp_packet_size(txbuf
));
250 dp_packet_pull(txbuf
, retval
);
251 if (!dp_packet_size(txbuf
)) {
252 ovs_list_remove(&txbuf_node
->list_node
);
254 dp_packet_delete(txbuf
);
256 } else if (retval
!= -EAGAIN
) {
262 if (dp_packet_size(&s
->rxbuf
) < 2) {
263 n
= 2 - dp_packet_size(&s
->rxbuf
);
267 frame_len
= ntohs(get_unaligned_be16(dp_packet_data(&s
->rxbuf
)));
268 if (frame_len
< ETH_HEADER_LEN
) {
272 n
= (2 + frame_len
) - dp_packet_size(&s
->rxbuf
);
279 dp_packet_prealloc_tailroom(&s
->rxbuf
, n
);
280 retval
= stream_recv(s
->stream
, dp_packet_tail(&s
->rxbuf
), n
);
283 dp_packet_set_size(&s
->rxbuf
, dp_packet_size(&s
->rxbuf
) + retval
);
284 if (retval
== n
&& dp_packet_size(&s
->rxbuf
) > 2) {
285 dp_packet_pull(&s
->rxbuf
, 2);
286 netdev_dummy_queue_packet(dev
,
287 dp_packet_clone(&s
->rxbuf
), NULL
, 0);
288 dp_packet_clear(&s
->rxbuf
);
290 } else if (retval
!= -EAGAIN
) {
291 error
= (retval
< 0 ? -retval
292 : dp_packet_size(&s
->rxbuf
) ? EPROTO
301 dummy_packet_stream_close(struct dummy_packet_stream
*s
)
303 stream_close(s
->stream
);
304 dp_packet_uninit(&s
->rxbuf
);
305 pkt_list_delete(&s
->txq
);
309 dummy_packet_conn_init(struct dummy_packet_conn
*conn
)
311 memset(conn
, 0, sizeof *conn
);
316 dummy_packet_conn_get_config(struct dummy_packet_conn
*conn
, struct smap
*args
)
319 switch (conn
->type
) {
321 smap_add(args
, "pstream", pstream_get_name(conn
->pconn
.pstream
));
325 smap_add(args
, "stream", stream_get_name(conn
->rconn
.rstream
->stream
));
335 dummy_packet_conn_close(struct dummy_packet_conn
*conn
)
338 struct dummy_packet_pconn
*pconn
= &conn
->pconn
;
339 struct dummy_packet_rconn
*rconn
= &conn
->rconn
;
341 switch (conn
->type
) {
343 pstream_close(pconn
->pstream
);
344 for (i
= 0; i
< pconn
->n_streams
; i
++) {
345 dummy_packet_stream_close(pconn
->streams
[i
]);
346 free(pconn
->streams
[i
]);
348 free(pconn
->streams
);
349 pconn
->pstream
= NULL
;
350 pconn
->streams
= NULL
;
354 dummy_packet_stream_close(rconn
->rstream
);
355 free(rconn
->rstream
);
356 rconn
->rstream
= NULL
;
357 reconnect_destroy(rconn
->reconnect
);
358 rconn
->reconnect
= NULL
;
367 memset(conn
, 0, sizeof *conn
);
371 dummy_packet_conn_set_config(struct dummy_packet_conn
*conn
,
372 const struct smap
*args
)
374 const char *pstream
= smap_get(args
, "pstream");
375 const char *stream
= smap_get(args
, "stream");
377 if (pstream
&& stream
) {
378 VLOG_WARN("Open failed: both %s and %s are configured",
383 switch (conn
->type
) {
386 !strcmp(pstream_get_name(conn
->pconn
.pstream
), pstream
)) {
389 dummy_packet_conn_close(conn
);
393 !strcmp(stream_get_name(conn
->rconn
.rstream
->stream
), stream
)) {
396 dummy_packet_conn_close(conn
);
406 error
= pstream_open(pstream
, &conn
->pconn
.pstream
, DSCP_DEFAULT
);
408 VLOG_WARN("%s: open failed (%s)", pstream
, ovs_strerror(error
));
410 conn
->type
= PASSIVE
;
416 struct stream
*active_stream
;
417 struct reconnect
*reconnect
;
419 reconnect
= reconnect_create(time_msec());
420 reconnect_set_name(reconnect
, stream
);
421 reconnect_set_passive(reconnect
, false, time_msec());
422 reconnect_enable(reconnect
, time_msec());
423 reconnect_set_backoff(reconnect
, 100, INT_MAX
);
424 reconnect_set_probe_interval(reconnect
, 0);
425 conn
->rconn
.reconnect
= reconnect
;
428 error
= stream_open(stream
, &active_stream
, DSCP_DEFAULT
);
429 conn
->rconn
.rstream
= dummy_packet_stream_create(active_stream
);
433 reconnect_connected(reconnect
, time_msec());
437 reconnect_connecting(reconnect
, time_msec());
441 reconnect_connect_failed(reconnect
, time_msec(), error
);
442 stream_close(active_stream
);
443 conn
->rconn
.rstream
->stream
= NULL
;
450 dummy_pconn_run(struct netdev_dummy
*dev
)
451 OVS_REQUIRES(dev
->mutex
)
453 struct stream
*new_stream
;
454 struct dummy_packet_pconn
*pconn
= &dev
->conn
.pconn
;
458 error
= pstream_accept(pconn
->pstream
, &new_stream
);
460 struct dummy_packet_stream
*s
;
462 pconn
->streams
= xrealloc(pconn
->streams
,
463 ((pconn
->n_streams
+ 1)
465 s
= xmalloc(sizeof *s
);
466 pconn
->streams
[pconn
->n_streams
++] = s
;
467 dummy_packet_stream_init(s
, new_stream
);
468 } else if (error
!= EAGAIN
) {
469 VLOG_WARN("%s: accept failed (%s)",
470 pstream_get_name(pconn
->pstream
), ovs_strerror(error
));
471 pstream_close(pconn
->pstream
);
472 pconn
->pstream
= NULL
;
473 dev
->conn
.type
= NONE
;
476 for (i
= 0; i
< pconn
->n_streams
; ) {
477 struct dummy_packet_stream
*s
= pconn
->streams
[i
];
479 error
= dummy_packet_stream_run(dev
, s
);
481 VLOG_DBG("%s: closing connection (%s)",
482 stream_get_name(s
->stream
),
483 ovs_retval_to_string(error
));
484 dummy_packet_stream_close(s
);
486 pconn
->streams
[i
] = pconn
->streams
[--pconn
->n_streams
];
494 dummy_rconn_run(struct netdev_dummy
*dev
)
495 OVS_REQUIRES(dev
->mutex
)
497 struct dummy_packet_rconn
*rconn
= &dev
->conn
.rconn
;
499 switch (reconnect_run(rconn
->reconnect
, time_msec())) {
500 case RECONNECT_CONNECT
:
504 if (rconn
->rstream
->stream
) {
505 error
= stream_connect(rconn
->rstream
->stream
);
507 error
= stream_open(reconnect_get_name(rconn
->reconnect
),
508 &rconn
->rstream
->stream
, DSCP_DEFAULT
);
513 reconnect_connected(rconn
->reconnect
, time_msec());
517 reconnect_connecting(rconn
->reconnect
, time_msec());
521 reconnect_connect_failed(rconn
->reconnect
, time_msec(), error
);
522 stream_close(rconn
->rstream
->stream
);
523 rconn
->rstream
->stream
= NULL
;
529 case RECONNECT_DISCONNECT
:
530 case RECONNECT_PROBE
:
535 if (reconnect_is_connected(rconn
->reconnect
)) {
538 err
= dummy_packet_stream_run(dev
, rconn
->rstream
);
541 reconnect_disconnected(rconn
->reconnect
, time_msec(), err
);
542 stream_close(rconn
->rstream
->stream
);
543 rconn
->rstream
->stream
= NULL
;
549 dummy_packet_conn_run(struct netdev_dummy
*dev
)
550 OVS_REQUIRES(dev
->mutex
)
552 switch (dev
->conn
.type
) {
554 dummy_pconn_run(dev
);
558 dummy_rconn_run(dev
);
568 dummy_packet_conn_wait(struct dummy_packet_conn
*conn
)
571 switch (conn
->type
) {
573 pstream_wait(conn
->pconn
.pstream
);
574 for (i
= 0; i
< conn
->pconn
.n_streams
; i
++) {
575 struct dummy_packet_stream
*s
= conn
->pconn
.streams
[i
];
576 dummy_packet_stream_wait(s
);
580 if (reconnect_is_connected(conn
->rconn
.reconnect
)) {
581 dummy_packet_stream_wait(conn
->rconn
.rstream
);
592 dummy_packet_conn_send(struct dummy_packet_conn
*conn
,
593 const void *buffer
, size_t size
)
597 switch (conn
->type
) {
599 for (i
= 0; i
< conn
->pconn
.n_streams
; i
++) {
600 struct dummy_packet_stream
*s
= conn
->pconn
.streams
[i
];
602 dummy_packet_stream_send(s
, buffer
, size
);
603 pstream_wait(conn
->pconn
.pstream
);
608 if (reconnect_is_connected(conn
->rconn
.reconnect
)) {
609 dummy_packet_stream_send(conn
->rconn
.rstream
, buffer
, size
);
610 dummy_packet_stream_wait(conn
->rconn
.rstream
);
620 static enum dummy_netdev_conn_state
621 dummy_netdev_get_conn_state(struct dummy_packet_conn
*conn
)
623 enum dummy_netdev_conn_state state
;
625 if (conn
->type
== ACTIVE
) {
626 if (reconnect_is_connected(conn
->rconn
.reconnect
)) {
627 state
= CONN_STATE_CONNECTED
;
629 state
= CONN_STATE_NOT_CONNECTED
;
632 state
= CONN_STATE_UNKNOWN
;
639 netdev_dummy_run(const struct netdev_class
*netdev_class
)
641 struct netdev_dummy
*dev
;
643 ovs_mutex_lock(&dummy_list_mutex
);
644 LIST_FOR_EACH (dev
, list_node
, &dummy_list
) {
645 if (netdev_get_class(&dev
->up
) != netdev_class
) {
648 ovs_mutex_lock(&dev
->mutex
);
649 dummy_packet_conn_run(dev
);
650 ovs_mutex_unlock(&dev
->mutex
);
652 ovs_mutex_unlock(&dummy_list_mutex
);
656 netdev_dummy_wait(const struct netdev_class
*netdev_class
)
658 struct netdev_dummy
*dev
;
660 ovs_mutex_lock(&dummy_list_mutex
);
661 LIST_FOR_EACH (dev
, list_node
, &dummy_list
) {
662 if (netdev_get_class(&dev
->up
) != netdev_class
) {
665 ovs_mutex_lock(&dev
->mutex
);
666 dummy_packet_conn_wait(&dev
->conn
);
667 ovs_mutex_unlock(&dev
->mutex
);
669 ovs_mutex_unlock(&dummy_list_mutex
);
672 static struct netdev
*
673 netdev_dummy_alloc(void)
675 struct netdev_dummy
*netdev
= xzalloc(sizeof *netdev
);
680 netdev_dummy_construct(struct netdev
*netdev_
)
682 static atomic_count next_n
= ATOMIC_COUNT_INIT(0xaa550000);
683 struct netdev_dummy
*netdev
= netdev_dummy_cast(netdev_
);
686 n
= atomic_count_inc(&next_n
);
688 ovs_mutex_init(&netdev
->mutex
);
689 ovs_mutex_lock(&netdev
->mutex
);
690 netdev
->hwaddr
.ea
[0] = 0xaa;
691 netdev
->hwaddr
.ea
[1] = 0x55;
692 netdev
->hwaddr
.ea
[2] = n
>> 24;
693 netdev
->hwaddr
.ea
[3] = n
>> 16;
694 netdev
->hwaddr
.ea
[4] = n
>> 8;
695 netdev
->hwaddr
.ea
[5] = n
;
697 netdev
->flags
= NETDEV_UP
;
698 netdev
->ifindex
= -EOPNOTSUPP
;
699 netdev
->requested_n_rxq
= netdev_
->n_rxq
;
700 netdev
->requested_n_txq
= netdev_
->n_txq
;
703 memset(&netdev
->custom_stats
, 0, sizeof(netdev
->custom_stats
));
705 ovs_strlcpy(netdev
->custom_stats
[0].name
,
706 "rx_custom_packets_1", NETDEV_CUSTOM_STATS_NAME_SIZE
);
707 ovs_strlcpy(netdev
->custom_stats
[1].name
,
708 "rx_custom_packets_2", NETDEV_CUSTOM_STATS_NAME_SIZE
);
710 dummy_packet_conn_init(&netdev
->conn
);
712 ovs_list_init(&netdev
->rxes
);
713 hmap_init(&netdev
->offloaded_flows
);
714 ovs_mutex_unlock(&netdev
->mutex
);
716 ovs_mutex_lock(&dummy_list_mutex
);
717 ovs_list_push_back(&dummy_list
, &netdev
->list_node
);
718 ovs_mutex_unlock(&dummy_list_mutex
);
724 netdev_dummy_destruct(struct netdev
*netdev_
)
726 struct netdev_dummy
*netdev
= netdev_dummy_cast(netdev_
);
727 struct offloaded_flow
*off_flow
;
729 ovs_mutex_lock(&dummy_list_mutex
);
730 ovs_list_remove(&netdev
->list_node
);
731 ovs_mutex_unlock(&dummy_list_mutex
);
733 ovs_mutex_lock(&netdev
->mutex
);
734 if (netdev
->rxq_pcap
) {
735 ovs_pcap_close(netdev
->rxq_pcap
);
737 if (netdev
->tx_pcap
&& netdev
->tx_pcap
!= netdev
->rxq_pcap
) {
738 ovs_pcap_close(netdev
->tx_pcap
);
740 dummy_packet_conn_close(&netdev
->conn
);
741 netdev
->conn
.type
= NONE
;
743 HMAP_FOR_EACH_POP (off_flow
, node
, &netdev
->offloaded_flows
) {
746 hmap_destroy(&netdev
->offloaded_flows
);
748 ovs_mutex_unlock(&netdev
->mutex
);
749 ovs_mutex_destroy(&netdev
->mutex
);
/* Frees the storage allocated by netdev_dummy_alloc(). */
static void
netdev_dummy_dealloc(struct netdev *netdev_)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    free(netdev);
}
761 netdev_dummy_get_config(const struct netdev
*dev
, struct smap
*args
)
763 struct netdev_dummy
*netdev
= netdev_dummy_cast(dev
);
765 ovs_mutex_lock(&netdev
->mutex
);
767 if (netdev
->ifindex
>= 0) {
768 smap_add_format(args
, "ifindex", "%d", netdev
->ifindex
);
771 dummy_packet_conn_get_config(&netdev
->conn
, args
);
773 /* 'dummy-pmd' specific config. */
774 if (!netdev_is_pmd(dev
)) {
777 smap_add_format(args
, "requested_rx_queues", "%d", netdev
->requested_n_rxq
);
778 smap_add_format(args
, "configured_rx_queues", "%d", dev
->n_rxq
);
779 smap_add_format(args
, "requested_tx_queues", "%d", netdev
->requested_n_txq
);
780 smap_add_format(args
, "configured_tx_queues", "%d", dev
->n_txq
);
783 ovs_mutex_unlock(&netdev
->mutex
);
788 netdev_dummy_get_addr_list(const struct netdev
*netdev_
, struct in6_addr
**paddr
,
789 struct in6_addr
**pmask
, int *n_addr
)
791 struct netdev_dummy
*netdev
= netdev_dummy_cast(netdev_
);
792 int cnt
= 0, i
= 0, err
= 0;
793 struct in6_addr
*addr
, *mask
;
795 ovs_mutex_lock(&netdev
->mutex
);
796 if (netdev
->address
.s_addr
!= INADDR_ANY
) {
800 if (ipv6_addr_is_set(&netdev
->ipv6
)) {
807 addr
= xmalloc(sizeof *addr
* cnt
);
808 mask
= xmalloc(sizeof *mask
* cnt
);
809 if (netdev
->address
.s_addr
!= INADDR_ANY
) {
810 in6_addr_set_mapped_ipv4(&addr
[i
], netdev
->address
.s_addr
);
811 in6_addr_set_mapped_ipv4(&mask
[i
], netdev
->netmask
.s_addr
);
815 if (ipv6_addr_is_set(&netdev
->ipv6
)) {
816 memcpy(&addr
[i
], &netdev
->ipv6
, sizeof *addr
);
817 memcpy(&mask
[i
], &netdev
->ipv6_mask
, sizeof *mask
);
829 ovs_mutex_unlock(&netdev
->mutex
);
835 netdev_dummy_set_in4(struct netdev
*netdev_
, struct in_addr address
,
836 struct in_addr netmask
)
838 struct netdev_dummy
*netdev
= netdev_dummy_cast(netdev_
);
840 ovs_mutex_lock(&netdev
->mutex
);
841 netdev
->address
= address
;
842 netdev
->netmask
= netmask
;
843 netdev_change_seq_changed(netdev_
);
844 ovs_mutex_unlock(&netdev
->mutex
);
850 netdev_dummy_set_in6(struct netdev
*netdev_
, struct in6_addr
*in6
,
851 struct in6_addr
*mask
)
853 struct netdev_dummy
*netdev
= netdev_dummy_cast(netdev_
);
855 ovs_mutex_lock(&netdev
->mutex
);
857 netdev
->ipv6_mask
= *mask
;
858 netdev_change_seq_changed(netdev_
);
859 ovs_mutex_unlock(&netdev
->mutex
);
864 #define DUMMY_MAX_QUEUES_PER_PORT 1024
867 netdev_dummy_set_config(struct netdev
*netdev_
, const struct smap
*args
,
868 char **errp OVS_UNUSED
)
870 struct netdev_dummy
*netdev
= netdev_dummy_cast(netdev_
);
872 int new_n_rxq
, new_n_txq
, new_numa_id
;
874 ovs_mutex_lock(&netdev
->mutex
);
875 netdev
->ifindex
= smap_get_int(args
, "ifindex", -EOPNOTSUPP
);
877 dummy_packet_conn_set_config(&netdev
->conn
, args
);
879 if (netdev
->rxq_pcap
) {
880 ovs_pcap_close(netdev
->rxq_pcap
);
882 if (netdev
->tx_pcap
&& netdev
->tx_pcap
!= netdev
->rxq_pcap
) {
883 ovs_pcap_close(netdev
->tx_pcap
);
885 netdev
->rxq_pcap
= netdev
->tx_pcap
= NULL
;
886 pcap
= smap_get(args
, "pcap");
888 netdev
->rxq_pcap
= netdev
->tx_pcap
= ovs_pcap_open(pcap
, "ab");
890 const char *rxq_pcap
= smap_get(args
, "rxq_pcap");
891 const char *tx_pcap
= smap_get(args
, "tx_pcap");
894 netdev
->rxq_pcap
= ovs_pcap_open(rxq_pcap
, "ab");
897 netdev
->tx_pcap
= ovs_pcap_open(tx_pcap
, "ab");
901 netdev_change_seq_changed(netdev_
);
903 /* 'dummy-pmd' specific config. */
904 if (!netdev_
->netdev_class
->is_pmd
) {
908 new_n_rxq
= MAX(smap_get_int(args
, "n_rxq", NR_QUEUE
), 1);
909 new_n_txq
= MAX(smap_get_int(args
, "n_txq", NR_QUEUE
), 1);
911 if (new_n_rxq
> DUMMY_MAX_QUEUES_PER_PORT
||
912 new_n_txq
> DUMMY_MAX_QUEUES_PER_PORT
) {
913 VLOG_WARN("The one or both of interface %s queues"
914 "(rxq: %d, txq: %d) exceed %d. Sets it %d.\n",
915 netdev_get_name(netdev_
),
918 DUMMY_MAX_QUEUES_PER_PORT
,
919 DUMMY_MAX_QUEUES_PER_PORT
);
921 new_n_rxq
= MIN(DUMMY_MAX_QUEUES_PER_PORT
, new_n_rxq
);
922 new_n_txq
= MIN(DUMMY_MAX_QUEUES_PER_PORT
, new_n_txq
);
925 new_numa_id
= smap_get_int(args
, "numa_id", 0);
926 if (new_n_rxq
!= netdev
->requested_n_rxq
927 || new_n_txq
!= netdev
->requested_n_txq
928 || new_numa_id
!= netdev
->requested_numa_id
) {
929 netdev
->requested_n_rxq
= new_n_rxq
;
930 netdev
->requested_n_txq
= new_n_txq
;
931 netdev
->requested_numa_id
= new_numa_id
;
932 netdev_request_reconfigure(netdev_
);
936 ovs_mutex_unlock(&netdev
->mutex
);
941 netdev_dummy_get_numa_id(const struct netdev
*netdev_
)
943 struct netdev_dummy
*netdev
= netdev_dummy_cast(netdev_
);
945 ovs_mutex_lock(&netdev
->mutex
);
946 int numa_id
= netdev
->numa_id
;
947 ovs_mutex_unlock(&netdev
->mutex
);
952 /* Sets the number of tx queues and rx queues for the dummy PMD interface. */
954 netdev_dummy_reconfigure(struct netdev
*netdev_
)
956 struct netdev_dummy
*netdev
= netdev_dummy_cast(netdev_
);
958 ovs_mutex_lock(&netdev
->mutex
);
960 netdev_
->n_txq
= netdev
->requested_n_txq
;
961 netdev_
->n_rxq
= netdev
->requested_n_rxq
;
962 netdev
->numa_id
= netdev
->requested_numa_id
;
964 ovs_mutex_unlock(&netdev
->mutex
);
968 static struct netdev_rxq
*
969 netdev_dummy_rxq_alloc(void)
971 struct netdev_rxq_dummy
*rx
= xzalloc(sizeof *rx
);
976 netdev_dummy_rxq_construct(struct netdev_rxq
*rxq_
)
978 struct netdev_rxq_dummy
*rx
= netdev_rxq_dummy_cast(rxq_
);
979 struct netdev_dummy
*netdev
= netdev_dummy_cast(rx
->up
.netdev
);
981 ovs_mutex_lock(&netdev
->mutex
);
982 ovs_list_push_back(&netdev
->rxes
, &rx
->node
);
983 ovs_list_init(&rx
->recv_queue
);
984 rx
->recv_queue_len
= 0;
985 rx
->seq
= seq_create();
986 ovs_mutex_unlock(&netdev
->mutex
);
992 netdev_dummy_rxq_destruct(struct netdev_rxq
*rxq_
)
994 struct netdev_rxq_dummy
*rx
= netdev_rxq_dummy_cast(rxq_
);
995 struct netdev_dummy
*netdev
= netdev_dummy_cast(rx
->up
.netdev
);
997 ovs_mutex_lock(&netdev
->mutex
);
998 ovs_list_remove(&rx
->node
);
999 pkt_list_delete(&rx
->recv_queue
);
1000 ovs_mutex_unlock(&netdev
->mutex
);
1001 seq_destroy(rx
->seq
);
/* Frees the storage allocated by netdev_dummy_rxq_alloc(). */
static void
netdev_dummy_rxq_dealloc(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);

    free(rx);
}
1013 netdev_dummy_rxq_recv(struct netdev_rxq
*rxq_
, struct dp_packet_batch
*batch
,
1016 struct netdev_rxq_dummy
*rx
= netdev_rxq_dummy_cast(rxq_
);
1017 struct netdev_dummy
*netdev
= netdev_dummy_cast(rx
->up
.netdev
);
1018 struct dp_packet
*packet
;
1020 ovs_mutex_lock(&netdev
->mutex
);
1021 if (!ovs_list_is_empty(&rx
->recv_queue
)) {
1022 struct pkt_list_node
*pkt_node
;
1024 ASSIGN_CONTAINER(pkt_node
, ovs_list_pop_front(&rx
->recv_queue
), list_node
);
1025 packet
= pkt_node
->pkt
;
1027 rx
->recv_queue_len
--;
1031 ovs_mutex_unlock(&netdev
->mutex
);
1034 if (netdev_is_pmd(&netdev
->up
)) {
1035 /* If 'netdev' is a PMD device, this is called as part of the PMD
1036 * thread busy loop. We yield here (without quiescing) for two
1039 * - To reduce the CPU utilization during the testsuite
1040 * - To give valgrind a chance to switch thread. According
1041 * to the valgrind documentation, there's a big lock that
1042 * prevents multiple thread from being executed at the same
1043 * time. On my system, without this sleep, the pmd threads
1044 * testcases fail under valgrind, because ovs-vswitchd becomes
1050 ovs_mutex_lock(&netdev
->mutex
);
1051 netdev
->stats
.rx_packets
++;
1052 netdev
->stats
.rx_bytes
+= dp_packet_size(packet
);
1053 netdev
->custom_stats
[0].value
++;
1054 netdev
->custom_stats
[1].value
++;
1055 ovs_mutex_unlock(&netdev
->mutex
);
1057 dp_packet_batch_init_packet(batch
, packet
);
1067 netdev_dummy_rxq_wait(struct netdev_rxq
*rxq_
)
1069 struct netdev_rxq_dummy
*rx
= netdev_rxq_dummy_cast(rxq_
);
1070 struct netdev_dummy
*netdev
= netdev_dummy_cast(rx
->up
.netdev
);
1071 uint64_t seq
= seq_read(rx
->seq
);
1073 ovs_mutex_lock(&netdev
->mutex
);
1074 if (!ovs_list_is_empty(&rx
->recv_queue
)) {
1075 poll_immediate_wake();
1077 seq_wait(rx
->seq
, seq
);
1079 ovs_mutex_unlock(&netdev
->mutex
);
1083 netdev_dummy_rxq_drain(struct netdev_rxq
*rxq_
)
1085 struct netdev_rxq_dummy
*rx
= netdev_rxq_dummy_cast(rxq_
);
1086 struct netdev_dummy
*netdev
= netdev_dummy_cast(rx
->up
.netdev
);
1088 ovs_mutex_lock(&netdev
->mutex
);
1089 pkt_list_delete(&rx
->recv_queue
);
1090 rx
->recv_queue_len
= 0;
1091 ovs_mutex_unlock(&netdev
->mutex
);
1093 seq_change(rx
->seq
);
1099 netdev_dummy_send(struct netdev
*netdev
, int qid OVS_UNUSED
,
1100 struct dp_packet_batch
*batch
,
1101 bool concurrent_txq OVS_UNUSED
)
1103 struct netdev_dummy
*dev
= netdev_dummy_cast(netdev
);
1106 struct dp_packet
*packet
;
1107 DP_PACKET_BATCH_FOR_EACH(i
, packet
, batch
) {
1108 const void *buffer
= dp_packet_data(packet
);
1109 size_t size
= dp_packet_size(packet
);
1111 if (!dp_packet_is_eth(packet
)) {
1112 error
= EPFNOSUPPORT
;
1116 if (size
< ETH_HEADER_LEN
) {
1120 const struct eth_header
*eth
= buffer
;
1123 ovs_mutex_lock(&dev
->mutex
);
1124 max_size
= dev
->mtu
+ ETH_HEADER_LEN
;
1125 ovs_mutex_unlock(&dev
->mutex
);
1127 if (eth
->eth_type
== htons(ETH_TYPE_VLAN
)) {
1128 max_size
+= VLAN_HEADER_LEN
;
1130 if (size
> max_size
) {
1136 ovs_mutex_lock(&dev
->mutex
);
1137 dev
->stats
.tx_packets
++;
1138 dev
->stats
.tx_bytes
+= size
;
1140 dummy_packet_conn_send(&dev
->conn
, buffer
, size
);
1142 /* Reply to ARP requests for 'dev''s assigned IP address. */
1143 if (dev
->address
.s_addr
) {
1144 struct dp_packet dp
;
1147 dp_packet_use_const(&dp
, buffer
, size
);
1148 flow_extract(&dp
, &flow
);
1149 if (flow
.dl_type
== htons(ETH_TYPE_ARP
)
1150 && flow
.nw_proto
== ARP_OP_REQUEST
1151 && flow
.nw_dst
== dev
->address
.s_addr
) {
1152 struct dp_packet
*reply
= dp_packet_new(0);
1153 compose_arp(reply
, ARP_OP_REPLY
, dev
->hwaddr
, flow
.dl_src
,
1154 false, flow
.nw_dst
, flow
.nw_src
);
1155 netdev_dummy_queue_packet(dev
, reply
, NULL
, 0);
1160 struct dp_packet dp
;
1162 dp_packet_use_const(&dp
, buffer
, size
);
1163 ovs_pcap_write(dev
->tx_pcap
, &dp
);
1166 ovs_mutex_unlock(&dev
->mutex
);
1169 dp_packet_delete_batch(batch
, true);
1175 netdev_dummy_set_etheraddr(struct netdev
*netdev
, const struct eth_addr mac
)
1177 struct netdev_dummy
*dev
= netdev_dummy_cast(netdev
);
1179 ovs_mutex_lock(&dev
->mutex
);
1180 if (!eth_addr_equals(dev
->hwaddr
, mac
)) {
1182 netdev_change_seq_changed(netdev
);
1184 ovs_mutex_unlock(&dev
->mutex
);
1190 netdev_dummy_get_etheraddr(const struct netdev
*netdev
, struct eth_addr
*mac
)
1192 struct netdev_dummy
*dev
= netdev_dummy_cast(netdev
);
1194 ovs_mutex_lock(&dev
->mutex
);
1196 ovs_mutex_unlock(&dev
->mutex
);
1202 netdev_dummy_get_mtu(const struct netdev
*netdev
, int *mtup
)
1204 struct netdev_dummy
*dev
= netdev_dummy_cast(netdev
);
1206 ovs_mutex_lock(&dev
->mutex
);
1208 ovs_mutex_unlock(&dev
->mutex
);
1213 #define DUMMY_MIN_MTU 68
1214 #define DUMMY_MAX_MTU 65535
1217 netdev_dummy_set_mtu(struct netdev
*netdev
, int mtu
)
1219 if (mtu
< DUMMY_MIN_MTU
|| mtu
> DUMMY_MAX_MTU
) {
1223 struct netdev_dummy
*dev
= netdev_dummy_cast(netdev
);
1225 ovs_mutex_lock(&dev
->mutex
);
1226 if (dev
->mtu
!= mtu
) {
1228 netdev_change_seq_changed(netdev
);
1230 ovs_mutex_unlock(&dev
->mutex
);
1236 netdev_dummy_get_stats(const struct netdev
*netdev
, struct netdev_stats
*stats
)
1238 struct netdev_dummy
*dev
= netdev_dummy_cast(netdev
);
1240 ovs_mutex_lock(&dev
->mutex
);
1241 /* Passing only collected counters */
1242 stats
->tx_packets
= dev
->stats
.tx_packets
;
1243 stats
->tx_bytes
= dev
->stats
.tx_bytes
;
1244 stats
->rx_packets
= dev
->stats
.rx_packets
;
1245 stats
->rx_bytes
= dev
->stats
.rx_bytes
;
1246 ovs_mutex_unlock(&dev
->mutex
);
1252 netdev_dummy_get_custom_stats(const struct netdev
*netdev
,
1253 struct netdev_custom_stats
*custom_stats
)
1257 struct netdev_dummy
*dev
= netdev_dummy_cast(netdev
);
1259 custom_stats
->size
= 2;
1260 custom_stats
->counters
=
1261 (struct netdev_custom_counter
*) xcalloc(C_STATS_SIZE
,
1262 sizeof(struct netdev_custom_counter
));
1264 ovs_mutex_lock(&dev
->mutex
);
1265 for (i
= 0 ; i
< C_STATS_SIZE
; i
++) {
1266 custom_stats
->counters
[i
].value
= dev
->custom_stats
[i
].value
;
1267 ovs_strlcpy(custom_stats
->counters
[i
].name
,
1268 dev
->custom_stats
[i
].name
,
1269 NETDEV_CUSTOM_STATS_NAME_SIZE
);
1271 ovs_mutex_unlock(&dev
->mutex
);
1277 netdev_dummy_get_queue(const struct netdev
*netdev OVS_UNUSED
,
1278 unsigned int queue_id
, struct smap
*details OVS_UNUSED
)
1280 if (queue_id
== 0) {
1288 netdev_dummy_init_queue_stats(struct netdev_queue_stats
*stats
)
1290 *stats
= (struct netdev_queue_stats
) {
1291 .tx_bytes
= UINT64_MAX
,
1292 .tx_packets
= UINT64_MAX
,
1293 .tx_errors
= UINT64_MAX
,
1294 .created
= LLONG_MIN
,
1299 netdev_dummy_get_queue_stats(const struct netdev
*netdev OVS_UNUSED
,
1300 unsigned int queue_id
,
1301 struct netdev_queue_stats
*stats
)
1303 if (queue_id
== 0) {
1304 netdev_dummy_init_queue_stats(stats
);
/* Cursor for the queue dump: the only state needed is which queue comes
 * next (there is only queue 0). */
struct netdev_dummy_queue_state {
    unsigned int next_queue;
};
1316 netdev_dummy_queue_dump_start(const struct netdev
*netdev OVS_UNUSED
,
1319 struct netdev_dummy_queue_state
*state
= xmalloc(sizeof *state
);
1320 state
->next_queue
= 0;
1326 netdev_dummy_queue_dump_next(const struct netdev
*netdev OVS_UNUSED
,
1328 unsigned int *queue_id
,
1329 struct smap
*details OVS_UNUSED
)
1331 struct netdev_dummy_queue_state
*state
= state_
;
1332 if (state
->next_queue
== 0) {
1334 state
->next_queue
++;
1342 netdev_dummy_queue_dump_done(const struct netdev
*netdev OVS_UNUSED
,
1350 netdev_dummy_dump_queue_stats(const struct netdev
*netdev OVS_UNUSED
,
1351 void (*cb
)(unsigned int queue_id
,
1352 struct netdev_queue_stats
*,
1356 struct netdev_queue_stats stats
;
1357 netdev_dummy_init_queue_stats(&stats
);
1363 netdev_dummy_get_ifindex(const struct netdev
*netdev
)
1365 struct netdev_dummy
*dev
= netdev_dummy_cast(netdev
);
1368 ovs_mutex_lock(&dev
->mutex
);
1369 ifindex
= dev
->ifindex
;
1370 ovs_mutex_unlock(&dev
->mutex
);
1376 netdev_dummy_update_flags__(struct netdev_dummy
*netdev
,
1377 enum netdev_flags off
, enum netdev_flags on
,
1378 enum netdev_flags
*old_flagsp
)
1379 OVS_REQUIRES(netdev
->mutex
)
1381 if ((off
| on
) & ~(NETDEV_UP
| NETDEV_PROMISC
)) {
1385 *old_flagsp
= netdev
->flags
;
1386 netdev
->flags
|= on
;
1387 netdev
->flags
&= ~off
;
1388 if (*old_flagsp
!= netdev
->flags
) {
1389 netdev_change_seq_changed(&netdev
->up
);
1396 netdev_dummy_update_flags(struct netdev
*netdev_
,
1397 enum netdev_flags off
, enum netdev_flags on
,
1398 enum netdev_flags
*old_flagsp
)
1400 struct netdev_dummy
*netdev
= netdev_dummy_cast(netdev_
);
1403 ovs_mutex_lock(&netdev
->mutex
);
1404 error
= netdev_dummy_update_flags__(netdev
, off
, on
, old_flagsp
);
1405 ovs_mutex_unlock(&netdev
->mutex
);
1410 /* Flow offload API. */
1412 netdev_dummy_flow_hash(const ovs_u128
*ufid
)
1414 return ufid
->u32
[0];
1417 static struct offloaded_flow
*
1418 find_offloaded_flow(const struct hmap
*offloaded_flows
, const ovs_u128
*ufid
)
1420 uint32_t hash
= netdev_dummy_flow_hash(ufid
);
1421 struct offloaded_flow
*data
;
1423 HMAP_FOR_EACH_WITH_HASH (data
, node
, hash
, offloaded_flows
) {
1424 if (ovs_u128_equals(*ufid
, data
->ufid
)) {
1433 netdev_dummy_flow_put(struct netdev
*netdev
, struct match
*match
,
1434 struct nlattr
*actions OVS_UNUSED
,
1435 size_t actions_len OVS_UNUSED
,
1436 const ovs_u128
*ufid
, struct offload_info
*info
,
1437 struct dpif_flow_stats
*stats
)
1439 struct netdev_dummy
*dev
= netdev_dummy_cast(netdev
);
1440 struct offloaded_flow
*off_flow
;
1443 ovs_mutex_lock(&dev
->mutex
);
1445 off_flow
= find_offloaded_flow(&dev
->offloaded_flows
, ufid
);
1447 /* Create new offloaded flow. */
1448 off_flow
= xzalloc(sizeof *off_flow
);
1449 memcpy(&off_flow
->ufid
, ufid
, sizeof *ufid
);
1450 hmap_insert(&dev
->offloaded_flows
, &off_flow
->node
,
1451 netdev_dummy_flow_hash(ufid
));
1455 off_flow
->mark
= info
->flow_mark
;
1456 memcpy(&off_flow
->match
, match
, sizeof *match
);
1458 /* As we have per-netdev 'offloaded_flows', we don't need to match
1459 * the 'in_port' for received packets. This will also allow offloading for
1460 * packets passed to 'receive' command without specifying the 'in_port'. */
1461 off_flow
->match
.wc
.masks
.in_port
.odp_port
= 0;
1463 ovs_mutex_unlock(&dev
->mutex
);
1465 if (VLOG_IS_DBG_ENABLED()) {
1466 struct ds ds
= DS_EMPTY_INITIALIZER
;
1468 ds_put_format(&ds
, "%s: flow put[%s]: ", netdev_get_name(netdev
),
1469 modify
? "modify" : "create");
1470 odp_format_ufid(ufid
, &ds
);
1471 ds_put_cstr(&ds
, " flow match: ");
1472 match_format(match
, NULL
, &ds
, OFP_DEFAULT_PRIORITY
);
1473 ds_put_format(&ds
, ", mark: %"PRIu32
, info
->flow_mark
);
1475 VLOG_DBG("%s", ds_cstr(&ds
));
1480 memset(stats
, 0, sizeof *stats
);
1486 netdev_dummy_flow_del(struct netdev
*netdev
, const ovs_u128
*ufid
,
1487 struct dpif_flow_stats
*stats
)
1489 struct netdev_dummy
*dev
= netdev_dummy_cast(netdev
);
1490 struct offloaded_flow
*off_flow
;
1491 const char *error
= NULL
;
1494 ovs_mutex_lock(&dev
->mutex
);
1496 off_flow
= find_offloaded_flow(&dev
->offloaded_flows
, ufid
);
1498 error
= "No such flow.";
1502 mark
= off_flow
->mark
;
1503 hmap_remove(&dev
->offloaded_flows
, &off_flow
->node
);
1507 ovs_mutex_unlock(&dev
->mutex
);
1509 if (error
|| VLOG_IS_DBG_ENABLED()) {
1510 struct ds ds
= DS_EMPTY_INITIALIZER
;
1512 ds_put_format(&ds
, "%s: ", netdev_get_name(netdev
));
1514 ds_put_cstr(&ds
, "failed to ");
1516 ds_put_cstr(&ds
, "flow del: ");
1517 odp_format_ufid(ufid
, &ds
);
1519 ds_put_format(&ds
, " error: %s", error
);
1521 ds_put_format(&ds
, " mark: %"PRIu32
, mark
);
1523 VLOG(error
? VLL_WARN
: VLL_DBG
, "%s", ds_cstr(&ds
));
1528 memset(stats
, 0, sizeof *stats
);
1530 return error
? -1 : 0;
/* Callbacks shared by all three dummy netdev classes; each class then adds
 * its own '.type' (and, for dummy-pmd, PMD-specific members). */
#define NETDEV_DUMMY_CLASS_COMMON                       \
    .run = netdev_dummy_run,                            \
    .wait = netdev_dummy_wait,                          \
    .alloc = netdev_dummy_alloc,                        \
    .construct = netdev_dummy_construct,                \
    .destruct = netdev_dummy_destruct,                  \
    .dealloc = netdev_dummy_dealloc,                    \
    .get_config = netdev_dummy_get_config,              \
    .set_config = netdev_dummy_set_config,              \
    .get_numa_id = netdev_dummy_get_numa_id,            \
    .send = netdev_dummy_send,                          \
    .set_etheraddr = netdev_dummy_set_etheraddr,        \
    .get_etheraddr = netdev_dummy_get_etheraddr,        \
    .get_mtu = netdev_dummy_get_mtu,                    \
    .set_mtu = netdev_dummy_set_mtu,                    \
    .get_ifindex = netdev_dummy_get_ifindex,            \
    .get_stats = netdev_dummy_get_stats,                \
    .get_custom_stats = netdev_dummy_get_custom_stats,  \
    .get_queue = netdev_dummy_get_queue,                \
    .get_queue_stats = netdev_dummy_get_queue_stats,    \
    .queue_dump_start = netdev_dummy_queue_dump_start,  \
    .queue_dump_next = netdev_dummy_queue_dump_next,    \
    .queue_dump_done = netdev_dummy_queue_dump_done,    \
    .dump_queue_stats = netdev_dummy_dump_queue_stats,  \
    .get_addr_list = netdev_dummy_get_addr_list,        \
    .update_flags = netdev_dummy_update_flags,          \
    .rxq_alloc = netdev_dummy_rxq_alloc,                \
    .rxq_construct = netdev_dummy_rxq_construct,        \
    .rxq_destruct = netdev_dummy_rxq_destruct,          \
    .rxq_dealloc = netdev_dummy_rxq_dealloc,            \
    .rxq_recv = netdev_dummy_rxq_recv,                  \
    .rxq_wait = netdev_dummy_rxq_wait,                  \
    .rxq_drain = netdev_dummy_rxq_drain
1567 static const struct netdev_class dummy_class
= {
1568 NETDEV_DUMMY_CLASS_COMMON
,
1572 static const struct netdev_class dummy_internal_class
= {
1573 NETDEV_DUMMY_CLASS_COMMON
,
1574 .type
= "dummy-internal"
1577 static const struct netdev_class dummy_pmd_class
= {
1578 NETDEV_DUMMY_CLASS_COMMON
,
1579 .type
= "dummy-pmd",
1581 .reconfigure
= netdev_dummy_reconfigure
1585 netdev_dummy_offloads_init_flow_api(struct netdev
*netdev
)
1587 return is_dummy_class(netdev
->netdev_class
) ? 0 : EOPNOTSUPP
;
1590 static const struct netdev_flow_api netdev_offload_dummy
= {
1592 .flow_put
= netdev_dummy_flow_put
,
1593 .flow_del
= netdev_dummy_flow_del
,
1594 .init_flow_api
= netdev_dummy_offloads_init_flow_api
,
1598 /* Helper functions. */
1601 pkt_list_delete(struct ovs_list
*l
)
1603 struct pkt_list_node
*pkt
;
1605 LIST_FOR_EACH_POP(pkt
, list_node
, l
) {
1606 dp_packet_delete(pkt
->pkt
);
/* Parses 's' as a hex-dumped Ethernet frame.  Returns the new packet, or
 * NULL if 's' is not valid hex (eth_from_hex() reports failure through its
 * 'packet' out-parameter; callers here only check for NULL). */
static struct dp_packet *
eth_from_packet(const char *s)
{
    struct dp_packet *packet;
    eth_from_hex(s, &packet);
    return packet;
}
1619 static struct dp_packet
*
1620 eth_from_flow_str(const char *s
, size_t packet_size
,
1621 struct flow
*flow
, char **errorp
)
1625 enum odp_key_fitness fitness
;
1626 struct dp_packet
*packet
;
1627 struct ofpbuf odp_key
;
1630 /* Convert string to datapath key.
1632 * It would actually be nicer to parse an OpenFlow-like flow key here, but
1633 * the code for that currently calls exit() on parse error. We have to
1634 * settle for parsing a datapath key for now.
1636 ofpbuf_init(&odp_key
, 0);
1637 error
= odp_flow_from_string(s
, NULL
, &odp_key
, NULL
, errorp
);
1639 ofpbuf_uninit(&odp_key
);
1643 /* Convert odp_key to flow. */
1644 fitness
= odp_flow_key_to_flow(odp_key
.data
, odp_key
.size
, flow
, errorp
);
1645 if (fitness
== ODP_FIT_ERROR
) {
1646 ofpbuf_uninit(&odp_key
);
1650 packet
= dp_packet_new(0);
1652 flow_compose(packet
, flow
, NULL
, 0);
1653 if (dp_packet_size(packet
) < packet_size
) {
1654 packet_expand(packet
, flow
, packet_size
);
1655 } else if (dp_packet_size(packet
) > packet_size
){
1656 dp_packet_delete(packet
);
1660 flow_compose(packet
, flow
, NULL
, 64);
1663 ofpbuf_uninit(&odp_key
);
1668 netdev_dummy_queue_packet__(struct netdev_rxq_dummy
*rx
, struct dp_packet
*packet
)
1670 struct pkt_list_node
*pkt_node
= xmalloc(sizeof *pkt_node
);
1672 pkt_node
->pkt
= packet
;
1673 ovs_list_push_back(&rx
->recv_queue
, &pkt_node
->list_node
);
1674 rx
->recv_queue_len
++;
1675 seq_change(rx
->seq
);
1679 netdev_dummy_queue_packet(struct netdev_dummy
*dummy
, struct dp_packet
*packet
,
1680 struct flow
*flow
, int queue_id
)
1681 OVS_REQUIRES(dummy
->mutex
)
1683 struct netdev_rxq_dummy
*rx
, *prev
;
1684 struct offloaded_flow
*data
;
1685 struct flow packet_flow
;
1687 if (dummy
->rxq_pcap
) {
1688 ovs_pcap_write(dummy
->rxq_pcap
, packet
);
1692 flow
= &packet_flow
;
1693 flow_extract(packet
, flow
);
1695 HMAP_FOR_EACH (data
, node
, &dummy
->offloaded_flows
) {
1696 if (flow_equal_except(flow
, &data
->match
.flow
, &data
->match
.wc
)) {
1698 dp_packet_set_flow_mark(packet
, data
->mark
);
1700 if (VLOG_IS_DBG_ENABLED()) {
1701 struct ds ds
= DS_EMPTY_INITIALIZER
;
1703 ds_put_format(&ds
, "%s: packet: ",
1704 netdev_get_name(&dummy
->up
));
1705 /* 'flow' does not contain proper port number here.
1706 * Let's just clear it as it wildcarded anyway. */
1707 flow
->in_port
.ofp_port
= 0;
1708 flow_format(&ds
, flow
, NULL
);
1710 ds_put_cstr(&ds
, " matches with flow: ");
1711 odp_format_ufid(&data
->ufid
, &ds
);
1712 ds_put_cstr(&ds
, " ");
1713 match_format(&data
->match
, NULL
, &ds
, OFP_DEFAULT_PRIORITY
);
1714 ds_put_format(&ds
, " with mark: %"PRIu32
, data
->mark
);
1716 VLOG_DBG("%s", ds_cstr(&ds
));
1724 LIST_FOR_EACH (rx
, node
, &dummy
->rxes
) {
1725 if (rx
->up
.queue_id
== queue_id
&&
1726 rx
->recv_queue_len
< NETDEV_DUMMY_MAX_QUEUE
) {
1728 netdev_dummy_queue_packet__(prev
, dp_packet_clone(packet
));
1734 netdev_dummy_queue_packet__(prev
, packet
);
1736 dp_packet_delete(packet
);
1741 netdev_dummy_receive(struct unixctl_conn
*conn
,
1742 int argc
, const char *argv
[], void *aux OVS_UNUSED
)
1744 struct netdev_dummy
*dummy_dev
;
1745 struct netdev
*netdev
;
1746 int i
, k
= 1, rx_qid
= 0;
1748 netdev
= netdev_from_name(argv
[k
++]);
1749 if (!netdev
|| !is_dummy_class(netdev
->netdev_class
)) {
1750 unixctl_command_reply_error(conn
, "no such dummy netdev");
1753 dummy_dev
= netdev_dummy_cast(netdev
);
1755 ovs_mutex_lock(&dummy_dev
->mutex
);
1757 if (argc
> k
+ 1 && !strcmp(argv
[k
], "--qid")) {
1758 rx_qid
= strtol(argv
[k
+ 1], NULL
, 10);
1759 if (rx_qid
< 0 || rx_qid
>= netdev
->n_rxq
) {
1760 unixctl_command_reply_error(conn
, "bad rx queue id.");
1766 for (i
= k
; i
< argc
; i
++) {
1767 struct dp_packet
*packet
;
1770 /* Try to parse 'argv[i]' as packet in hex. */
1771 packet
= eth_from_packet(argv
[i
]);
1774 int packet_size
= 0;
1775 const char *flow_str
= argv
[i
];
1777 /* Parse optional --len argument immediately follows a 'flow'. */
1778 if (argc
>= i
+ 2 && !strcmp(argv
[i
+ 1], "--len")) {
1779 packet_size
= strtol(argv
[i
+ 2], NULL
, 10);
1781 if (packet_size
< ETH_TOTAL_MIN
) {
1782 unixctl_command_reply_error(conn
, "too small packet len");
1787 /* Try parse 'argv[i]' as odp flow. */
1789 packet
= eth_from_flow_str(flow_str
, packet_size
, &flow
, &error_s
);
1791 unixctl_command_reply_error(conn
, error_s
);
1796 flow_extract(packet
, &flow
);
1799 netdev_dummy_queue_packet(dummy_dev
, packet
, &flow
, rx_qid
);
1802 unixctl_command_reply(conn
, NULL
);
1805 ovs_mutex_unlock(&dummy_dev
->mutex
);
1807 netdev_close(netdev
);
1811 netdev_dummy_set_admin_state__(struct netdev_dummy
*dev
, bool admin_state
)
1812 OVS_REQUIRES(dev
->mutex
)
1814 enum netdev_flags old_flags
;
1817 netdev_dummy_update_flags__(dev
, 0, NETDEV_UP
, &old_flags
);
1819 netdev_dummy_update_flags__(dev
, NETDEV_UP
, 0, &old_flags
);
1824 netdev_dummy_set_admin_state(struct unixctl_conn
*conn
, int argc
,
1825 const char *argv
[], void *aux OVS_UNUSED
)
1829 if (!strcasecmp(argv
[argc
- 1], "up")) {
1831 } else if ( !strcasecmp(argv
[argc
- 1], "down")) {
1834 unixctl_command_reply_error(conn
, "Invalid Admin State");
1839 struct netdev
*netdev
= netdev_from_name(argv
[1]);
1840 if (netdev
&& is_dummy_class(netdev
->netdev_class
)) {
1841 struct netdev_dummy
*dummy_dev
= netdev_dummy_cast(netdev
);
1843 ovs_mutex_lock(&dummy_dev
->mutex
);
1844 netdev_dummy_set_admin_state__(dummy_dev
, up
);
1845 ovs_mutex_unlock(&dummy_dev
->mutex
);
1847 netdev_close(netdev
);
1849 unixctl_command_reply_error(conn
, "Unknown Dummy Interface");
1850 netdev_close(netdev
);
1854 struct netdev_dummy
*netdev
;
1856 ovs_mutex_lock(&dummy_list_mutex
);
1857 LIST_FOR_EACH (netdev
, list_node
, &dummy_list
) {
1858 ovs_mutex_lock(&netdev
->mutex
);
1859 netdev_dummy_set_admin_state__(netdev
, up
);
1860 ovs_mutex_unlock(&netdev
->mutex
);
1862 ovs_mutex_unlock(&dummy_list_mutex
);
1864 unixctl_command_reply(conn
, "OK");
1868 display_conn_state__(struct ds
*s
, const char *name
,
1869 enum dummy_netdev_conn_state state
)
1871 ds_put_format(s
, "%s: ", name
);
1874 case CONN_STATE_CONNECTED
:
1875 ds_put_cstr(s
, "connected\n");
1878 case CONN_STATE_NOT_CONNECTED
:
1879 ds_put_cstr(s
, "disconnected\n");
1882 case CONN_STATE_UNKNOWN
:
1884 ds_put_cstr(s
, "unknown\n");
1890 netdev_dummy_conn_state(struct unixctl_conn
*conn
, int argc
,
1891 const char *argv
[], void *aux OVS_UNUSED
)
1893 enum dummy_netdev_conn_state state
= CONN_STATE_UNKNOWN
;
1899 const char *dev_name
= argv
[1];
1900 struct netdev
*netdev
= netdev_from_name(dev_name
);
1902 if (netdev
&& is_dummy_class(netdev
->netdev_class
)) {
1903 struct netdev_dummy
*dummy_dev
= netdev_dummy_cast(netdev
);
1905 ovs_mutex_lock(&dummy_dev
->mutex
);
1906 state
= dummy_netdev_get_conn_state(&dummy_dev
->conn
);
1907 ovs_mutex_unlock(&dummy_dev
->mutex
);
1909 netdev_close(netdev
);
1911 display_conn_state__(&s
, dev_name
, state
);
1913 struct netdev_dummy
*netdev
;
1915 ovs_mutex_lock(&dummy_list_mutex
);
1916 LIST_FOR_EACH (netdev
, list_node
, &dummy_list
) {
1917 ovs_mutex_lock(&netdev
->mutex
);
1918 state
= dummy_netdev_get_conn_state(&netdev
->conn
);
1919 ovs_mutex_unlock(&netdev
->mutex
);
1920 if (state
!= CONN_STATE_UNKNOWN
) {
1921 display_conn_state__(&s
, netdev
->up
.name
, state
);
1924 ovs_mutex_unlock(&dummy_list_mutex
);
1927 unixctl_command_reply(conn
, ds_cstr(&s
));
1932 netdev_dummy_ip4addr(struct unixctl_conn
*conn
, int argc OVS_UNUSED
,
1933 const char *argv
[], void *aux OVS_UNUSED
)
1935 struct netdev
*netdev
= netdev_from_name(argv
[1]);
1937 if (netdev
&& is_dummy_class(netdev
->netdev_class
)) {
1938 struct in_addr ip
, mask
;
1941 error
= ip_parse_masked(argv
[2], &ip
.s_addr
, &mask
.s_addr
);
1943 netdev_dummy_set_in4(netdev
, ip
, mask
);
1944 unixctl_command_reply(conn
, "OK");
1946 unixctl_command_reply_error(conn
, error
);
1950 unixctl_command_reply_error(conn
, "Unknown Dummy Interface");
1953 netdev_close(netdev
);
1957 netdev_dummy_ip6addr(struct unixctl_conn
*conn
, int argc OVS_UNUSED
,
1958 const char *argv
[], void *aux OVS_UNUSED
)
1960 struct netdev
*netdev
= netdev_from_name(argv
[1]);
1962 if (netdev
&& is_dummy_class(netdev
->netdev_class
)) {
1963 struct in6_addr ip6
;
1967 error
= ipv6_parse_cidr(argv
[2], &ip6
, &plen
);
1969 struct in6_addr mask
;
1971 mask
= ipv6_create_mask(plen
);
1972 netdev_dummy_set_in6(netdev
, &ip6
, &mask
);
1973 unixctl_command_reply(conn
, "OK");
1975 unixctl_command_reply_error(conn
, error
);
1979 unixctl_command_reply_error(conn
, "Unknown Dummy Interface");
1982 netdev_close(netdev
);
1987 netdev_dummy_override(const char *type
)
1989 if (!netdev_unregister_provider(type
)) {
1990 struct netdev_class
*class;
1993 class = xmemdup(&dummy_class
, sizeof dummy_class
);
1994 class->type
= xstrdup(type
);
1995 error
= netdev_register_provider(class);
1997 VLOG_ERR("%s: failed to register netdev provider (%s)",
1998 type
, ovs_strerror(error
));
1999 free(CONST_CAST(char *, class->type
));
2006 netdev_dummy_register(enum dummy_level level
)
2008 unixctl_command_register("netdev-dummy/receive",
2009 "name [--qid queue_id] packet|flow [--len packet_len]",
2010 2, INT_MAX
, netdev_dummy_receive
, NULL
);
2011 unixctl_command_register("netdev-dummy/set-admin-state",
2012 "[netdev] up|down", 1, 2,
2013 netdev_dummy_set_admin_state
, NULL
);
2014 unixctl_command_register("netdev-dummy/conn-state",
2016 netdev_dummy_conn_state
, NULL
);
2017 unixctl_command_register("netdev-dummy/ip4addr",
2018 "[netdev] ipaddr/mask-prefix-len", 2, 2,
2019 netdev_dummy_ip4addr
, NULL
);
2020 unixctl_command_register("netdev-dummy/ip6addr",
2021 "[netdev] ip6addr", 2, 2,
2022 netdev_dummy_ip6addr
, NULL
);
2024 if (level
== DUMMY_OVERRIDE_ALL
) {
2029 netdev_enumerate_types(&types
);
2030 SSET_FOR_EACH (type
, &types
) {
2031 if (strcmp(type
, "patch")) {
2032 netdev_dummy_override(type
);
2035 sset_destroy(&types
);
2036 } else if (level
== DUMMY_OVERRIDE_SYSTEM
) {
2037 netdev_dummy_override("system");
2039 netdev_register_provider(&dummy_class
);
2040 netdev_register_provider(&dummy_internal_class
);
2041 netdev_register_provider(&dummy_pmd_class
);
2043 netdev_register_flow_api_provider(&netdev_offload_dummy
);
2045 netdev_vport_tunnel_register();