2 * Copyright (c) 2010, 2011, 2012, 2013 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
25 #include "netdev-provider.h"
26 #include "netdev-vport.h"
28 #include "ofp-print.h"
31 #include "poll-loop.h"
35 #include "unaligned.h"
VLOG_DEFINE_THIS_MODULE(netdev_dummy);
42 struct stream
*stream
;
47 /* Protects 'dummy_list'. */
48 static struct ovs_mutex dummy_list_mutex
= OVS_MUTEX_INITIALIZER
;
50 /* Contains all 'struct dummy_dev's. */
51 static struct list dummy_list
OVS_GUARDED_BY(dummy_list_mutex
)
52 = LIST_INITIALIZER(&dummy_list
);
58 struct list list_node
OVS_GUARDED_BY(dummy_list_mutex
);
60 /* Protects all members below. */
61 struct ovs_mutex mutex
OVS_ACQ_AFTER(dummy_list_mutex
);
63 uint8_t hwaddr
[ETH_ADDR_LEN
] OVS_GUARDED
;
65 struct netdev_stats stats OVS_GUARDED
;
66 enum netdev_flags flags OVS_GUARDED
;
67 unsigned int change_seq OVS_GUARDED
;
68 int ifindex OVS_GUARDED
;
70 struct pstream
*pstream OVS_GUARDED
;
71 struct dummy_stream
*streams OVS_GUARDED
;
72 size_t n_streams OVS_GUARDED
;
74 struct list rxes OVS_GUARDED
; /* List of child "netdev_rx_dummy"s. */
77 /* Max 'recv_queue_len' in struct netdev_dummy. */
78 #define NETDEV_DUMMY_MAX_QUEUE 100
80 struct netdev_rx_dummy
{
82 struct list node
; /* In netdev_dummy's "rxes" list. */
83 struct list recv_queue
;
84 int recv_queue_len
; /* list_size(&recv_queue). */
88 static unixctl_cb_func netdev_dummy_set_admin_state
;
89 static int netdev_dummy_construct(struct netdev
*);
90 static void netdev_dummy_poll_notify(struct netdev_dummy
*netdev
)
91 OVS_REQUIRES(netdev
->mutex
);
92 static void netdev_dummy_queue_packet(struct netdev_dummy
*, struct ofpbuf
*);
94 static void dummy_stream_close(struct dummy_stream
*);
97 is_dummy_class(const struct netdev_class
*class)
99 return class->construct
== netdev_dummy_construct
;
102 static struct netdev_dummy
*
103 netdev_dummy_cast(const struct netdev
*netdev
)
105 ovs_assert(is_dummy_class(netdev_get_class(netdev
)));
106 return CONTAINER_OF(netdev
, struct netdev_dummy
, up
);
109 static struct netdev_rx_dummy
*
110 netdev_rx_dummy_cast(const struct netdev_rx
*rx
)
112 ovs_assert(is_dummy_class(netdev_get_class(rx
->netdev
)));
113 return CONTAINER_OF(rx
, struct netdev_rx_dummy
, up
);
117 netdev_dummy_run(void)
119 struct netdev_dummy
*dev
;
121 ovs_mutex_lock(&dummy_list_mutex
);
122 LIST_FOR_EACH (dev
, list_node
, &dummy_list
) {
125 ovs_mutex_lock(&dev
->mutex
);
128 struct stream
*new_stream
;
131 error
= pstream_accept(dev
->pstream
, &new_stream
);
133 struct dummy_stream
*s
;
135 dev
->streams
= xrealloc(dev
->streams
,
136 ((dev
->n_streams
+ 1)
137 * sizeof *dev
->streams
));
138 s
= &dev
->streams
[dev
->n_streams
++];
139 s
->stream
= new_stream
;
140 ofpbuf_init(&s
->rxbuf
, 2048);
142 } else if (error
!= EAGAIN
) {
143 VLOG_WARN("%s: accept failed (%s)",
144 pstream_get_name(dev
->pstream
), ovs_strerror(error
));
145 pstream_close(dev
->pstream
);
150 for (i
= 0; i
< dev
->n_streams
; i
++) {
151 struct dummy_stream
*s
= &dev
->streams
[i
];
155 stream_run(s
->stream
);
157 if (!list_is_empty(&s
->txq
)) {
158 struct ofpbuf
*txbuf
;
161 txbuf
= ofpbuf_from_list(list_front(&s
->txq
));
162 retval
= stream_send(s
->stream
, txbuf
->data
, txbuf
->size
);
164 ofpbuf_pull(txbuf
, retval
);
166 list_remove(&txbuf
->list_node
);
167 ofpbuf_delete(txbuf
);
169 } else if (retval
!= -EAGAIN
) {
175 if (s
->rxbuf
.size
< 2) {
176 n
= 2 - s
->rxbuf
.size
;
180 frame_len
= ntohs(get_unaligned_be16(s
->rxbuf
.data
));
181 if (frame_len
< ETH_HEADER_LEN
) {
185 n
= (2 + frame_len
) - s
->rxbuf
.size
;
192 ofpbuf_prealloc_tailroom(&s
->rxbuf
, n
);
193 retval
= stream_recv(s
->stream
, ofpbuf_tail(&s
->rxbuf
), n
);
195 s
->rxbuf
.size
+= retval
;
196 if (retval
== n
&& s
->rxbuf
.size
> 2) {
197 ofpbuf_pull(&s
->rxbuf
, 2);
198 netdev_dummy_queue_packet(dev
,
199 ofpbuf_clone(&s
->rxbuf
));
200 ofpbuf_clear(&s
->rxbuf
);
202 } else if (retval
!= -EAGAIN
) {
203 error
= (retval
< 0 ? -retval
204 : s
->rxbuf
.size
? EPROTO
210 VLOG_DBG("%s: closing connection (%s)",
211 stream_get_name(s
->stream
),
212 ovs_retval_to_string(error
));
213 dummy_stream_close(&dev
->streams
[i
]);
214 dev
->streams
[i
] = dev
->streams
[--dev
->n_streams
];
218 ovs_mutex_unlock(&dev
->mutex
);
220 ovs_mutex_unlock(&dummy_list_mutex
);
224 dummy_stream_close(struct dummy_stream
*s
)
226 stream_close(s
->stream
);
227 ofpbuf_uninit(&s
->rxbuf
);
228 ofpbuf_list_delete(&s
->txq
);
232 netdev_dummy_wait(void)
234 struct netdev_dummy
*dev
;
236 ovs_mutex_lock(&dummy_list_mutex
);
237 LIST_FOR_EACH (dev
, list_node
, &dummy_list
) {
240 ovs_mutex_lock(&dev
->mutex
);
242 pstream_wait(dev
->pstream
);
244 for (i
= 0; i
< dev
->n_streams
; i
++) {
245 struct dummy_stream
*s
= &dev
->streams
[i
];
247 stream_run_wait(s
->stream
);
248 if (!list_is_empty(&s
->txq
)) {
249 stream_send_wait(s
->stream
);
251 stream_recv_wait(s
->stream
);
253 ovs_mutex_unlock(&dev
->mutex
);
255 ovs_mutex_unlock(&dummy_list_mutex
);
258 static struct netdev
*
259 netdev_dummy_alloc(void)
261 struct netdev_dummy
*netdev
= xzalloc(sizeof *netdev
);
266 netdev_dummy_construct(struct netdev
*netdev_
)
268 static atomic_uint next_n
= ATOMIC_VAR_INIT(0xaa550000);
269 struct netdev_dummy
*netdev
= netdev_dummy_cast(netdev_
);
272 atomic_add(&next_n
, 1, &n
);
274 ovs_mutex_init(&netdev
->mutex
);
275 ovs_mutex_lock(&netdev
->mutex
);
276 netdev
->hwaddr
[0] = 0xaa;
277 netdev
->hwaddr
[1] = 0x55;
278 netdev
->hwaddr
[2] = n
>> 24;
279 netdev
->hwaddr
[3] = n
>> 16;
280 netdev
->hwaddr
[4] = n
>> 8;
281 netdev
->hwaddr
[5] = n
;
284 netdev
->change_seq
= 1;
285 netdev
->ifindex
= -EOPNOTSUPP
;
287 netdev
->pstream
= NULL
;
288 netdev
->streams
= NULL
;
289 netdev
->n_streams
= 0;
291 list_init(&netdev
->rxes
);
292 ovs_mutex_unlock(&netdev
->mutex
);
294 ovs_mutex_lock(&dummy_list_mutex
);
295 list_push_back(&dummy_list
, &netdev
->list_node
);
296 ovs_mutex_unlock(&dummy_list_mutex
);
302 netdev_dummy_destruct(struct netdev
*netdev_
)
304 struct netdev_dummy
*netdev
= netdev_dummy_cast(netdev_
);
307 ovs_mutex_lock(&dummy_list_mutex
);
308 list_remove(&netdev
->list_node
);
309 ovs_mutex_unlock(&dummy_list_mutex
);
311 ovs_mutex_lock(&netdev
->mutex
);
312 pstream_close(netdev
->pstream
);
313 for (i
= 0; i
< netdev
->n_streams
; i
++) {
314 dummy_stream_close(&netdev
->streams
[i
]);
316 free(netdev
->streams
);
317 ovs_mutex_unlock(&netdev
->mutex
);
318 ovs_mutex_destroy(&netdev
->mutex
);
/* Frees the memory of a destructed dummy netdev. */
static void
netdev_dummy_dealloc(struct netdev *netdev_)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    free(netdev);
}
330 netdev_dummy_get_config(const struct netdev
*netdev_
, struct smap
*args
)
332 struct netdev_dummy
*netdev
= netdev_dummy_cast(netdev_
);
334 ovs_mutex_lock(&netdev
->mutex
);
335 if (netdev
->ifindex
>= 0) {
336 smap_add_format(args
, "ifindex", "%d", netdev
->ifindex
);
338 if (netdev
->pstream
) {
339 smap_add(args
, "pstream", pstream_get_name(netdev
->pstream
));
341 ovs_mutex_unlock(&netdev
->mutex
);
347 netdev_dummy_set_config(struct netdev
*netdev_
, const struct smap
*args
)
349 struct netdev_dummy
*netdev
= netdev_dummy_cast(netdev_
);
352 ovs_mutex_lock(&netdev
->mutex
);
353 netdev
->ifindex
= smap_get_int(args
, "ifindex", -EOPNOTSUPP
);
355 pstream
= smap_get(args
, "pstream");
358 || strcmp(pstream_get_name(netdev
->pstream
), pstream
)) {
359 pstream_close(netdev
->pstream
);
360 netdev
->pstream
= NULL
;
365 error
= pstream_open(pstream
, &netdev
->pstream
, DSCP_DEFAULT
);
367 VLOG_WARN("%s: open failed (%s)",
368 pstream
, ovs_strerror(error
));
372 ovs_mutex_unlock(&netdev
->mutex
);
377 static struct netdev_rx
*
378 netdev_dummy_rx_alloc(void)
380 struct netdev_rx_dummy
*rx
= xzalloc(sizeof *rx
);
385 netdev_dummy_rx_construct(struct netdev_rx
*rx_
)
387 struct netdev_rx_dummy
*rx
= netdev_rx_dummy_cast(rx_
);
388 struct netdev_dummy
*netdev
= netdev_dummy_cast(rx
->up
.netdev
);
390 ovs_mutex_lock(&netdev
->mutex
);
391 list_push_back(&netdev
->rxes
, &rx
->node
);
392 list_init(&rx
->recv_queue
);
393 rx
->recv_queue_len
= 0;
394 ovs_mutex_unlock(&netdev
->mutex
);
400 netdev_dummy_rx_destruct(struct netdev_rx
*rx_
)
402 struct netdev_rx_dummy
*rx
= netdev_rx_dummy_cast(rx_
);
403 struct netdev_dummy
*netdev
= netdev_dummy_cast(rx
->up
.netdev
);
405 ovs_mutex_lock(&netdev
->mutex
);
406 list_remove(&rx
->node
);
407 ofpbuf_list_delete(&rx
->recv_queue
);
408 ovs_mutex_unlock(&netdev
->mutex
);
/* Frees the memory of a destructed rx queue. */
static void
netdev_dummy_rx_dealloc(struct netdev_rx *rx_)
{
    struct netdev_rx_dummy *rx = netdev_rx_dummy_cast(rx_);

    free(rx);
}
420 netdev_dummy_rx_recv(struct netdev_rx
*rx_
, void *buffer
, size_t size
)
422 struct netdev_rx_dummy
*rx
= netdev_rx_dummy_cast(rx_
);
423 struct netdev_dummy
*netdev
= netdev_dummy_cast(rx
->up
.netdev
);
424 struct ofpbuf
*packet
;
427 ovs_mutex_lock(&netdev
->mutex
);
428 if (!list_is_empty(&rx
->recv_queue
)) {
429 packet
= ofpbuf_from_list(list_pop_front(&rx
->recv_queue
));
430 rx
->recv_queue_len
--;
434 ovs_mutex_unlock(&netdev
->mutex
);
440 if (packet
->size
<= size
) {
441 memcpy(buffer
, packet
->data
, packet
->size
);
442 retval
= packet
->size
;
446 ofpbuf_delete(packet
);
452 netdev_dummy_rx_wait(struct netdev_rx
*rx_
)
454 struct netdev_rx_dummy
*rx
= netdev_rx_dummy_cast(rx_
);
455 struct netdev_dummy
*netdev
= netdev_dummy_cast(rx
->up
.netdev
);
457 ovs_mutex_lock(&netdev
->mutex
);
458 if (!list_is_empty(&rx
->recv_queue
)) {
459 poll_immediate_wake();
461 ovs_mutex_unlock(&netdev
->mutex
);
465 netdev_dummy_rx_drain(struct netdev_rx
*rx_
)
467 struct netdev_rx_dummy
*rx
= netdev_rx_dummy_cast(rx_
);
468 struct netdev_dummy
*netdev
= netdev_dummy_cast(rx
->up
.netdev
);
470 ovs_mutex_lock(&netdev
->mutex
);
471 ofpbuf_list_delete(&rx
->recv_queue
);
472 rx
->recv_queue_len
= 0;
473 ovs_mutex_unlock(&netdev
->mutex
);
479 netdev_dummy_send(struct netdev
*netdev
, const void *buffer
, size_t size
)
481 struct netdev_dummy
*dev
= netdev_dummy_cast(netdev
);
484 if (size
< ETH_HEADER_LEN
) {
487 const struct eth_header
*eth
= buffer
;
490 ovs_mutex_lock(&dev
->mutex
);
491 max_size
= dev
->mtu
+ ETH_HEADER_LEN
;
492 ovs_mutex_unlock(&dev
->mutex
);
494 if (eth
->eth_type
== htons(ETH_TYPE_VLAN
)) {
495 max_size
+= VLAN_HEADER_LEN
;
497 if (size
> max_size
) {
502 ovs_mutex_lock(&dev
->mutex
);
503 dev
->stats
.tx_packets
++;
504 dev
->stats
.tx_bytes
+= size
;
506 for (i
= 0; i
< dev
->n_streams
; i
++) {
507 struct dummy_stream
*s
= &dev
->streams
[i
];
509 if (list_size(&s
->txq
) < NETDEV_DUMMY_MAX_QUEUE
) {
512 b
= ofpbuf_clone_data_with_headroom(buffer
, size
, 2);
513 put_unaligned_be16(ofpbuf_push_uninit(b
, 2), htons(size
));
514 list_push_back(&s
->txq
, &b
->list_node
);
517 ovs_mutex_unlock(&dev
->mutex
);
523 netdev_dummy_set_etheraddr(struct netdev
*netdev
,
524 const uint8_t mac
[ETH_ADDR_LEN
])
526 struct netdev_dummy
*dev
= netdev_dummy_cast(netdev
);
528 ovs_mutex_lock(&dev
->mutex
);
529 if (!eth_addr_equals(dev
->hwaddr
, mac
)) {
530 memcpy(dev
->hwaddr
, mac
, ETH_ADDR_LEN
);
531 netdev_dummy_poll_notify(dev
);
533 ovs_mutex_unlock(&dev
->mutex
);
539 netdev_dummy_get_etheraddr(const struct netdev
*netdev
,
540 uint8_t mac
[ETH_ADDR_LEN
])
542 struct netdev_dummy
*dev
= netdev_dummy_cast(netdev
);
544 ovs_mutex_lock(&dev
->mutex
);
545 memcpy(mac
, dev
->hwaddr
, ETH_ADDR_LEN
);
546 ovs_mutex_unlock(&dev
->mutex
);
552 netdev_dummy_get_mtu(const struct netdev
*netdev
, int *mtup
)
554 struct netdev_dummy
*dev
= netdev_dummy_cast(netdev
);
556 ovs_mutex_lock(&dev
->mutex
);
558 ovs_mutex_unlock(&dev
->mutex
);
564 netdev_dummy_set_mtu(const struct netdev
*netdev
, int mtu
)
566 struct netdev_dummy
*dev
= netdev_dummy_cast(netdev
);
568 ovs_mutex_lock(&dev
->mutex
);
570 ovs_mutex_unlock(&dev
->mutex
);
576 netdev_dummy_get_stats(const struct netdev
*netdev
, struct netdev_stats
*stats
)
578 struct netdev_dummy
*dev
= netdev_dummy_cast(netdev
);
580 ovs_mutex_lock(&dev
->mutex
);
582 ovs_mutex_unlock(&dev
->mutex
);
588 netdev_dummy_set_stats(struct netdev
*netdev
, const struct netdev_stats
*stats
)
590 struct netdev_dummy
*dev
= netdev_dummy_cast(netdev
);
592 ovs_mutex_lock(&dev
->mutex
);
594 ovs_mutex_unlock(&dev
->mutex
);
600 netdev_dummy_get_ifindex(const struct netdev
*netdev
)
602 struct netdev_dummy
*dev
= netdev_dummy_cast(netdev
);
605 ovs_mutex_lock(&dev
->mutex
);
606 ifindex
= dev
->ifindex
;
607 ovs_mutex_unlock(&dev
->mutex
);
613 netdev_dummy_update_flags__(struct netdev_dummy
*netdev
,
614 enum netdev_flags off
, enum netdev_flags on
,
615 enum netdev_flags
*old_flagsp
)
616 OVS_REQUIRES(netdev
->mutex
)
618 if ((off
| on
) & ~(NETDEV_UP
| NETDEV_PROMISC
)) {
622 *old_flagsp
= netdev
->flags
;
624 netdev
->flags
&= ~off
;
625 if (*old_flagsp
!= netdev
->flags
) {
626 netdev_dummy_poll_notify(netdev
);
633 netdev_dummy_update_flags(struct netdev
*netdev_
,
634 enum netdev_flags off
, enum netdev_flags on
,
635 enum netdev_flags
*old_flagsp
)
637 struct netdev_dummy
*netdev
= netdev_dummy_cast(netdev_
);
640 ovs_mutex_lock(&netdev
->mutex
);
641 error
= netdev_dummy_update_flags__(netdev
, off
, on
, old_flagsp
);
642 ovs_mutex_unlock(&netdev
->mutex
);
648 netdev_dummy_change_seq(const struct netdev
*netdev_
)
650 struct netdev_dummy
*netdev
= netdev_dummy_cast(netdev_
);
651 unsigned int change_seq
;
653 ovs_mutex_lock(&netdev
->mutex
);
654 change_seq
= netdev
->change_seq
;
655 ovs_mutex_unlock(&netdev
->mutex
);
660 /* Helper functions. */
663 netdev_dummy_poll_notify(struct netdev_dummy
*dev
)
666 if (!dev
->change_seq
) {
671 static const struct netdev_class dummy_class
= {
678 netdev_dummy_construct
,
679 netdev_dummy_destruct
,
680 netdev_dummy_dealloc
,
681 netdev_dummy_get_config
,
682 netdev_dummy_set_config
,
683 NULL
, /* get_tunnel_config */
685 netdev_dummy_send
, /* send */
686 NULL
, /* send_wait */
688 netdev_dummy_set_etheraddr
,
689 netdev_dummy_get_etheraddr
,
690 netdev_dummy_get_mtu
,
691 netdev_dummy_set_mtu
,
692 netdev_dummy_get_ifindex
,
693 NULL
, /* get_carrier */
694 NULL
, /* get_carrier_resets */
695 NULL
, /* get_miimon */
696 netdev_dummy_get_stats
,
697 netdev_dummy_set_stats
,
699 NULL
, /* get_features */
700 NULL
, /* set_advertisements */
702 NULL
, /* set_policing */
703 NULL
, /* get_qos_types */
704 NULL
, /* get_qos_capabilities */
707 NULL
, /* get_queue */
708 NULL
, /* set_queue */
709 NULL
, /* delete_queue */
710 NULL
, /* get_queue_stats */
711 NULL
, /* queue_dump_start */
712 NULL
, /* queue_dump_next */
713 NULL
, /* queue_dump_done */
714 NULL
, /* dump_queue_stats */
719 NULL
, /* add_router */
720 NULL
, /* get_next_hop */
721 NULL
, /* get_status */
722 NULL
, /* arp_lookup */
724 netdev_dummy_update_flags
,
726 netdev_dummy_change_seq
,
728 netdev_dummy_rx_alloc
,
729 netdev_dummy_rx_construct
,
730 netdev_dummy_rx_destruct
,
731 netdev_dummy_rx_dealloc
,
732 netdev_dummy_rx_recv
,
733 netdev_dummy_rx_wait
,
734 netdev_dummy_rx_drain
,
737 static struct ofpbuf
*
738 eth_from_packet_or_flow(const char *s
)
740 enum odp_key_fitness fitness
;
741 struct ofpbuf
*packet
;
742 struct ofpbuf odp_key
;
746 if (!eth_from_hex(s
, &packet
)) {
750 /* Convert string to datapath key.
752 * It would actually be nicer to parse an OpenFlow-like flow key here, but
753 * the code for that currently calls exit() on parse error. We have to
754 * settle for parsing a datapath key for now.
756 ofpbuf_init(&odp_key
, 0);
757 error
= odp_flow_from_string(s
, NULL
, &odp_key
, NULL
);
759 ofpbuf_uninit(&odp_key
);
763 /* Convert odp_key to flow. */
764 fitness
= odp_flow_key_to_flow(odp_key
.data
, odp_key
.size
, &flow
);
765 if (fitness
== ODP_FIT_ERROR
) {
766 ofpbuf_uninit(&odp_key
);
770 packet
= ofpbuf_new(0);
771 flow_compose(packet
, &flow
);
773 ofpbuf_uninit(&odp_key
);
778 netdev_dummy_queue_packet__(struct netdev_rx_dummy
*rx
, struct ofpbuf
*packet
)
780 list_push_back(&rx
->recv_queue
, &packet
->list_node
);
781 rx
->recv_queue_len
++;
785 netdev_dummy_queue_packet(struct netdev_dummy
*dummy
, struct ofpbuf
*packet
)
787 struct netdev_rx_dummy
*rx
, *prev
;
790 LIST_FOR_EACH (rx
, node
, &dummy
->rxes
) {
791 if (rx
->recv_queue_len
< NETDEV_DUMMY_MAX_QUEUE
) {
793 netdev_dummy_queue_packet__(prev
, ofpbuf_clone(packet
));
799 netdev_dummy_queue_packet__(prev
, packet
);
801 ofpbuf_delete(packet
);
806 netdev_dummy_receive(struct unixctl_conn
*conn
,
807 int argc
, const char *argv
[], void *aux OVS_UNUSED
)
809 struct netdev_dummy
*dummy_dev
;
810 struct netdev
*netdev
;
813 netdev
= netdev_from_name(argv
[1]);
814 if (!netdev
|| !is_dummy_class(netdev
->netdev_class
)) {
815 unixctl_command_reply_error(conn
, "no such dummy netdev");
818 dummy_dev
= netdev_dummy_cast(netdev
);
820 for (i
= 2; i
< argc
; i
++) {
821 struct ofpbuf
*packet
;
823 packet
= eth_from_packet_or_flow(argv
[i
]);
825 unixctl_command_reply_error(conn
, "bad packet syntax");
829 ovs_mutex_lock(&dummy_dev
->mutex
);
830 dummy_dev
->stats
.rx_packets
++;
831 dummy_dev
->stats
.rx_bytes
+= packet
->size
;
832 netdev_dummy_queue_packet(dummy_dev
, packet
);
833 ovs_mutex_unlock(&dummy_dev
->mutex
);
836 unixctl_command_reply(conn
, NULL
);
839 netdev_close(netdev
);
843 netdev_dummy_set_admin_state__(struct netdev_dummy
*dev
, bool admin_state
)
844 OVS_REQUIRES(dev
->mutex
)
846 enum netdev_flags old_flags
;
849 netdev_dummy_update_flags__(dev
, 0, NETDEV_UP
, &old_flags
);
851 netdev_dummy_update_flags__(dev
, NETDEV_UP
, 0, &old_flags
);
856 netdev_dummy_set_admin_state(struct unixctl_conn
*conn
, int argc
,
857 const char *argv
[], void *aux OVS_UNUSED
)
861 if (!strcasecmp(argv
[argc
- 1], "up")) {
863 } else if ( !strcasecmp(argv
[argc
- 1], "down")) {
866 unixctl_command_reply_error(conn
, "Invalid Admin State");
871 struct netdev
*netdev
= netdev_from_name(argv
[1]);
872 if (netdev
&& is_dummy_class(netdev
->netdev_class
)) {
873 struct netdev_dummy
*dummy_dev
= netdev_dummy_cast(netdev
);
875 ovs_mutex_lock(&dummy_dev
->mutex
);
876 netdev_dummy_set_admin_state__(dummy_dev
, up
);
877 ovs_mutex_unlock(&dummy_dev
->mutex
);
879 netdev_close(netdev
);
881 unixctl_command_reply_error(conn
, "Unknown Dummy Interface");
882 netdev_close(netdev
);
886 struct netdev_dummy
*netdev
;
888 ovs_mutex_lock(&dummy_list_mutex
);
889 LIST_FOR_EACH (netdev
, list_node
, &dummy_list
) {
890 ovs_mutex_lock(&netdev
->mutex
);
891 netdev_dummy_set_admin_state__(netdev
, up
);
892 ovs_mutex_unlock(&netdev
->mutex
);
894 ovs_mutex_unlock(&dummy_list_mutex
);
896 unixctl_command_reply(conn
, "OK");
900 netdev_dummy_register(bool override
)
902 unixctl_command_register("netdev-dummy/receive", "NAME PACKET|FLOW...",
903 2, INT_MAX
, netdev_dummy_receive
, NULL
);
904 unixctl_command_register("netdev-dummy/set-admin-state",
905 "[netdev] up|down", 1, 2,
906 netdev_dummy_set_admin_state
, NULL
);
913 netdev_enumerate_types(&types
);
914 SSET_FOR_EACH (type
, &types
) {
915 if (!netdev_unregister_provider(type
)) {
916 struct netdev_class
*class;
919 class = xmemdup(&dummy_class
, sizeof dummy_class
);
920 class->type
= xstrdup(type
);
921 error
= netdev_register_provider(class);
923 VLOG_ERR("%s: failed to register netdev provider (%s)",
924 type
, ovs_strerror(error
));
925 free(CONST_CAST(char *, class->type
));
930 sset_destroy(&types
);
932 netdev_register_provider(&dummy_class
);
934 netdev_vport_tunnel_register();