lib/netdev-dummy.c
1 /*
2 * Copyright (c) 2010, 2011, 2012, 2013, 2015 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18
19 #include "dummy.h"
20
21 #include <errno.h>
22
23 #include "dp-packet.h"
24 #include "dpif-netdev.h"
25 #include "dynamic-string.h"
26 #include "flow.h"
27 #include "list.h"
28 #include "netdev-provider.h"
29 #include "netdev-vport.h"
30 #include "odp-util.h"
31 #include "ofp-print.h"
32 #include "ofpbuf.h"
33 #include "ovs-atomic.h"
34 #include "packets.h"
35 #include "pcap-file.h"
36 #include "poll-loop.h"
37 #include "shash.h"
38 #include "sset.h"
39 #include "stream.h"
40 #include "unaligned.h"
41 #include "timeval.h"
42 #include "unixctl.h"
43 #include "reconnect.h"
44 #include "openvswitch/vlog.h"
45
46 VLOG_DEFINE_THIS_MODULE(netdev_dummy);
47
48 struct reconnect;
49
50 struct dummy_packet_stream {
51 struct stream *stream;
52 struct dp_packet rxbuf;
53 struct ovs_list txq;
54 };
55
56 enum dummy_packet_conn_type {
57 NONE, /* No connection is configured. */
58 PASSIVE, /* Listener. */
59 ACTIVE /* Connect to listener. */
60 };
61
62 enum dummy_netdev_conn_state {
63 CONN_STATE_CONNECTED, /* Listener connected. */
64 CONN_STATE_NOT_CONNECTED, /* Listener not connected. */
65     CONN_STATE_UNKNOWN,       /* No relevant information. */
66 };
67
68 struct dummy_packet_pconn {
69 struct pstream *pstream;
70 struct dummy_packet_stream *streams;
71 size_t n_streams;
72 };
73
74 struct dummy_packet_rconn {
75 struct dummy_packet_stream *rstream;
76 struct reconnect *reconnect;
77 };
78
79 struct dummy_packet_conn {
80 enum dummy_packet_conn_type type;
81 union {
82 struct dummy_packet_pconn pconn;
83 struct dummy_packet_rconn rconn;
84 } u;
85 };
86
87 struct pkt_list_node {
88 struct dp_packet *pkt;
89 struct ovs_list list_node;
90 };
91
92 /* Protects 'dummy_list'. */
93 static struct ovs_mutex dummy_list_mutex = OVS_MUTEX_INITIALIZER;
94
95 /* Contains all 'struct dummy_dev's. */
96 static struct ovs_list dummy_list OVS_GUARDED_BY(dummy_list_mutex)
97 = OVS_LIST_INITIALIZER(&dummy_list);
98
99 struct netdev_dummy {
100 struct netdev up;
101
102 /* In dummy_list. */
103 struct ovs_list list_node OVS_GUARDED_BY(dummy_list_mutex);
104
105 /* Protects all members below. */
106 struct ovs_mutex mutex OVS_ACQ_AFTER(dummy_list_mutex);
107
108 uint8_t hwaddr[ETH_ADDR_LEN] OVS_GUARDED;
109 int mtu OVS_GUARDED;
110 struct netdev_stats stats OVS_GUARDED;
111 enum netdev_flags flags OVS_GUARDED;
112 int ifindex OVS_GUARDED;
113
114 struct dummy_packet_conn conn OVS_GUARDED;
115
116 FILE *tx_pcap, *rxq_pcap OVS_GUARDED;
117
118 struct in_addr address, netmask;
119 struct ovs_list rxes OVS_GUARDED; /* List of child "netdev_rxq_dummy"s. */
120 };
121
122 /* Max 'recv_queue_len' in struct netdev_dummy. */
123 #define NETDEV_DUMMY_MAX_QUEUE 100
124
125 struct netdev_rxq_dummy {
126 struct netdev_rxq up;
127 struct ovs_list node; /* In netdev_dummy's "rxes" list. */
128 struct ovs_list recv_queue;
129 int recv_queue_len; /* list_size(&recv_queue). */
130 struct seq *seq; /* Reports newly queued packets. */
131 };
132
133 static unixctl_cb_func netdev_dummy_set_admin_state;
134 static int netdev_dummy_construct(struct netdev *);
135 static void netdev_dummy_queue_packet(struct netdev_dummy *, struct dp_packet *);
136
137 static void dummy_packet_stream_close(struct dummy_packet_stream *);
138
139 static void pkt_list_delete(struct ovs_list *);
140
141 static bool
142 is_dummy_class(const struct netdev_class *class)
143 {
144 return class->construct == netdev_dummy_construct;
145 }
146
147 static struct netdev_dummy *
148 netdev_dummy_cast(const struct netdev *netdev)
149 {
150 ovs_assert(is_dummy_class(netdev_get_class(netdev)));
151 return CONTAINER_OF(netdev, struct netdev_dummy, up);
152 }
153
154 static struct netdev_rxq_dummy *
155 netdev_rxq_dummy_cast(const struct netdev_rxq *rx)
156 {
157 ovs_assert(is_dummy_class(netdev_get_class(rx->netdev)));
158 return CONTAINER_OF(rx, struct netdev_rxq_dummy, up);
159 }
160
161 static void
162 dummy_packet_stream_init(struct dummy_packet_stream *s, struct stream *stream)
163 {
164 int rxbuf_size = stream ? 2048 : 0;
165 s->stream = stream;
166 dp_packet_init(&s->rxbuf, rxbuf_size);
167 list_init(&s->txq);
168 }
169
170 static struct dummy_packet_stream *
171 dummy_packet_stream_create(struct stream *stream)
172 {
173 struct dummy_packet_stream *s;
174
175 s = xzalloc(sizeof *s);
176 dummy_packet_stream_init(s, stream);
177
178 return s;
179 }
180
181 static void
182 dummy_packet_stream_wait(struct dummy_packet_stream *s)
183 {
184 stream_run_wait(s->stream);
185 if (!list_is_empty(&s->txq)) {
186 stream_send_wait(s->stream);
187 }
188 stream_recv_wait(s->stream);
189 }
190
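/* Appends a copy of the 'size' bytes in 'buffer' to 's''s transmit queue,
 * prefixed by a 2-byte network-order length header.  The data is silently
 * dropped if the queue already holds NETDEV_DUMMY_MAX_QUEUE packets. */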
191 static void
192 dummy_packet_stream_send(struct dummy_packet_stream *s, const void *buffer, size_t size)
193 {
194 if (list_size(&s->txq) < NETDEV_DUMMY_MAX_QUEUE) {
195 struct dp_packet *b;
196 struct pkt_list_node *node;
197
198 b = dp_packet_clone_data_with_headroom(buffer, size, 2);
199 put_unaligned_be16(dp_packet_push_uninit(b, 2), htons(size));
200
201 node = xmalloc(sizeof *node);
202 node->pkt = b;
203 list_push_back(&s->txq, &node->list_node);
204 }
205 }
206
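/* Pushes any queued transmit data to the underlying stream and reassembles
 * incoming data, which uses the same 2-byte length-prefixed framing, into
 * complete frames that are queued on 'dev' as received packets.  Returns 0
 * if the connection remains usable, otherwise an error value. */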
207 static int
208 dummy_packet_stream_run(struct netdev_dummy *dev, struct dummy_packet_stream *s)
209 {
210 int error = 0;
211 size_t n;
212
213 stream_run(s->stream);
214
215 if (!list_is_empty(&s->txq)) {
216 struct pkt_list_node *txbuf_node;
217 struct dp_packet *txbuf;
218 int retval;
219
220 ASSIGN_CONTAINER(txbuf_node, list_front(&s->txq), list_node);
221 txbuf = txbuf_node->pkt;
222 retval = stream_send(s->stream, dp_packet_data(txbuf), dp_packet_size(txbuf));
223
224 if (retval > 0) {
225 dp_packet_pull(txbuf, retval);
226 if (!dp_packet_size(txbuf)) {
227 list_remove(&txbuf_node->list_node);
228 free(txbuf_node);
229 dp_packet_delete(txbuf);
230 }
231 } else if (retval != -EAGAIN) {
232 error = -retval;
233 }
234 }
235
236 if (!error) {
237 if (dp_packet_size(&s->rxbuf) < 2) {
238 n = 2 - dp_packet_size(&s->rxbuf);
239 } else {
240 uint16_t frame_len;
241
242 frame_len = ntohs(get_unaligned_be16(dp_packet_data(&s->rxbuf)));
243 if (frame_len < ETH_HEADER_LEN) {
244 error = EPROTO;
245 n = 0;
246 } else {
247 n = (2 + frame_len) - dp_packet_size(&s->rxbuf);
248 }
249 }
250 }
251 if (!error) {
252 int retval;
253
254 dp_packet_prealloc_tailroom(&s->rxbuf, n);
255 retval = stream_recv(s->stream, dp_packet_tail(&s->rxbuf), n);
256
257 if (retval > 0) {
258 dp_packet_set_size(&s->rxbuf, dp_packet_size(&s->rxbuf) + retval);
259 if (retval == n && dp_packet_size(&s->rxbuf) > 2) {
260 dp_packet_pull(&s->rxbuf, 2);
261 netdev_dummy_queue_packet(dev,
262 dp_packet_clone(&s->rxbuf));
263 dp_packet_clear(&s->rxbuf);
264 }
265 } else if (retval != -EAGAIN) {
266 error = (retval < 0 ? -retval
267 : dp_packet_size(&s->rxbuf) ? EPROTO
268 : EOF);
269 }
270 }
271
272 return error;
273 }
274
275 static void
276 dummy_packet_stream_close(struct dummy_packet_stream *s)
277 {
278 stream_close(s->stream);
279 dp_packet_uninit(&s->rxbuf);
280 pkt_list_delete(&s->txq);
281 }
282
283 static void
284 dummy_packet_conn_init(struct dummy_packet_conn *conn)
285 {
286 memset(conn, 0, sizeof *conn);
287 conn->type = NONE;
288 }
289
290 static void
291 dummy_packet_conn_get_config(struct dummy_packet_conn *conn, struct smap *args)
292 {
293
294 switch (conn->type) {
295 case PASSIVE:
296 smap_add(args, "pstream", pstream_get_name(conn->u.pconn.pstream));
297 break;
298
299 case ACTIVE:
300 smap_add(args, "stream", stream_get_name(conn->u.rconn.rstream->stream));
301 break;
302
303 case NONE:
304 default:
305 break;
306 }
307 }
308
309 static void
310 dummy_packet_conn_close(struct dummy_packet_conn *conn)
311 {
312 int i;
313 struct dummy_packet_pconn *pconn = &conn->u.pconn;
314 struct dummy_packet_rconn *rconn = &conn->u.rconn;
315
316 switch (conn->type) {
317 case PASSIVE:
318 pstream_close(pconn->pstream);
319 for (i = 0; i < pconn->n_streams; i++) {
320 dummy_packet_stream_close(&pconn->streams[i]);
321 }
322 free(pconn->streams);
323 pconn->pstream = NULL;
324 pconn->streams = NULL;
325 break;
326
327 case ACTIVE:
328 dummy_packet_stream_close(rconn->rstream);
329 free(rconn->rstream);
330 rconn->rstream = NULL;
331 reconnect_destroy(rconn->reconnect);
332 rconn->reconnect = NULL;
333 break;
334
335 case NONE:
336 default:
337 break;
338 }
339
340 conn->type = NONE;
341 memset(conn, 0, sizeof *conn);
342 }
343
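/* Reconfigures 'conn' from the "pstream" and "stream" keys in 'args':
 * "pstream" creates a passive listener, "stream" an active connection driven
 * by a reconnect FSM.  Keeps the existing connection if it already matches
 * the requested target, and refuses configurations that set both keys. */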
344 static void
345 dummy_packet_conn_set_config(struct dummy_packet_conn *conn,
346 const struct smap *args)
347 {
348 const char *pstream = smap_get(args, "pstream");
349 const char *stream = smap_get(args, "stream");
350
351 if (pstream && stream) {
352 VLOG_WARN("Open failed: both %s and %s are configured",
353 pstream, stream);
354 return;
355 }
356
357 switch (conn->type) {
358 case PASSIVE:
359 if (pstream &&
360 !strcmp(pstream_get_name(conn->u.pconn.pstream), pstream)) {
361 return;
362 }
363 dummy_packet_conn_close(conn);
364 break;
365 case ACTIVE:
366 if (stream &&
367 !strcmp(stream_get_name(conn->u.rconn.rstream->stream), stream)) {
368 return;
369 }
370 dummy_packet_conn_close(conn);
371 break;
372 case NONE:
373 default:
374 break;
375 }
376
377 if (pstream) {
378 int error;
379
380 error = pstream_open(pstream, &conn->u.pconn.pstream, DSCP_DEFAULT);
381 if (error) {
382 VLOG_WARN("%s: open failed (%s)", pstream, ovs_strerror(error));
383 } else {
384 conn->type = PASSIVE;
385 }
386 }
387
388 if (stream) {
389 int error;
390 struct stream *active_stream;
391         struct reconnect *reconnect;
392
393 reconnect = reconnect_create(time_msec());
394 reconnect_set_name(reconnect, stream);
395 reconnect_set_passive(reconnect, false, time_msec());
396 reconnect_enable(reconnect, time_msec());
397 reconnect_set_backoff(reconnect, 100, INT_MAX);
398 reconnect_set_probe_interval(reconnect, 0);
399 conn->u.rconn.reconnect = reconnect;
400 conn->type = ACTIVE;
401
402 error = stream_open(stream, &active_stream, DSCP_DEFAULT);
403 conn->u.rconn.rstream = dummy_packet_stream_create(active_stream);
404
405 switch (error) {
406 case 0:
407 reconnect_connected(reconnect, time_msec());
408 break;
409
410 case EAGAIN:
411 reconnect_connecting(reconnect, time_msec());
412 break;
413
414 default:
415 reconnect_connect_failed(reconnect, time_msec(), error);
416 stream_close(active_stream);
417 conn->u.rconn.rstream->stream = NULL;
418 break;
419 }
420 }
421 }
422
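/* Accepts any connection newly arrived on the passive stream, then runs each
 * established stream, closing and discarding the ones that report an
 * error. */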
423 static void
424 dummy_pconn_run(struct netdev_dummy *dev)
425 OVS_REQUIRES(dev->mutex)
426 {
427 struct stream *new_stream;
428 struct dummy_packet_pconn *pconn = &dev->conn.u.pconn;
429 int error;
430 size_t i;
431
432 error = pstream_accept(pconn->pstream, &new_stream);
433 if (!error) {
434 struct dummy_packet_stream *s;
435
436 pconn->streams = xrealloc(pconn->streams,
437 ((pconn->n_streams + 1)
438 * sizeof *s));
439 s = &pconn->streams[pconn->n_streams++];
440 dummy_packet_stream_init(s, new_stream);
441 } else if (error != EAGAIN) {
442 VLOG_WARN("%s: accept failed (%s)",
443 pstream_get_name(pconn->pstream), ovs_strerror(error));
444 pstream_close(pconn->pstream);
445 pconn->pstream = NULL;
446 dev->conn.type = NONE;
447 }
448
449 for (i = 0; i < pconn->n_streams; i++) {
450 struct dummy_packet_stream *s = &pconn->streams[i];
451
452 error = dummy_packet_stream_run(dev, s);
453 if (error) {
454 VLOG_DBG("%s: closing connection (%s)",
455 stream_get_name(s->stream),
456 ovs_retval_to_string(error));
457 dummy_packet_stream_close(s);
458 pconn->streams[i] = pconn->streams[--pconn->n_streams];
459 }
460 }
461 }
462
463 static void
464 dummy_rconn_run(struct netdev_dummy *dev)
465 OVS_REQUIRES(dev->mutex)
466 {
467 struct dummy_packet_rconn *rconn = &dev->conn.u.rconn;
468
469 switch (reconnect_run(rconn->reconnect, time_msec())) {
470 case RECONNECT_CONNECT:
471 {
472 int error;
473
474 if (rconn->rstream->stream) {
475 error = stream_connect(rconn->rstream->stream);
476 } else {
477 error = stream_open(reconnect_get_name(rconn->reconnect),
478 &rconn->rstream->stream, DSCP_DEFAULT);
479 }
480
481 switch (error) {
482 case 0:
483 reconnect_connected(rconn->reconnect, time_msec());
484 break;
485
486 case EAGAIN:
487 reconnect_connecting(rconn->reconnect, time_msec());
488 break;
489
490 default:
491 reconnect_connect_failed(rconn->reconnect, time_msec(), error);
492 stream_close(rconn->rstream->stream);
493 rconn->rstream->stream = NULL;
494 break;
495 }
496 }
497 break;
498
499 case RECONNECT_DISCONNECT:
500 case RECONNECT_PROBE:
501 default:
502 break;
503 }
504
505 if (reconnect_is_connected(rconn->reconnect)) {
506 int err;
507
508 err = dummy_packet_stream_run(dev, rconn->rstream);
509
510 if (err) {
511 reconnect_disconnected(rconn->reconnect, time_msec(), err);
512 stream_close(rconn->rstream->stream);
513 rconn->rstream->stream = NULL;
514 }
515 }
516 }
517
518 static void
519 dummy_packet_conn_run(struct netdev_dummy *dev)
520 OVS_REQUIRES(dev->mutex)
521 {
522 switch (dev->conn.type) {
523 case PASSIVE:
524 dummy_pconn_run(dev);
525 break;
526
527 case ACTIVE:
528 dummy_rconn_run(dev);
529 break;
530
531 case NONE:
532 default:
533 break;
534 }
535 }
536
537 static void
538 dummy_packet_conn_wait(struct dummy_packet_conn *conn)
539 {
540 int i;
541 switch (conn->type) {
542 case PASSIVE:
543 pstream_wait(conn->u.pconn.pstream);
544 for (i = 0; i < conn->u.pconn.n_streams; i++) {
545 struct dummy_packet_stream *s = &conn->u.pconn.streams[i];
546 dummy_packet_stream_wait(s);
547 }
548 break;
549 case ACTIVE:
550 if (reconnect_is_connected(conn->u.rconn.reconnect)) {
551 dummy_packet_stream_wait(conn->u.rconn.rstream);
552 }
553 break;
554
555 case NONE:
556 default:
557 break;
558 }
559 }
560
561 static void
562 dummy_packet_conn_send(struct dummy_packet_conn *conn,
563 const void *buffer, size_t size)
564 {
565 int i;
566
567 switch (conn->type) {
568 case PASSIVE:
569 for (i = 0; i < conn->u.pconn.n_streams; i++) {
570 struct dummy_packet_stream *s = &conn->u.pconn.streams[i];
571
572 dummy_packet_stream_send(s, buffer, size);
573 pstream_wait(conn->u.pconn.pstream);
574 }
575 break;
576
577 case ACTIVE:
578 if (reconnect_is_connected(conn->u.rconn.reconnect)) {
579 dummy_packet_stream_send(conn->u.rconn.rstream, buffer, size);
580 dummy_packet_stream_wait(conn->u.rconn.rstream);
581 }
582 break;
583
584 case NONE:
585 default:
586 break;
587 }
588 }
589
590 static enum dummy_netdev_conn_state
591 dummy_netdev_get_conn_state(struct dummy_packet_conn *conn)
592 {
593 enum dummy_netdev_conn_state state;
594
595 if (conn->type == ACTIVE) {
596 if (reconnect_is_connected(conn->u.rconn.reconnect)) {
597 state = CONN_STATE_CONNECTED;
598 } else {
599 state = CONN_STATE_NOT_CONNECTED;
600 }
601 } else {
602 state = CONN_STATE_UNKNOWN;
603 }
604
605 return state;
606 }
607
608 static void
609 netdev_dummy_run(void)
610 {
611 struct netdev_dummy *dev;
612
613 ovs_mutex_lock(&dummy_list_mutex);
614 LIST_FOR_EACH (dev, list_node, &dummy_list) {
615 ovs_mutex_lock(&dev->mutex);
616 dummy_packet_conn_run(dev);
617 ovs_mutex_unlock(&dev->mutex);
618 }
619 ovs_mutex_unlock(&dummy_list_mutex);
620 }
621
622 static void
623 netdev_dummy_wait(void)
624 {
625 struct netdev_dummy *dev;
626
627 ovs_mutex_lock(&dummy_list_mutex);
628 LIST_FOR_EACH (dev, list_node, &dummy_list) {
629 ovs_mutex_lock(&dev->mutex);
630 dummy_packet_conn_wait(&dev->conn);
631 ovs_mutex_unlock(&dev->mutex);
632 }
633 ovs_mutex_unlock(&dummy_list_mutex);
634 }
635
636 static struct netdev *
637 netdev_dummy_alloc(void)
638 {
639 struct netdev_dummy *netdev = xzalloc(sizeof *netdev);
640 return &netdev->up;
641 }
642
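/* Initializes a newly allocated dummy netdev: gives it a locally generated
 * Ethernet address of the form aa:55:xx:xx:xx:xx derived from an atomic
 * counter, a 1500-byte MTU and no packet connection, and inserts it into the
 * global 'dummy_list'. */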
643 static int
644 netdev_dummy_construct(struct netdev *netdev_)
645 {
646 static atomic_count next_n = ATOMIC_COUNT_INIT(0xaa550000);
647 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
648 unsigned int n;
649
650 n = atomic_count_inc(&next_n);
651
652 ovs_mutex_init(&netdev->mutex);
653 ovs_mutex_lock(&netdev->mutex);
654 netdev->hwaddr[0] = 0xaa;
655 netdev->hwaddr[1] = 0x55;
656 netdev->hwaddr[2] = n >> 24;
657 netdev->hwaddr[3] = n >> 16;
658 netdev->hwaddr[4] = n >> 8;
659 netdev->hwaddr[5] = n;
660 netdev->mtu = 1500;
661 netdev->flags = 0;
662 netdev->ifindex = -EOPNOTSUPP;
663
664 dummy_packet_conn_init(&netdev->conn);
665
666 list_init(&netdev->rxes);
667 ovs_mutex_unlock(&netdev->mutex);
668
669 ovs_mutex_lock(&dummy_list_mutex);
670 list_push_back(&dummy_list, &netdev->list_node);
671 ovs_mutex_unlock(&dummy_list_mutex);
672
673 return 0;
674 }
675
676 static void
677 netdev_dummy_destruct(struct netdev *netdev_)
678 {
679 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
680
681 ovs_mutex_lock(&dummy_list_mutex);
682 list_remove(&netdev->list_node);
683 ovs_mutex_unlock(&dummy_list_mutex);
684
685 ovs_mutex_lock(&netdev->mutex);
686 dummy_packet_conn_close(&netdev->conn);
687 netdev->conn.type = NONE;
688
689 ovs_mutex_unlock(&netdev->mutex);
690 ovs_mutex_destroy(&netdev->mutex);
691 }
692
693 static void
694 netdev_dummy_dealloc(struct netdev *netdev_)
695 {
696 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
697
698 free(netdev);
699 }
700
701 static int
702 netdev_dummy_get_config(const struct netdev *netdev_, struct smap *args)
703 {
704 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
705
706 ovs_mutex_lock(&netdev->mutex);
707
708 if (netdev->ifindex >= 0) {
709 smap_add_format(args, "ifindex", "%d", netdev->ifindex);
710 }
711
712 dummy_packet_conn_get_config(&netdev->conn, args);
713
714 ovs_mutex_unlock(&netdev->mutex);
715 return 0;
716 }
717
718 static int
719 netdev_dummy_get_in4(const struct netdev *netdev_,
720 struct in_addr *address, struct in_addr *netmask)
721 {
722 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
723
724 ovs_mutex_lock(&netdev->mutex);
725 *address = netdev->address;
726 *netmask = netdev->netmask;
727 ovs_mutex_unlock(&netdev->mutex);
728
729 return address->s_addr ? 0 : EADDRNOTAVAIL;
730 }
731
732 static int
733 netdev_dummy_set_in4(struct netdev *netdev_, struct in_addr address,
734 struct in_addr netmask)
735 {
736 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
737
738 ovs_mutex_lock(&netdev->mutex);
739 netdev->address = address;
740 netdev->netmask = netmask;
741 ovs_mutex_unlock(&netdev->mutex);
742
743 return 0;
744 }
745
746 static int
747 netdev_dummy_set_config(struct netdev *netdev_, const struct smap *args)
748 {
749 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
750 const char *pcap;
751
752 ovs_mutex_lock(&netdev->mutex);
753 netdev->ifindex = smap_get_int(args, "ifindex", -EOPNOTSUPP);
754
755 dummy_packet_conn_set_config(&netdev->conn, args);
756
757 if (netdev->rxq_pcap) {
758 fclose(netdev->rxq_pcap);
759 }
760 if (netdev->tx_pcap && netdev->tx_pcap != netdev->rxq_pcap) {
761 fclose(netdev->tx_pcap);
762 }
763 netdev->rxq_pcap = netdev->tx_pcap = NULL;
764 pcap = smap_get(args, "pcap");
765 if (pcap) {
766 netdev->rxq_pcap = netdev->tx_pcap = ovs_pcap_open(pcap, "ab");
767 } else {
768 const char *rxq_pcap = smap_get(args, "rxq_pcap");
769 const char *tx_pcap = smap_get(args, "tx_pcap");
770
771 if (rxq_pcap) {
772 netdev->rxq_pcap = ovs_pcap_open(rxq_pcap, "ab");
773 }
774 if (tx_pcap) {
775 netdev->tx_pcap = ovs_pcap_open(tx_pcap, "ab");
776 }
777 }
778
779 ovs_mutex_unlock(&netdev->mutex);
780
781 return 0;
782 }
783
784 static struct netdev_rxq *
785 netdev_dummy_rxq_alloc(void)
786 {
787 struct netdev_rxq_dummy *rx = xzalloc(sizeof *rx);
788 return &rx->up;
789 }
790
791 static int
792 netdev_dummy_rxq_construct(struct netdev_rxq *rxq_)
793 {
794 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
795 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
796
797 ovs_mutex_lock(&netdev->mutex);
798 list_push_back(&netdev->rxes, &rx->node);
799 list_init(&rx->recv_queue);
800 rx->recv_queue_len = 0;
801 rx->seq = seq_create();
802 ovs_mutex_unlock(&netdev->mutex);
803
804 return 0;
805 }
806
807 static void
808 netdev_dummy_rxq_destruct(struct netdev_rxq *rxq_)
809 {
810 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
811 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
812
813 ovs_mutex_lock(&netdev->mutex);
814 list_remove(&rx->node);
815 pkt_list_delete(&rx->recv_queue);
816 ovs_mutex_unlock(&netdev->mutex);
817 seq_destroy(rx->seq);
818 }
819
820 static void
821 netdev_dummy_rxq_dealloc(struct netdev_rxq *rxq_)
822 {
823 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
824
825 free(rx);
826 }
827
828 static int
829 netdev_dummy_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **arr,
830 int *c)
831 {
832 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
833 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
834 struct dp_packet *packet;
835
836 ovs_mutex_lock(&netdev->mutex);
837 if (!list_is_empty(&rx->recv_queue)) {
838 struct pkt_list_node *pkt_node;
839
840 ASSIGN_CONTAINER(pkt_node, list_pop_front(&rx->recv_queue), list_node);
841 packet = pkt_node->pkt;
842 free(pkt_node);
843 rx->recv_queue_len--;
844 } else {
845 packet = NULL;
846 }
847 ovs_mutex_unlock(&netdev->mutex);
848
849 if (!packet) {
850 return EAGAIN;
851 }
852 ovs_mutex_lock(&netdev->mutex);
853 netdev->stats.rx_packets++;
854 netdev->stats.rx_bytes += dp_packet_size(packet);
855 ovs_mutex_unlock(&netdev->mutex);
856
857 dp_packet_pad(packet);
858 dp_packet_set_rss_hash(packet, 0);
859
860 arr[0] = packet;
861 *c = 1;
862 return 0;
863 }
864
865 static void
866 netdev_dummy_rxq_wait(struct netdev_rxq *rxq_)
867 {
868 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
869 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
870 uint64_t seq = seq_read(rx->seq);
871
872 ovs_mutex_lock(&netdev->mutex);
873 if (!list_is_empty(&rx->recv_queue)) {
874 poll_immediate_wake();
875 } else {
876 seq_wait(rx->seq, seq);
877 }
878 ovs_mutex_unlock(&netdev->mutex);
879 }
880
881 static int
882 netdev_dummy_rxq_drain(struct netdev_rxq *rxq_)
883 {
884 struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
885 struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
886
887 ovs_mutex_lock(&netdev->mutex);
888 pkt_list_delete(&rx->recv_queue);
889 rx->recv_queue_len = 0;
890 ovs_mutex_unlock(&netdev->mutex);
891
892 seq_change(rx->seq);
893
894 return 0;
895 }
896
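/* Transmits the 'cnt' packets in 'pkts'.  Each packet is checked against the
 * minimum Ethernet frame size and the device MTU, forwarded over the
 * configured packet connection (if any), answered with a synthesized ARP
 * reply when it is an ARP request for the device's IPv4 address, and written
 * to the tx pcap file when one is open.  Returns 0, or EMSGSIZE if a packet
 * fails the size checks. */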
897 static int
898 netdev_dummy_send(struct netdev *netdev, int qid OVS_UNUSED,
899 struct dp_packet **pkts, int cnt, bool may_steal)
900 {
901 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
902 int error = 0;
903 int i;
904
905 for (i = 0; i < cnt; i++) {
906 const void *buffer = dp_packet_data(pkts[i]);
907 size_t size = dp_packet_size(pkts[i]);
908
909 if (size < ETH_HEADER_LEN) {
910 error = EMSGSIZE;
911 break;
912 } else {
913 const struct eth_header *eth = buffer;
914 int max_size;
915
916 ovs_mutex_lock(&dev->mutex);
917 max_size = dev->mtu + ETH_HEADER_LEN;
918 ovs_mutex_unlock(&dev->mutex);
919
920 if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
921 max_size += VLAN_HEADER_LEN;
922 }
923 if (size > max_size) {
924 error = EMSGSIZE;
925 break;
926 }
927 }
928
929 ovs_mutex_lock(&dev->mutex);
930 dev->stats.tx_packets++;
931 dev->stats.tx_bytes += size;
932
933 dummy_packet_conn_send(&dev->conn, buffer, size);
934
935 /* Reply to ARP requests for 'dev''s assigned IP address. */
936 if (dev->address.s_addr) {
937 struct dp_packet packet;
938 struct flow flow;
939
940 dp_packet_use_const(&packet, buffer, size);
941 flow_extract(&packet, &flow);
942 if (flow.dl_type == htons(ETH_TYPE_ARP)
943 && flow.nw_proto == ARP_OP_REQUEST
944 && flow.nw_dst == dev->address.s_addr) {
945 struct dp_packet *reply = dp_packet_new(0);
946 compose_arp(reply, ARP_OP_REPLY, dev->hwaddr, flow.dl_src,
947 false, flow.nw_dst, flow.nw_src);
948 netdev_dummy_queue_packet(dev, reply);
949 }
950 }
951
952 if (dev->tx_pcap) {
953 struct dp_packet packet;
954
955 dp_packet_use_const(&packet, buffer, size);
956 ovs_pcap_write(dev->tx_pcap, &packet);
957 fflush(dev->tx_pcap);
958 }
959
960 ovs_mutex_unlock(&dev->mutex);
961 }
962
963 if (may_steal) {
964 for (i = 0; i < cnt; i++) {
965 dp_packet_delete(pkts[i]);
966 }
967 }
968
969 return error;
970 }
971
972 static int
973 netdev_dummy_set_etheraddr(struct netdev *netdev,
974 const uint8_t mac[ETH_ADDR_LEN])
975 {
976 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
977
978 ovs_mutex_lock(&dev->mutex);
979 if (!eth_addr_equals(dev->hwaddr, mac)) {
980 memcpy(dev->hwaddr, mac, ETH_ADDR_LEN);
981 netdev_change_seq_changed(netdev);
982 }
983 ovs_mutex_unlock(&dev->mutex);
984
985 return 0;
986 }
987
988 static int
989 netdev_dummy_get_etheraddr(const struct netdev *netdev,
990 uint8_t mac[ETH_ADDR_LEN])
991 {
992 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
993
994 ovs_mutex_lock(&dev->mutex);
995 memcpy(mac, dev->hwaddr, ETH_ADDR_LEN);
996 ovs_mutex_unlock(&dev->mutex);
997
998 return 0;
999 }
1000
1001 static int
1002 netdev_dummy_get_mtu(const struct netdev *netdev, int *mtup)
1003 {
1004 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1005
1006 ovs_mutex_lock(&dev->mutex);
1007 *mtup = dev->mtu;
1008 ovs_mutex_unlock(&dev->mutex);
1009
1010 return 0;
1011 }
1012
1013 static int
1014 netdev_dummy_set_mtu(const struct netdev *netdev, int mtu)
1015 {
1016 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1017
1018 ovs_mutex_lock(&dev->mutex);
1019 dev->mtu = mtu;
1020 ovs_mutex_unlock(&dev->mutex);
1021
1022 return 0;
1023 }
1024
1025 static int
1026 netdev_dummy_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
1027 {
1028 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1029
1030 ovs_mutex_lock(&dev->mutex);
1031 *stats = dev->stats;
1032 ovs_mutex_unlock(&dev->mutex);
1033
1034 return 0;
1035 }
1036
1037 static int
1038 netdev_dummy_get_ifindex(const struct netdev *netdev)
1039 {
1040 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1041 int ifindex;
1042
1043 ovs_mutex_lock(&dev->mutex);
1044 ifindex = dev->ifindex;
1045 ovs_mutex_unlock(&dev->mutex);
1046
1047 return ifindex;
1048 }
1049
1050 static int
1051 netdev_dummy_update_flags__(struct netdev_dummy *netdev,
1052 enum netdev_flags off, enum netdev_flags on,
1053 enum netdev_flags *old_flagsp)
1054 OVS_REQUIRES(netdev->mutex)
1055 {
1056 if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
1057 return EINVAL;
1058 }
1059
1060 *old_flagsp = netdev->flags;
1061 netdev->flags |= on;
1062 netdev->flags &= ~off;
1063 if (*old_flagsp != netdev->flags) {
1064 netdev_change_seq_changed(&netdev->up);
1065 }
1066
1067 return 0;
1068 }
1069
1070 static int
1071 netdev_dummy_update_flags(struct netdev *netdev_,
1072 enum netdev_flags off, enum netdev_flags on,
1073 enum netdev_flags *old_flagsp)
1074 {
1075 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
1076 int error;
1077
1078 ovs_mutex_lock(&netdev->mutex);
1079 error = netdev_dummy_update_flags__(netdev, off, on, old_flagsp);
1080 ovs_mutex_unlock(&netdev->mutex);
1081
1082 return error;
1083 }
1084 \f
1085 /* Helper functions. */
1086
1087 static const struct netdev_class dummy_class = {
1088 "dummy",
1089 NULL, /* init */
1090 netdev_dummy_run,
1091 netdev_dummy_wait,
1092
1093 netdev_dummy_alloc,
1094 netdev_dummy_construct,
1095 netdev_dummy_destruct,
1096 netdev_dummy_dealloc,
1097 netdev_dummy_get_config,
1098 netdev_dummy_set_config,
1099 NULL, /* get_tunnel_config */
1100 NULL, /* build header */
1101 NULL, /* push header */
1102 NULL, /* pop header */
1103 NULL, /* get_numa_id */
1104 NULL, /* set_multiq */
1105
1106 netdev_dummy_send, /* send */
1107 NULL, /* send_wait */
1108
1109 netdev_dummy_set_etheraddr,
1110 netdev_dummy_get_etheraddr,
1111 netdev_dummy_get_mtu,
1112 netdev_dummy_set_mtu,
1113 netdev_dummy_get_ifindex,
1114 NULL, /* get_carrier */
1115 NULL, /* get_carrier_resets */
1116 NULL, /* get_miimon */
1117 netdev_dummy_get_stats,
1118
1119 NULL, /* get_features */
1120 NULL, /* set_advertisements */
1121
1122 NULL, /* set_policing */
1123 NULL, /* get_qos_types */
1124 NULL, /* get_qos_capabilities */
1125 NULL, /* get_qos */
1126 NULL, /* set_qos */
1127 NULL, /* get_queue */
1128 NULL, /* set_queue */
1129 NULL, /* delete_queue */
1130 NULL, /* get_queue_stats */
1131 NULL, /* queue_dump_start */
1132 NULL, /* queue_dump_next */
1133 NULL, /* queue_dump_done */
1134 NULL, /* dump_queue_stats */
1135
1136 netdev_dummy_get_in4, /* get_in4 */
1137 NULL, /* set_in4 */
1138 NULL, /* get_in6 */
1139 NULL, /* add_router */
1140 NULL, /* get_next_hop */
1141 NULL, /* get_status */
1142 NULL, /* arp_lookup */
1143
1144 netdev_dummy_update_flags,
1145
1146 netdev_dummy_rxq_alloc,
1147 netdev_dummy_rxq_construct,
1148 netdev_dummy_rxq_destruct,
1149 netdev_dummy_rxq_dealloc,
1150 netdev_dummy_rxq_recv,
1151 netdev_dummy_rxq_wait,
1152 netdev_dummy_rxq_drain,
1153 };
1154
1155 static void
1156 pkt_list_delete(struct ovs_list *l)
1157 {
1158 struct pkt_list_node *pkt;
1159
1160 LIST_FOR_EACH_POP(pkt, list_node, l) {
1161 dp_packet_delete(pkt->pkt);
1162 free(pkt);
1163 }
1164 }
1165
1166 static struct dp_packet *
1167 eth_from_packet_or_flow(const char *s)
1168 {
1169 enum odp_key_fitness fitness;
1170 struct dp_packet *packet;
1171 struct ofpbuf odp_key;
1172 struct flow flow;
1173 int error;
1174
1175 if (!eth_from_hex(s, &packet)) {
1176 return packet;
1177 }
1178
1179 /* Convert string to datapath key.
1180 *
1181 * It would actually be nicer to parse an OpenFlow-like flow key here, but
1182 * the code for that currently calls exit() on parse error. We have to
1183 * settle for parsing a datapath key for now.
1184 */
1185 ofpbuf_init(&odp_key, 0);
1186 error = odp_flow_from_string(s, NULL, &odp_key, NULL);
1187 if (error) {
1188 ofpbuf_uninit(&odp_key);
1189 return NULL;
1190 }
1191
1192 /* Convert odp_key to flow. */
1193 fitness = odp_flow_key_to_flow(odp_key.data, odp_key.size, &flow);
1194 if (fitness == ODP_FIT_ERROR) {
1195 ofpbuf_uninit(&odp_key);
1196 return NULL;
1197 }
1198
1199 packet = dp_packet_new(0);
1200 flow_compose(packet, &flow);
1201
1202 ofpbuf_uninit(&odp_key);
1203 return packet;
1204 }
1205
1206 static void
1207 netdev_dummy_queue_packet__(struct netdev_rxq_dummy *rx, struct dp_packet *packet)
1208 {
1209 struct pkt_list_node *pkt_node = xmalloc(sizeof *pkt_node);
1210
1211 pkt_node->pkt = packet;
1212 list_push_back(&rx->recv_queue, &pkt_node->list_node);
1213 rx->recv_queue_len++;
1214 seq_change(rx->seq);
1215 }
1216
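/* Delivers 'packet' to every receive queue of 'dummy' that still has room,
 * cloning it for all recipients but the last, and records it in the rx pcap
 * file if one is configured.  Frees the packet if no queue can accept it. */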
1217 static void
1218 netdev_dummy_queue_packet(struct netdev_dummy *dummy, struct dp_packet *packet)
1219 OVS_REQUIRES(dummy->mutex)
1220 {
1221 struct netdev_rxq_dummy *rx, *prev;
1222
1223 if (dummy->rxq_pcap) {
1224 ovs_pcap_write(dummy->rxq_pcap, packet);
1225 fflush(dummy->rxq_pcap);
1226 }
1227 prev = NULL;
1228 LIST_FOR_EACH (rx, node, &dummy->rxes) {
1229 if (rx->recv_queue_len < NETDEV_DUMMY_MAX_QUEUE) {
1230 if (prev) {
1231 netdev_dummy_queue_packet__(prev, dp_packet_clone(packet));
1232 }
1233 prev = rx;
1234 }
1235 }
1236 if (prev) {
1237 netdev_dummy_queue_packet__(prev, packet);
1238 } else {
1239 dp_packet_delete(packet);
1240 }
1241 }
1242
1243 static void
1244 netdev_dummy_receive(struct unixctl_conn *conn,
1245 int argc, const char *argv[], void *aux OVS_UNUSED)
1246 {
1247 struct netdev_dummy *dummy_dev;
1248 struct netdev *netdev;
1249 int i;
1250
1251 netdev = netdev_from_name(argv[1]);
1252 if (!netdev || !is_dummy_class(netdev->netdev_class)) {
1253 unixctl_command_reply_error(conn, "no such dummy netdev");
1254 goto exit;
1255 }
1256 dummy_dev = netdev_dummy_cast(netdev);
1257
1258 for (i = 2; i < argc; i++) {
1259 struct dp_packet *packet;
1260
1261 packet = eth_from_packet_or_flow(argv[i]);
1262 if (!packet) {
1263 unixctl_command_reply_error(conn, "bad packet syntax");
1264 goto exit;
1265 }
1266
1267 ovs_mutex_lock(&dummy_dev->mutex);
1268 netdev_dummy_queue_packet(dummy_dev, packet);
1269 ovs_mutex_unlock(&dummy_dev->mutex);
1270 }
1271
1272 unixctl_command_reply(conn, NULL);
1273
1274 exit:
1275 netdev_close(netdev);
1276 }
1277
1278 static void
1279 netdev_dummy_set_admin_state__(struct netdev_dummy *dev, bool admin_state)
1280 OVS_REQUIRES(dev->mutex)
1281 {
1282 enum netdev_flags old_flags;
1283
1284 if (admin_state) {
1285 netdev_dummy_update_flags__(dev, 0, NETDEV_UP, &old_flags);
1286 } else {
1287 netdev_dummy_update_flags__(dev, NETDEV_UP, 0, &old_flags);
1288 }
1289 }
1290
1291 static void
1292 netdev_dummy_set_admin_state(struct unixctl_conn *conn, int argc,
1293 const char *argv[], void *aux OVS_UNUSED)
1294 {
1295 bool up;
1296
1297 if (!strcasecmp(argv[argc - 1], "up")) {
1298 up = true;
1299     } else if (!strcasecmp(argv[argc - 1], "down")) {
1300 up = false;
1301 } else {
1302 unixctl_command_reply_error(conn, "Invalid Admin State");
1303 return;
1304 }
1305
1306 if (argc > 2) {
1307 struct netdev *netdev = netdev_from_name(argv[1]);
1308 if (netdev && is_dummy_class(netdev->netdev_class)) {
1309 struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);
1310
1311 ovs_mutex_lock(&dummy_dev->mutex);
1312 netdev_dummy_set_admin_state__(dummy_dev, up);
1313 ovs_mutex_unlock(&dummy_dev->mutex);
1314
1315 netdev_close(netdev);
1316 } else {
1317 unixctl_command_reply_error(conn, "Unknown Dummy Interface");
1318 netdev_close(netdev);
1319 return;
1320 }
1321 } else {
1322 struct netdev_dummy *netdev;
1323
1324 ovs_mutex_lock(&dummy_list_mutex);
1325 LIST_FOR_EACH (netdev, list_node, &dummy_list) {
1326 ovs_mutex_lock(&netdev->mutex);
1327 netdev_dummy_set_admin_state__(netdev, up);
1328 ovs_mutex_unlock(&netdev->mutex);
1329 }
1330 ovs_mutex_unlock(&dummy_list_mutex);
1331 }
1332 unixctl_command_reply(conn, "OK");
1333 }
1334
1335 static void
1336 display_conn_state__(struct ds *s, const char *name,
1337 enum dummy_netdev_conn_state state)
1338 {
1339 ds_put_format(s, "%s: ", name);
1340
1341 switch (state) {
1342 case CONN_STATE_CONNECTED:
1343 ds_put_cstr(s, "connected\n");
1344 break;
1345
1346 case CONN_STATE_NOT_CONNECTED:
1347 ds_put_cstr(s, "disconnected\n");
1348 break;
1349
1350 case CONN_STATE_UNKNOWN:
1351 default:
1352 ds_put_cstr(s, "unknown\n");
1353 break;
1354     }
1355 }
1356
1357 static void
1358 netdev_dummy_conn_state(struct unixctl_conn *conn, int argc,
1359 const char *argv[], void *aux OVS_UNUSED)
1360 {
1361 enum dummy_netdev_conn_state state = CONN_STATE_UNKNOWN;
1362 struct ds s;
1363
1364 ds_init(&s);
1365
1366 if (argc > 1) {
1367 const char *dev_name = argv[1];
1368 struct netdev *netdev = netdev_from_name(dev_name);
1369
1370 if (netdev && is_dummy_class(netdev->netdev_class)) {
1371 struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);
1372
1373 ovs_mutex_lock(&dummy_dev->mutex);
1374 state = dummy_netdev_get_conn_state(&dummy_dev->conn);
1375 ovs_mutex_unlock(&dummy_dev->mutex);
1376
1377 netdev_close(netdev);
1378 }
1379 display_conn_state__(&s, dev_name, state);
1380 } else {
1381 struct netdev_dummy *netdev;
1382
1383 ovs_mutex_lock(&dummy_list_mutex);
1384 LIST_FOR_EACH (netdev, list_node, &dummy_list) {
1385 ovs_mutex_lock(&netdev->mutex);
1386 state = dummy_netdev_get_conn_state(&netdev->conn);
1387 ovs_mutex_unlock(&netdev->mutex);
1388 if (state != CONN_STATE_UNKNOWN) {
1389 display_conn_state__(&s, netdev->up.name, state);
1390 }
1391 }
1392 ovs_mutex_unlock(&dummy_list_mutex);
1393 }
1394
1395 unixctl_command_reply(conn, ds_cstr(&s));
1396 ds_destroy(&s);
1397 }
1398
1399 static void
1400 netdev_dummy_ip4addr(struct unixctl_conn *conn, int argc OVS_UNUSED,
1401 const char *argv[], void *aux OVS_UNUSED)
1402 {
1403 struct netdev *netdev = netdev_from_name(argv[1]);
1404
1405 if (netdev && is_dummy_class(netdev->netdev_class)) {
1406 struct in_addr ip;
1407 uint16_t plen;
1408
1409 if (ovs_scan(argv[2], IP_SCAN_FMT"/%"SCNi16,
1410 IP_SCAN_ARGS(&ip.s_addr), &plen)) {
1411 struct in_addr mask;
1412
1413 mask.s_addr = be32_prefix_mask(plen);
1414 netdev_dummy_set_in4(netdev, ip, mask);
1415 unixctl_command_reply(conn, "OK");
1416 } else {
1417 unixctl_command_reply(conn, "Invalid parameters");
1418 }
1419
1420 netdev_close(netdev);
1421 } else {
1422 unixctl_command_reply_error(conn, "Unknown Dummy Interface");
1423 netdev_close(netdev);
1424 return;
1425 }
1426
1427 }
1428
1429 static void
1430 netdev_dummy_override(const char *type)
1431 {
1432 if (!netdev_unregister_provider(type)) {
1433 struct netdev_class *class;
1434 int error;
1435
1436 class = xmemdup(&dummy_class, sizeof dummy_class);
1437 class->type = xstrdup(type);
1438 error = netdev_register_provider(class);
1439 if (error) {
1440 VLOG_ERR("%s: failed to register netdev provider (%s)",
1441 type, ovs_strerror(error));
1442 free(CONST_CAST(char *, class->type));
1443 free(class);
1444 }
1445 }
1446 }
1447
1448 void
1449 netdev_dummy_register(enum dummy_level level)
1450 {
1451 unixctl_command_register("netdev-dummy/receive", "name packet|flow...",
1452 2, INT_MAX, netdev_dummy_receive, NULL);
1453 unixctl_command_register("netdev-dummy/set-admin-state",
1454 "[netdev] up|down", 1, 2,
1455 netdev_dummy_set_admin_state, NULL);
1456 unixctl_command_register("netdev-dummy/conn-state",
1457 "[netdev]", 0, 1,
1458 netdev_dummy_conn_state, NULL);
1459 unixctl_command_register("netdev-dummy/ip4addr",
1460 "[netdev] ipaddr/mask-prefix-len", 2, 2,
1461 netdev_dummy_ip4addr, NULL);
1462
1463 if (level == DUMMY_OVERRIDE_ALL) {
1464 struct sset types;
1465 const char *type;
1466
1467 sset_init(&types);
1468 netdev_enumerate_types(&types);
1469 SSET_FOR_EACH (type, &types) {
1470 if (strcmp(type, "patch")) {
1471 netdev_dummy_override(type);
1472 }
1473 }
1474 sset_destroy(&types);
1475 } else if (level == DUMMY_OVERRIDE_SYSTEM) {
1476 netdev_dummy_override("system");
1477 }
1478 netdev_register_provider(&dummy_class);
1479
1480 netdev_vport_tunnel_register();
1481 }