/*
 * Copyright (c) 2010, 2011, 2012, 2013, 2015, 2016, 2017 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>

#include "dummy.h"

#include <errno.h>
#include <unistd.h>

#include "dp-packet.h"
#include "dpif-netdev.h"
#include "flow.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "odp-util.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/list.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/ofpbuf.h"
#include "openvswitch/vlog.h"
#include "ovs-atomic.h"
#include "packets.h"
#include "pcap-file.h"
#include "poll-loop.h"
#include "openvswitch/shash.h"
#include "sset.h"
#include "stream.h"
#include "unaligned.h"
#include "timeval.h"
#include "unixctl.h"
#include "reconnect.h"

VLOG_DEFINE_THIS_MODULE(netdev_dummy);

struct reconnect;

struct dummy_packet_stream {
    struct stream *stream;
    struct dp_packet rxbuf;
    struct ovs_list txq;
};

enum dummy_packet_conn_type {
    NONE,       /* No connection is configured. */
    PASSIVE,    /* Listener. */
    ACTIVE      /* Connect to listener. */
};

enum dummy_netdev_conn_state {
    CONN_STATE_CONNECTED,      /* Listener connected. */
    CONN_STATE_NOT_CONNECTED,  /* Listener not connected. */
    CONN_STATE_UNKNOWN,        /* No relevant information. */
};

struct dummy_packet_pconn {
    struct pstream *pstream;
    struct dummy_packet_stream **streams;
    size_t n_streams;
};

struct dummy_packet_rconn {
    struct dummy_packet_stream *rstream;
    struct reconnect *reconnect;
};

struct dummy_packet_conn {
    enum dummy_packet_conn_type type;
    union {
        struct dummy_packet_pconn pconn;
        struct dummy_packet_rconn rconn;
    } u;
};

struct pkt_list_node {
    struct dp_packet *pkt;
    struct ovs_list list_node;
};

/* Protects 'dummy_list'. */
static struct ovs_mutex dummy_list_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dummy_dev's. */
static struct ovs_list dummy_list OVS_GUARDED_BY(dummy_list_mutex)
    = OVS_LIST_INITIALIZER(&dummy_list);

struct netdev_dummy {
    struct netdev up;

    /* In dummy_list. */
    struct ovs_list list_node OVS_GUARDED_BY(dummy_list_mutex);

    /* Protects all members below. */
    struct ovs_mutex mutex OVS_ACQ_AFTER(dummy_list_mutex);

    struct eth_addr hwaddr OVS_GUARDED;
    int mtu OVS_GUARDED;
    struct netdev_stats stats OVS_GUARDED;
    enum netdev_flags flags OVS_GUARDED;
    int ifindex OVS_GUARDED;
    int numa_id OVS_GUARDED;

    struct dummy_packet_conn conn OVS_GUARDED;

    FILE *tx_pcap, *rxq_pcap OVS_GUARDED;

    struct in_addr address, netmask;
    struct in6_addr ipv6, ipv6_mask;
    struct ovs_list rxes OVS_GUARDED; /* List of child "netdev_rxq_dummy"s. */

    /* The following properties are for dummy-pmd and they cannot be changed
     * when a device is running, so we remember the request and update them
     * next time netdev_dummy_reconfigure() is called. */
    int requested_n_txq OVS_GUARDED;
    int requested_n_rxq OVS_GUARDED;
    int requested_numa_id OVS_GUARDED;
};

/* Max 'recv_queue_len' in struct netdev_dummy. */
#define NETDEV_DUMMY_MAX_QUEUE 100

struct netdev_rxq_dummy {
    struct netdev_rxq up;
    struct ovs_list node;       /* In netdev_dummy's "rxes" list. */
    struct ovs_list recv_queue;
    int recv_queue_len;         /* ovs_list_size(&recv_queue). */
    struct seq *seq;            /* Reports newly queued packets. */
};

static unixctl_cb_func netdev_dummy_set_admin_state;
static int netdev_dummy_construct(struct netdev *);
static void netdev_dummy_queue_packet(struct netdev_dummy *,
                                      struct dp_packet *, int);

static void dummy_packet_stream_close(struct dummy_packet_stream *);

static void pkt_list_delete(struct ovs_list *);

static bool
is_dummy_class(const struct netdev_class *class)
{
    return class->construct == netdev_dummy_construct;
}

static struct netdev_dummy *
netdev_dummy_cast(const struct netdev *netdev)
{
    ovs_assert(is_dummy_class(netdev_get_class(netdev)));
    return CONTAINER_OF(netdev, struct netdev_dummy, up);
}

static struct netdev_rxq_dummy *
netdev_rxq_dummy_cast(const struct netdev_rxq *rx)
{
    ovs_assert(is_dummy_class(netdev_get_class(rx->netdev)));
    return CONTAINER_OF(rx, struct netdev_rxq_dummy, up);
}

static void
dummy_packet_stream_init(struct dummy_packet_stream *s, struct stream *stream)
{
    int rxbuf_size = stream ? 2048 : 0;
    s->stream = stream;
    dp_packet_init(&s->rxbuf, rxbuf_size);
    ovs_list_init(&s->txq);
}

static struct dummy_packet_stream *
dummy_packet_stream_create(struct stream *stream)
{
    struct dummy_packet_stream *s;

    s = xzalloc(sizeof *s);
    dummy_packet_stream_init(s, stream);

    return s;
}

static void
dummy_packet_stream_wait(struct dummy_packet_stream *s)
{
    stream_run_wait(s->stream);
    if (!ovs_list_is_empty(&s->txq)) {
        stream_send_wait(s->stream);
    }
    stream_recv_wait(s->stream);
}

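/* Wire format used by dummy packet streams: each Ethernet frame is preceded
 * by its length as a 2-byte big-endian integer.  As an illustrative example
 * (not taken from a real capture), a 60-byte frame travels on the stream as
 * the two bytes 00 3c followed by the 60 frame bytes.
 * dummy_packet_stream_send() below prepends the length prefix, and
 * dummy_packet_stream_run() strips it again on receive. */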
static void
dummy_packet_stream_send(struct dummy_packet_stream *s, const void *buffer, size_t size)
{
    if (ovs_list_size(&s->txq) < NETDEV_DUMMY_MAX_QUEUE) {
        struct dp_packet *b;
        struct pkt_list_node *node;

        b = dp_packet_clone_data_with_headroom(buffer, size, 2);
        put_unaligned_be16(dp_packet_push_uninit(b, 2), htons(size));

        node = xmalloc(sizeof *node);
        node->pkt = b;
        ovs_list_push_back(&s->txq, &node->list_node);
    }
}

static int
dummy_packet_stream_run(struct netdev_dummy *dev, struct dummy_packet_stream *s)
{
    int error = 0;
    size_t n;

    stream_run(s->stream);

    if (!ovs_list_is_empty(&s->txq)) {
        struct pkt_list_node *txbuf_node;
        struct dp_packet *txbuf;
        int retval;

        ASSIGN_CONTAINER(txbuf_node, ovs_list_front(&s->txq), list_node);
        txbuf = txbuf_node->pkt;
        retval = stream_send(s->stream, dp_packet_data(txbuf), dp_packet_size(txbuf));

        if (retval > 0) {
            dp_packet_pull(txbuf, retval);
            if (!dp_packet_size(txbuf)) {
                ovs_list_remove(&txbuf_node->list_node);
                free(txbuf_node);
                dp_packet_delete(txbuf);
            }
        } else if (retval != -EAGAIN) {
            error = -retval;
        }
    }

    if (!error) {
        if (dp_packet_size(&s->rxbuf) < 2) {
            n = 2 - dp_packet_size(&s->rxbuf);
        } else {
            uint16_t frame_len;

            frame_len = ntohs(get_unaligned_be16(dp_packet_data(&s->rxbuf)));
            if (frame_len < ETH_HEADER_LEN) {
                error = EPROTO;
                n = 0;
            } else {
                n = (2 + frame_len) - dp_packet_size(&s->rxbuf);
            }
        }
    }
    if (!error) {
        int retval;

        dp_packet_prealloc_tailroom(&s->rxbuf, n);
        retval = stream_recv(s->stream, dp_packet_tail(&s->rxbuf), n);

        if (retval > 0) {
            dp_packet_set_size(&s->rxbuf, dp_packet_size(&s->rxbuf) + retval);
            if (retval == n && dp_packet_size(&s->rxbuf) > 2) {
                dp_packet_pull(&s->rxbuf, 2);
                netdev_dummy_queue_packet(dev,
                                          dp_packet_clone(&s->rxbuf), 0);
                dp_packet_clear(&s->rxbuf);
            }
        } else if (retval != -EAGAIN) {
            error = (retval < 0 ? -retval
                     : dp_packet_size(&s->rxbuf) ? EPROTO
                     : EOF);
        }
    }

    return error;
}

static void
dummy_packet_stream_close(struct dummy_packet_stream *s)
{
    stream_close(s->stream);
    dp_packet_uninit(&s->rxbuf);
    pkt_list_delete(&s->txq);
}

static void
dummy_packet_conn_init(struct dummy_packet_conn *conn)
{
    memset(conn, 0, sizeof *conn);
    conn->type = NONE;
}

static void
dummy_packet_conn_get_config(struct dummy_packet_conn *conn, struct smap *args)
{

    switch (conn->type) {
    case PASSIVE:
        smap_add(args, "pstream", pstream_get_name(conn->u.pconn.pstream));
        break;

    case ACTIVE:
        smap_add(args, "stream", stream_get_name(conn->u.rconn.rstream->stream));
        break;

    case NONE:
    default:
        break;
    }
}

static void
dummy_packet_conn_close(struct dummy_packet_conn *conn)
{
    int i;
    struct dummy_packet_pconn *pconn = &conn->u.pconn;
    struct dummy_packet_rconn *rconn = &conn->u.rconn;

    switch (conn->type) {
    case PASSIVE:
        pstream_close(pconn->pstream);
        for (i = 0; i < pconn->n_streams; i++) {
            dummy_packet_stream_close(pconn->streams[i]);
            free(pconn->streams[i]);
        }
        free(pconn->streams);
        pconn->pstream = NULL;
        pconn->streams = NULL;
        break;

    case ACTIVE:
        dummy_packet_stream_close(rconn->rstream);
        free(rconn->rstream);
        rconn->rstream = NULL;
        reconnect_destroy(rconn->reconnect);
        rconn->reconnect = NULL;
        break;

    case NONE:
    default:
        break;
    }

    conn->type = NONE;
    memset(conn, 0, sizeof *conn);
}

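/* Applies the "stream"/"pstream" keys from the device's 'args' smap.  At
 * most one of the two may be set: "pstream" makes the device listen for
 * incoming connections, "stream" makes it connect out.  As a hypothetical
 * example, options:pstream=punix:/tmp/p0.sock on one dummy device and
 * options:stream=unix:/tmp/p0.sock on another would connect the pair. */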
static void
dummy_packet_conn_set_config(struct dummy_packet_conn *conn,
                             const struct smap *args)
{
    const char *pstream = smap_get(args, "pstream");
    const char *stream = smap_get(args, "stream");

    if (pstream && stream) {
        VLOG_WARN("Open failed: both %s and %s are configured",
                  pstream, stream);
        return;
    }

    switch (conn->type) {
    case PASSIVE:
        if (pstream &&
            !strcmp(pstream_get_name(conn->u.pconn.pstream), pstream)) {
            return;
        }
        dummy_packet_conn_close(conn);
        break;
    case ACTIVE:
        if (stream &&
            !strcmp(stream_get_name(conn->u.rconn.rstream->stream), stream)) {
            return;
        }
        dummy_packet_conn_close(conn);
        break;
    case NONE:
    default:
        break;
    }

    if (pstream) {
        int error;

        error = pstream_open(pstream, &conn->u.pconn.pstream, DSCP_DEFAULT);
        if (error) {
            VLOG_WARN("%s: open failed (%s)", pstream, ovs_strerror(error));
        } else {
            conn->type = PASSIVE;
        }
    }

    if (stream) {
        int error;
        struct stream *active_stream;
        struct reconnect *reconnect;

        reconnect = reconnect_create(time_msec());
        reconnect_set_name(reconnect, stream);
        reconnect_set_passive(reconnect, false, time_msec());
        reconnect_enable(reconnect, time_msec());
        reconnect_set_backoff(reconnect, 100, INT_MAX);
        reconnect_set_probe_interval(reconnect, 0);
        conn->u.rconn.reconnect = reconnect;
        conn->type = ACTIVE;

        error = stream_open(stream, &active_stream, DSCP_DEFAULT);
        conn->u.rconn.rstream = dummy_packet_stream_create(active_stream);

        switch (error) {
        case 0:
            reconnect_connected(reconnect, time_msec());
            break;

        case EAGAIN:
            reconnect_connecting(reconnect, time_msec());
            break;

        default:
            reconnect_connect_failed(reconnect, time_msec(), error);
            stream_close(active_stream);
            conn->u.rconn.rstream->stream = NULL;
            break;
        }
    }
}

static void
dummy_pconn_run(struct netdev_dummy *dev)
    OVS_REQUIRES(dev->mutex)
{
    struct stream *new_stream;
    struct dummy_packet_pconn *pconn = &dev->conn.u.pconn;
    int error;
    size_t i;

    error = pstream_accept(pconn->pstream, &new_stream);
    if (!error) {
        struct dummy_packet_stream *s;

        pconn->streams = xrealloc(pconn->streams,
                                  ((pconn->n_streams + 1)
                                   * sizeof s));
        s = xmalloc(sizeof *s);
        pconn->streams[pconn->n_streams++] = s;
        dummy_packet_stream_init(s, new_stream);
    } else if (error != EAGAIN) {
        VLOG_WARN("%s: accept failed (%s)",
                  pstream_get_name(pconn->pstream), ovs_strerror(error));
        pstream_close(pconn->pstream);
        pconn->pstream = NULL;
        dev->conn.type = NONE;
    }

    for (i = 0; i < pconn->n_streams; ) {
        struct dummy_packet_stream *s = pconn->streams[i];

        error = dummy_packet_stream_run(dev, s);
        if (error) {
            VLOG_DBG("%s: closing connection (%s)",
                     stream_get_name(s->stream),
                     ovs_retval_to_string(error));
            dummy_packet_stream_close(s);
            free(s);
            pconn->streams[i] = pconn->streams[--pconn->n_streams];
        } else {
            i++;
        }
    }
}

static void
dummy_rconn_run(struct netdev_dummy *dev)
    OVS_REQUIRES(dev->mutex)
{
    struct dummy_packet_rconn *rconn = &dev->conn.u.rconn;

    switch (reconnect_run(rconn->reconnect, time_msec())) {
    case RECONNECT_CONNECT:
        {
            int error;

            if (rconn->rstream->stream) {
                error = stream_connect(rconn->rstream->stream);
            } else {
                error = stream_open(reconnect_get_name(rconn->reconnect),
                                    &rconn->rstream->stream, DSCP_DEFAULT);
            }

            switch (error) {
            case 0:
                reconnect_connected(rconn->reconnect, time_msec());
                break;

            case EAGAIN:
                reconnect_connecting(rconn->reconnect, time_msec());
                break;

            default:
                reconnect_connect_failed(rconn->reconnect, time_msec(), error);
                stream_close(rconn->rstream->stream);
                rconn->rstream->stream = NULL;
                break;
            }
        }
        break;

    case RECONNECT_DISCONNECT:
    case RECONNECT_PROBE:
    default:
        break;
    }

    if (reconnect_is_connected(rconn->reconnect)) {
        int err;

        err = dummy_packet_stream_run(dev, rconn->rstream);

        if (err) {
            reconnect_disconnected(rconn->reconnect, time_msec(), err);
            stream_close(rconn->rstream->stream);
            rconn->rstream->stream = NULL;
        }
    }
}

static void
dummy_packet_conn_run(struct netdev_dummy *dev)
    OVS_REQUIRES(dev->mutex)
{
    switch (dev->conn.type) {
    case PASSIVE:
        dummy_pconn_run(dev);
        break;

    case ACTIVE:
        dummy_rconn_run(dev);
        break;

    case NONE:
    default:
        break;
    }
}

static void
dummy_packet_conn_wait(struct dummy_packet_conn *conn)
{
    int i;
    switch (conn->type) {
    case PASSIVE:
        pstream_wait(conn->u.pconn.pstream);
        for (i = 0; i < conn->u.pconn.n_streams; i++) {
            struct dummy_packet_stream *s = conn->u.pconn.streams[i];
            dummy_packet_stream_wait(s);
        }
        break;
    case ACTIVE:
        if (reconnect_is_connected(conn->u.rconn.reconnect)) {
            dummy_packet_stream_wait(conn->u.rconn.rstream);
        }
        break;

    case NONE:
    default:
        break;
    }
}

static void
dummy_packet_conn_send(struct dummy_packet_conn *conn,
                       const void *buffer, size_t size)
{
    int i;

    switch (conn->type) {
    case PASSIVE:
        for (i = 0; i < conn->u.pconn.n_streams; i++) {
            struct dummy_packet_stream *s = conn->u.pconn.streams[i];

            dummy_packet_stream_send(s, buffer, size);
            pstream_wait(conn->u.pconn.pstream);
        }
        break;

    case ACTIVE:
        if (reconnect_is_connected(conn->u.rconn.reconnect)) {
            dummy_packet_stream_send(conn->u.rconn.rstream, buffer, size);
            dummy_packet_stream_wait(conn->u.rconn.rstream);
        }
        break;

    case NONE:
    default:
        break;
    }
}

static enum dummy_netdev_conn_state
dummy_netdev_get_conn_state(struct dummy_packet_conn *conn)
{
    enum dummy_netdev_conn_state state;

    if (conn->type == ACTIVE) {
        if (reconnect_is_connected(conn->u.rconn.reconnect)) {
            state = CONN_STATE_CONNECTED;
        } else {
            state = CONN_STATE_NOT_CONNECTED;
        }
    } else {
        state = CONN_STATE_UNKNOWN;
    }

    return state;
}

static void
netdev_dummy_run(const struct netdev_class *netdev_class)
{
    struct netdev_dummy *dev;

    ovs_mutex_lock(&dummy_list_mutex);
    LIST_FOR_EACH (dev, list_node, &dummy_list) {
        if (netdev_get_class(&dev->up) != netdev_class) {
            continue;
        }
        ovs_mutex_lock(&dev->mutex);
        dummy_packet_conn_run(dev);
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dummy_list_mutex);
}

static void
netdev_dummy_wait(const struct netdev_class *netdev_class)
{
    struct netdev_dummy *dev;

    ovs_mutex_lock(&dummy_list_mutex);
    LIST_FOR_EACH (dev, list_node, &dummy_list) {
        if (netdev_get_class(&dev->up) != netdev_class) {
            continue;
        }
        ovs_mutex_lock(&dev->mutex);
        dummy_packet_conn_wait(&dev->conn);
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dummy_list_mutex);
}

static struct netdev *
netdev_dummy_alloc(void)
{
    struct netdev_dummy *netdev = xzalloc(sizeof *netdev);
    return &netdev->up;
}

static int
netdev_dummy_construct(struct netdev *netdev_)
{
    static atomic_count next_n = ATOMIC_COUNT_INIT(0xaa550000);
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
    unsigned int n;

    n = atomic_count_inc(&next_n);

    ovs_mutex_init(&netdev->mutex);
    ovs_mutex_lock(&netdev->mutex);
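    /* Derive a distinct EUI-48 address from the shared counter; assuming
     * atomic_count_inc() returns the pre-increment value, the first dummy
     * device constructed gets aa:55:aa:55:00:00, the next one
     * aa:55:aa:55:00:01, and so on. */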
    netdev->hwaddr.ea[0] = 0xaa;
    netdev->hwaddr.ea[1] = 0x55;
    netdev->hwaddr.ea[2] = n >> 24;
    netdev->hwaddr.ea[3] = n >> 16;
    netdev->hwaddr.ea[4] = n >> 8;
    netdev->hwaddr.ea[5] = n;
    netdev->mtu = 1500;
    netdev->flags = 0;
    netdev->ifindex = -EOPNOTSUPP;
    netdev->requested_n_rxq = netdev_->n_rxq;
    netdev->requested_n_txq = netdev_->n_txq;
    netdev->numa_id = 0;

    dummy_packet_conn_init(&netdev->conn);

    ovs_list_init(&netdev->rxes);
    ovs_mutex_unlock(&netdev->mutex);

    ovs_mutex_lock(&dummy_list_mutex);
    ovs_list_push_back(&dummy_list, &netdev->list_node);
    ovs_mutex_unlock(&dummy_list_mutex);

    return 0;
}

static void
netdev_dummy_destruct(struct netdev *netdev_)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    ovs_mutex_lock(&dummy_list_mutex);
    ovs_list_remove(&netdev->list_node);
    ovs_mutex_unlock(&dummy_list_mutex);

    ovs_mutex_lock(&netdev->mutex);
    dummy_packet_conn_close(&netdev->conn);
    netdev->conn.type = NONE;

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_destroy(&netdev->mutex);
}

static void
netdev_dummy_dealloc(struct netdev *netdev_)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    free(netdev);
}

static int
netdev_dummy_get_config(const struct netdev *dev, struct smap *args)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(dev);

    ovs_mutex_lock(&netdev->mutex);

    if (netdev->ifindex >= 0) {
        smap_add_format(args, "ifindex", "%d", netdev->ifindex);
    }

    dummy_packet_conn_get_config(&netdev->conn, args);

    /* 'dummy-pmd' specific config. */
    if (!netdev_is_pmd(dev)) {
        goto exit;
    }
    smap_add_format(args, "requested_rx_queues", "%d", netdev->requested_n_rxq);
    smap_add_format(args, "configured_rx_queues", "%d", dev->n_rxq);
    smap_add_format(args, "requested_tx_queues", "%d", netdev->requested_n_txq);
    smap_add_format(args, "configured_tx_queues", "%d", dev->n_txq);

exit:
    ovs_mutex_unlock(&netdev->mutex);
    return 0;
}

static int
netdev_dummy_get_addr_list(const struct netdev *netdev_, struct in6_addr **paddr,
                           struct in6_addr **pmask, int *n_addr)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
    int cnt = 0, i = 0, err = 0;
    struct in6_addr *addr, *mask;

    ovs_mutex_lock(&netdev->mutex);
    if (netdev->address.s_addr != INADDR_ANY) {
        cnt++;
    }

    if (ipv6_addr_is_set(&netdev->ipv6)) {
        cnt++;
    }
    if (!cnt) {
        err = EADDRNOTAVAIL;
        goto out;
    }
    addr = xmalloc(sizeof *addr * cnt);
    mask = xmalloc(sizeof *mask * cnt);
    if (netdev->address.s_addr != INADDR_ANY) {
        in6_addr_set_mapped_ipv4(&addr[i], netdev->address.s_addr);
        in6_addr_set_mapped_ipv4(&mask[i], netdev->netmask.s_addr);
        i++;
    }

    if (ipv6_addr_is_set(&netdev->ipv6)) {
        memcpy(&addr[i], &netdev->ipv6, sizeof *addr);
        memcpy(&mask[i], &netdev->ipv6_mask, sizeof *mask);
        i++;
    }
    if (paddr) {
        *paddr = addr;
        *pmask = mask;
        *n_addr = cnt;
    } else {
        free(addr);
        free(mask);
    }
out:
    ovs_mutex_unlock(&netdev->mutex);

    return err;
}

static int
netdev_dummy_set_in4(struct netdev *netdev_, struct in_addr address,
                     struct in_addr netmask)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    ovs_mutex_lock(&netdev->mutex);
    netdev->address = address;
    netdev->netmask = netmask;
    netdev_change_seq_changed(netdev_);
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}

static int
netdev_dummy_set_in6(struct netdev *netdev_, struct in6_addr *in6,
                     struct in6_addr *mask)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    ovs_mutex_lock(&netdev->mutex);
    netdev->ipv6 = *in6;
    netdev->ipv6_mask = *mask;
    netdev_change_seq_changed(netdev_);
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}

#define DUMMY_MAX_QUEUES_PER_PORT 1024

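/* Applies 'args' to 'netdev_'.  Keys read below include "ifindex", "pstream"
 * and "stream" (via dummy_packet_conn_set_config()), "pcap", "rxq_pcap" and
 * "tx_pcap", plus "n_rxq", "n_txq" and "numa_id" for dummy-pmd devices.  A
 * hypothetical example that captures both directions to a single file:
 *
 *     ovs-vsctl set Interface p0 options:pcap=/tmp/p0.pcap
 */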
static int
netdev_dummy_set_config(struct netdev *netdev_, const struct smap *args,
                        char **errp OVS_UNUSED)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
    const char *pcap;
    int new_n_rxq, new_n_txq, new_numa_id;

    ovs_mutex_lock(&netdev->mutex);
    netdev->ifindex = smap_get_int(args, "ifindex", -EOPNOTSUPP);

    dummy_packet_conn_set_config(&netdev->conn, args);

    if (netdev->rxq_pcap) {
        fclose(netdev->rxq_pcap);
    }
    if (netdev->tx_pcap && netdev->tx_pcap != netdev->rxq_pcap) {
        fclose(netdev->tx_pcap);
    }
    netdev->rxq_pcap = netdev->tx_pcap = NULL;
    pcap = smap_get(args, "pcap");
    if (pcap) {
        netdev->rxq_pcap = netdev->tx_pcap = ovs_pcap_open(pcap, "ab");
    } else {
        const char *rxq_pcap = smap_get(args, "rxq_pcap");
        const char *tx_pcap = smap_get(args, "tx_pcap");

        if (rxq_pcap) {
            netdev->rxq_pcap = ovs_pcap_open(rxq_pcap, "ab");
        }
        if (tx_pcap) {
            netdev->tx_pcap = ovs_pcap_open(tx_pcap, "ab");
        }
    }

    netdev_change_seq_changed(netdev_);

    /* 'dummy-pmd' specific config. */
    if (!netdev_->netdev_class->is_pmd) {
        goto exit;
    }

    new_n_rxq = MAX(smap_get_int(args, "n_rxq", NR_QUEUE), 1);
    new_n_txq = MAX(smap_get_int(args, "n_txq", NR_QUEUE), 1);

    if (new_n_rxq > DUMMY_MAX_QUEUES_PER_PORT ||
        new_n_txq > DUMMY_MAX_QUEUES_PER_PORT) {
        VLOG_WARN("One or both of interface %s's queue counts "
                  "(rxq: %d, txq: %d) exceed the maximum of %d. "
                  "Clamping them to %d.",
                  netdev_get_name(netdev_),
                  new_n_rxq,
                  new_n_txq,
                  DUMMY_MAX_QUEUES_PER_PORT,
                  DUMMY_MAX_QUEUES_PER_PORT);

        new_n_rxq = MIN(DUMMY_MAX_QUEUES_PER_PORT, new_n_rxq);
        new_n_txq = MIN(DUMMY_MAX_QUEUES_PER_PORT, new_n_txq);
    }

    new_numa_id = smap_get_int(args, "numa_id", 0);
    if (new_n_rxq != netdev->requested_n_rxq
        || new_n_txq != netdev->requested_n_txq
        || new_numa_id != netdev->requested_numa_id) {
        netdev->requested_n_rxq = new_n_rxq;
        netdev->requested_n_txq = new_n_txq;
        netdev->requested_numa_id = new_numa_id;
        netdev_request_reconfigure(netdev_);
    }

exit:
    ovs_mutex_unlock(&netdev->mutex);
    return 0;
}

static int
netdev_dummy_get_numa_id(const struct netdev *netdev_)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    ovs_mutex_lock(&netdev->mutex);
    int numa_id = netdev->numa_id;
    ovs_mutex_unlock(&netdev->mutex);

    return numa_id;
}

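/* dummy-pmd reconfiguration happens in two steps, mirroring other PMD
 * netdevs: netdev_dummy_set_config() above only records the requested queue
 * counts and NUMA id and calls netdev_request_reconfigure(), and the
 * datapath later invokes netdev_dummy_reconfigure() below, once the device
 * is no longer being polled, to apply the requested values. */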
/* Sets the number of tx queues and rx queues for the dummy PMD interface. */
static int
netdev_dummy_reconfigure(struct netdev *netdev_)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    ovs_mutex_lock(&netdev->mutex);

    netdev_->n_txq = netdev->requested_n_txq;
    netdev_->n_rxq = netdev->requested_n_rxq;
    netdev->numa_id = netdev->requested_numa_id;

    ovs_mutex_unlock(&netdev->mutex);
    return 0;
}

static struct netdev_rxq *
netdev_dummy_rxq_alloc(void)
{
    struct netdev_rxq_dummy *rx = xzalloc(sizeof *rx);
    return &rx->up;
}

static int
netdev_dummy_rxq_construct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    ovs_list_push_back(&netdev->rxes, &rx->node);
    ovs_list_init(&rx->recv_queue);
    rx->recv_queue_len = 0;
    rx->seq = seq_create();
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}

static void
netdev_dummy_rxq_destruct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    ovs_list_remove(&rx->node);
    pkt_list_delete(&rx->recv_queue);
    ovs_mutex_unlock(&netdev->mutex);
    seq_destroy(rx->seq);
}

static void
netdev_dummy_rxq_dealloc(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);

    free(rx);
}

static int
netdev_dummy_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet_batch *batch)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
    struct dp_packet *packet;

    ovs_mutex_lock(&netdev->mutex);
    if (!ovs_list_is_empty(&rx->recv_queue)) {
        struct pkt_list_node *pkt_node;

        ASSIGN_CONTAINER(pkt_node, ovs_list_pop_front(&rx->recv_queue), list_node);
        packet = pkt_node->pkt;
        free(pkt_node);
        rx->recv_queue_len--;
    } else {
        packet = NULL;
    }
    ovs_mutex_unlock(&netdev->mutex);

    if (!packet) {
        if (netdev_is_pmd(&netdev->up)) {
            /* If 'netdev' is a PMD device, this is called as part of the PMD
             * thread busy loop.  We yield here (without quiescing) for two
             * reasons:
             *
             * - To reduce the CPU utilization during the testsuite
             * - To give valgrind a chance to switch thread.  According
             *   to the valgrind documentation, there's a big lock that
             *   prevents multiple threads from being executed at the same
             *   time.  On my system, without this sleep, the pmd threads
             *   testcases fail under valgrind, because ovs-vswitchd becomes
             *   unresponsive. */
            sched_yield();
        }
        return EAGAIN;
    }
    ovs_mutex_lock(&netdev->mutex);
    netdev->stats.rx_packets++;
    netdev->stats.rx_bytes += dp_packet_size(packet);
    ovs_mutex_unlock(&netdev->mutex);

    batch->packets[0] = packet;
    batch->count = 1;
    return 0;
}

static void
netdev_dummy_rxq_wait(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
    uint64_t seq = seq_read(rx->seq);

    ovs_mutex_lock(&netdev->mutex);
    if (!ovs_list_is_empty(&rx->recv_queue)) {
        poll_immediate_wake();
    } else {
        seq_wait(rx->seq, seq);
    }
    ovs_mutex_unlock(&netdev->mutex);
}

static int
netdev_dummy_rxq_drain(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    pkt_list_delete(&rx->recv_queue);
    rx->recv_queue_len = 0;
    ovs_mutex_unlock(&netdev->mutex);

    seq_change(rx->seq);

    return 0;
}

static int
netdev_dummy_send(struct netdev *netdev, int qid OVS_UNUSED,
                  struct dp_packet_batch *batch, bool may_steal,
                  bool concurrent_txq OVS_UNUSED)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);
    int error = 0;

    struct dp_packet *packet;
    DP_PACKET_BATCH_FOR_EACH(packet, batch) {
        const void *buffer = dp_packet_data(packet);
        size_t size = dp_packet_size(packet);

        size -= dp_packet_get_cutlen(packet);

        if (size < ETH_HEADER_LEN) {
            error = EMSGSIZE;
            break;
        } else {
            const struct eth_header *eth = buffer;
            int max_size;

            ovs_mutex_lock(&dev->mutex);
            max_size = dev->mtu + ETH_HEADER_LEN;
            ovs_mutex_unlock(&dev->mutex);

            if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
                max_size += VLAN_HEADER_LEN;
            }
            if (size > max_size) {
                error = EMSGSIZE;
                break;
            }
        }

        ovs_mutex_lock(&dev->mutex);
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += size;

        dummy_packet_conn_send(&dev->conn, buffer, size);

        /* Reply to ARP requests for 'dev''s assigned IP address. */
        if (dev->address.s_addr) {
            struct dp_packet packet;
            struct flow flow;

            dp_packet_use_const(&packet, buffer, size);
            flow_extract(&packet, &flow);
            if (flow.dl_type == htons(ETH_TYPE_ARP)
                && flow.nw_proto == ARP_OP_REQUEST
                && flow.nw_dst == dev->address.s_addr) {
                struct dp_packet *reply = dp_packet_new(0);
                compose_arp(reply, ARP_OP_REPLY, dev->hwaddr, flow.dl_src,
                            false, flow.nw_dst, flow.nw_src);
                netdev_dummy_queue_packet(dev, reply, 0);
            }
        }

        if (dev->tx_pcap) {
            struct dp_packet packet;

            dp_packet_use_const(&packet, buffer, size);
            ovs_pcap_write(dev->tx_pcap, &packet);
            fflush(dev->tx_pcap);
        }

        ovs_mutex_unlock(&dev->mutex);
    }

    dp_packet_delete_batch(batch, may_steal);

    return error;
}

static int
netdev_dummy_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        dev->hwaddr = mac;
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dummy_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mac = dev->hwaddr;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dummy_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

#define DUMMY_MIN_MTU 68
#define DUMMY_MAX_MTU 65535

static int
netdev_dummy_set_mtu(struct netdev *netdev, int mtu)
{
    if (mtu < DUMMY_MIN_MTU || mtu > DUMMY_MAX_MTU) {
        return EINVAL;
    }

    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (dev->mtu != mtu) {
        dev->mtu = mtu;
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dummy_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    /* Pass through only the counters this dummy device actually collects. */
    stats->tx_packets = dev->stats.tx_packets;
    stats->tx_bytes = dev->stats.tx_bytes;
    stats->rx_packets = dev->stats.rx_packets;
    stats->rx_bytes = dev->stats.rx_bytes;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dummy_get_queue(const struct netdev *netdev OVS_UNUSED,
                       unsigned int queue_id, struct smap *details OVS_UNUSED)
{
    if (queue_id == 0) {
        return 0;
    } else {
        return EINVAL;
    }
}

static void
netdev_dummy_init_queue_stats(struct netdev_queue_stats *stats)
{
    *stats = (struct netdev_queue_stats) {
        .tx_bytes = UINT64_MAX,
        .tx_packets = UINT64_MAX,
        .tx_errors = UINT64_MAX,
        .created = LLONG_MIN,
    };
}

static int
netdev_dummy_get_queue_stats(const struct netdev *netdev OVS_UNUSED,
                             unsigned int queue_id,
                             struct netdev_queue_stats *stats)
{
    if (queue_id == 0) {
        netdev_dummy_init_queue_stats(stats);
        return 0;
    } else {
        return EINVAL;
    }
}

struct netdev_dummy_queue_state {
    unsigned int next_queue;
};

static int
netdev_dummy_queue_dump_start(const struct netdev *netdev OVS_UNUSED,
                              void **statep)
{
    struct netdev_dummy_queue_state *state = xmalloc(sizeof *state);
    state->next_queue = 0;
    *statep = state;
    return 0;
}

static int
netdev_dummy_queue_dump_next(const struct netdev *netdev OVS_UNUSED,
                             void *state_,
                             unsigned int *queue_id,
                             struct smap *details OVS_UNUSED)
{
    struct netdev_dummy_queue_state *state = state_;
    if (state->next_queue == 0) {
        *queue_id = 0;
        state->next_queue++;
        return 0;
    } else {
        return EOF;
    }
}

static int
netdev_dummy_queue_dump_done(const struct netdev *netdev OVS_UNUSED,
                             void *state)
{
    free(state);
    return 0;
}

static int
netdev_dummy_dump_queue_stats(const struct netdev *netdev OVS_UNUSED,
                              void (*cb)(unsigned int queue_id,
                                         struct netdev_queue_stats *,
                                         void *aux),
                              void *aux)
{
    struct netdev_queue_stats stats;
    netdev_dummy_init_queue_stats(&stats);
    cb(0, &stats, aux);
    return 0;
}

static int
netdev_dummy_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->ifindex;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}

static int
netdev_dummy_update_flags__(struct netdev_dummy *netdev,
                            enum netdev_flags off, enum netdev_flags on,
                            enum netdev_flags *old_flagsp)
    OVS_REQUIRES(netdev->mutex)
{
    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = netdev->flags;
    netdev->flags |= on;
    netdev->flags &= ~off;
    if (*old_flagsp != netdev->flags) {
        netdev_change_seq_changed(&netdev->up);
    }

    return 0;
}

static int
netdev_dummy_update_flags(struct netdev *netdev_,
                          enum netdev_flags off, enum netdev_flags on,
                          enum netdev_flags *old_flagsp)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
    int error;

    ovs_mutex_lock(&netdev->mutex);
    error = netdev_dummy_update_flags__(netdev, off, on, old_flagsp);
    ovs_mutex_unlock(&netdev->mutex);

    return error;
}
\f
/* Helper functions. */

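/* Positional initializer for 'struct netdev_class'.  Entries left NULL are
 * optional callbacks that the dummy devices do not implement; the trailing
 * comments name the corresponding struct members. */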
#define NETDEV_DUMMY_CLASS(NAME, PMD, RECONFIGURE)      \
{                                                       \
    NAME,                                               \
    PMD,                        /* is_pmd */            \
    NULL,                       /* init */              \
    netdev_dummy_run,                                   \
    netdev_dummy_wait,                                  \
                                                        \
    netdev_dummy_alloc,                                 \
    netdev_dummy_construct,                             \
    netdev_dummy_destruct,                              \
    netdev_dummy_dealloc,                               \
    netdev_dummy_get_config,                            \
    netdev_dummy_set_config,                            \
    NULL,                       /* get_tunnel_config */ \
    NULL,                       /* build header */      \
    NULL,                       /* push header */       \
    NULL,                       /* pop header */        \
    netdev_dummy_get_numa_id,                           \
    NULL,                       /* set_tx_multiq */     \
                                                        \
    netdev_dummy_send,          /* send */              \
    NULL,                       /* send_wait */         \
                                                        \
    netdev_dummy_set_etheraddr,                         \
    netdev_dummy_get_etheraddr,                         \
    netdev_dummy_get_mtu,                               \
    netdev_dummy_set_mtu,                               \
    netdev_dummy_get_ifindex,                           \
    NULL,                       /* get_carrier */       \
    NULL,                       /* get_carrier_resets */\
    NULL,                       /* get_miimon */        \
    netdev_dummy_get_stats,                             \
                                                        \
    NULL,                       /* get_features */      \
    NULL,                       /* set_advertisements */\
                                                        \
    NULL,                       /* set_policing */      \
    NULL,                       /* get_qos_types */     \
    NULL,                       /* get_qos_capabilities */ \
    NULL,                       /* get_qos */           \
    NULL,                       /* set_qos */           \
    netdev_dummy_get_queue,                             \
    NULL,                       /* set_queue */         \
    NULL,                       /* delete_queue */      \
    netdev_dummy_get_queue_stats,                       \
    netdev_dummy_queue_dump_start,                      \
    netdev_dummy_queue_dump_next,                       \
    netdev_dummy_queue_dump_done,                       \
    netdev_dummy_dump_queue_stats,                      \
                                                        \
    NULL,                       /* set_in4 */           \
    netdev_dummy_get_addr_list,                         \
    NULL,                       /* add_router */        \
    NULL,                       /* get_next_hop */      \
    NULL,                       /* get_status */        \
    NULL,                       /* arp_lookup */        \
                                                        \
    netdev_dummy_update_flags,                          \
    RECONFIGURE,                                        \
                                                        \
    netdev_dummy_rxq_alloc,                             \
    netdev_dummy_rxq_construct,                         \
    netdev_dummy_rxq_destruct,                          \
    netdev_dummy_rxq_dealloc,                           \
    netdev_dummy_rxq_recv,                              \
    netdev_dummy_rxq_wait,                              \
    netdev_dummy_rxq_drain,                             \
}

static const struct netdev_class dummy_class =
    NETDEV_DUMMY_CLASS("dummy", false, NULL);

static const struct netdev_class dummy_internal_class =
    NETDEV_DUMMY_CLASS("dummy-internal", false, NULL);

static const struct netdev_class dummy_pmd_class =
    NETDEV_DUMMY_CLASS("dummy-pmd", true,
                       netdev_dummy_reconfigure);

static void
pkt_list_delete(struct ovs_list *l)
{
    struct pkt_list_node *pkt;

    LIST_FOR_EACH_POP(pkt, list_node, l) {
        dp_packet_delete(pkt->pkt);
        free(pkt);
    }
}

static struct dp_packet *
eth_from_packet(const char *s)
{
    struct dp_packet *packet;
    eth_from_hex(s, &packet);
    return packet;
}

static struct dp_packet *
eth_from_flow(const char *s)
{
    enum odp_key_fitness fitness;
    struct dp_packet *packet;
    struct ofpbuf odp_key;
    struct flow flow;
    int error;

    /* Convert string to datapath key.
     *
     * It would actually be nicer to parse an OpenFlow-like flow key here, but
     * the code for that currently calls exit() on parse error.  We have to
     * settle for parsing a datapath key for now.
     */
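    /* As an illustration (hypothetical values), a string such as
     * "in_port(1),eth(src=aa:55:aa:55:00:00,dst=ff:ff:ff:ff:ff:ff),
     * eth_type(0x0806)" parses into 'odp_key' below and is then turned into
     * an actual packet by flow_compose(). */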
    ofpbuf_init(&odp_key, 0);
    error = odp_flow_from_string(s, NULL, &odp_key, NULL);
    if (error) {
        ofpbuf_uninit(&odp_key);
        return NULL;
    }

    /* Convert odp_key to flow. */
    fitness = odp_flow_key_to_flow(odp_key.data, odp_key.size, &flow);
    if (fitness == ODP_FIT_ERROR) {
        ofpbuf_uninit(&odp_key);
        return NULL;
    }

    packet = dp_packet_new(0);
    flow_compose(packet, &flow);

    ofpbuf_uninit(&odp_key);
    return packet;
}

static void
netdev_dummy_queue_packet__(struct netdev_rxq_dummy *rx, struct dp_packet *packet)
{
    struct pkt_list_node *pkt_node = xmalloc(sizeof *pkt_node);

    pkt_node->pkt = packet;
    ovs_list_push_back(&rx->recv_queue, &pkt_node->list_node);
    rx->recv_queue_len++;
    seq_change(rx->seq);
}

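/* Delivers 'packet' to every rx queue of 'dummy' bound to 'queue_id' that
 * still has room: earlier matching queues receive clones, the last match
 * takes ownership of 'packet' itself, and if no queue matches the packet is
 * dropped. */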
static void
netdev_dummy_queue_packet(struct netdev_dummy *dummy, struct dp_packet *packet,
                          int queue_id)
    OVS_REQUIRES(dummy->mutex)
{
    struct netdev_rxq_dummy *rx, *prev;

    if (dummy->rxq_pcap) {
        ovs_pcap_write(dummy->rxq_pcap, packet);
        fflush(dummy->rxq_pcap);
    }
    prev = NULL;
    LIST_FOR_EACH (rx, node, &dummy->rxes) {
        if (rx->up.queue_id == queue_id &&
            rx->recv_queue_len < NETDEV_DUMMY_MAX_QUEUE) {
            if (prev) {
                netdev_dummy_queue_packet__(prev, dp_packet_clone(packet));
            }
            prev = rx;
        }
    }
    if (prev) {
        netdev_dummy_queue_packet__(prev, packet);
    } else {
        dp_packet_delete(packet);
    }
}

static void
netdev_dummy_receive(struct unixctl_conn *conn,
                     int argc, const char *argv[], void *aux OVS_UNUSED)
{
    struct netdev_dummy *dummy_dev;
    struct netdev *netdev;
    int i, k = 1, rx_qid = 0;

    netdev = netdev_from_name(argv[k++]);
    if (!netdev || !is_dummy_class(netdev->netdev_class)) {
        unixctl_command_reply_error(conn, "no such dummy netdev");
        goto exit_netdev;
    }
    dummy_dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dummy_dev->mutex);

    if (argc > k + 1 && !strcmp(argv[k], "--qid")) {
        rx_qid = strtol(argv[k + 1], NULL, 10);
        if (rx_qid < 0 || rx_qid >= netdev->n_rxq) {
            unixctl_command_reply_error(conn, "bad rx queue id.");
            goto exit;
        }
        k += 2;
    }

    for (i = k; i < argc; i++) {
        struct dp_packet *packet;

        /* Try to parse 'argv[i]' as a packet in hex. */
        packet = eth_from_packet(argv[i]);

        if (!packet) {
            /* Try to parse 'argv[i]' as an odp flow. */
            packet = eth_from_flow(argv[i]);

            if (!packet) {
                unixctl_command_reply_error(conn, "bad packet or flow syntax");
                goto exit;
            }

            /* Parse the optional --len argument that immediately follows a
             * 'flow'. */
            if (argc >= i + 2 && !strcmp(argv[i + 1], "--len")) {
                int packet_size = strtol(argv[i + 2], NULL, 10);
                dp_packet_set_size(packet, packet_size);
                i += 2;
            }
        }

        netdev_dummy_queue_packet(dummy_dev, packet, rx_qid);
    }

    unixctl_command_reply(conn, NULL);

exit:
    ovs_mutex_unlock(&dummy_dev->mutex);
exit_netdev:
    netdev_close(netdev);
}

static void
netdev_dummy_set_admin_state__(struct netdev_dummy *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dummy_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dummy_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}

static void
netdev_dummy_set_admin_state(struct unixctl_conn *conn, int argc,
                             const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dummy_class(netdev->netdev_class)) {
            struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);

            ovs_mutex_lock(&dummy_dev->mutex);
            netdev_dummy_set_admin_state__(dummy_dev, up);
            ovs_mutex_unlock(&dummy_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Unknown Dummy Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dummy *netdev;

        ovs_mutex_lock(&dummy_list_mutex);
        LIST_FOR_EACH (netdev, list_node, &dummy_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dummy_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dummy_list_mutex);
    }
    unixctl_command_reply(conn, "OK");
}

static void
display_conn_state__(struct ds *s, const char *name,
                     enum dummy_netdev_conn_state state)
{
    ds_put_format(s, "%s: ", name);

    switch (state) {
    case CONN_STATE_CONNECTED:
        ds_put_cstr(s, "connected\n");
        break;

    case CONN_STATE_NOT_CONNECTED:
        ds_put_cstr(s, "disconnected\n");
        break;

    case CONN_STATE_UNKNOWN:
    default:
        ds_put_cstr(s, "unknown\n");
        break;
    };
}

static void
netdev_dummy_conn_state(struct unixctl_conn *conn, int argc,
                        const char *argv[], void *aux OVS_UNUSED)
{
    enum dummy_netdev_conn_state state = CONN_STATE_UNKNOWN;
    struct ds s;

    ds_init(&s);

    if (argc > 1) {
        const char *dev_name = argv[1];
        struct netdev *netdev = netdev_from_name(dev_name);

        if (netdev && is_dummy_class(netdev->netdev_class)) {
            struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);

            ovs_mutex_lock(&dummy_dev->mutex);
            state = dummy_netdev_get_conn_state(&dummy_dev->conn);
            ovs_mutex_unlock(&dummy_dev->mutex);

            netdev_close(netdev);
        }
        display_conn_state__(&s, dev_name, state);
    } else {
        struct netdev_dummy *netdev;

        ovs_mutex_lock(&dummy_list_mutex);
        LIST_FOR_EACH (netdev, list_node, &dummy_list) {
            ovs_mutex_lock(&netdev->mutex);
            state = dummy_netdev_get_conn_state(&netdev->conn);
            ovs_mutex_unlock(&netdev->mutex);
            if (state != CONN_STATE_UNKNOWN) {
                display_conn_state__(&s, netdev->up.name, state);
            }
        }
        ovs_mutex_unlock(&dummy_list_mutex);
    }

    unixctl_command_reply(conn, ds_cstr(&s));
    ds_destroy(&s);
}

static void
netdev_dummy_ip4addr(struct unixctl_conn *conn, int argc OVS_UNUSED,
                     const char *argv[], void *aux OVS_UNUSED)
{
    struct netdev *netdev = netdev_from_name(argv[1]);

    if (netdev && is_dummy_class(netdev->netdev_class)) {
        struct in_addr ip, mask;
        char *error;

        error = ip_parse_masked(argv[2], &ip.s_addr, &mask.s_addr);
        if (!error) {
            netdev_dummy_set_in4(netdev, ip, mask);
            unixctl_command_reply(conn, "OK");
        } else {
            unixctl_command_reply_error(conn, error);
            free(error);
        }
    } else {
        unixctl_command_reply_error(conn, "Unknown Dummy Interface");
    }

    netdev_close(netdev);
}

static void
netdev_dummy_ip6addr(struct unixctl_conn *conn, int argc OVS_UNUSED,
                     const char *argv[], void *aux OVS_UNUSED)
{
    struct netdev *netdev = netdev_from_name(argv[1]);

    if (netdev && is_dummy_class(netdev->netdev_class)) {
        struct in6_addr ip6;
        char *error;
        uint32_t plen;

        error = ipv6_parse_cidr(argv[2], &ip6, &plen);
        if (!error) {
            struct in6_addr mask;

            mask = ipv6_create_mask(plen);
            netdev_dummy_set_in6(netdev, &ip6, &mask);
            unixctl_command_reply(conn, "OK");
        } else {
            unixctl_command_reply_error(conn, error);
            free(error);
        }
    } else {
        unixctl_command_reply_error(conn, "Unknown Dummy Interface");
    }

    netdev_close(netdev);
}


static void
netdev_dummy_override(const char *type)
{
    if (!netdev_unregister_provider(type)) {
        struct netdev_class *class;
        int error;

        class = xmemdup(&dummy_class, sizeof dummy_class);
        class->type = xstrdup(type);
        error = netdev_register_provider(class);
        if (error) {
            VLOG_ERR("%s: failed to register netdev provider (%s)",
                     type, ovs_strerror(error));
            free(CONST_CAST(char *, class->type));
            free(class);
        }
    }
}

void
netdev_dummy_register(enum dummy_level level)
{
    unixctl_command_register("netdev-dummy/receive",
                             "name [--qid queue_id] packet|flow [--len packet_len]",
                             2, INT_MAX, netdev_dummy_receive, NULL);
    unixctl_command_register("netdev-dummy/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dummy_set_admin_state, NULL);
    unixctl_command_register("netdev-dummy/conn-state",
                             "[netdev]", 0, 1,
                             netdev_dummy_conn_state, NULL);
    unixctl_command_register("netdev-dummy/ip4addr",
                             "[netdev] ipaddr/mask-prefix-len", 2, 2,
                             netdev_dummy_ip4addr, NULL);
    unixctl_command_register("netdev-dummy/ip6addr",
                             "[netdev] ip6addr", 2, 2,
                             netdev_dummy_ip6addr, NULL);

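    /* Typical invocations of the commands above, with illustrative device
     * names and arguments:
     *
     *     ovs-appctl netdev-dummy/receive p0 --qid 0 <hex packet or flow>
     *     ovs-appctl netdev-dummy/set-admin-state p0 down
     *     ovs-appctl netdev-dummy/conn-state p0
     *     ovs-appctl netdev-dummy/ip4addr p0 10.0.0.1/24
     */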
    if (level == DUMMY_OVERRIDE_ALL) {
        struct sset types;
        const char *type;

        sset_init(&types);
        netdev_enumerate_types(&types);
        SSET_FOR_EACH (type, &types) {
            if (strcmp(type, "patch")) {
                netdev_dummy_override(type);
            }
        }
        sset_destroy(&types);
    } else if (level == DUMMY_OVERRIDE_SYSTEM) {
        netdev_dummy_override("system");
    }
    netdev_register_provider(&dummy_class);
    netdev_register_provider(&dummy_internal_class);
    netdev_register_provider(&dummy_pmd_class);

    netdev_vport_tunnel_register();
}