]> git.proxmox.com Git - mirror_ovs.git/blob - lib/netdev-dummy.c
stopwatch: Remove tabs from output.
[mirror_ovs.git] / lib / netdev-dummy.c
1 /*
2 * Copyright (c) 2010, 2011, 2012, 2013, 2015, 2016, 2017 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18
19 #include "dummy.h"
20
21 #include <errno.h>
22 #include <unistd.h>
23
24 #include "dp-packet.h"
25 #include "dpif-netdev.h"
26 #include "flow.h"
27 #include "netdev-provider.h"
28 #include "netdev-vport.h"
29 #include "odp-util.h"
30 #include "openvswitch/dynamic-string.h"
31 #include "openvswitch/list.h"
32 #include "openvswitch/ofp-print.h"
33 #include "openvswitch/ofpbuf.h"
34 #include "openvswitch/vlog.h"
35 #include "ovs-atomic.h"
36 #include "packets.h"
37 #include "pcap-file.h"
38 #include "openvswitch/poll-loop.h"
39 #include "openvswitch/shash.h"
40 #include "sset.h"
41 #include "stream.h"
42 #include "unaligned.h"
43 #include "timeval.h"
44 #include "unixctl.h"
45 #include "reconnect.h"
46
47 VLOG_DEFINE_THIS_MODULE(netdev_dummy);
48
49 #define C_STATS_SIZE 2
50
51 struct reconnect;
52
53 struct dummy_packet_stream {
54 struct stream *stream;
55 struct ovs_list txq;
56 struct dp_packet rxbuf;
57 };
58
59 enum dummy_packet_conn_type {
60 NONE, /* No connection is configured. */
61 PASSIVE, /* Listener. */
62 ACTIVE /* Connect to listener. */
63 };
64
65 enum dummy_netdev_conn_state {
66 CONN_STATE_CONNECTED, /* Listener connected. */
67 CONN_STATE_NOT_CONNECTED, /* Listener not connected. */
    CONN_STATE_UNKNOWN,       /* No relevant information. */
69 };
70
71 struct dummy_packet_pconn {
72 struct pstream *pstream;
73 struct dummy_packet_stream **streams;
74 size_t n_streams;
75 };
76
77 struct dummy_packet_rconn {
78 struct dummy_packet_stream *rstream;
79 struct reconnect *reconnect;
80 };
81
82 struct dummy_packet_conn {
83 enum dummy_packet_conn_type type;
84 union {
85 struct dummy_packet_pconn pconn;
86 struct dummy_packet_rconn rconn;
87 };
88 };
89
90 struct pkt_list_node {
91 struct dp_packet *pkt;
92 struct ovs_list list_node;
93 };
94
95 /* Protects 'dummy_list'. */
96 static struct ovs_mutex dummy_list_mutex = OVS_MUTEX_INITIALIZER;
97
98 /* Contains all 'struct dummy_dev's. */
99 static struct ovs_list dummy_list OVS_GUARDED_BY(dummy_list_mutex)
100 = OVS_LIST_INITIALIZER(&dummy_list);
101
102 struct netdev_dummy {
103 struct netdev up;
104
105 /* In dummy_list. */
106 struct ovs_list list_node OVS_GUARDED_BY(dummy_list_mutex);
107
108 /* Protects all members below. */
109 struct ovs_mutex mutex OVS_ACQ_AFTER(dummy_list_mutex);
110
111 struct eth_addr hwaddr OVS_GUARDED;
112 int mtu OVS_GUARDED;
113 struct netdev_stats stats OVS_GUARDED;
114 struct netdev_custom_counter custom_stats[C_STATS_SIZE] OVS_GUARDED;
115 enum netdev_flags flags OVS_GUARDED;
116 int ifindex OVS_GUARDED;
117 int numa_id OVS_GUARDED;
118
119 struct dummy_packet_conn conn OVS_GUARDED;
120
121 FILE *tx_pcap, *rxq_pcap OVS_GUARDED;
122
123 struct in_addr address, netmask;
124 struct in6_addr ipv6, ipv6_mask;
125 struct ovs_list rxes OVS_GUARDED; /* List of child "netdev_rxq_dummy"s. */
126
127 /* The following properties are for dummy-pmd and they cannot be changed
128 * when a device is running, so we remember the request and update them
129 * next time netdev_dummy_reconfigure() is called. */
130 int requested_n_txq OVS_GUARDED;
131 int requested_n_rxq OVS_GUARDED;
132 int requested_numa_id OVS_GUARDED;
133 };
134
135 /* Max 'recv_queue_len' in struct netdev_dummy. */
136 #define NETDEV_DUMMY_MAX_QUEUE 100
137
138 struct netdev_rxq_dummy {
139 struct netdev_rxq up;
140 struct ovs_list node; /* In netdev_dummy's "rxes" list. */
141 struct ovs_list recv_queue;
142 int recv_queue_len; /* ovs_list_size(&recv_queue). */
143 struct seq *seq; /* Reports newly queued packets. */
144 };
145
146 static unixctl_cb_func netdev_dummy_set_admin_state;
147 static int netdev_dummy_construct(struct netdev *);
148 static void netdev_dummy_queue_packet(struct netdev_dummy *,
149 struct dp_packet *, int);
150
151 static void dummy_packet_stream_close(struct dummy_packet_stream *);
152
153 static void pkt_list_delete(struct ovs_list *);
154
/* Returns true if 'class' is the dummy netdev class, identified by its
 * construct callback being netdev_dummy_construct(). */
static bool
is_dummy_class(const struct netdev_class *class)
{
    return class->construct == netdev_dummy_construct;
}
160
/* Downcasts 'netdev' to its containing struct netdev_dummy.  Asserts that
 * 'netdev' really belongs to the dummy class. */
static struct netdev_dummy *
netdev_dummy_cast(const struct netdev *netdev)
{
    ovs_assert(is_dummy_class(netdev_get_class(netdev)));
    return CONTAINER_OF(netdev, struct netdev_dummy, up);
}
167
/* Downcasts rx queue 'rx' to its containing struct netdev_rxq_dummy.
 * Asserts that the queue's netdev belongs to the dummy class. */
static struct netdev_rxq_dummy *
netdev_rxq_dummy_cast(const struct netdev_rxq *rx)
{
    ovs_assert(is_dummy_class(netdev_get_class(rx->netdev)));
    return CONTAINER_OF(rx, struct netdev_rxq_dummy, up);
}
174
/* Initializes 's' to wrap 'stream' (which may be NULL for a placeholder),
 * with an empty tx queue and an rx buffer sized only when a stream exists. */
static void
dummy_packet_stream_init(struct dummy_packet_stream *s, struct stream *stream)
{
    int rxbuf_size = stream ? 2048 : 0;
    s->stream = stream;
    dp_packet_init(&s->rxbuf, rxbuf_size);
    ovs_list_init(&s->txq);
}
183
/* Allocates and initializes a dummy_packet_stream wrapping 'stream'.
 * The caller owns the returned object and eventually frees it after
 * dummy_packet_stream_close(). */
static struct dummy_packet_stream *
dummy_packet_stream_create(struct stream *stream)
{
    struct dummy_packet_stream *s;

    s = xzalloc(sizeof *s);
    dummy_packet_stream_init(s, stream);

    return s;
}
194
/* Registers poll-loop wakeups for 's': stream progress, send readiness
 * (only when there is queued tx data), and receive readiness. */
static void
dummy_packet_stream_wait(struct dummy_packet_stream *s)
{
    stream_run_wait(s->stream);
    if (!ovs_list_is_empty(&s->txq)) {
        stream_send_wait(s->stream);
    }
    stream_recv_wait(s->stream);
}
204
/* Enqueues a copy of 'buffer' (of 'size' bytes) onto 's''s tx queue,
 * prefixed with a 2-byte big-endian length header.  Silently drops the
 * packet if the queue already holds NETDEV_DUMMY_MAX_QUEUE entries. */
static void
dummy_packet_stream_send(struct dummy_packet_stream *s, const void *buffer, size_t size)
{
    if (ovs_list_size(&s->txq) < NETDEV_DUMMY_MAX_QUEUE) {
        struct dp_packet *b;
        struct pkt_list_node *node;

        /* 2 bytes of headroom for the length prefix pushed below. */
        b = dp_packet_clone_data_with_headroom(buffer, size, 2);
        put_unaligned_be16(dp_packet_push_uninit(b, 2), htons(size));

        node = xmalloc(sizeof *node);
        node->pkt = b;
        ovs_list_push_back(&s->txq, &node->list_node);
    }
}
220
/* Performs one round of nonblocking I/O on 's': sends the front of the tx
 * queue and receives into the rx buffer.  Frames on the wire are a 2-byte
 * big-endian length followed by that many bytes of Ethernet frame; each
 * complete frame received is queued onto 'dev' as an incoming packet.
 *
 * Returns 0 on progress/no-op, a positive errno value on error, or EOF
 * when the peer closed the connection cleanly. */
static int
dummy_packet_stream_run(struct netdev_dummy *dev, struct dummy_packet_stream *s)
{
    int error = 0;
    size_t n;

    stream_run(s->stream);

    /* Try to transmit the frame at the head of the tx queue, if any. */
    if (!ovs_list_is_empty(&s->txq)) {
        struct pkt_list_node *txbuf_node;
        struct dp_packet *txbuf;
        int retval;

        ASSIGN_CONTAINER(txbuf_node, ovs_list_front(&s->txq), list_node);
        txbuf = txbuf_node->pkt;
        retval = stream_send(s->stream, dp_packet_data(txbuf), dp_packet_size(txbuf));

        if (retval > 0) {
            /* Partial sends leave the remainder queued for next time. */
            dp_packet_pull(txbuf, retval);
            if (!dp_packet_size(txbuf)) {
                ovs_list_remove(&txbuf_node->list_node);
                free(txbuf_node);
                dp_packet_delete(txbuf);
            }
        } else if (retval != -EAGAIN) {
            error = -retval;
        }
    }

    /* Decide how many bytes 'n' we still need: first the 2-byte length
     * header, then the body of the frame it announces. */
    if (!error) {
        if (dp_packet_size(&s->rxbuf) < 2) {
            n = 2 - dp_packet_size(&s->rxbuf);
        } else {
            uint16_t frame_len;

            frame_len = ntohs(get_unaligned_be16(dp_packet_data(&s->rxbuf)));
            if (frame_len < ETH_HEADER_LEN) {
                /* Too short to be a valid Ethernet frame. */
                error = EPROTO;
                n = 0;
            } else {
                n = (2 + frame_len) - dp_packet_size(&s->rxbuf);
            }
        }
    }
    if (!error) {
        int retval;

        dp_packet_prealloc_tailroom(&s->rxbuf, n);
        retval = stream_recv(s->stream, dp_packet_tail(&s->rxbuf), n);

        if (retval > 0) {
            dp_packet_set_size(&s->rxbuf, dp_packet_size(&s->rxbuf) + retval);
            if (retval == n && dp_packet_size(&s->rxbuf) > 2) {
                /* Full frame received: strip the length header and hand a
                 * copy to the device's receive path. */
                dp_packet_pull(&s->rxbuf, 2);
                netdev_dummy_queue_packet(dev,
                                          dp_packet_clone(&s->rxbuf), 0);
                dp_packet_clear(&s->rxbuf);
            }
        } else if (retval != -EAGAIN) {
            /* Distinguish hard errors, truncated frames, and clean EOF. */
            error = (retval < 0 ? -retval
                     : dp_packet_size(&s->rxbuf) ? EPROTO
                     : EOF);
        }
    }

    return error;
}
288
/* Closes 's''s underlying stream and releases its rx buffer and any
 * packets still queued for transmission.  Does not free 's' itself. */
static void
dummy_packet_stream_close(struct dummy_packet_stream *s)
{
    stream_close(s->stream);
    dp_packet_uninit(&s->rxbuf);
    pkt_list_delete(&s->txq);
}
296
/* Initializes 'conn' to the unconfigured state (type NONE, all fields
 * zeroed). */
static void
dummy_packet_conn_init(struct dummy_packet_conn *conn)
{
    memset(conn, 0, sizeof *conn);
    conn->type = NONE;
}
303
/* Reports 'conn''s configuration into 'args' as either a "pstream" or
 * "stream" key, mirroring the options accepted by
 * dummy_packet_conn_set_config(). */
static void
dummy_packet_conn_get_config(struct dummy_packet_conn *conn, struct smap *args)
{

    switch (conn->type) {
    case PASSIVE:
        smap_add(args, "pstream", pstream_get_name(conn->pconn.pstream));
        break;

    case ACTIVE:
        smap_add(args, "stream", stream_get_name(conn->rconn.rstream->stream));
        break;

    case NONE:
    default:
        break;
    }
}
322
323 static void
324 dummy_packet_conn_close(struct dummy_packet_conn *conn)
325 {
326 int i;
327 struct dummy_packet_pconn *pconn = &conn->pconn;
328 struct dummy_packet_rconn *rconn = &conn->rconn;
329
330 switch (conn->type) {
331 case PASSIVE:
332 pstream_close(pconn->pstream);
333 for (i = 0; i < pconn->n_streams; i++) {
334 dummy_packet_stream_close(pconn->streams[i]);
335 free(pconn->streams[i]);
336 }
337 free(pconn->streams);
338 pconn->pstream = NULL;
339 pconn->streams = NULL;
340 break;
341
342 case ACTIVE:
343 dummy_packet_stream_close(rconn->rstream);
344 free(rconn->rstream);
345 rconn->rstream = NULL;
346 reconnect_destroy(rconn->reconnect);
347 rconn->reconnect = NULL;
348 break;
349
350 case NONE:
351 default:
352 break;
353 }
354
355 conn->type = NONE;
356 memset(conn, 0, sizeof *conn);
357 }
358
/* Reconfigures 'conn' from 'args':
 *
 *   - "pstream": listen passively on the given pstream target.
 *   - "stream":  actively connect (with reconnect backoff) to the target.
 *
 * Specifying both is rejected with a warning.  If the requested target
 * matches the current configuration, this is a no-op; otherwise any old
 * connection is torn down first. */
static void
dummy_packet_conn_set_config(struct dummy_packet_conn *conn,
                             const struct smap *args)
{
    const char *pstream = smap_get(args, "pstream");
    const char *stream = smap_get(args, "stream");

    if (pstream && stream) {
        VLOG_WARN("Open failed: both %s and %s are configured",
                  pstream, stream);
        return;
    }

    switch (conn->type) {
    case PASSIVE:
        if (pstream &&
            !strcmp(pstream_get_name(conn->pconn.pstream), pstream)) {
            return;     /* Unchanged; keep existing listener. */
        }
        dummy_packet_conn_close(conn);
        break;
    case ACTIVE:
        if (stream &&
            !strcmp(stream_get_name(conn->rconn.rstream->stream), stream)) {
            return;     /* Unchanged; keep existing connection. */
        }
        dummy_packet_conn_close(conn);
        break;
    case NONE:
    default:
        break;
    }

    if (pstream) {
        int error;

        error = pstream_open(pstream, &conn->pconn.pstream, DSCP_DEFAULT);
        if (error) {
            VLOG_WARN("%s: open failed (%s)", pstream, ovs_strerror(error));
        } else {
            conn->type = PASSIVE;
        }
    }

    if (stream) {
        int error;
        struct stream *active_stream;
        struct reconnect *reconnect;

        /* Reconnect FSM: unlimited retries, 100 ms initial backoff,
         * no liveness probing. */
        reconnect = reconnect_create(time_msec());
        reconnect_set_name(reconnect, stream);
        reconnect_set_passive(reconnect, false, time_msec());
        reconnect_enable(reconnect, time_msec());
        reconnect_set_backoff(reconnect, 100, INT_MAX);
        reconnect_set_probe_interval(reconnect, 0);
        conn->rconn.reconnect = reconnect;
        conn->type = ACTIVE;

        error = stream_open(stream, &active_stream, DSCP_DEFAULT);
        conn->rconn.rstream = dummy_packet_stream_create(active_stream);

        switch (error) {
        case 0:
            reconnect_connected(reconnect, time_msec());
            break;

        case EAGAIN:
            /* Connection in progress; dummy_rconn_run() will finish it. */
            reconnect_connecting(reconnect, time_msec());
            break;

        default:
            reconnect_connect_failed(reconnect, time_msec(), error);
            stream_close(active_stream);
            conn->rconn.rstream->stream = NULL;
            break;
        }
    }
}
437
/* Runs one iteration for a passive (listening) connection on 'dev':
 * accepts any pending new client and services I/O on every accepted
 * stream, dropping streams whose I/O fails. */
static void
dummy_pconn_run(struct netdev_dummy *dev)
    OVS_REQUIRES(dev->mutex)
{
    struct stream *new_stream;
    struct dummy_packet_pconn *pconn = &dev->conn.pconn;
    int error;
    size_t i;

    error = pstream_accept(pconn->pstream, &new_stream);
    if (!error) {
        struct dummy_packet_stream *s;

        /* Grow the stream array by one slot for the new client. */
        pconn->streams = xrealloc(pconn->streams,
                                  ((pconn->n_streams + 1)
                                   * sizeof s));
        s = xmalloc(sizeof *s);
        pconn->streams[pconn->n_streams++] = s;
        dummy_packet_stream_init(s, new_stream);
    } else if (error != EAGAIN) {
        /* Fatal accept error: stop listening entirely. */
        VLOG_WARN("%s: accept failed (%s)",
                  pstream_get_name(pconn->pstream), ovs_strerror(error));
        pstream_close(pconn->pstream);
        pconn->pstream = NULL;
        dev->conn.type = NONE;
    }

    for (i = 0; i < pconn->n_streams; ) {
        struct dummy_packet_stream *s = pconn->streams[i];

        error = dummy_packet_stream_run(dev, s);
        if (error) {
            VLOG_DBG("%s: closing connection (%s)",
                     stream_get_name(s->stream),
                     ovs_retval_to_string(error));
            dummy_packet_stream_close(s);
            free(s);
            /* Swap-remove: fill the hole with the last stream and do not
             * advance 'i', so the swapped-in stream is serviced too. */
            pconn->streams[i] = pconn->streams[--pconn->n_streams];
        } else {
            i++;
        }
    }
}
481
/* Runs one iteration for an active (client) connection on 'dev': drives
 * the reconnect FSM, (re)establishes the stream when told to connect, and
 * services I/O while connected, reporting failures back to the FSM. */
static void
dummy_rconn_run(struct netdev_dummy *dev)
    OVS_REQUIRES(dev->mutex)
{
    struct dummy_packet_rconn *rconn = &dev->conn.rconn;

    switch (reconnect_run(rconn->reconnect, time_msec())) {
    case RECONNECT_CONNECT:
        {
            int error;

            if (rconn->rstream->stream) {
                /* A connect is already in flight; check its progress. */
                error = stream_connect(rconn->rstream->stream);
            } else {
                error = stream_open(reconnect_get_name(rconn->reconnect),
                                    &rconn->rstream->stream, DSCP_DEFAULT);
            }

            switch (error) {
            case 0:
                reconnect_connected(rconn->reconnect, time_msec());
                break;

            case EAGAIN:
                reconnect_connecting(rconn->reconnect, time_msec());
                break;

            default:
                reconnect_connect_failed(rconn->reconnect, time_msec(), error);
                stream_close(rconn->rstream->stream);
                rconn->rstream->stream = NULL;
                break;
            }
        }
        break;

    case RECONNECT_DISCONNECT:
    case RECONNECT_PROBE:
    default:
        break;
    }

    if (reconnect_is_connected(rconn->reconnect)) {
        int err;

        err = dummy_packet_stream_run(dev, rconn->rstream);

        if (err) {
            /* I/O failed: tell the FSM so it schedules a reconnect. */
            reconnect_disconnected(rconn->reconnect, time_msec(), err);
            stream_close(rconn->rstream->stream);
            rconn->rstream->stream = NULL;
        }
    }
}
536
/* Dispatches one run iteration to the handler matching 'dev''s connection
 * type; a NONE connection needs no servicing. */
static void
dummy_packet_conn_run(struct netdev_dummy *dev)
    OVS_REQUIRES(dev->mutex)
{
    switch (dev->conn.type) {
    case PASSIVE:
        dummy_pconn_run(dev);
        break;

    case ACTIVE:
        dummy_rconn_run(dev);
        break;

    case NONE:
    default:
        break;
    }
}
555
556 static void
557 dummy_packet_conn_wait(struct dummy_packet_conn *conn)
558 {
559 int i;
560 switch (conn->type) {
561 case PASSIVE:
562 pstream_wait(conn->pconn.pstream);
563 for (i = 0; i < conn->pconn.n_streams; i++) {
564 struct dummy_packet_stream *s = conn->pconn.streams[i];
565 dummy_packet_stream_wait(s);
566 }
567 break;
568 case ACTIVE:
569 if (reconnect_is_connected(conn->rconn.reconnect)) {
570 dummy_packet_stream_wait(conn->rconn.rstream);
571 }
572 break;
573
574 case NONE:
575 default:
576 break;
577 }
578 }
579
580 static void
581 dummy_packet_conn_send(struct dummy_packet_conn *conn,
582 const void *buffer, size_t size)
583 {
584 int i;
585
586 switch (conn->type) {
587 case PASSIVE:
588 for (i = 0; i < conn->pconn.n_streams; i++) {
589 struct dummy_packet_stream *s = conn->pconn.streams[i];
590
591 dummy_packet_stream_send(s, buffer, size);
592 pstream_wait(conn->pconn.pstream);
593 }
594 break;
595
596 case ACTIVE:
597 if (reconnect_is_connected(conn->rconn.reconnect)) {
598 dummy_packet_stream_send(conn->rconn.rstream, buffer, size);
599 dummy_packet_stream_wait(conn->rconn.rstream);
600 }
601 break;
602
603 case NONE:
604 default:
605 break;
606 }
607 }
608
609 static enum dummy_netdev_conn_state
610 dummy_netdev_get_conn_state(struct dummy_packet_conn *conn)
611 {
612 enum dummy_netdev_conn_state state;
613
614 if (conn->type == ACTIVE) {
615 if (reconnect_is_connected(conn->rconn.reconnect)) {
616 state = CONN_STATE_CONNECTED;
617 } else {
618 state = CONN_STATE_NOT_CONNECTED;
619 }
620 } else {
621 state = CONN_STATE_UNKNOWN;
622 }
623
624 return state;
625 }
626
/* netdev_class 'run' callback: services the packet connection of every
 * dummy device belonging to 'netdev_class'. */
static void
netdev_dummy_run(const struct netdev_class *netdev_class)
{
    struct netdev_dummy *dev;

    ovs_mutex_lock(&dummy_list_mutex);
    LIST_FOR_EACH (dev, list_node, &dummy_list) {
        /* 'dummy_list' holds devices of all dummy classes; skip others. */
        if (netdev_get_class(&dev->up) != netdev_class) {
            continue;
        }
        ovs_mutex_lock(&dev->mutex);
        dummy_packet_conn_run(dev);
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dummy_list_mutex);
}
643
/* netdev_class 'wait' callback: registers poll-loop wakeups for every
 * dummy device belonging to 'netdev_class'. */
static void
netdev_dummy_wait(const struct netdev_class *netdev_class)
{
    struct netdev_dummy *dev;

    ovs_mutex_lock(&dummy_list_mutex);
    LIST_FOR_EACH (dev, list_node, &dummy_list) {
        /* 'dummy_list' holds devices of all dummy classes; skip others. */
        if (netdev_get_class(&dev->up) != netdev_class) {
            continue;
        }
        ovs_mutex_lock(&dev->mutex);
        dummy_packet_conn_wait(&dev->conn);
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dummy_list_mutex);
}
660
/* netdev_class alloc callback: returns a zeroed netdev_dummy's embedded
 * struct netdev. */
static struct netdev *
netdev_dummy_alloc(void)
{
    struct netdev_dummy *netdev = xzalloc(sizeof *netdev);
    return &netdev->up;
}
667
/* netdev_class construct callback: initializes the dummy device with a
 * unique aa:55:xx:xx:xx:xx MAC address, default MTU, custom counters, and
 * links it into the global 'dummy_list'.  Always returns 0. */
static int
netdev_dummy_construct(struct netdev *netdev_)
{
    /* Counter used to derive a distinct MAC per constructed device. */
    static atomic_count next_n = ATOMIC_COUNT_INIT(0xaa550000);
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
    unsigned int n;

    n = atomic_count_inc(&next_n);

    ovs_mutex_init(&netdev->mutex);
    ovs_mutex_lock(&netdev->mutex);
    netdev->hwaddr.ea[0] = 0xaa;
    netdev->hwaddr.ea[1] = 0x55;
    netdev->hwaddr.ea[2] = n >> 24;
    netdev->hwaddr.ea[3] = n >> 16;
    netdev->hwaddr.ea[4] = n >> 8;
    netdev->hwaddr.ea[5] = n;
    netdev->mtu = 1500;
    netdev->flags = 0;
    /* Negative errno signals "no ifindex available". */
    netdev->ifindex = -EOPNOTSUPP;
    netdev->requested_n_rxq = netdev_->n_rxq;
    netdev->requested_n_txq = netdev_->n_txq;
    netdev->numa_id = 0;

    memset(&netdev->custom_stats, 0, sizeof(netdev->custom_stats));

    ovs_strlcpy(netdev->custom_stats[0].name,
                "rx_custom_packets_1", NETDEV_CUSTOM_STATS_NAME_SIZE);
    ovs_strlcpy(netdev->custom_stats[1].name,
                "rx_custom_packets_2", NETDEV_CUSTOM_STATS_NAME_SIZE);

    dummy_packet_conn_init(&netdev->conn);

    ovs_list_init(&netdev->rxes);
    ovs_mutex_unlock(&netdev->mutex);

    ovs_mutex_lock(&dummy_list_mutex);
    ovs_list_push_back(&dummy_list, &netdev->list_node);
    ovs_mutex_unlock(&dummy_list_mutex);

    return 0;
}
710
711 static void
712 netdev_dummy_destruct(struct netdev *netdev_)
713 {
714 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
715
716 ovs_mutex_lock(&dummy_list_mutex);
717 ovs_list_remove(&netdev->list_node);
718 ovs_mutex_unlock(&dummy_list_mutex);
719
720 ovs_mutex_lock(&netdev->mutex);
721 if (netdev->rxq_pcap) {
722 fclose(netdev->rxq_pcap);
723 }
724 if (netdev->tx_pcap && netdev->tx_pcap != netdev->rxq_pcap) {
725 fclose(netdev->tx_pcap);
726 }
727 dummy_packet_conn_close(&netdev->conn);
728 netdev->conn.type = NONE;
729
730 ovs_mutex_unlock(&netdev->mutex);
731 ovs_mutex_destroy(&netdev->mutex);
732 }
733
/* netdev_class dealloc callback: frees the containing netdev_dummy. */
static void
netdev_dummy_dealloc(struct netdev *netdev_)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    free(netdev);
}
741
/* netdev_class get_config callback: reports 'ifindex' (when valid), the
 * packet-connection target, and, for PMD dummies only, the requested and
 * configured rx/tx queue counts.  Always returns 0. */
static int
netdev_dummy_get_config(const struct netdev *dev, struct smap *args)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(dev);

    ovs_mutex_lock(&netdev->mutex);

    /* Negative values encode "ifindex unavailable" (see construct). */
    if (netdev->ifindex >= 0) {
        smap_add_format(args, "ifindex", "%d", netdev->ifindex);
    }

    dummy_packet_conn_get_config(&netdev->conn, args);

    /* 'dummy-pmd' specific config. */
    if (!netdev_is_pmd(dev)) {
        goto exit;
    }
    smap_add_format(args, "requested_rx_queues", "%d", netdev->requested_n_rxq);
    smap_add_format(args, "configured_rx_queues", "%d", dev->n_rxq);
    smap_add_format(args, "requested_tx_queues", "%d", netdev->requested_n_txq);
    smap_add_format(args, "configured_tx_queues", "%d", dev->n_txq);

exit:
    ovs_mutex_unlock(&netdev->mutex);
    return 0;
}
768
/* netdev_class get_addr_list callback: returns the device's assigned IPv4
 * (as an IPv4-mapped IPv6 address) and/or IPv6 address with their masks.
 * On success stores malloc'd arrays in '*paddr'/'*pmask' (owned by the
 * caller) and the count in '*n_addr'; returns EADDRNOTAVAIL if no address
 * is configured.  If 'paddr' is NULL the arrays are freed here. */
static int
netdev_dummy_get_addr_list(const struct netdev *netdev_, struct in6_addr **paddr,
                           struct in6_addr **pmask, int *n_addr)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
    int cnt = 0, i = 0, err = 0;
    struct in6_addr *addr, *mask;

    ovs_mutex_lock(&netdev->mutex);
    /* First pass: count configured addresses to size the arrays. */
    if (netdev->address.s_addr != INADDR_ANY) {
        cnt++;
    }

    if (ipv6_addr_is_set(&netdev->ipv6)) {
        cnt++;
    }
    if (!cnt) {
        err = EADDRNOTAVAIL;
        goto out;
    }
    addr = xmalloc(sizeof *addr * cnt);
    mask = xmalloc(sizeof *mask * cnt);
    if (netdev->address.s_addr != INADDR_ANY) {
        in6_addr_set_mapped_ipv4(&addr[i], netdev->address.s_addr);
        in6_addr_set_mapped_ipv4(&mask[i], netdev->netmask.s_addr);
        i++;
    }

    if (ipv6_addr_is_set(&netdev->ipv6)) {
        memcpy(&addr[i], &netdev->ipv6, sizeof *addr);
        memcpy(&mask[i], &netdev->ipv6_mask, sizeof *mask);
        i++;
    }
    if (paddr) {
        *paddr = addr;
        *pmask = mask;
        *n_addr = cnt;
    } else {
        free(addr);
        free(mask);
    }
out:
    ovs_mutex_unlock(&netdev->mutex);

    return err;
}
815
/* Assigns IPv4 'address'/'netmask' to the device and bumps its change
 * sequence.  Always returns 0. */
static int
netdev_dummy_set_in4(struct netdev *netdev_, struct in_addr address,
                     struct in_addr netmask)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    ovs_mutex_lock(&netdev->mutex);
    netdev->address = address;
    netdev->netmask = netmask;
    netdev_change_seq_changed(netdev_);
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}
830
/* Assigns IPv6 '*in6'/'*mask' to the device and bumps its change
 * sequence.  Always returns 0. */
static int
netdev_dummy_set_in6(struct netdev *netdev_, struct in6_addr *in6,
                     struct in6_addr *mask)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    ovs_mutex_lock(&netdev->mutex);
    netdev->ipv6 = *in6;
    netdev->ipv6_mask = *mask;
    netdev_change_seq_changed(netdev_);
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}
845
846 #define DUMMY_MAX_QUEUES_PER_PORT 1024
847
848 static int
849 netdev_dummy_set_config(struct netdev *netdev_, const struct smap *args,
850 char **errp OVS_UNUSED)
851 {
852 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
853 const char *pcap;
854 int new_n_rxq, new_n_txq, new_numa_id;
855
856 ovs_mutex_lock(&netdev->mutex);
857 netdev->ifindex = smap_get_int(args, "ifindex", -EOPNOTSUPP);
858
859 dummy_packet_conn_set_config(&netdev->conn, args);
860
861 if (netdev->rxq_pcap) {
862 fclose(netdev->rxq_pcap);
863 }
864 if (netdev->tx_pcap && netdev->tx_pcap != netdev->rxq_pcap) {
865 fclose(netdev->tx_pcap);
866 }
867 netdev->rxq_pcap = netdev->tx_pcap = NULL;
868 pcap = smap_get(args, "pcap");
869 if (pcap) {
870 netdev->rxq_pcap = netdev->tx_pcap = ovs_pcap_open(pcap, "ab");
871 } else {
872 const char *rxq_pcap = smap_get(args, "rxq_pcap");
873 const char *tx_pcap = smap_get(args, "tx_pcap");
874
875 if (rxq_pcap) {
876 netdev->rxq_pcap = ovs_pcap_open(rxq_pcap, "ab");
877 }
878 if (tx_pcap) {
879 netdev->tx_pcap = ovs_pcap_open(tx_pcap, "ab");
880 }
881 }
882
883 netdev_change_seq_changed(netdev_);
884
885 /* 'dummy-pmd' specific config. */
886 if (!netdev_->netdev_class->is_pmd) {
887 goto exit;
888 }
889
890 new_n_rxq = MAX(smap_get_int(args, "n_rxq", NR_QUEUE), 1);
891 new_n_txq = MAX(smap_get_int(args, "n_txq", NR_QUEUE), 1);
892
893 if (new_n_rxq > DUMMY_MAX_QUEUES_PER_PORT ||
894 new_n_txq > DUMMY_MAX_QUEUES_PER_PORT) {
895 VLOG_WARN("The one or both of interface %s queues"
896 "(rxq: %d, txq: %d) exceed %d. Sets it %d.\n",
897 netdev_get_name(netdev_),
898 new_n_rxq,
899 new_n_txq,
900 DUMMY_MAX_QUEUES_PER_PORT,
901 DUMMY_MAX_QUEUES_PER_PORT);
902
903 new_n_rxq = MIN(DUMMY_MAX_QUEUES_PER_PORT, new_n_rxq);
904 new_n_txq = MIN(DUMMY_MAX_QUEUES_PER_PORT, new_n_txq);
905 }
906
907 new_numa_id = smap_get_int(args, "numa_id", 0);
908 if (new_n_rxq != netdev->requested_n_rxq
909 || new_n_txq != netdev->requested_n_txq
910 || new_numa_id != netdev->requested_numa_id) {
911 netdev->requested_n_rxq = new_n_rxq;
912 netdev->requested_n_txq = new_n_txq;
913 netdev->requested_numa_id = new_numa_id;
914 netdev_request_reconfigure(netdev_);
915 }
916
917 exit:
918 ovs_mutex_unlock(&netdev->mutex);
919 return 0;
920 }
921
/* netdev_class get_numa_id callback: returns the device's NUMA node id
 * under its mutex. */
static int
netdev_dummy_get_numa_id(const struct netdev *netdev_)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    ovs_mutex_lock(&netdev->mutex);
    int numa_id = netdev->numa_id;
    ovs_mutex_unlock(&netdev->mutex);

    return numa_id;
}
933
/* Sets the number of tx queues and rx queues for the dummy PMD interface,
 * applying the values remembered by netdev_dummy_set_config().  Always
 * returns 0. */
static int
netdev_dummy_reconfigure(struct netdev *netdev_)
{
    struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);

    ovs_mutex_lock(&netdev->mutex);

    netdev_->n_txq = netdev->requested_n_txq;
    netdev_->n_rxq = netdev->requested_n_rxq;
    netdev->numa_id = netdev->requested_numa_id;

    ovs_mutex_unlock(&netdev->mutex);
    return 0;
}
949
/* rxq alloc callback: returns a zeroed netdev_rxq_dummy's embedded
 * struct netdev_rxq. */
static struct netdev_rxq *
netdev_dummy_rxq_alloc(void)
{
    struct netdev_rxq_dummy *rx = xzalloc(sizeof *rx);
    return &rx->up;
}
956
/* rxq construct callback: registers the queue on its device's 'rxes'
 * list and initializes its receive queue and notification seq.  Always
 * returns 0. */
static int
netdev_dummy_rxq_construct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    ovs_list_push_back(&netdev->rxes, &rx->node);
    ovs_list_init(&rx->recv_queue);
    rx->recv_queue_len = 0;
    rx->seq = seq_create();
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}
972
/* rxq destruct callback: unregisters the queue from its device, frees any
 * undelivered packets, and destroys the notification seq. */
static void
netdev_dummy_rxq_destruct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    ovs_list_remove(&rx->node);
    pkt_list_delete(&rx->recv_queue);
    ovs_mutex_unlock(&netdev->mutex);
    seq_destroy(rx->seq);
}
985
/* rxq dealloc callback: frees the containing netdev_rxq_dummy. */
static void
netdev_dummy_rxq_dealloc(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);

    free(rx);
}
993
/* rxq recv callback: pops one packet from the queue into 'batch' and
 * updates rx statistics.  Returns EAGAIN when the queue is empty.  The
 * queue-fill hint '*qfill' is not supported (set to -ENOTSUP). */
static int
netdev_dummy_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet_batch *batch,
                      int *qfill)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
    struct dp_packet *packet;

    ovs_mutex_lock(&netdev->mutex);
    if (!ovs_list_is_empty(&rx->recv_queue)) {
        struct pkt_list_node *pkt_node;

        ASSIGN_CONTAINER(pkt_node, ovs_list_pop_front(&rx->recv_queue), list_node);
        packet = pkt_node->pkt;
        free(pkt_node);
        rx->recv_queue_len--;
    } else {
        packet = NULL;
    }
    ovs_mutex_unlock(&netdev->mutex);

    if (!packet) {
        if (netdev_is_pmd(&netdev->up)) {
            /* If 'netdev' is a PMD device, this is called as part of the PMD
             * thread busy loop.  We yield here (without quiescing) for two
             * reasons:
             *
             * - To reduce the CPU utilization during the testsuite
             * - To give valgrind a chance to switch thread. According
             *   to the valgrind documentation, there's a big lock that
             *   prevents multiple thread from being executed at the same
             *   time.  On my system, without this sleep, the pmd threads
             *   testcases fail under valgrind, because ovs-vswitchd becomes
             *   unresponsive. */
            sched_yield();
        }
        return EAGAIN;
    }
    /* Re-take the mutex only for the stats update; the packet itself is
     * already owned by this thread. */
    ovs_mutex_lock(&netdev->mutex);
    netdev->stats.rx_packets++;
    netdev->stats.rx_bytes += dp_packet_size(packet);
    netdev->custom_stats[0].value++;
    netdev->custom_stats[1].value++;
    ovs_mutex_unlock(&netdev->mutex);

    batch->packets[0] = packet;
    batch->count = 1;

    if (qfill) {
        *qfill = -ENOTSUP;
    }

    return 0;
}
1048
/* rxq wait callback: wakes the poll loop immediately if packets are
 * already queued, otherwise waits on the queue's seq for new arrivals. */
static void
netdev_dummy_rxq_wait(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);
    /* Read the seq before checking the queue to avoid missing a wakeup
     * between the check and seq_wait(). */
    uint64_t seq = seq_read(rx->seq);

    ovs_mutex_lock(&netdev->mutex);
    if (!ovs_list_is_empty(&rx->recv_queue)) {
        poll_immediate_wake();
    } else {
        seq_wait(rx->seq, seq);
    }
    ovs_mutex_unlock(&netdev->mutex);
}
1064
/* rxq drain callback: discards all queued packets and notifies waiters
 * via the queue's seq.  Always returns 0. */
static int
netdev_dummy_rxq_drain(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dummy *rx = netdev_rxq_dummy_cast(rxq_);
    struct netdev_dummy *netdev = netdev_dummy_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    pkt_list_delete(&rx->recv_queue);
    rx->recv_queue_len = 0;
    ovs_mutex_unlock(&netdev->mutex);

    seq_change(rx->seq);

    return 0;
}
1080
/* netdev_class send callback: "transmits" each Ethernet packet in 'batch'
 * by forwarding it over the configured packet connection, answering ARP
 * requests for the device's own IPv4 address, and appending to the tx
 * pcap file when configured.  Returns 0 on success, EPFNOSUPPORT for
 * non-Ethernet packets, or EMSGSIZE for runts/oversized frames; the first
 * error aborts the rest of the batch.  The batch is always freed. */
static int
netdev_dummy_send(struct netdev *netdev, int qid OVS_UNUSED,
                  struct dp_packet_batch *batch,
                  bool concurrent_txq OVS_UNUSED)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);
    int error = 0;

    struct dp_packet *packet;
    DP_PACKET_BATCH_FOR_EACH(i, packet, batch) {
        const void *buffer = dp_packet_data(packet);
        size_t size = dp_packet_size(packet);

        /* Only full Ethernet frames are supported. */
        if (batch->packets[i]->packet_type != htonl(PT_ETH)) {
            error = EPFNOSUPPORT;
            break;
        }

        if (size < ETH_HEADER_LEN) {
            error = EMSGSIZE;
            break;
        } else {
            const struct eth_header *eth = buffer;
            int max_size;

            ovs_mutex_lock(&dev->mutex);
            max_size = dev->mtu + ETH_HEADER_LEN;
            ovs_mutex_unlock(&dev->mutex);

            /* Allow one VLAN tag beyond the MTU. */
            if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
                max_size += VLAN_HEADER_LEN;
            }
            if (size > max_size) {
                error = EMSGSIZE;
                break;
            }
        }

        ovs_mutex_lock(&dev->mutex);
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += size;

        dummy_packet_conn_send(&dev->conn, buffer, size);

        /* Reply to ARP requests for 'dev''s assigned IP address. */
        if (dev->address.s_addr) {
            struct dp_packet dp;
            struct flow flow;

            dp_packet_use_const(&dp, buffer, size);
            flow_extract(&dp, &flow);
            if (flow.dl_type == htons(ETH_TYPE_ARP)
                && flow.nw_proto == ARP_OP_REQUEST
                && flow.nw_dst == dev->address.s_addr) {
                struct dp_packet *reply = dp_packet_new(0);
                compose_arp(reply, ARP_OP_REPLY, dev->hwaddr, flow.dl_src,
                            false, flow.nw_dst, flow.nw_src);
                netdev_dummy_queue_packet(dev, reply, 0);
            }
        }

        if (dev->tx_pcap) {
            struct dp_packet dp;

            dp_packet_use_const(&dp, buffer, size);
            ovs_pcap_write(dev->tx_pcap, &dp);
            fflush(dev->tx_pcap);
        }

        ovs_mutex_unlock(&dev->mutex);
    }

    /* Sender retains no ownership: free the whole batch regardless of
     * 'error'. */
    dp_packet_delete_batch(batch, true);

    return error;
}
1157
/* netdev_class set_etheraddr callback: updates the MAC address and bumps
 * the change sequence only when it actually changes.  Always returns 0. */
static int
netdev_dummy_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        dev->hwaddr = mac;
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
1172
/* netdev_class get_etheraddr callback: copies the MAC address into
 * '*mac'.  Always returns 0. */
static int
netdev_dummy_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mac = dev->hwaddr;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
1184
/* netdev_class get_mtu callback: stores the MTU in '*mtup'.  Always
 * returns 0. */
static int
netdev_dummy_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dummy *dev = netdev_dummy_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
1196
1197 #define DUMMY_MIN_MTU 68
1198 #define DUMMY_MAX_MTU 65535
1199
1200 static int
1201 netdev_dummy_set_mtu(struct netdev *netdev, int mtu)
1202 {
1203 if (mtu < DUMMY_MIN_MTU || mtu > DUMMY_MAX_MTU) {
1204 return EINVAL;
1205 }
1206
1207 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1208
1209 ovs_mutex_lock(&dev->mutex);
1210 if (dev->mtu != mtu) {
1211 dev->mtu = mtu;
1212 netdev_change_seq_changed(netdev);
1213 }
1214 ovs_mutex_unlock(&dev->mutex);
1215
1216 return 0;
1217 }
1218
1219 static int
1220 netdev_dummy_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
1221 {
1222 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1223
1224 ovs_mutex_lock(&dev->mutex);
1225 /* Passing only collected counters */
1226 stats->tx_packets = dev->stats.tx_packets;
1227 stats->tx_bytes = dev->stats.tx_bytes;
1228 stats->rx_packets = dev->stats.rx_packets;
1229 stats->rx_bytes = dev->stats.rx_bytes;
1230 ovs_mutex_unlock(&dev->mutex);
1231
1232 return 0;
1233 }
1234
1235 static int
1236 netdev_dummy_get_custom_stats(const struct netdev *netdev,
1237 struct netdev_custom_stats *custom_stats)
1238 {
1239 int i;
1240
1241 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1242
1243 custom_stats->size = 2;
1244 custom_stats->counters =
1245 (struct netdev_custom_counter *) xcalloc(C_STATS_SIZE,
1246 sizeof(struct netdev_custom_counter));
1247
1248 ovs_mutex_lock(&dev->mutex);
1249 for (i = 0 ; i < C_STATS_SIZE ; i++) {
1250 custom_stats->counters[i].value = dev->custom_stats[i].value;
1251 ovs_strlcpy(custom_stats->counters[i].name,
1252 dev->custom_stats[i].name,
1253 NETDEV_CUSTOM_STATS_NAME_SIZE);
1254 }
1255 ovs_mutex_unlock(&dev->mutex);
1256
1257 return 0;
1258 }
1259
1260 static int
1261 netdev_dummy_get_queue(const struct netdev *netdev OVS_UNUSED,
1262 unsigned int queue_id, struct smap *details OVS_UNUSED)
1263 {
1264 if (queue_id == 0) {
1265 return 0;
1266 } else {
1267 return EINVAL;
1268 }
1269 }
1270
1271 static void
1272 netdev_dummy_init_queue_stats(struct netdev_queue_stats *stats)
1273 {
1274 *stats = (struct netdev_queue_stats) {
1275 .tx_bytes = UINT64_MAX,
1276 .tx_packets = UINT64_MAX,
1277 .tx_errors = UINT64_MAX,
1278 .created = LLONG_MIN,
1279 };
1280 }
1281
1282 static int
1283 netdev_dummy_get_queue_stats(const struct netdev *netdev OVS_UNUSED,
1284 unsigned int queue_id,
1285 struct netdev_queue_stats *stats)
1286 {
1287 if (queue_id == 0) {
1288 netdev_dummy_init_queue_stats(stats);
1289 return 0;
1290 } else {
1291 return EINVAL;
1292 }
1293 }
1294
1295 struct netdev_dummy_queue_state {
1296 unsigned int next_queue;
1297 };
1298
1299 static int
1300 netdev_dummy_queue_dump_start(const struct netdev *netdev OVS_UNUSED,
1301 void **statep)
1302 {
1303 struct netdev_dummy_queue_state *state = xmalloc(sizeof *state);
1304 state->next_queue = 0;
1305 *statep = state;
1306 return 0;
1307 }
1308
1309 static int
1310 netdev_dummy_queue_dump_next(const struct netdev *netdev OVS_UNUSED,
1311 void *state_,
1312 unsigned int *queue_id,
1313 struct smap *details OVS_UNUSED)
1314 {
1315 struct netdev_dummy_queue_state *state = state_;
1316 if (state->next_queue == 0) {
1317 *queue_id = 0;
1318 state->next_queue++;
1319 return 0;
1320 } else {
1321 return EOF;
1322 }
1323 }
1324
1325 static int
1326 netdev_dummy_queue_dump_done(const struct netdev *netdev OVS_UNUSED,
1327 void *state)
1328 {
1329 free(state);
1330 return 0;
1331 }
1332
1333 static int
1334 netdev_dummy_dump_queue_stats(const struct netdev *netdev OVS_UNUSED,
1335 void (*cb)(unsigned int queue_id,
1336 struct netdev_queue_stats *,
1337 void *aux),
1338 void *aux)
1339 {
1340 struct netdev_queue_stats stats;
1341 netdev_dummy_init_queue_stats(&stats);
1342 cb(0, &stats, aux);
1343 return 0;
1344 }
1345
1346 static int
1347 netdev_dummy_get_ifindex(const struct netdev *netdev)
1348 {
1349 struct netdev_dummy *dev = netdev_dummy_cast(netdev);
1350 int ifindex;
1351
1352 ovs_mutex_lock(&dev->mutex);
1353 ifindex = dev->ifindex;
1354 ovs_mutex_unlock(&dev->mutex);
1355
1356 return ifindex;
1357 }
1358
1359 static int
1360 netdev_dummy_update_flags__(struct netdev_dummy *netdev,
1361 enum netdev_flags off, enum netdev_flags on,
1362 enum netdev_flags *old_flagsp)
1363 OVS_REQUIRES(netdev->mutex)
1364 {
1365 if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
1366 return EINVAL;
1367 }
1368
1369 *old_flagsp = netdev->flags;
1370 netdev->flags |= on;
1371 netdev->flags &= ~off;
1372 if (*old_flagsp != netdev->flags) {
1373 netdev_change_seq_changed(&netdev->up);
1374 }
1375
1376 return 0;
1377 }
1378
1379 static int
1380 netdev_dummy_update_flags(struct netdev *netdev_,
1381 enum netdev_flags off, enum netdev_flags on,
1382 enum netdev_flags *old_flagsp)
1383 {
1384 struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
1385 int error;
1386
1387 ovs_mutex_lock(&netdev->mutex);
1388 error = netdev_dummy_update_flags__(netdev, off, on, old_flagsp);
1389 ovs_mutex_unlock(&netdev->mutex);
1390
1391 return error;
1392 }
1393 \f
1394 /* Helper functions. */
1395
/* Expands to a struct netdev_class initializer for a dummy device
 * named 'NAME'.  'PMD' is the is_pmd flag and 'RECONFIGURE' the
 * reconfigure callback (or NULL).  Members must stay in struct
 * netdev_class declaration order; NULL marks unimplemented hooks.
 * (Also fixes the "RECOFIGURE" parameter-name typo.) */
#define NETDEV_DUMMY_CLASS(NAME, PMD, RECONFIGURE)          \
{                                                           \
    NAME,                                                   \
    PMD,                        /* is_pmd */                \
    NULL,                       /* init */                  \
    netdev_dummy_run,                                       \
    netdev_dummy_wait,                                      \
                                                            \
    netdev_dummy_alloc,                                     \
    netdev_dummy_construct,                                 \
    netdev_dummy_destruct,                                  \
    netdev_dummy_dealloc,                                   \
    netdev_dummy_get_config,                                \
    netdev_dummy_set_config,                                \
    NULL,                       /* get_tunnel_config */     \
    NULL,                       /* build_header */          \
    NULL,                       /* push_header */           \
    NULL,                       /* pop_header */            \
    netdev_dummy_get_numa_id,                               \
    NULL,                       /* set_tx_multiq */         \
                                                            \
    netdev_dummy_send,          /* send */                  \
    NULL,                       /* send_wait */             \
                                                            \
    netdev_dummy_set_etheraddr,                             \
    netdev_dummy_get_etheraddr,                             \
    netdev_dummy_get_mtu,                                   \
    netdev_dummy_set_mtu,                                   \
    netdev_dummy_get_ifindex,                               \
    NULL,                       /* get_carrier */           \
    NULL,                       /* get_carrier_resets */    \
    NULL,                       /* get_miimon */            \
    netdev_dummy_get_stats,                                 \
    netdev_dummy_get_custom_stats,                          \
                                                            \
    NULL,                       /* get_features */          \
    NULL,                       /* set_advertisements */    \
    NULL,                       /* get_pt_mode */           \
                                                            \
    NULL,                       /* set_policing */          \
    NULL,                       /* get_qos_types */         \
    NULL,                       /* get_qos_capabilities */  \
    NULL,                       /* get_qos */               \
    NULL,                       /* set_qos */               \
    netdev_dummy_get_queue,                                 \
    NULL,                       /* set_queue */             \
    NULL,                       /* delete_queue */          \
    netdev_dummy_get_queue_stats,                           \
    netdev_dummy_queue_dump_start,                          \
    netdev_dummy_queue_dump_next,                           \
    netdev_dummy_queue_dump_done,                           \
    netdev_dummy_dump_queue_stats,                          \
                                                            \
    NULL,                       /* set_in4 */               \
    netdev_dummy_get_addr_list,                             \
    NULL,                       /* add_router */            \
    NULL,                       /* get_next_hop */          \
    NULL,                       /* get_status */            \
    NULL,                       /* arp_lookup */            \
                                                            \
    netdev_dummy_update_flags,                              \
    RECONFIGURE,                                            \
                                                            \
    netdev_dummy_rxq_alloc,                                 \
    netdev_dummy_rxq_construct,                             \
    netdev_dummy_rxq_destruct,                              \
    netdev_dummy_rxq_dealloc,                               \
    netdev_dummy_rxq_recv,                                  \
    netdev_dummy_rxq_wait,                                  \
    netdev_dummy_rxq_drain,                                 \
                                                            \
    NO_OFFLOAD_API                                          \
}
1469
/* Plain dummy device. */
static const struct netdev_class dummy_class =
    NETDEV_DUMMY_CLASS("dummy", false, NULL);

/* Dummy stand-in for "internal" devices. */
static const struct netdev_class dummy_internal_class =
    NETDEV_DUMMY_CLASS("dummy-internal", false, NULL);

/* PMD variant: polled (is_pmd == true) and reconfigurable. */
static const struct netdev_class dummy_pmd_class =
    NETDEV_DUMMY_CLASS("dummy-pmd", true,
                       netdev_dummy_reconfigure);
1479
1480 static void
1481 pkt_list_delete(struct ovs_list *l)
1482 {
1483 struct pkt_list_node *pkt;
1484
1485 LIST_FOR_EACH_POP(pkt, list_node, l) {
1486 dp_packet_delete(pkt->pkt);
1487 free(pkt);
1488 }
1489 }
1490
/* Parses 's' as a packet given in hex and returns it.  Callers treat a
 * NULL result as a parse failure, so eth_from_hex() is presumed to set
 * 'packet' to NULL on error -- NOTE(review): confirm against
 * eth_from_hex()'s contract in lib/packets.c. */
static struct dp_packet *
eth_from_packet(const char *s)
{
    struct dp_packet *packet;

    eth_from_hex(s, &packet);

    return packet;
}
1498
1499 static struct dp_packet *
1500 eth_from_flow(const char *s, size_t packet_size)
1501 {
1502 enum odp_key_fitness fitness;
1503 struct dp_packet *packet;
1504 struct ofpbuf odp_key;
1505 struct flow flow;
1506 int error;
1507
1508 /* Convert string to datapath key.
1509 *
1510 * It would actually be nicer to parse an OpenFlow-like flow key here, but
1511 * the code for that currently calls exit() on parse error. We have to
1512 * settle for parsing a datapath key for now.
1513 */
1514 ofpbuf_init(&odp_key, 0);
1515 error = odp_flow_from_string(s, NULL, &odp_key, NULL);
1516 if (error) {
1517 ofpbuf_uninit(&odp_key);
1518 return NULL;
1519 }
1520
1521 /* Convert odp_key to flow. */
1522 fitness = odp_flow_key_to_flow(odp_key.data, odp_key.size, &flow);
1523 if (fitness == ODP_FIT_ERROR) {
1524 ofpbuf_uninit(&odp_key);
1525 return NULL;
1526 }
1527
1528 packet = dp_packet_new(0);
1529 if (packet_size) {
1530 flow_compose(packet, &flow, NULL, 0);
1531 if (dp_packet_size(packet) < packet_size) {
1532 packet_expand(packet, &flow, packet_size);
1533 } else if (dp_packet_size(packet) > packet_size){
1534 dp_packet_delete(packet);
1535 packet = NULL;
1536 }
1537 } else {
1538 flow_compose(packet, &flow, NULL, 64);
1539 }
1540
1541 ofpbuf_uninit(&odp_key);
1542 return packet;
1543 }
1544
1545 static void
1546 netdev_dummy_queue_packet__(struct netdev_rxq_dummy *rx, struct dp_packet *packet)
1547 {
1548 struct pkt_list_node *pkt_node = xmalloc(sizeof *pkt_node);
1549
1550 pkt_node->pkt = packet;
1551 ovs_list_push_back(&rx->recv_queue, &pkt_node->list_node);
1552 rx->recv_queue_len++;
1553 seq_change(rx->seq);
1554 }
1555
1556 static void
1557 netdev_dummy_queue_packet(struct netdev_dummy *dummy, struct dp_packet *packet,
1558 int queue_id)
1559 OVS_REQUIRES(dummy->mutex)
1560 {
1561 struct netdev_rxq_dummy *rx, *prev;
1562
1563 if (dummy->rxq_pcap) {
1564 ovs_pcap_write(dummy->rxq_pcap, packet);
1565 fflush(dummy->rxq_pcap);
1566 }
1567 prev = NULL;
1568 LIST_FOR_EACH (rx, node, &dummy->rxes) {
1569 if (rx->up.queue_id == queue_id &&
1570 rx->recv_queue_len < NETDEV_DUMMY_MAX_QUEUE) {
1571 if (prev) {
1572 netdev_dummy_queue_packet__(prev, dp_packet_clone(packet));
1573 }
1574 prev = rx;
1575 }
1576 }
1577 if (prev) {
1578 netdev_dummy_queue_packet__(prev, packet);
1579 } else {
1580 dp_packet_delete(packet);
1581 }
1582 }
1583
1584 static void
1585 netdev_dummy_receive(struct unixctl_conn *conn,
1586 int argc, const char *argv[], void *aux OVS_UNUSED)
1587 {
1588 struct netdev_dummy *dummy_dev;
1589 struct netdev *netdev;
1590 int i, k = 1, rx_qid = 0;
1591
1592 netdev = netdev_from_name(argv[k++]);
1593 if (!netdev || !is_dummy_class(netdev->netdev_class)) {
1594 unixctl_command_reply_error(conn, "no such dummy netdev");
1595 goto exit_netdev;
1596 }
1597 dummy_dev = netdev_dummy_cast(netdev);
1598
1599 ovs_mutex_lock(&dummy_dev->mutex);
1600
1601 if (argc > k + 1 && !strcmp(argv[k], "--qid")) {
1602 rx_qid = strtol(argv[k + 1], NULL, 10);
1603 if (rx_qid < 0 || rx_qid >= netdev->n_rxq) {
1604 unixctl_command_reply_error(conn, "bad rx queue id.");
1605 goto exit;
1606 }
1607 k += 2;
1608 }
1609
1610 for (i = k; i < argc; i++) {
1611 struct dp_packet *packet;
1612
1613 /* Try to parse 'argv[i]' as packet in hex. */
1614 packet = eth_from_packet(argv[i]);
1615
1616 if (!packet) {
1617 int packet_size = 0;
1618 const char *flow_str = argv[i];
1619
1620 /* Parse optional --len argument immediately follows a 'flow'. */
1621 if (argc >= i + 2 && !strcmp(argv[i + 1], "--len")) {
1622 packet_size = strtol(argv[i + 2], NULL, 10);
1623
1624 if (packet_size < ETH_TOTAL_MIN) {
1625 unixctl_command_reply_error(conn, "too small packet len");
1626 goto exit;
1627 }
1628 i += 2;
1629 }
1630 /* Try parse 'argv[i]' as odp flow. */
1631 packet = eth_from_flow(flow_str, packet_size);
1632
1633 if (!packet) {
1634 unixctl_command_reply_error(conn, "bad packet or flow syntax");
1635 goto exit;
1636 }
1637 }
1638
1639 netdev_dummy_queue_packet(dummy_dev, packet, rx_qid);
1640 }
1641
1642 unixctl_command_reply(conn, NULL);
1643
1644 exit:
1645 ovs_mutex_unlock(&dummy_dev->mutex);
1646 exit_netdev:
1647 netdev_close(netdev);
1648 }
1649
1650 static void
1651 netdev_dummy_set_admin_state__(struct netdev_dummy *dev, bool admin_state)
1652 OVS_REQUIRES(dev->mutex)
1653 {
1654 enum netdev_flags old_flags;
1655
1656 if (admin_state) {
1657 netdev_dummy_update_flags__(dev, 0, NETDEV_UP, &old_flags);
1658 } else {
1659 netdev_dummy_update_flags__(dev, NETDEV_UP, 0, &old_flags);
1660 }
1661 }
1662
1663 static void
1664 netdev_dummy_set_admin_state(struct unixctl_conn *conn, int argc,
1665 const char *argv[], void *aux OVS_UNUSED)
1666 {
1667 bool up;
1668
1669 if (!strcasecmp(argv[argc - 1], "up")) {
1670 up = true;
1671 } else if ( !strcasecmp(argv[argc - 1], "down")) {
1672 up = false;
1673 } else {
1674 unixctl_command_reply_error(conn, "Invalid Admin State");
1675 return;
1676 }
1677
1678 if (argc > 2) {
1679 struct netdev *netdev = netdev_from_name(argv[1]);
1680 if (netdev && is_dummy_class(netdev->netdev_class)) {
1681 struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);
1682
1683 ovs_mutex_lock(&dummy_dev->mutex);
1684 netdev_dummy_set_admin_state__(dummy_dev, up);
1685 ovs_mutex_unlock(&dummy_dev->mutex);
1686
1687 netdev_close(netdev);
1688 } else {
1689 unixctl_command_reply_error(conn, "Unknown Dummy Interface");
1690 netdev_close(netdev);
1691 return;
1692 }
1693 } else {
1694 struct netdev_dummy *netdev;
1695
1696 ovs_mutex_lock(&dummy_list_mutex);
1697 LIST_FOR_EACH (netdev, list_node, &dummy_list) {
1698 ovs_mutex_lock(&netdev->mutex);
1699 netdev_dummy_set_admin_state__(netdev, up);
1700 ovs_mutex_unlock(&netdev->mutex);
1701 }
1702 ovs_mutex_unlock(&dummy_list_mutex);
1703 }
1704 unixctl_command_reply(conn, "OK");
1705 }
1706
1707 static void
1708 display_conn_state__(struct ds *s, const char *name,
1709 enum dummy_netdev_conn_state state)
1710 {
1711 ds_put_format(s, "%s: ", name);
1712
1713 switch (state) {
1714 case CONN_STATE_CONNECTED:
1715 ds_put_cstr(s, "connected\n");
1716 break;
1717
1718 case CONN_STATE_NOT_CONNECTED:
1719 ds_put_cstr(s, "disconnected\n");
1720 break;
1721
1722 case CONN_STATE_UNKNOWN:
1723 default:
1724 ds_put_cstr(s, "unknown\n");
1725 break;
1726 };
1727 }
1728
1729 static void
1730 netdev_dummy_conn_state(struct unixctl_conn *conn, int argc,
1731 const char *argv[], void *aux OVS_UNUSED)
1732 {
1733 enum dummy_netdev_conn_state state = CONN_STATE_UNKNOWN;
1734 struct ds s;
1735
1736 ds_init(&s);
1737
1738 if (argc > 1) {
1739 const char *dev_name = argv[1];
1740 struct netdev *netdev = netdev_from_name(dev_name);
1741
1742 if (netdev && is_dummy_class(netdev->netdev_class)) {
1743 struct netdev_dummy *dummy_dev = netdev_dummy_cast(netdev);
1744
1745 ovs_mutex_lock(&dummy_dev->mutex);
1746 state = dummy_netdev_get_conn_state(&dummy_dev->conn);
1747 ovs_mutex_unlock(&dummy_dev->mutex);
1748
1749 netdev_close(netdev);
1750 }
1751 display_conn_state__(&s, dev_name, state);
1752 } else {
1753 struct netdev_dummy *netdev;
1754
1755 ovs_mutex_lock(&dummy_list_mutex);
1756 LIST_FOR_EACH (netdev, list_node, &dummy_list) {
1757 ovs_mutex_lock(&netdev->mutex);
1758 state = dummy_netdev_get_conn_state(&netdev->conn);
1759 ovs_mutex_unlock(&netdev->mutex);
1760 if (state != CONN_STATE_UNKNOWN) {
1761 display_conn_state__(&s, netdev->up.name, state);
1762 }
1763 }
1764 ovs_mutex_unlock(&dummy_list_mutex);
1765 }
1766
1767 unixctl_command_reply(conn, ds_cstr(&s));
1768 ds_destroy(&s);
1769 }
1770
1771 static void
1772 netdev_dummy_ip4addr(struct unixctl_conn *conn, int argc OVS_UNUSED,
1773 const char *argv[], void *aux OVS_UNUSED)
1774 {
1775 struct netdev *netdev = netdev_from_name(argv[1]);
1776
1777 if (netdev && is_dummy_class(netdev->netdev_class)) {
1778 struct in_addr ip, mask;
1779 char *error;
1780
1781 error = ip_parse_masked(argv[2], &ip.s_addr, &mask.s_addr);
1782 if (!error) {
1783 netdev_dummy_set_in4(netdev, ip, mask);
1784 unixctl_command_reply(conn, "OK");
1785 } else {
1786 unixctl_command_reply_error(conn, error);
1787 free(error);
1788 }
1789 } else {
1790 unixctl_command_reply_error(conn, "Unknown Dummy Interface");
1791 }
1792
1793 netdev_close(netdev);
1794 }
1795
1796 static void
1797 netdev_dummy_ip6addr(struct unixctl_conn *conn, int argc OVS_UNUSED,
1798 const char *argv[], void *aux OVS_UNUSED)
1799 {
1800 struct netdev *netdev = netdev_from_name(argv[1]);
1801
1802 if (netdev && is_dummy_class(netdev->netdev_class)) {
1803 struct in6_addr ip6;
1804 char *error;
1805 uint32_t plen;
1806
1807 error = ipv6_parse_cidr(argv[2], &ip6, &plen);
1808 if (!error) {
1809 struct in6_addr mask;
1810
1811 mask = ipv6_create_mask(plen);
1812 netdev_dummy_set_in6(netdev, &ip6, &mask);
1813 unixctl_command_reply(conn, "OK");
1814 } else {
1815 unixctl_command_reply_error(conn, error);
1816 free(error);
1817 }
1818 } else {
1819 unixctl_command_reply_error(conn, "Unknown Dummy Interface");
1820 }
1821
1822 netdev_close(netdev);
1823 }
1824
1825
1826 static void
1827 netdev_dummy_override(const char *type)
1828 {
1829 if (!netdev_unregister_provider(type)) {
1830 struct netdev_class *class;
1831 int error;
1832
1833 class = xmemdup(&dummy_class, sizeof dummy_class);
1834 class->type = xstrdup(type);
1835 error = netdev_register_provider(class);
1836 if (error) {
1837 VLOG_ERR("%s: failed to register netdev provider (%s)",
1838 type, ovs_strerror(error));
1839 free(CONST_CAST(char *, class->type));
1840 free(class);
1841 }
1842 }
1843 }
1844
/* Registers the dummy netdev classes and their unixctl commands.
 * 'level' selects how aggressively dummy devices replace real ones:
 * DUMMY_OVERRIDE_ALL overrides every already-registered provider type
 * except "patch"; DUMMY_OVERRIDE_SYSTEM overrides only "system"; any
 * other level overrides nothing.  The dummy, dummy-internal, and
 * dummy-pmd classes are always registered. */
void
netdev_dummy_register(enum dummy_level level)
{
    unixctl_command_register("netdev-dummy/receive",
                             "name [--qid queue_id] packet|flow [--len packet_len]",
                             2, INT_MAX, netdev_dummy_receive, NULL);
    unixctl_command_register("netdev-dummy/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dummy_set_admin_state, NULL);
    unixctl_command_register("netdev-dummy/conn-state",
                             "[netdev]", 0, 1,
                             netdev_dummy_conn_state, NULL);
    unixctl_command_register("netdev-dummy/ip4addr",
                             "[netdev] ipaddr/mask-prefix-len", 2, 2,
                             netdev_dummy_ip4addr, NULL);
    unixctl_command_register("netdev-dummy/ip6addr",
                             "[netdev] ip6addr", 2, 2,
                             netdev_dummy_ip6addr, NULL);

    if (level == DUMMY_OVERRIDE_ALL) {
        struct sset types;
        const char *type;

        sset_init(&types);
        netdev_enumerate_types(&types);
        SSET_FOR_EACH (type, &types) {
            /* "patch" is deliberately not overridden -- presumably patch
             * ports need their real implementation even in dummy mode;
             * NOTE(review): confirm against the patch vport code. */
            if (strcmp(type, "patch")) {
                netdev_dummy_override(type);
            }
        }
        sset_destroy(&types);
    } else if (level == DUMMY_OVERRIDE_SYSTEM) {
        netdev_dummy_override("system");
    }
    netdev_register_provider(&dummy_class);
    netdev_register_provider(&dummy_internal_class);
    netdev_register_provider(&dummy_pmd_class);

    /* Dummy mode still needs the tunnel vport classes. */
    netdev_vport_tunnel_register();
}